arch/powerpc/kvm/book3s_hv.c
1 /*
2  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
3  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
4  *
5  * Authors:
6  *    Paul Mackerras <paulus@au1.ibm.com>
7  *    Alexander Graf <agraf@suse.de>
8  *    Kevin Wolf <mail@kevin-wolf.de>
9  *
10  * Description: KVM functions specific to running on Book 3S
11  * processors in hypervisor mode (specifically POWER7 and later).
12  *
13  * This file is derived from arch/powerpc/kvm/book3s.c,
14  * by Alexander Graf <agraf@suse.de>.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License, version 2, as
18  * published by the Free Software Foundation.
19  */
20
21 #include <linux/kvm_host.h>
22 #include <linux/kernel.h>
23 #include <linux/err.h>
24 #include <linux/slab.h>
25 #include <linux/preempt.h>
26 #include <linux/sched/signal.h>
27 #include <linux/sched/stat.h>
28 #include <linux/delay.h>
29 #include <linux/export.h>
30 #include <linux/fs.h>
31 #include <linux/anon_inodes.h>
32 #include <linux/cpu.h>
33 #include <linux/cpumask.h>
34 #include <linux/spinlock.h>
35 #include <linux/page-flags.h>
36 #include <linux/srcu.h>
37 #include <linux/miscdevice.h>
38 #include <linux/debugfs.h>
39 #include <linux/gfp.h>
40 #include <linux/vmalloc.h>
41 #include <linux/highmem.h>
42 #include <linux/hugetlb.h>
43 #include <linux/kvm_irqfd.h>
44 #include <linux/irqbypass.h>
45 #include <linux/module.h>
46 #include <linux/compiler.h>
47 #include <linux/of.h>
48
49 #include <asm/reg.h>
50 #include <asm/ppc-opcode.h>
51 #include <asm/asm-prototypes.h>
52 #include <asm/disassemble.h>
53 #include <asm/cputable.h>
54 #include <asm/cacheflush.h>
55 #include <asm/tlbflush.h>
56 #include <linux/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/kvm_ppc.h>
59 #include <asm/kvm_book3s.h>
60 #include <asm/mmu_context.h>
61 #include <asm/lppaca.h>
62 #include <asm/processor.h>
63 #include <asm/cputhreads.h>
64 #include <asm/page.h>
65 #include <asm/hvcall.h>
66 #include <asm/switch_to.h>
67 #include <asm/smp.h>
68 #include <asm/dbell.h>
69 #include <asm/hmi.h>
70 #include <asm/pnv-pci.h>
71 #include <asm/mmu.h>
72 #include <asm/opal.h>
73 #include <asm/xics.h>
74 #include <asm/xive.h>
75
76 #include "book3s.h"
77
78 #define CREATE_TRACE_POINTS
79 #include "trace_hv.h"
80
81 /* #define EXIT_DEBUG */
82 /* #define EXIT_DEBUG_SIMPLE */
83 /* #define EXIT_DEBUG_INT */
84
85 /* Used to indicate that a guest page fault needs to be handled */
86 #define RESUME_PAGE_FAULT       (RESUME_GUEST | RESUME_FLAG_ARCH1)
87 /* Used to indicate that a guest passthrough interrupt needs to be handled */
88 #define RESUME_PASSTHROUGH      (RESUME_GUEST | RESUME_FLAG_ARCH2)
89
90 /* Used as a "null" value for timebase values */
91 #define TB_NIL  (~(u64)0)
92
93 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
94
95 static int dynamic_mt_modes = 6;
96 module_param(dynamic_mt_modes, int, 0644);
97 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
98 static int target_smt_mode;
99 module_param(target_smt_mode, int, 0644);
100 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
101
102 static bool indep_threads_mode = true;
103 module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
104 MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
105
106 #ifdef CONFIG_KVM_XICS
107 static struct kernel_param_ops module_param_ops = {
108         .set = param_set_int,
109         .get = param_get_int,
110 };
111
112 module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
113 MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");
114
115 module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
116 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
117 #endif
118
119 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
120 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
121
122 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
123                 int *ip)
124 {
125         int i = *ip;
126         struct kvm_vcpu *vcpu;
127
128         while (++i < MAX_SMT_THREADS) {
129                 vcpu = READ_ONCE(vc->runnable_threads[i]);
130                 if (vcpu) {
131                         *ip = i;
132                         return vcpu;
133                 }
134         }
135         return NULL;
136 }
137
138 /* Used to traverse the list of runnable threads for a given vcore */
139 #define for_each_runnable_thread(i, vcpu, vc) \
140         for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
141
142 static bool kvmppc_ipi_thread(int cpu)
143 {
144         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
145
146         /* On POWER9 we can use msgsnd to IPI any cpu */
147         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
148                 msg |= get_hard_smp_processor_id(cpu);
149                 smp_mb();
150                 __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
151                 return true;
152         }
153
154         /* On POWER8 for IPIs to threads in the same core, use msgsnd */
155         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
156                 preempt_disable();
157                 if (cpu_first_thread_sibling(cpu) ==
158                     cpu_first_thread_sibling(smp_processor_id())) {
159                         msg |= cpu_thread_in_core(cpu);
160                         smp_mb();
161                         __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
162                         preempt_enable();
163                         return true;
164                 }
165                 preempt_enable();
166         }
167
168 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
169         if (cpu >= 0 && cpu < nr_cpu_ids) {
170                 if (paca[cpu].kvm_hstate.xics_phys) {
171                         xics_wake_cpu(cpu);
172                         return true;
173                 }
174                 opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
175                 return true;
176         }
177 #endif
178
179         return false;
180 }
181
182 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
183 {
184         int cpu;
185         struct swait_queue_head *wqp;
186
187         wqp = kvm_arch_vcpu_wq(vcpu);
188         if (swq_has_sleeper(wqp)) {
189                 swake_up(wqp);
190                 ++vcpu->stat.halt_wakeup;
191         }
192
193         cpu = READ_ONCE(vcpu->arch.thread_cpu);
194         if (cpu >= 0 && kvmppc_ipi_thread(cpu))
195                 return;
196
197         /* CPU points to the first thread of the core */
198         cpu = vcpu->cpu;
199         if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
200                 smp_send_reschedule(cpu);
201 }
202
203 /*
204  * We use the vcpu_load/put functions to measure stolen time.
205  * Stolen time is counted as time when either the vcpu is able to
206  * run as part of a virtual core, but the task running the vcore
207  * is preempted or sleeping, or when the vcpu needs something done
208  * in the kernel by the task running the vcpu, but that task is
209  * preempted or sleeping.  Those two things have to be counted
210  * separately, since one of the vcpu tasks will take on the job
211  * of running the core, and the other vcpu tasks in the vcore will
212  * sleep waiting for it to do that, but that sleep shouldn't count
213  * as stolen time.
214  *
215  * Hence we accumulate stolen time when the vcpu can run as part of
216  * a vcore using vc->stolen_tb, and the stolen time when the vcpu
217  * needs its task to do other things in the kernel (for example,
218  * service a page fault) in busy_stolen.  We don't accumulate
219  * stolen time for a vcore when it is inactive, or for a vcpu
220  * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
221  * a misnomer; it means that the vcpu task is not executing in
222  * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
223  * the kernel.  We don't have any way of dividing up that time
224  * between time that the vcpu is genuinely stopped, time that
225  * the task is actively working on behalf of the vcpu, and time
226  * that the task is preempted, so we don't count any of it as
227  * stolen.
228  *
229  * Updates to busy_stolen are protected by arch.tbacct_lock;
230  * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
231  * lock.  The stolen times are measured in units of timebase ticks.
232  * (Note that the != TB_NIL checks below are purely defensive;
233  * they should never fail.)
234  */
235
236 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
237 {
238         unsigned long flags;
239
240         spin_lock_irqsave(&vc->stoltb_lock, flags);
241         vc->preempt_tb = mftb();
242         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
243 }
244
245 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
246 {
247         unsigned long flags;
248
249         spin_lock_irqsave(&vc->stoltb_lock, flags);
250         if (vc->preempt_tb != TB_NIL) {
251                 vc->stolen_tb += mftb() - vc->preempt_tb;
252                 vc->preempt_tb = TB_NIL;
253         }
254         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
255 }
256
257 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
258 {
259         struct kvmppc_vcore *vc = vcpu->arch.vcore;
260         unsigned long flags;
261
262         /*
263          * We can test vc->runner without taking the vcore lock,
264          * because only this task ever sets vc->runner to this
265          * vcpu, and once it is set to this vcpu, only this task
266          * ever sets it to NULL.
267          */
268         if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
269                 kvmppc_core_end_stolen(vc);
270
271         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
272         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
273             vcpu->arch.busy_preempt != TB_NIL) {
274                 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
275                 vcpu->arch.busy_preempt = TB_NIL;
276         }
277         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
278 }
279
280 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
281 {
282         struct kvmppc_vcore *vc = vcpu->arch.vcore;
283         unsigned long flags;
284
285         if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
286                 kvmppc_core_start_stolen(vc);
287
288         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
289         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
290                 vcpu->arch.busy_preempt = mftb();
291         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
292 }
293
294 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
295 {
296         /*
297          * Check for illegal transactional state bit combination
298          * and if we find it, force the TS field to a safe state.
299          */
300         if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
301                 msr &= ~MSR_TS_MASK;
302         vcpu->arch.shregs.msr = msr;
303         kvmppc_end_cede(vcpu);
304 }
305
306 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
307 {
308         vcpu->arch.pvr = pvr;
309 }
310
311 /* Dummy value used in computing PCR value below */
312 #define PCR_ARCH_300    (PCR_ARCH_207 << 1)
313
314 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
315 {
316         unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
317         struct kvmppc_vcore *vc = vcpu->arch.vcore;
318
319         /* We can (emulate) our own architecture version and anything older */
320         if (cpu_has_feature(CPU_FTR_ARCH_300))
321                 host_pcr_bit = PCR_ARCH_300;
322         else if (cpu_has_feature(CPU_FTR_ARCH_207S))
323                 host_pcr_bit = PCR_ARCH_207;
324         else if (cpu_has_feature(CPU_FTR_ARCH_206))
325                 host_pcr_bit = PCR_ARCH_206;
326         else
327                 host_pcr_bit = PCR_ARCH_205;
328
329         /* Determine lowest PCR bit needed to run guest in given PVR level */
330         guest_pcr_bit = host_pcr_bit;
331         if (arch_compat) {
332                 switch (arch_compat) {
333                 case PVR_ARCH_205:
334                         guest_pcr_bit = PCR_ARCH_205;
335                         break;
336                 case PVR_ARCH_206:
337                 case PVR_ARCH_206p:
338                         guest_pcr_bit = PCR_ARCH_206;
339                         break;
340                 case PVR_ARCH_207:
341                         guest_pcr_bit = PCR_ARCH_207;
342                         break;
343                 case PVR_ARCH_300:
344                         guest_pcr_bit = PCR_ARCH_300;
345                         break;
346                 default:
347                         return -EINVAL;
348                 }
349         }
350
351         /* Check requested PCR bits don't exceed our capabilities */
352         if (guest_pcr_bit > host_pcr_bit)
353                 return -EINVAL;
354
355         spin_lock(&vc->lock);
356         vc->arch_compat = arch_compat;
357         /* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
358         vc->pcr = host_pcr_bit - guest_pcr_bit;
359         spin_unlock(&vc->lock);
360
361         return 0;
362 }
363
364 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
365 {
366         int r;
367
368         pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
369         pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
370                vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
371         for (r = 0; r < 16; ++r)
372                 pr_err("r%2d = %.16lx  r%d = %.16lx\n",
373                        r, kvmppc_get_gpr(vcpu, r),
374                        r+16, kvmppc_get_gpr(vcpu, r+16));
375         pr_err("ctr = %.16lx  lr  = %.16lx\n",
376                vcpu->arch.ctr, vcpu->arch.lr);
377         pr_err("srr0 = %.16llx srr1 = %.16llx\n",
378                vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
379         pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
380                vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
381         pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
382                vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
383         pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
384                vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
385         pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
386         pr_err("fault dar = %.16lx dsisr = %.8x\n",
387                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
388         pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
389         for (r = 0; r < vcpu->arch.slb_max; ++r)
390                 pr_err("  ESID = %.16llx VSID = %.16llx\n",
391                        vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
392         pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
393                vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
394                vcpu->arch.last_inst);
395 }
396
397 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
398 {
399         struct kvm_vcpu *ret;
400
401         mutex_lock(&kvm->lock);
402         ret = kvm_get_vcpu_by_id(kvm, id);
403         mutex_unlock(&kvm->lock);
404         return ret;
405 }
406
407 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
408 {
409         vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
410         vpa->yield_count = cpu_to_be32(1);
411 }
412
413 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
414                    unsigned long addr, unsigned long len)
415 {
416         /* check address is cacheline aligned */
417         if (addr & (L1_CACHE_BYTES - 1))
418                 return -EINVAL;
419         spin_lock(&vcpu->arch.vpa_update_lock);
420         if (v->next_gpa != addr || v->len != len) {
421                 v->next_gpa = addr;
422                 v->len = addr ? len : 0;
423                 v->update_pending = 1;
424         }
425         spin_unlock(&vcpu->arch.vpa_update_lock);
426         return 0;
427 }
428
429 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
430 struct reg_vpa {
431         u32 dummy;
432         union {
433                 __be16 hword;
434                 __be32 word;
435         } length;
436 };
437
438 static int vpa_is_registered(struct kvmppc_vpa *vpap)
439 {
440         if (vpap->update_pending)
441                 return vpap->next_gpa != 0;
442         return vpap->pinned_addr != NULL;
443 }
444
445 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
446                                        unsigned long flags,
447                                        unsigned long vcpuid, unsigned long vpa)
448 {
449         struct kvm *kvm = vcpu->kvm;
450         unsigned long len, nb;
451         void *va;
452         struct kvm_vcpu *tvcpu;
453         int err;
454         int subfunc;
455         struct kvmppc_vpa *vpap;
456
457         tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
458         if (!tvcpu)
459                 return H_PARAMETER;
460
461         subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
462         if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
463             subfunc == H_VPA_REG_SLB) {
464                 /* Registering new area - address must be cache-line aligned */
465                 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
466                         return H_PARAMETER;
467
468                 /* convert logical addr to kernel addr and read length */
469                 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
470                 if (va == NULL)
471                         return H_PARAMETER;
472                 if (subfunc == H_VPA_REG_VPA)
473                         len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
474                 else
475                         len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
476                 kvmppc_unpin_guest_page(kvm, va, vpa, false);
477
478                 /* Check length */
479                 if (len > nb || len < sizeof(struct reg_vpa))
480                         return H_PARAMETER;
481         } else {
482                 vpa = 0;
483                 len = 0;
484         }
485
486         err = H_PARAMETER;
487         vpap = NULL;
488         spin_lock(&tvcpu->arch.vpa_update_lock);
489
490         switch (subfunc) {
491         case H_VPA_REG_VPA:             /* register VPA */
492                 /*
493                  * The size of our lppaca is 1kB because of the way we align
494                  * it for the guest to avoid crossing a 4kB boundary. We only
495                  * use 640 bytes of the structure though, so we should accept
496                  * clients that set a size of 640.
497                  */
498                 if (len < 640)
499                         break;
500                 vpap = &tvcpu->arch.vpa;
501                 err = 0;
502                 break;
503
504         case H_VPA_REG_DTL:             /* register DTL */
505                 if (len < sizeof(struct dtl_entry))
506                         break;
507                 len -= len % sizeof(struct dtl_entry);
508
509                 /* Check that they have previously registered a VPA */
510                 err = H_RESOURCE;
511                 if (!vpa_is_registered(&tvcpu->arch.vpa))
512                         break;
513
514                 vpap = &tvcpu->arch.dtl;
515                 err = 0;
516                 break;
517
518         case H_VPA_REG_SLB:             /* register SLB shadow buffer */
519                 /* Check that they have previously registered a VPA */
520                 err = H_RESOURCE;
521                 if (!vpa_is_registered(&tvcpu->arch.vpa))
522                         break;
523
524                 vpap = &tvcpu->arch.slb_shadow;
525                 err = 0;
526                 break;
527
528         case H_VPA_DEREG_VPA:           /* deregister VPA */
529                 /* Check they don't still have a DTL or SLB buf registered */
530                 err = H_RESOURCE;
531                 if (vpa_is_registered(&tvcpu->arch.dtl) ||
532                     vpa_is_registered(&tvcpu->arch.slb_shadow))
533                         break;
534
535                 vpap = &tvcpu->arch.vpa;
536                 err = 0;
537                 break;
538
539         case H_VPA_DEREG_DTL:           /* deregister DTL */
540                 vpap = &tvcpu->arch.dtl;
541                 err = 0;
542                 break;
543
544         case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
545                 vpap = &tvcpu->arch.slb_shadow;
546                 err = 0;
547                 break;
548         }
549
550         if (vpap) {
551                 vpap->next_gpa = vpa;
552                 vpap->len = len;
553                 vpap->update_pending = 1;
554         }
555
556         spin_unlock(&tvcpu->arch.vpa_update_lock);
557
558         return err;
559 }
560
561 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
562 {
563         struct kvm *kvm = vcpu->kvm;
564         void *va;
565         unsigned long nb;
566         unsigned long gpa;
567
568         /*
569          * We need to pin the page pointed to by vpap->next_gpa,
570          * but we can't call kvmppc_pin_guest_page under the lock
571          * as it does get_user_pages() and down_read().  So we
572          * have to drop the lock, pin the page, then get the lock
573          * again and check that a new area didn't get registered
574          * in the meantime.
575          */
576         for (;;) {
577                 gpa = vpap->next_gpa;
578                 spin_unlock(&vcpu->arch.vpa_update_lock);
579                 va = NULL;
580                 nb = 0;
581                 if (gpa)
582                         va = kvmppc_pin_guest_page(kvm, gpa, &nb);
583                 spin_lock(&vcpu->arch.vpa_update_lock);
584                 if (gpa == vpap->next_gpa)
585                         break;
586                 /* sigh... unpin that one and try again */
587                 if (va)
588                         kvmppc_unpin_guest_page(kvm, va, gpa, false);
589         }
590
591         vpap->update_pending = 0;
592         if (va && nb < vpap->len) {
593                 /*
594                  * If it's now too short, it must be that userspace
595                  * has changed the mappings underlying guest memory,
596                  * so unregister the region.
597                  */
598                 kvmppc_unpin_guest_page(kvm, va, gpa, false);
599                 va = NULL;
600         }
601         if (vpap->pinned_addr)
602                 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
603                                         vpap->dirty);
604         vpap->gpa = gpa;
605         vpap->pinned_addr = va;
606         vpap->dirty = false;
607         if (va)
608                 vpap->pinned_end = va + vpap->len;
609 }
610
611 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
612 {
613         if (!(vcpu->arch.vpa.update_pending ||
614               vcpu->arch.slb_shadow.update_pending ||
615               vcpu->arch.dtl.update_pending))
616                 return;
617
618         spin_lock(&vcpu->arch.vpa_update_lock);
619         if (vcpu->arch.vpa.update_pending) {
620                 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
621                 if (vcpu->arch.vpa.pinned_addr)
622                         init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
623         }
624         if (vcpu->arch.dtl.update_pending) {
625                 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
626                 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
627                 vcpu->arch.dtl_index = 0;
628         }
629         if (vcpu->arch.slb_shadow.update_pending)
630                 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
631         spin_unlock(&vcpu->arch.vpa_update_lock);
632 }
633
634 /*
635  * Return the accumulated stolen time for the vcore up until `now'.
636  * The caller should hold the vcore lock.
637  */
638 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
639 {
640         u64 p;
641         unsigned long flags;
642
643         spin_lock_irqsave(&vc->stoltb_lock, flags);
644         p = vc->stolen_tb;
645         if (vc->vcore_state != VCORE_INACTIVE &&
646             vc->preempt_tb != TB_NIL)
647                 p += now - vc->preempt_tb;
648         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
649         return p;
650 }
651
652 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
653                                     struct kvmppc_vcore *vc)
654 {
655         struct dtl_entry *dt;
656         struct lppaca *vpa;
657         unsigned long stolen;
658         unsigned long core_stolen;
659         u64 now;
660         unsigned long flags;
661
662         dt = vcpu->arch.dtl_ptr;
663         vpa = vcpu->arch.vpa.pinned_addr;
664         now = mftb();
665         core_stolen = vcore_stolen_time(vc, now);
666         stolen = core_stolen - vcpu->arch.stolen_logged;
667         vcpu->arch.stolen_logged = core_stolen;
668         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
669         stolen += vcpu->arch.busy_stolen;
670         vcpu->arch.busy_stolen = 0;
671         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
672         if (!dt || !vpa)
673                 return;
674         memset(dt, 0, sizeof(struct dtl_entry));
675         dt->dispatch_reason = 7;
676         dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
677         dt->timebase = cpu_to_be64(now + vc->tb_offset);
678         dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
679         dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
680         dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
681         ++dt;
682         if (dt == vcpu->arch.dtl.pinned_end)
683                 dt = vcpu->arch.dtl.pinned_addr;
684         vcpu->arch.dtl_ptr = dt;
685         /* order writing *dt vs. writing vpa->dtl_idx */
686         smp_wmb();
687         vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
688         vcpu->arch.dtl.dirty = true;
689 }
690
691 /* See if there is a doorbell interrupt pending for a vcpu */
692 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
693 {
694         int thr;
695         struct kvmppc_vcore *vc;
696
697         if (vcpu->arch.doorbell_request)
698                 return true;
699         /*
700          * Ensure that the read of vcore->dpdes comes after the read
701          * of vcpu->doorbell_request.  This barrier matches the
702          * lwsync in book3s_hv_rmhandlers.S just before the
703          * fast_guest_return label.
704          */
705         smp_rmb();
706         vc = vcpu->arch.vcore;
707         thr = vcpu->vcpu_id - vc->first_vcpuid;
708         return !!(vc->dpdes & (1 << thr));
709 }
710
711 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
712 {
713         if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
714                 return true;
715         if ((!vcpu->arch.vcore->arch_compat) &&
716             cpu_has_feature(CPU_FTR_ARCH_207S))
717                 return true;
718         return false;
719 }
720
721 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
722                              unsigned long resource, unsigned long value1,
723                              unsigned long value2)
724 {
725         switch (resource) {
726         case H_SET_MODE_RESOURCE_SET_CIABR:
727                 if (!kvmppc_power8_compatible(vcpu))
728                         return H_P2;
729                 if (value2)
730                         return H_P4;
731                 if (mflags)
732                         return H_UNSUPPORTED_FLAG_START;
733                 /* Guests can't breakpoint the hypervisor */
734                 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
735                         return H_P3;
736                 vcpu->arch.ciabr  = value1;
737                 return H_SUCCESS;
738         case H_SET_MODE_RESOURCE_SET_DAWR:
739                 if (!kvmppc_power8_compatible(vcpu))
740                         return H_P2;
741                 if (mflags)
742                         return H_UNSUPPORTED_FLAG_START;
743                 if (value2 & DABRX_HYP)
744                         return H_P4;
745                 vcpu->arch.dawr  = value1;
746                 vcpu->arch.dawrx = value2;
747                 return H_SUCCESS;
748         default:
749                 return H_TOO_HARD;
750         }
751 }
752
753 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
754 {
755         struct kvmppc_vcore *vcore = target->arch.vcore;
756
757         /*
758          * We expect to have been called by the real mode handler
759          * (kvmppc_rm_h_confer()) which would have directly returned
760          * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
761          * have useful work to do and should not confer) so we don't
762          * recheck that here.
763          */
764
765         spin_lock(&vcore->lock);
766         if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
767             vcore->vcore_state != VCORE_INACTIVE &&
768             vcore->runner)
769                 target = vcore->runner;
770         spin_unlock(&vcore->lock);
771
772         return kvm_vcpu_yield_to(target);
773 }
774
775 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
776 {
777         int yield_count = 0;
778         struct lppaca *lppaca;
779
780         spin_lock(&vcpu->arch.vpa_update_lock);
781         lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
782         if (lppaca)
783                 yield_count = be32_to_cpu(lppaca->yield_count);
784         spin_unlock(&vcpu->arch.vpa_update_lock);
785         return yield_count;
786 }
787
788 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
789 {
790         unsigned long req = kvmppc_get_gpr(vcpu, 3);
791         unsigned long target, ret = H_SUCCESS;
792         int yield_count;
793         struct kvm_vcpu *tvcpu;
794         int idx, rc;
795
796         if (req <= MAX_HCALL_OPCODE &&
797             !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
798                 return RESUME_HOST;
799
800         switch (req) {
801         case H_CEDE:
802                 break;
803         case H_PROD:
804                 target = kvmppc_get_gpr(vcpu, 4);
805                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
806                 if (!tvcpu) {
807                         ret = H_PARAMETER;
808                         break;
809                 }
810                 tvcpu->arch.prodded = 1;
811                 smp_mb();
812                 if (tvcpu->arch.ceded)
813                         kvmppc_fast_vcpu_kick_hv(tvcpu);
814                 break;
815         case H_CONFER:
816                 target = kvmppc_get_gpr(vcpu, 4);
817                 if (target == -1)
818                         break;
819                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
820                 if (!tvcpu) {
821                         ret = H_PARAMETER;
822                         break;
823                 }
824                 yield_count = kvmppc_get_gpr(vcpu, 5);
825                 if (kvmppc_get_yield_count(tvcpu) != yield_count)
826                         break;
827                 kvm_arch_vcpu_yield_to(tvcpu);
828                 break;
829         case H_REGISTER_VPA:
830                 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
831                                         kvmppc_get_gpr(vcpu, 5),
832                                         kvmppc_get_gpr(vcpu, 6));
833                 break;
834         case H_RTAS:
835                 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
836                         return RESUME_HOST;
837
838                 idx = srcu_read_lock(&vcpu->kvm->srcu);
839                 rc = kvmppc_rtas_hcall(vcpu);
840                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
841
842                 if (rc == -ENOENT)
843                         return RESUME_HOST;
844                 else if (rc == 0)
845                         break;
846
847                 /* Send the error out to userspace via KVM_RUN */
848                 return rc;
849         case H_LOGICAL_CI_LOAD:
850                 ret = kvmppc_h_logical_ci_load(vcpu);
851                 if (ret == H_TOO_HARD)
852                         return RESUME_HOST;
853                 break;
854         case H_LOGICAL_CI_STORE:
855                 ret = kvmppc_h_logical_ci_store(vcpu);
856                 if (ret == H_TOO_HARD)
857                         return RESUME_HOST;
858                 break;
859         case H_SET_MODE:
860                 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
861                                         kvmppc_get_gpr(vcpu, 5),
862                                         kvmppc_get_gpr(vcpu, 6),
863                                         kvmppc_get_gpr(vcpu, 7));
864                 if (ret == H_TOO_HARD)
865                         return RESUME_HOST;
866                 break;
867         case H_XIRR:
868         case H_CPPR:
869         case H_EOI:
870         case H_IPI:
871         case H_IPOLL:
872         case H_XIRR_X:
873                 if (kvmppc_xics_enabled(vcpu)) {
874                         if (xive_enabled()) {
875                                 ret = H_NOT_AVAILABLE;
876                                 return RESUME_GUEST;
877                         }
878                         ret = kvmppc_xics_hcall(vcpu, req);
879                         break;
880                 }
881                 return RESUME_HOST;
882         case H_PUT_TCE:
883                 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
884                                                 kvmppc_get_gpr(vcpu, 5),
885                                                 kvmppc_get_gpr(vcpu, 6));
886                 if (ret == H_TOO_HARD)
887                         return RESUME_HOST;
888                 break;
889         case H_PUT_TCE_INDIRECT:
890                 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
891                                                 kvmppc_get_gpr(vcpu, 5),
892                                                 kvmppc_get_gpr(vcpu, 6),
893                                                 kvmppc_get_gpr(vcpu, 7));
894                 if (ret == H_TOO_HARD)
895                         return RESUME_HOST;
896                 break;
897         case H_STUFF_TCE:
898                 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
899                                                 kvmppc_get_gpr(vcpu, 5),
900                                                 kvmppc_get_gpr(vcpu, 6),
901                                                 kvmppc_get_gpr(vcpu, 7));
902                 if (ret == H_TOO_HARD)
903                         return RESUME_HOST;
904                 break;
905         default:
906                 return RESUME_HOST;
907         }
908         kvmppc_set_gpr(vcpu, 3, ret);
909         vcpu->arch.hcall_needed = 0;
910         return RESUME_GUEST;
911 }
912
913 static int kvmppc_hcall_impl_hv(unsigned long cmd)
914 {
915         switch (cmd) {
916         case H_CEDE:
917         case H_PROD:
918         case H_CONFER:
919         case H_REGISTER_VPA:
920         case H_SET_MODE:
921         case H_LOGICAL_CI_LOAD:
922         case H_LOGICAL_CI_STORE:
923 #ifdef CONFIG_KVM_XICS
924         case H_XIRR:
925         case H_CPPR:
926         case H_EOI:
927         case H_IPI:
928         case H_IPOLL:
929         case H_XIRR_X:
930 #endif
931                 return 1;
932         }
933
934         /* See if it's in the real-mode table */
935         return kvmppc_hcall_impl_hv_realmode(cmd);
936 }
937
938 static int kvmppc_emulate_debug_inst(struct kvm_run *run,
939                                         struct kvm_vcpu *vcpu)
940 {
941         u32 last_inst;
942
943         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
944                                         EMULATE_DONE) {
945                 /*
946                  * Fetch failed, so return to guest and
947                  * try executing it again.
948                  */
949                 return RESUME_GUEST;
950         }
951
952         if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
953                 run->exit_reason = KVM_EXIT_DEBUG;
954                 run->debug.arch.address = kvmppc_get_pc(vcpu);
955                 return RESUME_HOST;
956         } else {
957                 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
958                 return RESUME_GUEST;
959         }
960 }
961
962 static void do_nothing(void *x)
963 {
964 }
965
966 static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
967 {
968         int thr, cpu, pcpu, nthreads;
969         struct kvm_vcpu *v;
970         unsigned long dpdes;
971
972         nthreads = vcpu->kvm->arch.emul_smt_mode;
973         dpdes = 0;
974         cpu = vcpu->vcpu_id & ~(nthreads - 1);
975         for (thr = 0; thr < nthreads; ++thr, ++cpu) {
976                 v = kvmppc_find_vcpu(vcpu->kvm, cpu);
977                 if (!v)
978                         continue;
979                 /*
980                  * If the vcpu is currently running on a physical cpu thread,
981                  * interrupt it in order to pull it out of the guest briefly,
982                  * which will update its vcore->dpdes value.
983                  */
984                 pcpu = READ_ONCE(v->cpu);
985                 if (pcpu >= 0)
986                         smp_call_function_single(pcpu, do_nothing, NULL, 1);
987                 if (kvmppc_doorbell_pending(v))
988                         dpdes |= 1 << thr;
989         }
990         return dpdes;
991 }
992
993 /*
994  * On POWER9, emulate doorbell-related instructions in order to
995  * give the guest the illusion of running on a multi-threaded core.
996  * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
997  * and mfspr DPDES.
998  */
999 static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
1000 {
1001         u32 inst, rb, thr;
1002         unsigned long arg;
1003         struct kvm *kvm = vcpu->kvm;
1004         struct kvm_vcpu *tvcpu;
1005
1006         if (!cpu_has_feature(CPU_FTR_ARCH_300))
1007                 return EMULATE_FAIL;
1008         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
1009                 return RESUME_GUEST;
1010         if (get_op(inst) != 31)
1011                 return EMULATE_FAIL;
1012         rb = get_rb(inst);
1013         thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1014         switch (get_xop(inst)) {
1015         case OP_31_XOP_MSGSNDP:
1016                 arg = kvmppc_get_gpr(vcpu, rb);
1017                 if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1018                         break;
1019                 arg &= 0x3f;
1020                 if (arg >= kvm->arch.emul_smt_mode)
1021                         break;
1022                 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
1023                 if (!tvcpu)
1024                         break;
1025                 if (!tvcpu->arch.doorbell_request) {
1026                         tvcpu->arch.doorbell_request = 1;
1027                         kvmppc_fast_vcpu_kick_hv(tvcpu);
1028                 }
1029                 break;
1030         case OP_31_XOP_MSGCLRP:
1031                 arg = kvmppc_get_gpr(vcpu, rb);
1032                 if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1033                         break;
1034                 vcpu->arch.vcore->dpdes = 0;
1035                 vcpu->arch.doorbell_request = 0;
1036                 break;
1037         case OP_31_XOP_MFSPR:
1038                 switch (get_sprn(inst)) {
1039                 case SPRN_TIR:
1040                         arg = thr;
1041                         break;
1042                 case SPRN_DPDES:
1043                         arg = kvmppc_read_dpdes(vcpu);
1044                         break;
1045                 default:
1046                         return EMULATE_FAIL;
1047                 }
1048                 kvmppc_set_gpr(vcpu, get_rt(inst), arg);
1049                 break;
1050         default:
1051                 return EMULATE_FAIL;
1052         }
1053         kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
1054         return RESUME_GUEST;
1055 }
1056
1057 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
1058                                  struct task_struct *tsk)
1059 {
1060         int r = RESUME_HOST;
1061
1062         vcpu->stat.sum_exits++;
1063
1064         /*
1065          * This can happen if an interrupt occurs in the last stages
1066          * of guest entry or the first stages of guest exit (i.e. after
1067          * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1068          * and before setting it to KVM_GUEST_MODE_HOST_HV).
1069          * That can happen due to a bug, or due to a machine check
1070          * occurring at just the wrong time.
1071          */
1072         if (vcpu->arch.shregs.msr & MSR_HV) {
1073                 printk(KERN_EMERG "KVM trap in HV mode!\n");
1074                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1075                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
1076                         vcpu->arch.shregs.msr);
1077                 kvmppc_dump_regs(vcpu);
1078                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1079                 run->hw.hardware_exit_reason = vcpu->arch.trap;
1080                 return RESUME_HOST;
1081         }
1082         run->exit_reason = KVM_EXIT_UNKNOWN;
1083         run->ready_for_interrupt_injection = 1;
1084         switch (vcpu->arch.trap) {
1085         /* We're good on these - the host merely wanted to get our attention */
1086         case BOOK3S_INTERRUPT_HV_DECREMENTER:
1087                 vcpu->stat.dec_exits++;
1088                 r = RESUME_GUEST;
1089                 break;
1090         case BOOK3S_INTERRUPT_EXTERNAL:
1091         case BOOK3S_INTERRUPT_H_DOORBELL:
1092         case BOOK3S_INTERRUPT_H_VIRT:
1093                 vcpu->stat.ext_intr_exits++;
1094                 r = RESUME_GUEST;
1095                 break;
1096         /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest. */
1097         case BOOK3S_INTERRUPT_HMI:
1098         case BOOK3S_INTERRUPT_PERFMON:
1099         case BOOK3S_INTERRUPT_SYSTEM_RESET:
1100                 r = RESUME_GUEST;
1101                 break;
1102         case BOOK3S_INTERRUPT_MACHINE_CHECK:
1103                 /* Exit to userspace with KVM_EXIT_NMI as exit reason */
1104                 run->exit_reason = KVM_EXIT_NMI;
1105                 run->hw.hardware_exit_reason = vcpu->arch.trap;
1106                 /* Clear out the old NMI status from run->flags */
1107                 run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
1108                 /* Now set the NMI status */
1109                 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1110                         run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
1111                 else
1112                         run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
1113
1114                 r = RESUME_HOST;
1115                 /* Print the MCE event to host console. */
1116                 machine_check_print_event_info(&vcpu->arch.mce_evt, false);
1117                 break;
1118         case BOOK3S_INTERRUPT_PROGRAM:
1119         {
1120                 ulong flags;
1121                 /*
1122                  * Normally program interrupts are delivered directly
1123                  * to the guest by the hardware, but we can get here
1124                  * as a result of a hypervisor emulation interrupt
1125                  * (e40) getting turned into a 700 by BML RTAS.
1126                  */
1127                 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
1128                 kvmppc_core_queue_program(vcpu, flags);
1129                 r = RESUME_GUEST;
1130                 break;
1131         }
1132         case BOOK3S_INTERRUPT_SYSCALL:
1133         {
1134                 /* hcall - punt to userspace */
1135                 int i;
1136
1137                 /* hypercall with MSR_PR has already been handled in rmode,
1138                  * and never reaches here.
1139                  */
1140
1141                 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
1142                 for (i = 0; i < 9; ++i)
1143                         run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
1144                 run->exit_reason = KVM_EXIT_PAPR_HCALL;
1145                 vcpu->arch.hcall_needed = 1;
1146                 r = RESUME_HOST;
1147                 break;
1148         }
1149         /*
1150          * We get these next two if the guest accesses a page which it thinks
1151          * it has mapped but which is not actually present, either because
1152          * it is for an emulated I/O device or because the corresponding
1153          * host page has been paged out.  Any other HDSI/HISI interrupts
1154          * have been handled already.
1155          */
1156         case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1157                 r = RESUME_PAGE_FAULT;
1158                 break;
1159         case BOOK3S_INTERRUPT_H_INST_STORAGE:
1160                 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1161                 vcpu->arch.fault_dsisr = 0;
1162                 r = RESUME_PAGE_FAULT;
1163                 break;
1164         /*
1165          * This occurs if the guest executes an illegal instruction.
1166          * If guest debug is disabled, generate a program interrupt
1167          * to the guest. If guest debug is enabled, we need to check
1168          * whether the instruction is a software breakpoint instruction
1169          * and return to the guest or the host accordingly.
1170          */
1171         case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
1172                 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1173                         vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1174                                 swab32(vcpu->arch.emul_inst) :
1175                                 vcpu->arch.emul_inst;
1176                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
1177                         r = kvmppc_emulate_debug_inst(run, vcpu);
1178                 } else {
1179                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1180                         r = RESUME_GUEST;
1181                 }
1182                 break;
1183         /*
1184          * This occurs if the guest (kernel or userspace) does something that
1185          * is prohibited by HFSCR.
1186          * On POWER9, this could be a doorbell instruction that we need
1187          * to emulate.
1188          * Otherwise, we just generate a program interrupt to the guest.
1189          */
1190         case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
1191                 r = EMULATE_FAIL;
1192                 if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
1193                         r = kvmppc_emulate_doorbell_instr(vcpu);
1194                 if (r == EMULATE_FAIL) {
1195                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1196                         r = RESUME_GUEST;
1197                 }
1198                 break;
1199         case BOOK3S_INTERRUPT_HV_RM_HARD:
1200                 r = RESUME_PASSTHROUGH;
1201                 break;
1202         default:
1203                 kvmppc_dump_regs(vcpu);
1204                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1205                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
1206                         vcpu->arch.shregs.msr);
1207                 run->hw.hardware_exit_reason = vcpu->arch.trap;
1208                 r = RESUME_HOST;
1209                 break;
1210         }
1211
1212         return r;
1213 }
1214
1215 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1216                                             struct kvm_sregs *sregs)
1217 {
1218         int i;
1219
1220         memset(sregs, 0, sizeof(struct kvm_sregs));
1221         sregs->pvr = vcpu->arch.pvr;
1222         for (i = 0; i < vcpu->arch.slb_max; i++) {
1223                 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1224                 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1225         }
1226
1227         return 0;
1228 }
1229
1230 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1231                                             struct kvm_sregs *sregs)
1232 {
1233         int i, j;
1234
1235         /* Only accept the same PVR as the host's, since we can't spoof it */
1236         if (sregs->pvr != vcpu->arch.pvr)
1237                 return -EINVAL;
1238
1239         j = 0;
1240         for (i = 0; i < vcpu->arch.slb_nr; i++) {
1241                 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1242                         vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1243                         vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1244                         ++j;
1245                 }
1246         }
1247         vcpu->arch.slb_max = j;
1248
1249         return 0;
1250 }
1251
1252 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1253                 bool preserve_top32)
1254 {
1255         struct kvm *kvm = vcpu->kvm;
1256         struct kvmppc_vcore *vc = vcpu->arch.vcore;
1257         u64 mask;
1258
1259         mutex_lock(&kvm->lock);
1260         spin_lock(&vc->lock);
1261         /*
1262          * If ILE (interrupt little-endian) has changed, update the
1263          * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1264          */
1265         if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1266                 struct kvm_vcpu *vcpu;
1267                 int i;
1268
1269                 kvm_for_each_vcpu(i, vcpu, kvm) {
1270                         if (vcpu->arch.vcore != vc)
1271                                 continue;
1272                         if (new_lpcr & LPCR_ILE)
1273                                 vcpu->arch.intr_msr |= MSR_LE;
1274                         else
1275                                 vcpu->arch.intr_msr &= ~MSR_LE;
1276                 }
1277         }
1278
1279         /*
1280          * Userspace can only modify DPFD (default prefetch depth),
1281          * ILE (interrupt little-endian) and TC (translation control).
1282          * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
1283          */
1284         mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
1285         if (cpu_has_feature(CPU_FTR_ARCH_207S))
1286                 mask |= LPCR_AIL;
1287         /*
1288          * On POWER9, allow userspace to enable large decrementer for the
1289          * guest, whether or not the host has it enabled.
1290          */
1291         if (cpu_has_feature(CPU_FTR_ARCH_300))
1292                 mask |= LPCR_LD;
1293
1294         /* Broken 32-bit version of LPCR must not clear top bits */
1295         if (preserve_top32)
1296                 mask &= 0xFFFFFFFF;
1297         vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
1298         spin_unlock(&vc->lock);
1299         mutex_unlock(&kvm->lock);
1300 }
1301
1302 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1303                                  union kvmppc_one_reg *val)
1304 {
1305         int r = 0;
1306         long int i;
1307
1308         switch (id) {
1309         case KVM_REG_PPC_DEBUG_INST:
1310                 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1311                 break;
1312         case KVM_REG_PPC_HIOR:
1313                 *val = get_reg_val(id, 0);
1314                 break;
1315         case KVM_REG_PPC_DABR:
1316                 *val = get_reg_val(id, vcpu->arch.dabr);
1317                 break;
1318         case KVM_REG_PPC_DABRX:
1319                 *val = get_reg_val(id, vcpu->arch.dabrx);
1320                 break;
1321         case KVM_REG_PPC_DSCR:
1322                 *val = get_reg_val(id, vcpu->arch.dscr);
1323                 break;
1324         case KVM_REG_PPC_PURR:
1325                 *val = get_reg_val(id, vcpu->arch.purr);
1326                 break;
1327         case KVM_REG_PPC_SPURR:
1328                 *val = get_reg_val(id, vcpu->arch.spurr);
1329                 break;
1330         case KVM_REG_PPC_AMR:
1331                 *val = get_reg_val(id, vcpu->arch.amr);
1332                 break;
1333         case KVM_REG_PPC_UAMOR:
1334                 *val = get_reg_val(id, vcpu->arch.uamor);
1335                 break;
1336         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1337                 i = id - KVM_REG_PPC_MMCR0;
1338                 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1339                 break;
1340         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1341                 i = id - KVM_REG_PPC_PMC1;
1342                 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1343                 break;
1344         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1345                 i = id - KVM_REG_PPC_SPMC1;
1346                 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1347                 break;
1348         case KVM_REG_PPC_SIAR:
1349                 *val = get_reg_val(id, vcpu->arch.siar);
1350                 break;
1351         case KVM_REG_PPC_SDAR:
1352                 *val = get_reg_val(id, vcpu->arch.sdar);
1353                 break;
1354         case KVM_REG_PPC_SIER:
1355                 *val = get_reg_val(id, vcpu->arch.sier);
1356                 break;
1357         case KVM_REG_PPC_IAMR:
1358                 *val = get_reg_val(id, vcpu->arch.iamr);
1359                 break;
1360         case KVM_REG_PPC_PSPB:
1361                 *val = get_reg_val(id, vcpu->arch.pspb);
1362                 break;
1363         case KVM_REG_PPC_DPDES:
1364                 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
1365                 break;
1366         case KVM_REG_PPC_VTB:
1367                 *val = get_reg_val(id, vcpu->arch.vcore->vtb);
1368                 break;
1369         case KVM_REG_PPC_DAWR:
1370                 *val = get_reg_val(id, vcpu->arch.dawr);
1371                 break;
1372         case KVM_REG_PPC_DAWRX:
1373                 *val = get_reg_val(id, vcpu->arch.dawrx);
1374                 break;
1375         case KVM_REG_PPC_CIABR:
1376                 *val = get_reg_val(id, vcpu->arch.ciabr);
1377                 break;
1378         case KVM_REG_PPC_CSIGR:
1379                 *val = get_reg_val(id, vcpu->arch.csigr);
1380                 break;
1381         case KVM_REG_PPC_TACR:
1382                 *val = get_reg_val(id, vcpu->arch.tacr);
1383                 break;
1384         case KVM_REG_PPC_TCSCR:
1385                 *val = get_reg_val(id, vcpu->arch.tcscr);
1386                 break;
1387         case KVM_REG_PPC_PID:
1388                 *val = get_reg_val(id, vcpu->arch.pid);
1389                 break;
1390         case KVM_REG_PPC_ACOP:
1391                 *val = get_reg_val(id, vcpu->arch.acop);
1392                 break;
1393         case KVM_REG_PPC_WORT:
1394                 *val = get_reg_val(id, vcpu->arch.wort);
1395                 break;
1396         case KVM_REG_PPC_TIDR:
1397                 *val = get_reg_val(id, vcpu->arch.tid);
1398                 break;
1399         case KVM_REG_PPC_PSSCR:
1400                 *val = get_reg_val(id, vcpu->arch.psscr);
1401                 break;
1402         case KVM_REG_PPC_VPA_ADDR:
1403                 spin_lock(&vcpu->arch.vpa_update_lock);
1404                 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1405                 spin_unlock(&vcpu->arch.vpa_update_lock);
1406                 break;
1407         case KVM_REG_PPC_VPA_SLB:
1408                 spin_lock(&vcpu->arch.vpa_update_lock);
1409                 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1410                 val->vpaval.length = vcpu->arch.slb_shadow.len;
1411                 spin_unlock(&vcpu->arch.vpa_update_lock);
1412                 break;
1413         case KVM_REG_PPC_VPA_DTL:
1414                 spin_lock(&vcpu->arch.vpa_update_lock);
1415                 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1416                 val->vpaval.length = vcpu->arch.dtl.len;
1417                 spin_unlock(&vcpu->arch.vpa_update_lock);
1418                 break;
1419         case KVM_REG_PPC_TB_OFFSET:
1420                 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1421                 break;
1422         case KVM_REG_PPC_LPCR:
1423         case KVM_REG_PPC_LPCR_64:
1424                 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1425                 break;
1426         case KVM_REG_PPC_PPR:
1427                 *val = get_reg_val(id, vcpu->arch.ppr);
1428                 break;
1429 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1430         case KVM_REG_PPC_TFHAR:
1431                 *val = get_reg_val(id, vcpu->arch.tfhar);
1432                 break;
1433         case KVM_REG_PPC_TFIAR:
1434                 *val = get_reg_val(id, vcpu->arch.tfiar);
1435                 break;
1436         case KVM_REG_PPC_TEXASR:
1437                 *val = get_reg_val(id, vcpu->arch.texasr);
1438                 break;
1439         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1440                 i = id - KVM_REG_PPC_TM_GPR0;
1441                 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1442                 break;
1443         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1444         {
1445                 int j;
1446                 i = id - KVM_REG_PPC_TM_VSR0;
1447                 if (i < 32)
1448                         for (j = 0; j < TS_FPRWIDTH; j++)
1449                                 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1450                 else {
1451                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1452                                 val->vval = vcpu->arch.vr_tm.vr[i-32];
1453                         else
1454                                 r = -ENXIO;
1455                 }
1456                 break;
1457         }
1458         case KVM_REG_PPC_TM_CR:
1459                 *val = get_reg_val(id, vcpu->arch.cr_tm);
1460                 break;
1461         case KVM_REG_PPC_TM_XER:
1462                 *val = get_reg_val(id, vcpu->arch.xer_tm);
1463                 break;
1464         case KVM_REG_PPC_TM_LR:
1465                 *val = get_reg_val(id, vcpu->arch.lr_tm);
1466                 break;
1467         case KVM_REG_PPC_TM_CTR:
1468                 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1469                 break;
1470         case KVM_REG_PPC_TM_FPSCR:
1471                 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1472                 break;
1473         case KVM_REG_PPC_TM_AMR:
1474                 *val = get_reg_val(id, vcpu->arch.amr_tm);
1475                 break;
1476         case KVM_REG_PPC_TM_PPR:
1477                 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1478                 break;
1479         case KVM_REG_PPC_TM_VRSAVE:
1480                 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1481                 break;
1482         case KVM_REG_PPC_TM_VSCR:
1483                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1484                         *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1485                 else
1486                         r = -ENXIO;
1487                 break;
1488         case KVM_REG_PPC_TM_DSCR:
1489                 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1490                 break;
1491         case KVM_REG_PPC_TM_TAR:
1492                 *val = get_reg_val(id, vcpu->arch.tar_tm);
1493                 break;
1494 #endif
1495         case KVM_REG_PPC_ARCH_COMPAT:
1496                 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1497                 break;
1498         default:
1499                 r = -EINVAL;
1500                 break;
1501         }
1502
1503         return r;
1504 }
1505
1506 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1507                                  union kvmppc_one_reg *val)
1508 {
1509         int r = 0;
1510         long int i;
1511         unsigned long addr, len;
1512
1513         switch (id) {
1514         case KVM_REG_PPC_HIOR:
1515                 /* Only allow this to be set to zero */
1516                 if (set_reg_val(id, *val))
1517                         r = -EINVAL;
1518                 break;
1519         case KVM_REG_PPC_DABR:
1520                 vcpu->arch.dabr = set_reg_val(id, *val);
1521                 break;
1522         case KVM_REG_PPC_DABRX:
1523                 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1524                 break;
1525         case KVM_REG_PPC_DSCR:
1526                 vcpu->arch.dscr = set_reg_val(id, *val);
1527                 break;
1528         case KVM_REG_PPC_PURR:
1529                 vcpu->arch.purr = set_reg_val(id, *val);
1530                 break;
1531         case KVM_REG_PPC_SPURR:
1532                 vcpu->arch.spurr = set_reg_val(id, *val);
1533                 break;
1534         case KVM_REG_PPC_AMR:
1535                 vcpu->arch.amr = set_reg_val(id, *val);
1536                 break;
1537         case KVM_REG_PPC_UAMOR:
1538                 vcpu->arch.uamor = set_reg_val(id, *val);
1539                 break;
1540         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1541                 i = id - KVM_REG_PPC_MMCR0;
1542                 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1543                 break;
1544         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1545                 i = id - KVM_REG_PPC_PMC1;
1546                 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1547                 break;
1548         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1549                 i = id - KVM_REG_PPC_SPMC1;
1550                 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1551                 break;
1552         case KVM_REG_PPC_SIAR:
1553                 vcpu->arch.siar = set_reg_val(id, *val);
1554                 break;
1555         case KVM_REG_PPC_SDAR:
1556                 vcpu->arch.sdar = set_reg_val(id, *val);
1557                 break;
1558         case KVM_REG_PPC_SIER:
1559                 vcpu->arch.sier = set_reg_val(id, *val);
1560                 break;
1561         case KVM_REG_PPC_IAMR:
1562                 vcpu->arch.iamr = set_reg_val(id, *val);
1563                 break;
1564         case KVM_REG_PPC_PSPB:
1565                 vcpu->arch.pspb = set_reg_val(id, *val);
1566                 break;
1567         case KVM_REG_PPC_DPDES:
1568                 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1569                 break;
1570         case KVM_REG_PPC_VTB:
1571                 vcpu->arch.vcore->vtb = set_reg_val(id, *val);
1572                 break;
1573         case KVM_REG_PPC_DAWR:
1574                 vcpu->arch.dawr = set_reg_val(id, *val);
1575                 break;
1576         case KVM_REG_PPC_DAWRX:
1577                 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1578                 break;
1579         case KVM_REG_PPC_CIABR:
1580                 vcpu->arch.ciabr = set_reg_val(id, *val);
1581                 /* Don't allow setting breakpoints in hypervisor code */
1582                 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1583                         vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
1584                 break;
1585         case KVM_REG_PPC_CSIGR:
1586                 vcpu->arch.csigr = set_reg_val(id, *val);
1587                 break;
1588         case KVM_REG_PPC_TACR:
1589                 vcpu->arch.tacr = set_reg_val(id, *val);
1590                 break;
1591         case KVM_REG_PPC_TCSCR:
1592                 vcpu->arch.tcscr = set_reg_val(id, *val);
1593                 break;
1594         case KVM_REG_PPC_PID:
1595                 vcpu->arch.pid = set_reg_val(id, *val);
1596                 break;
1597         case KVM_REG_PPC_ACOP:
1598                 vcpu->arch.acop = set_reg_val(id, *val);
1599                 break;
1600         case KVM_REG_PPC_WORT:
1601                 vcpu->arch.wort = set_reg_val(id, *val);
1602                 break;
1603         case KVM_REG_PPC_TIDR:
1604                 vcpu->arch.tid = set_reg_val(id, *val);
1605                 break;
1606         case KVM_REG_PPC_PSSCR:
1607                 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
1608                 break;
1609         case KVM_REG_PPC_VPA_ADDR:
1610                 addr = set_reg_val(id, *val);
1611                 r = -EINVAL;
1612                 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
1613                               vcpu->arch.dtl.next_gpa))
1614                         break;
1615                 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
1616                 break;
1617         case KVM_REG_PPC_VPA_SLB:
1618                 addr = val->vpaval.addr;
1619                 len = val->vpaval.length;
1620                 r = -EINVAL;
1621                 if (addr && !vcpu->arch.vpa.next_gpa)
1622                         break;
1623                 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
1624                 break;
1625         case KVM_REG_PPC_VPA_DTL:
1626                 addr = val->vpaval.addr;
1627                 len = val->vpaval.length;
1628                 r = -EINVAL;
1629                 if (addr && (len < sizeof(struct dtl_entry) ||
1630                              !vcpu->arch.vpa.next_gpa))
1631                         break;
1632                 len -= len % sizeof(struct dtl_entry);
1633                 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
1634                 break;
1635         case KVM_REG_PPC_TB_OFFSET:
1636                 /*
1637                  * POWER9 DD1 has an erratum where writing TBU40 causes
1638                  * the timebase to lose ticks.  So we don't let the
1639                  * timebase offset be changed on P9 DD1.  (It is
1640                  * initialized to zero.)
1641                  */
1642                 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
1643                         break;
1644                 /* round up to multiple of 2^24 */
1645                 vcpu->arch.vcore->tb_offset =
1646                         ALIGN(set_reg_val(id, *val), 1UL << 24);
1647                 break;
1648         case KVM_REG_PPC_LPCR:
1649                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1650                 break;
1651         case KVM_REG_PPC_LPCR_64:
1652                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1653                 break;
1654         case KVM_REG_PPC_PPR:
1655                 vcpu->arch.ppr = set_reg_val(id, *val);
1656                 break;
1657 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1658         case KVM_REG_PPC_TFHAR:
1659                 vcpu->arch.tfhar = set_reg_val(id, *val);
1660                 break;
1661         case KVM_REG_PPC_TFIAR:
1662                 vcpu->arch.tfiar = set_reg_val(id, *val);
1663                 break;
1664         case KVM_REG_PPC_TEXASR:
1665                 vcpu->arch.texasr = set_reg_val(id, *val);
1666                 break;
1667         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1668                 i = id - KVM_REG_PPC_TM_GPR0;
1669                 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
1670                 break;
1671         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1672         {
1673                 int j;
1674                 i = id - KVM_REG_PPC_TM_VSR0;
1675                 if (i < 32)
1676                         for (j = 0; j < TS_FPRWIDTH; j++)
1677                                 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1678                 else
1679                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1680                                 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1681                         else
1682                                 r = -ENXIO;
1683                 break;
1684         }
1685         case KVM_REG_PPC_TM_CR:
1686                 vcpu->arch.cr_tm = set_reg_val(id, *val);
1687                 break;
1688         case KVM_REG_PPC_TM_XER:
1689                 vcpu->arch.xer_tm = set_reg_val(id, *val);
1690                 break;
1691         case KVM_REG_PPC_TM_LR:
1692                 vcpu->arch.lr_tm = set_reg_val(id, *val);
1693                 break;
1694         case KVM_REG_PPC_TM_CTR:
1695                 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1696                 break;
1697         case KVM_REG_PPC_TM_FPSCR:
1698                 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1699                 break;
1700         case KVM_REG_PPC_TM_AMR:
1701                 vcpu->arch.amr_tm = set_reg_val(id, *val);
1702                 break;
1703         case KVM_REG_PPC_TM_PPR:
1704                 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1705                 break;
1706         case KVM_REG_PPC_TM_VRSAVE:
1707                 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1708                 break;
1709         case KVM_REG_PPC_TM_VSCR:
1710                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1711                         vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1712                 else
1713                         r = -ENXIO;
1714                 break;
1715         case KVM_REG_PPC_TM_DSCR:
1716                 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1717                 break;
1718         case KVM_REG_PPC_TM_TAR:
1719                 vcpu->arch.tar_tm = set_reg_val(id, *val);
1720                 break;
1721 #endif
1722         case KVM_REG_PPC_ARCH_COMPAT:
1723                 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
1724                 break;
1725         default:
1726                 r = -EINVAL;
1727                 break;
1728         }
1729
1730         return r;
1731 }
1732
1733 /*
1734  * On POWER9, threads are independent and can be in different partitions.
1735  * Therefore we consider each thread to be a subcore.
1736  * There is an unfortunate restriction that all threads have to be in
1737  * the same MMU mode (radix or HPT), but since we only support
1738  * HPT guests on an HPT host so far, that isn't an impediment yet.
1739  */
1740 static int threads_per_vcore(struct kvm *kvm)
1741 {
1742         if (kvm->arch.threads_indep)
1743                 return 1;
1744         return threads_per_subcore;
1745 }
1746
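/*
 * Allocate and initialize a virtual core structure for virtual core
 * number 'core'.  The vcore inherits the VM's LPCR, and its first
 * vcpu id is derived from the VM's virtual SMT mode.
 */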
1747 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1748 {
1749         struct kvmppc_vcore *vcore;
1750
1751         vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1752
1753         if (vcore == NULL)
1754                 return NULL;
1755
1756         spin_lock_init(&vcore->lock);
1757         spin_lock_init(&vcore->stoltb_lock);
1758         init_swait_queue_head(&vcore->wq);
1759         vcore->preempt_tb = TB_NIL;
1760         vcore->lpcr = kvm->arch.lpcr;
1761         vcore->first_vcpuid = core * kvm->arch.smt_mode;
1762         vcore->kvm = kvm;
1763         INIT_LIST_HEAD(&vcore->preempt_list);
1764
1765         return vcore;
1766 }
1767
1768 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1769 static struct debugfs_timings_element {
1770         const char *name;
1771         size_t offset;
1772 } timings[] = {
1773         {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
1774         {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
1775         {"rm_exit",     offsetof(struct kvm_vcpu, arch.rm_exit)},
1776         {"guest",       offsetof(struct kvm_vcpu, arch.guest_time)},
1777         {"cede",        offsetof(struct kvm_vcpu, arch.cede_time)},
1778 };
1779
1780 #define N_TIMINGS       (ARRAY_SIZE(timings))
1781
1782 struct debugfs_timings_state {
1783         struct kvm_vcpu *vcpu;
1784         unsigned int    buflen;
1785         char            buf[N_TIMINGS * 100];
1786 };
1787
1788 static int debugfs_timings_open(struct inode *inode, struct file *file)
1789 {
1790         struct kvm_vcpu *vcpu = inode->i_private;
1791         struct debugfs_timings_state *p;
1792
1793         p = kzalloc(sizeof(*p), GFP_KERNEL);
1794         if (!p)
1795                 return -ENOMEM;
1796
1797         kvm_get_kvm(vcpu->kvm);
1798         p->vcpu = vcpu;
1799         file->private_data = p;
1800
1801         return nonseekable_open(inode, file);
1802 }
1803
1804 static int debugfs_timings_release(struct inode *inode, struct file *file)
1805 {
1806         struct debugfs_timings_state *p = file->private_data;
1807
1808         kvm_put_kvm(p->vcpu->kvm);
1809         kfree(p);
1810         return 0;
1811 }
1812
1813 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
1814                                     size_t len, loff_t *ppos)
1815 {
1816         struct debugfs_timings_state *p = file->private_data;
1817         struct kvm_vcpu *vcpu = p->vcpu;
1818         char *s, *buf_end;
1819         struct kvmhv_tb_accumulator tb;
1820         u64 count;
1821         loff_t pos;
1822         ssize_t n;
1823         int i, loops;
1824         bool ok;
1825
1826         if (!p->buflen) {
1827                 s = p->buf;
1828                 buf_end = s + sizeof(p->buf);
1829                 for (i = 0; i < N_TIMINGS; ++i) {
1830                         struct kvmhv_tb_accumulator *acc;
1831
1832                         acc = (struct kvmhv_tb_accumulator *)
1833                                 ((unsigned long)vcpu + timings[i].offset);
1834                         ok = false;
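                        /*
                         * Take a consistent snapshot of the accumulator:
                         * keep re-reading until the sequence count is even
                         * and unchanged across the copy (seqlock-style),
                         * giving up after a bounded number of retries.
                         */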
1835                         for (loops = 0; loops < 1000; ++loops) {
1836                                 count = acc->seqcount;
1837                                 if (!(count & 1)) {
1838                                         smp_rmb();
1839                                         tb = *acc;
1840                                         smp_rmb();
1841                                         if (count == acc->seqcount) {
1842                                                 ok = true;
1843                                                 break;
1844                                         }
1845                                 }
1846                                 udelay(1);
1847                         }
1848                         if (!ok)
1849                                 snprintf(s, buf_end - s, "%s: stuck\n",
1850                                         timings[i].name);
1851                         else
1852                                 snprintf(s, buf_end - s,
1853                                         "%s: %llu %llu %llu %llu\n",
1854                                         timings[i].name, count / 2,
1855                                         tb_to_ns(tb.tb_total),
1856                                         tb_to_ns(tb.tb_min),
1857                                         tb_to_ns(tb.tb_max));
1858                         s += strlen(s);
1859                 }
1860                 p->buflen = s - p->buf;
1861         }
1862
1863         pos = *ppos;
1864         if (pos >= p->buflen)
1865                 return 0;
1866         if (len > p->buflen - pos)
1867                 len = p->buflen - pos;
1868         n = copy_to_user(buf, p->buf + pos, len);
1869         if (n) {
1870                 if (n == len)
1871                         return -EFAULT;
1872                 len -= n;
1873         }
1874         *ppos = pos + len;
1875         return len;
1876 }
1877
1878 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
1879                                      size_t len, loff_t *ppos)
1880 {
1881         return -EACCES;
1882 }
1883
1884 static const struct file_operations debugfs_timings_ops = {
1885         .owner   = THIS_MODULE,
1886         .open    = debugfs_timings_open,
1887         .release = debugfs_timings_release,
1888         .read    = debugfs_timings_read,
1889         .write   = debugfs_timings_write,
1890         .llseek  = generic_file_llseek,
1891 };
1892
1893 /* Create a debugfs directory for the vcpu */
1894 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1895 {
1896         char buf[16];
1897         struct kvm *kvm = vcpu->kvm;
1898
1899         snprintf(buf, sizeof(buf), "vcpu%u", id);
1900         if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
1901                 return;
1902         vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
1903         if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
1904                 return;
1905         vcpu->arch.debugfs_timings =
1906                 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
1907                                     vcpu, &debugfs_timings_ops);
1908 }
1909
1910 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1911 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1912 {
1913 }
1914 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1915
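/*
 * Create a vcpu for HV KVM: allocate and initialize the vcpu structure
 * with its default register state, then attach it to the virtual core
 * corresponding to its vcpu id, creating the vcore if necessary.
 */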
1916 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1917                                                    unsigned int id)
1918 {
1919         struct kvm_vcpu *vcpu;
1920         int err;
1921         int core;
1922         struct kvmppc_vcore *vcore;
1923
1924         err = -ENOMEM;
1925         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1926         if (!vcpu)
1927                 goto out;
1928
1929         err = kvm_vcpu_init(vcpu, kvm, id);
1930         if (err)
1931                 goto free_vcpu;
1932
1933         vcpu->arch.shared = &vcpu->arch.shregs;
1934 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1935         /*
1936          * The shared struct is never shared on HV,
1937          * so we can always use host endianness
1938          */
1939 #ifdef __BIG_ENDIAN__
1940         vcpu->arch.shared_big_endian = true;
1941 #else
1942         vcpu->arch.shared_big_endian = false;
1943 #endif
1944 #endif
1945         vcpu->arch.mmcr[0] = MMCR0_FC;
1946         vcpu->arch.ctrl = CTRL_RUNLATCH;
1947         /* default to host PVR, since we can't spoof it */
1948         kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
1949         spin_lock_init(&vcpu->arch.vpa_update_lock);
1950         spin_lock_init(&vcpu->arch.tbacct_lock);
1951         vcpu->arch.busy_preempt = TB_NIL;
1952         vcpu->arch.intr_msr = MSR_SF | MSR_ME;
1953
1954         /*
1955          * Set the default HFSCR for the guest from the host value.
1956          * This value is only used on POWER9.
1957          * On POWER9 DD1, TM doesn't work, so we make sure to
1958          * prevent the guest from using it.
1959          * On POWER9, we want to virtualize the doorbell facility, so we
1960          * turn off the HFSCR bit, which causes those instructions to trap.
1961          */
1962         vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
1963         if (!cpu_has_feature(CPU_FTR_TM))
1964                 vcpu->arch.hfscr &= ~HFSCR_TM;
1965         if (cpu_has_feature(CPU_FTR_ARCH_300))
1966                 vcpu->arch.hfscr &= ~HFSCR_MSGP;
1967
1968         kvmppc_mmu_book3s_hv_init(vcpu);
1969
1970         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
1971
1972         init_waitqueue_head(&vcpu->arch.cpu_run);
1973
1974         mutex_lock(&kvm->lock);
1975         vcore = NULL;
1976         err = -EINVAL;
1977         core = id / kvm->arch.smt_mode;
1978         if (core < KVM_MAX_VCORES) {
1979                 vcore = kvm->arch.vcores[core];
1980                 if (!vcore) {
1981                         err = -ENOMEM;
1982                         vcore = kvmppc_vcore_create(kvm, core);
1983                         kvm->arch.vcores[core] = vcore;
1984                         kvm->arch.online_vcores++;
1985                 }
1986         }
1987         mutex_unlock(&kvm->lock);
1988
1989         if (!vcore)
1990                 goto free_vcpu;
1991
1992         spin_lock(&vcore->lock);
1993         ++vcore->num_threads;
1994         spin_unlock(&vcore->lock);
1995         vcpu->arch.vcore = vcore;
1996         vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1997         vcpu->arch.thread_cpu = -1;
1998         vcpu->arch.prev_cpu = -1;
1999
2000         vcpu->arch.cpu_type = KVM_CPU_3S_64;
2001         kvmppc_sanity_check(vcpu);
2002
2003         debugfs_vcpu_init(vcpu, id);
2004
2005         return vcpu;
2006
2007 free_vcpu:
2008         kmem_cache_free(kvm_vcpu_cache, vcpu);
2009 out:
2010         return ERR_PTR(err);
2011 }
2012
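/*
 * Set the virtual SMT mode of the VM.  This is only permitted while no
 * virtual cores are online.  On POWER9 the requested mode is emulated
 * (each vcpu gets its own vcore); on POWER8/POWER7 it must not exceed
 * the number of hardware threads per subcore.
 */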
2013 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2014                               unsigned long flags)
2015 {
2016         int err;
2017         int esmt = 0;
2018
2019         if (flags)
2020                 return -EINVAL;
2021         if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2022                 return -EINVAL;
2023         if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2024                 /*
2025                  * On POWER8 (or POWER7), the threading mode is "strict",
2026                  * so we pack smt_mode vcpus per vcore.
2027                  */
2028                 if (smt_mode > threads_per_subcore)
2029                         return -EINVAL;
2030         } else {
2031                 /*
2032                  * On POWER9, the threading mode is "loose",
2033                  * so each vcpu gets its own vcore.
2034                  */
2035                 esmt = smt_mode;
2036                 smt_mode = 1;
2037         }
2038         mutex_lock(&kvm->lock);
2039         err = -EBUSY;
2040         if (!kvm->arch.online_vcores) {
2041                 kvm->arch.smt_mode = smt_mode;
2042                 kvm->arch.emul_smt_mode = esmt;
2043                 err = 0;
2044         }
2045         mutex_unlock(&kvm->lock);
2046
2047         return err;
2048 }
2049
2050 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2051 {
2052         if (vpa->pinned_addr)
2053                 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2054                                         vpa->dirty);
2055 }
2056
2057 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2058 {
2059         spin_lock(&vcpu->arch.vpa_update_lock);
2060         unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2061         unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2062         unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2063         spin_unlock(&vcpu->arch.vpa_update_lock);
2064         kvm_vcpu_uninit(vcpu);
2065         kmem_cache_free(kvm_vcpu_cache, vcpu);
2066 }
2067
2068 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2069 {
2070         /* Indicate we want to get back into the guest */
2071         return 1;
2072 }
2073
2074 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2075 {
2076         unsigned long dec_nsec, now;
2077
2078         now = get_tb();
2079         if (now > vcpu->arch.dec_expires) {
2080                 /* decrementer has already gone negative */
2081                 kvmppc_core_queue_dec(vcpu);
2082                 kvmppc_core_prepare_to_enter(vcpu);
2083                 return;
2084         }
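        /* Convert the remaining timebase ticks to ns for the hrtimer */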
2085         dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
2086                    / tb_ticks_per_sec;
2087         hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2088         vcpu->arch.timer_running = 1;
2089 }
2090
2091 static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
2092 {
2093         vcpu->arch.ceded = 0;
2094         if (vcpu->arch.timer_running) {
2095                 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2096                 vcpu->arch.timer_running = 0;
2097         }
2098 }
2099
2100 extern int __kvmppc_vcore_entry(void);
2101
2102 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2103                                    struct kvm_vcpu *vcpu)
2104 {
2105         u64 now;
2106
2107         if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2108                 return;
2109         spin_lock_irq(&vcpu->arch.tbacct_lock);
2110         now = mftb();
2111         vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2112                 vcpu->arch.stolen_logged;
2113         vcpu->arch.busy_preempt = now;
2114         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2115         spin_unlock_irq(&vcpu->arch.tbacct_lock);
2116         --vc->n_runnable;
2117         WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2118 }
2119
2120 static int kvmppc_grab_hwthread(int cpu)
2121 {
2122         struct paca_struct *tpaca;
2123         long timeout = 10000;
2124
2125         tpaca = &paca[cpu];
2126
2127         /* Ensure the thread won't go into the kernel if it wakes */
2128         tpaca->kvm_hstate.kvm_vcpu = NULL;
2129         tpaca->kvm_hstate.kvm_vcore = NULL;
2130         tpaca->kvm_hstate.napping = 0;
2131         smp_wmb();
2132         tpaca->kvm_hstate.hwthread_req = 1;
2133
2134         /*
2135          * If the thread is already executing in the kernel (e.g. handling
2136          * a stray interrupt), wait for it to get back to nap mode.
2137          * The smp_mb() is to ensure that our setting of hwthread_req
2138          * is visible before we look at hwthread_state, so if this
2139          * races with the code at system_reset_pSeries and the thread
2140          * misses our setting of hwthread_req, we are sure to see its
2141          * setting of hwthread_state, and vice versa.
2142          */
2143         smp_mb();
2144         while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2145                 if (--timeout <= 0) {
2146                         pr_err("KVM: couldn't grab cpu %d\n", cpu);
2147                         return -EBUSY;
2148                 }
2149                 udelay(1);
2150         }
2151         return 0;
2152 }
2153
2154 static void kvmppc_release_hwthread(int cpu)
2155 {
2156         struct paca_struct *tpaca;
2157
2158         tpaca = &paca[cpu];
2159         tpaca->kvm_hstate.hwthread_req = 0;
2160         tpaca->kvm_hstate.kvm_vcpu = NULL;
2161         tpaca->kvm_hstate.kvm_vcore = NULL;
2162         tpaca->kvm_hstate.kvm_split_mode = NULL;
2163 }
2164
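/*
 * Ask for a TLB flush on the physical core that 'cpu' belongs to: set
 * that core's bit in the VM's need_tlb_flush mask, then IPI any threads
 * of the core that are currently running in the guest.
 */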
2165 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2166 {
2167         int i;
2168
2169         cpu = cpu_first_thread_sibling(cpu);
2170         cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2171         /*
2172          * Make sure setting of bit in need_tlb_flush precedes
2173          * testing of cpu_in_guest bits.  The matching barrier on
2174          * the other side is the first smp_mb() in kvmppc_run_core().
2175          */
2176         smp_mb();
2177         for (i = 0; i < threads_per_core; ++i)
2178                 if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
2179                         smp_call_function_single(cpu + i, do_nothing, NULL, 1);
2180 }
2181
2182 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2183 {
2184         struct kvm *kvm = vcpu->kvm;
2185
2186         /*
2187          * With radix, the guest can do TLB invalidations itself,
2188          * and it could choose to use the local form (tlbiel) if
2189          * it is invalidating a translation that has only ever been
2190          * used on one vcpu.  However, that doesn't mean it has
2191          * only ever been used on one physical cpu, since vcpus
2192          * can move around between pcpus.  To cope with this, when
2193          * a vcpu moves from one pcpu to another, we need to tell
2194          * any vcpus running on the core where this vcpu previously
2195          * ran to flush the TLB.  The TLB is shared between threads,
2196          * so we use a single bit in .need_tlb_flush for all 4 threads.
2197          */
2198         if (vcpu->arch.prev_cpu != pcpu) {
2199                 if (vcpu->arch.prev_cpu >= 0 &&
2200                     cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
2201                     cpu_first_thread_sibling(pcpu))
2202                         radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
2203                 vcpu->arch.prev_cpu = pcpu;
2204         }
2205 }
2206
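/*
 * Set up a hardware thread of the vcore's physical core to run the
 * given vcpu (or just the vcore code, if vcpu is NULL), and kick that
 * thread with an IPI if it is not the current CPU.
 */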
2207 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2208 {
2209         int cpu;
2210         struct paca_struct *tpaca;
2211         struct kvm *kvm = vc->kvm;
2212
2213         cpu = vc->pcpu;
2214         if (vcpu) {
2215                 if (vcpu->arch.timer_running) {
2216                         hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2217                         vcpu->arch.timer_running = 0;
2218                 }
2219                 cpu += vcpu->arch.ptid;
2220                 vcpu->cpu = vc->pcpu;
2221                 vcpu->arch.thread_cpu = cpu;
2222                 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2223         }
2224         tpaca = &paca[cpu];
2225         tpaca->kvm_hstate.kvm_vcpu = vcpu;
2226         tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2227         /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2228         smp_wmb();
2229         tpaca->kvm_hstate.kvm_vcore = vc;
2230         if (cpu != smp_processor_id())
2231                 kvmppc_ipi_thread(cpu);
2232 }
2233
2234 static void kvmppc_wait_for_nap(int n_threads)
2235 {
2236         int cpu = smp_processor_id();
2237         int i, loops;
2238
2239         if (n_threads <= 1)
2240                 return;
2241         for (loops = 0; loops < 1000000; ++loops) {
2242                 /*
2243                  * Check if all threads are finished.
2244                  * We set the vcore pointer when starting a thread
2245                  * and the thread clears it when finished, so we look
2246                  * for any threads that still have a non-NULL vcore ptr.
2247                  */
2248                 for (i = 1; i < n_threads; ++i)
2249                         if (paca[cpu + i].kvm_hstate.kvm_vcore)
2250                                 break;
2251                 if (i == n_threads) {
2252                         HMT_medium();
2253                         return;
2254                 }
2255                 HMT_low();
2256         }
2257         HMT_medium();
2258         for (i = 1; i < n_threads; ++i)
2259                 if (paca[cpu + i].kvm_hstate.kvm_vcore)
2260                         pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
2261 }
2262
2263 /*
2264  * Check that we are on thread 0 and that any other threads in
2265  * this core are off-line.  Then grab the threads so they can't
2266  * enter the kernel.
2267  */
2268 static int on_primary_thread(void)
2269 {
2270         int cpu = smp_processor_id();
2271         int thr;
2272
2273         /* Are we on a primary subcore? */
2274         if (cpu_thread_in_subcore(cpu))
2275                 return 0;
2276
2277         thr = 0;
2278         while (++thr < threads_per_subcore)
2279                 if (cpu_online(cpu + thr))
2280                         return 0;
2281
2282         /* Grab all hw threads so they can't go into the kernel */
2283         for (thr = 1; thr < threads_per_subcore; ++thr) {
2284                 if (kvmppc_grab_hwthread(cpu + thr)) {
2285                         /* Couldn't grab one; let the others go */
2286                         do {
2287                                 kvmppc_release_hwthread(cpu + thr);
2288                         } while (--thr > 0);
2289                         return 0;
2290                 }
2291         }
2292         return 1;
2293 }
2294
2295 /*
2296  * A list of virtual cores for each physical CPU.
2297  * These are vcores that could run but their runner VCPU tasks are
2298  * (or may be) preempted.
2299  */
2300 struct preempted_vcore_list {
2301         struct list_head        list;
2302         spinlock_t              lock;
2303 };
2304
2305 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
2306
2307 static void init_vcore_lists(void)
2308 {
2309         int cpu;
2310
2311         for_each_possible_cpu(cpu) {
2312                 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
2313                 spin_lock_init(&lp->lock);
2314                 INIT_LIST_HEAD(&lp->list);
2315         }
2316 }
2317
2318 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
2319 {
2320         struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2321
2322         vc->vcore_state = VCORE_PREEMPT;
2323         vc->pcpu = smp_processor_id();
2324         if (vc->num_threads < threads_per_vcore(vc->kvm)) {
2325                 spin_lock(&lp->lock);
2326                 list_add_tail(&vc->preempt_list, &lp->list);
2327                 spin_unlock(&lp->lock);
2328         }
2329
2330         /* Start accumulating stolen time */
2331         kvmppc_core_start_stolen(vc);
2332 }
2333
2334 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
2335 {
2336         struct preempted_vcore_list *lp;
2337
2338         kvmppc_core_end_stolen(vc);
2339         if (!list_empty(&vc->preempt_list)) {
2340                 lp = &per_cpu(preempted_vcores, vc->pcpu);
2341                 spin_lock(&lp->lock);
2342                 list_del_init(&vc->preempt_list);
2343                 spin_unlock(&lp->lock);
2344         }
2345         vc->vcore_state = VCORE_INACTIVE;
2346 }
2347
2348 /*
2349  * This stores information about the virtual cores currently
2350  * assigned to a physical core.
2351  */
2352 struct core_info {
2353         int             n_subcores;
2354         int             max_subcore_threads;
2355         int             total_threads;
2356         int             subcore_threads[MAX_SUBCORES];
2357         struct kvmppc_vcore *vc[MAX_SUBCORES];
2358 };
2359
2360 /*
2361  * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2362  * respectively in 2-way micro-threading (split-core) mode on POWER8.
2363  */
2364 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
2365
2366 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2367 {
2368         memset(cip, 0, sizeof(*cip));
2369         cip->n_subcores = 1;
2370         cip->max_subcore_threads = vc->num_threads;
2371         cip->total_threads = vc->num_threads;
2372         cip->subcore_threads[0] = vc->num_threads;
2373         cip->vc[0] = vc;
2374 }
2375
2376 static bool subcore_config_ok(int n_subcores, int n_threads)
2377 {
2378         /*
2379          * POWER9 "SMT4" cores are permanently in what is effectively a
2380          * 4-way split-core mode, with one thread per subcore.
2381          */
2382         if (cpu_has_feature(CPU_FTR_ARCH_300))
2383                 return n_subcores <= 4 && n_threads == 1;
2384
2385         /* On POWER8, can only dynamically split if unsplit to begin with */
2386         if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2387                 return false;
2388         if (n_subcores > MAX_SUBCORES)
2389                 return false;
2390         if (n_subcores > 1) {
2391                 if (!(dynamic_mt_modes & 2))
2392                         n_subcores = 4;
2393                 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2394                         return false;
2395         }
2396
2397         return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2398 }
2399
2400 static void init_vcore_to_run(struct kvmppc_vcore *vc)
2401 {
2402         vc->entry_exit_map = 0;
2403         vc->in_guest = 0;
2404         vc->napping_threads = 0;
2405         vc->conferring_threads = 0;
2406 }
2407
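/*
 * Decide whether vcore vc can be run as an additional subcore of the
 * physical core described by *cip; if so, add it to *cip and return
 * true.
 */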
2408 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2409 {
2410         int n_threads = vc->num_threads;
2411         int sub;
2412
2413         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2414                 return false;
2415
2416         /* POWER9 currently requires all threads to be in the same MMU mode */
2417         if (cpu_has_feature(CPU_FTR_ARCH_300) &&
2418             kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
2419                 return false;
2420
2421         if (n_threads < cip->max_subcore_threads)
2422                 n_threads = cip->max_subcore_threads;
2423         if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
2424                 return false;
2425         cip->max_subcore_threads = n_threads;
2426
2427         sub = cip->n_subcores;
2428         ++cip->n_subcores;
2429         cip->total_threads += vc->num_threads;
2430         cip->subcore_threads[sub] = vc->num_threads;
2431         cip->vc[sub] = vc;
2432         init_vcore_to_run(vc);
2433         list_del_init(&vc->preempt_list);
2434
2435         return true;
2436 }
2437
2438 /*
2439  * Work out whether it is possible to piggyback the execution of
2440  * vcore *pvc onto the execution of the other vcores described in *cip.
2441  */
2442 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2443                           int target_threads)
2444 {
2445         if (cip->total_threads + pvc->num_threads > target_threads)
2446                 return false;
2447
2448         return can_dynamic_split(pvc, cip);
2449 }
2450
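/*
 * Remove from the vcore any vcpus that have a signal pending or a VPA
 * update pending; they go back to the host instead of entering the
 * guest.
 */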
2451 static void prepare_threads(struct kvmppc_vcore *vc)
2452 {
2453         int i;
2454         struct kvm_vcpu *vcpu;
2455
2456         for_each_runnable_thread(i, vcpu, vc) {
2457                 if (signal_pending(vcpu->arch.run_task))
2458                         vcpu->arch.ret = -EINTR;
2459                 else if (vcpu->arch.vpa.update_pending ||
2460                          vcpu->arch.slb_shadow.update_pending ||
2461                          vcpu->arch.dtl.update_pending)
2462                         vcpu->arch.ret = RESUME_GUEST;
2463                 else
2464                         continue;
2465                 kvmppc_remove_runnable(vc, vcpu);
2466                 wake_up(&vcpu->arch.cpu_run);
2467         }
2468 }
2469
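/*
 * Scan this CPU's list of preempted vcores and pull in any that can
 * share the physical core with the vcores already in *cip, stopping
 * once target_threads threads have been collected.
 */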
2470 static void collect_piggybacks(struct core_info *cip, int target_threads)
2471 {
2472         struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2473         struct kvmppc_vcore *pvc, *vcnext;
2474
2475         spin_lock(&lp->lock);
2476         list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2477                 if (!spin_trylock(&pvc->lock))
2478                         continue;
2479                 prepare_threads(pvc);
2480                 if (!pvc->n_runnable) {
2481                         list_del_init(&pvc->preempt_list);
2482                         if (pvc->runner == NULL) {
2483                                 pvc->vcore_state = VCORE_INACTIVE;
2484                                 kvmppc_core_end_stolen(pvc);
2485                         }
2486                         spin_unlock(&pvc->lock);
2487                         continue;
2488                 }
2489                 if (!can_piggyback(pvc, cip, target_threads)) {
2490                         spin_unlock(&pvc->lock);
2491                         continue;
2492                 }
2493                 kvmppc_core_end_stolen(pvc);
2494                 pvc->vcore_state = VCORE_PIGGYBACK;
2495                 if (cip->total_threads >= target_threads)
2496                         break;
2497         }
2498         spin_unlock(&lp->lock);
2499 }
2500
2501 static bool recheck_signals(struct core_info *cip)
2502 {
2503         int sub, i;
2504         struct kvm_vcpu *vcpu;
2505
2506         for (sub = 0; sub < cip->n_subcores; ++sub)
2507                 for_each_runnable_thread(i, vcpu, cip->vc[sub])
2508                         if (signal_pending(vcpu->arch.run_task))
2509                                 return true;
2510         return false;
2511 }
2512
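/*
 * Process the results of running the guest: for each runnable vcpu,
 * handle its exit, then either leave it runnable, re-arm its
 * decrementer timer, or remove it and wake up its run task.
 */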
2513 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2514 {
2515         int still_running = 0, i;
2516         u64 now;
2517         long ret;
2518         struct kvm_vcpu *vcpu;
2519
2520         spin_lock(&vc->lock);
2521         now = get_tb();
2522         for_each_runnable_thread(i, vcpu, vc) {
2523                 /* cancel pending dec exception if dec is positive */
2524                 if (now < vcpu->arch.dec_expires &&
2525                     kvmppc_core_pending_dec(vcpu))
2526                         kvmppc_core_dequeue_dec(vcpu);
2527
2528                 trace_kvm_guest_exit(vcpu);
2529
2530                 ret = RESUME_GUEST;
2531                 if (vcpu->arch.trap)
2532                         ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
2533                                                     vcpu->arch.run_task);
2534
2535                 vcpu->arch.ret = ret;
2536                 vcpu->arch.trap = 0;
2537
2538                 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2539                         if (vcpu->arch.pending_exceptions)
2540                                 kvmppc_core_prepare_to_enter(vcpu);
2541                         if (vcpu->arch.ceded)
2542                                 kvmppc_set_timer(vcpu);
2543                         else
2544                                 ++still_running;
2545                 } else {
2546                         kvmppc_remove_runnable(vc, vcpu);
2547                         wake_up(&vcpu->arch.cpu_run);
2548                 }
2549         }
2550         if (!is_master) {
2551                 if (still_running > 0) {
2552                         kvmppc_vcore_preempt(vc);
2553                 } else if (vc->runner) {
2554                         vc->vcore_state = VCORE_PREEMPT;
2555                         kvmppc_core_start_stolen(vc);
2556                 } else {
2557                         vc->vcore_state = VCORE_INACTIVE;
2558                 }
2559                 if (vc->n_runnable > 0 && vc->runner == NULL) {
2560                         /* make sure there's a candidate runner awake */
2561                         i = -1;
2562                         vcpu = next_runnable_thread(vc, &i);
2563                         wake_up(&vcpu->arch.cpu_run);
2564                 }
2565         }
2566         spin_unlock(&vc->lock);
2567 }
2568
2569 /*
2570  * Clear core from the list of active host cores as we are about to
2571  * enter the guest. Only do this if it is the primary thread of the
2572  * core (not if a subcore) that is entering the guest.
2573  */
2574 static inline int kvmppc_clear_host_core(unsigned int cpu)
2575 {
2576         int core;
2577
2578         if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
2579                 return 0;
2580         /*
2581          * Memory barrier can be omitted here as we will do a smp_wmb()
2582          * later in kvmppc_start_thread and we need to ensure that the state
2583          * is visible to other CPUs only after we enter the guest.
2584          */
2585         core = cpu >> threads_shift;
2586         kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
2587         return 0;
2588 }
2589
2590 /*
2591  * Advertise this core as an active host core since we exited the guest.
2592  * We only need to do this if it is the primary thread of the core that is
2593  * exiting.
2594  */
2595 static inline int kvmppc_set_host_core(unsigned int cpu)
2596 {
2597         int core;
2598
2599         if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
2600                 return 0;
2601
2602         /*
2603          * Memory barrier can be omitted here because we do a spin_unlock
2604          * immediately after this which provides the memory barrier.
2605          */
2606         core = cpu >> threads_shift;
2607         kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
2608         return 0;
2609 }
2610
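/*
 * Note in the PACA which host interrupt caused the exit from the guest
 * so that it is replayed once interrupts are re-enabled (system resets
 * are replayed immediately).
 */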
2611 static void set_irq_happened(int trap)
2612 {
2613         switch (trap) {
2614         case BOOK3S_INTERRUPT_EXTERNAL:
2615                 local_paca->irq_happened |= PACA_IRQ_EE;
2616                 break;
2617         case BOOK3S_INTERRUPT_H_DOORBELL:
2618                 local_paca->irq_happened |= PACA_IRQ_DBELL;
2619                 break;
2620         case BOOK3S_INTERRUPT_HMI:
2621                 local_paca->irq_happened |= PACA_IRQ_HMI;
2622                 break;
2623         case BOOK3S_INTERRUPT_SYSTEM_RESET:
2624                 replay_system_reset();
2625                 break;
2626         }
2627 }
2628
2629 /*
2630  * Run a set of guest threads on a physical core.
2631  * Called with vc->lock held.
2632  */
2633 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
2634 {
2635         struct kvm_vcpu *vcpu;
2636         int i;
2637         int srcu_idx;
2638         struct core_info core_info;
2639         struct kvmppc_vcore *pvc;
2640         struct kvm_split_mode split_info, *sip;
2641         int split, subcore_size, active;
2642         int sub;
2643         bool thr0_done;
2644         unsigned long cmd_bit, stat_bit;
2645         int pcpu, thr;
2646         int target_threads;
2647         int controlled_threads;
2648         int trap;
2649         bool is_power8;
2650         bool hpt_on_radix;
2651
2652         /*
2653          * Remove from the list any threads that have a signal pending
2654          * or need a VPA update done
2655          */
2656         prepare_threads(vc);
2657
2658         /* if the runner is no longer runnable, let the caller pick a new one */
2659         if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
2660                 return;
2661
2662         /*
2663          * Initialize *vc.
2664          */
2665         init_vcore_to_run(vc);
2666         vc->preempt_tb = TB_NIL;
2667
2668         /*
2669          * Number of threads that we will be controlling: the same as
2670          * the number of threads per subcore, except on POWER9,
2671          * where it's 1 because the threads are (mostly) independent.
2672          */
2673         controlled_threads = threads_per_vcore(vc->kvm);
2674
2675         /*
2676          * Make sure we are running on primary threads, and that secondary
2677          * threads are offline.  Also check if the number of threads in this
2678          * guest is greater than the current system threads per guest.
2679          * On POWER9, we must not be in independent-threads mode if
2680          * this is an HPT guest on a radix host.
2681          */
2682         hpt_on_radix = radix_enabled() && !kvm_is_radix(vc->kvm);
2683         if (((controlled_threads > 1) &&
2684              ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
2685             (hpt_on_radix && vc->kvm->arch.threads_indep)) {
2686                 for_each_runnable_thread(i, vcpu, vc) {
2687                         vcpu->arch.ret = -EBUSY;
2688                         kvmppc_remove_runnable(vc, vcpu);
2689                         wake_up(&vcpu->arch.cpu_run);
2690                 }
2691                 goto out;
2692         }
2693
2694         /*
2695          * See if we could run any other vcores on the physical core
2696          * along with this one.
2697          */
2698         init_core_info(&core_info, vc);
2699         pcpu = smp_processor_id();
2700         target_threads = controlled_threads;
2701         if (target_smt_mode && target_smt_mode < target_threads)
2702                 target_threads = target_smt_mode;
2703         if (vc->num_threads < target_threads)
2704                 collect_piggybacks(&core_info, target_threads);
2705
2706         /*
2707          * On radix, arrange for TLB flushing if necessary.
2708          * This has to be done before disabling interrupts since
2709          * it uses smp_call_function().
2710          */
2711         pcpu = smp_processor_id();
2712         if (kvm_is_radix(vc->kvm)) {
2713                 for (sub = 0; sub < core_info.n_subcores; ++sub)
2714                         for_each_runnable_thread(i, vcpu, core_info.vc[sub])
2715                                 kvmppc_prepare_radix_vcpu(vcpu, pcpu);
2716         }
2717
2718         /*
2719          * Hard-disable interrupts, and check resched flag and signals.
2720          * If we need to reschedule or deliver a signal, clean up
2721          * and return without going into the guest(s).
2722          * If the mmu_ready flag has been cleared, don't go into the
2723          * guest because that means a HPT resize operation is in progress.
2724          */
2725         local_irq_disable();
2726         hard_irq_disable();
2727         if (lazy_irq_pending() || need_resched() ||
2728             recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
2729                 local_irq_enable();
2730                 vc->vcore_state = VCORE_INACTIVE;
2731                 /* Unlock all except the primary vcore */
2732                 for (sub = 1; sub < core_info.n_subcores; ++sub) {
2733                         pvc = core_info.vc[sub];
2734                         /* Put back onto the preempted vcores list */
2735                         kvmppc_vcore_preempt(pvc);
2736                         spin_unlock(&pvc->lock);
2737                 }
2738                 for (i = 0; i < controlled_threads; ++i)
2739                         kvmppc_release_hwthread(pcpu + i);
2740                 return;
2741         }
2742
2743         kvmppc_clear_host_core(pcpu);
2744
2745         /* Decide on micro-threading (split-core) mode */
2746         subcore_size = threads_per_subcore;
2747         cmd_bit = stat_bit = 0;
2748         split = core_info.n_subcores;
2749         sip = NULL;
2750         is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
2751                 && !cpu_has_feature(CPU_FTR_ARCH_300);
2752
2753         if (split > 1 || hpt_on_radix) {
2754                 sip = &split_info;
2755                 memset(&split_info, 0, sizeof(split_info));
2756                 for (sub = 0; sub < core_info.n_subcores; ++sub)
2757                         split_info.vc[sub] = core_info.vc[sub];
2758
2759                 if (is_power8) {
2760                         if (split == 2 && (dynamic_mt_modes & 2)) {
2761                                 cmd_bit = HID0_POWER8_1TO2LPAR;
2762                                 stat_bit = HID0_POWER8_2LPARMODE;
2763                         } else {
2764                                 split = 4;
2765                                 cmd_bit = HID0_POWER8_1TO4LPAR;
2766                                 stat_bit = HID0_POWER8_4LPARMODE;
2767                         }
2768                         subcore_size = MAX_SMT_THREADS / split;
2769                         split_info.rpr = mfspr(SPRN_RPR);
2770                         split_info.pmmar = mfspr(SPRN_PMMAR);
2771                         split_info.ldbar = mfspr(SPRN_LDBAR);
2772                         split_info.subcore_size = subcore_size;
2773                 } else {
2774                         split_info.subcore_size = 1;
2775                         if (hpt_on_radix) {
2776                                 /* Use the split_info for LPCR/LPIDR changes */
2777                                 split_info.lpcr_req = vc->lpcr;
2778                                 split_info.lpidr_req = vc->kvm->arch.lpid;
2779                                 split_info.host_lpcr = vc->kvm->arch.host_lpcr;
2780                                 split_info.do_set = 1;
2781                         }
2782                 }
2783
2784                 /* order writes to split_info before kvm_split_mode pointer */
2785                 smp_wmb();
2786         }
2787
2788         for (thr = 0; thr < controlled_threads; ++thr) {
2789                 paca[pcpu + thr].kvm_hstate.tid = thr;
2790                 paca[pcpu + thr].kvm_hstate.napping = 0;
2791                 paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
2792         }
2793
2794         /* Initiate micro-threading (split-core) on POWER8 if required */
2795         if (cmd_bit) {
2796                 unsigned long hid0 = mfspr(SPRN_HID0);
2797
2798                 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
2799                 mb();
2800                 mtspr(SPRN_HID0, hid0);
2801                 isync();
2802                 for (;;) {
2803                         hid0 = mfspr(SPRN_HID0);
2804                         if (hid0 & stat_bit)
2805                                 break;
2806                         cpu_relax();
2807                 }
2808         }
2809
2810         /* Start all the threads */
2811         active = 0;
2812         for (sub = 0; sub < core_info.n_subcores; ++sub) {
2813                 thr = is_power8 ? subcore_thread_map[sub] : sub;
2814                 thr0_done = false;
2815                 active |= 1 << thr;
2816                 pvc = core_info.vc[sub];
2817                 pvc->pcpu = pcpu + thr;
2818                 for_each_runnable_thread(i, vcpu, pvc) {
2819                         kvmppc_start_thread(vcpu, pvc);
2820                         kvmppc_create_dtl_entry(vcpu, pvc);
2821                         trace_kvm_guest_enter(vcpu);
2822                         if (!vcpu->arch.ptid)
2823                                 thr0_done = true;
2824                         active |= 1 << (thr + vcpu->arch.ptid);
2825                 }
2826                 /*
2827                  * We need to start the first thread of each subcore
2828                  * even if it doesn't have a vcpu.
2829                  */
2830                 if (!thr0_done)
2831                         kvmppc_start_thread(NULL, pvc);
2832                 thr += pvc->num_threads;
2833         }
2834
2835         /*
2836          * Ensure that split_info.do_nap is set after setting
2837          * the vcore pointer in the PACA of the secondaries.
2838          */
2839         smp_mb();
2840
2841         /*
2842          * When doing micro-threading, poke the inactive threads as well.
2843          * This gets them to the nap instruction after kvm_do_nap,
2844          * which reduces the time taken to unsplit later.
2845          * For a POWER9 HPT guest on a radix host, we need all the secondary
2846          * threads woken up so they can do the LPCR/LPIDR change.
2847          */
2848         if (cmd_bit || hpt_on_radix) {
2849                 split_info.do_nap = 1;  /* ask secondaries to nap when done */
2850                 for (thr = 1; thr < threads_per_subcore; ++thr)
2851                         if (!(active & (1 << thr)))
2852                                 kvmppc_ipi_thread(pcpu + thr);
2853         }
2854
2855         vc->vcore_state = VCORE_RUNNING;
2856         preempt_disable();
2857
2858         trace_kvmppc_run_core(vc, 0);
2859
2860         for (sub = 0; sub < core_info.n_subcores; ++sub)
2861                 spin_unlock(&core_info.vc[sub]->lock);
2862
2863         /*
2864          * Interrupts will be enabled once we get into the guest,
2865          * so tell lockdep that we're about to enable interrupts.
2866          */
2867         trace_hardirqs_on();
2868
2869         guest_enter();
2870
2871         srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2872
2873         trap = __kvmppc_vcore_entry();
2874
2875         srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2876
2877         guest_exit();
2878
2879         trace_hardirqs_off();
2880         set_irq_happened(trap);
2881
2882         spin_lock(&vc->lock);
2883         /* prevent other vcpu threads from doing kvmppc_start_thread() now */
2884         vc->vcore_state = VCORE_EXITING;
2885
2886         /* wait for secondary threads to finish writing their state to memory */
2887         kvmppc_wait_for_nap(controlled_threads);
2888
2889         /* Return to whole-core mode if we split the core earlier */
2890         if (cmd_bit) {
2891                 unsigned long hid0 = mfspr(SPRN_HID0);
2892                 unsigned long loops = 0;
2893
2894                 hid0 &= ~HID0_POWER8_DYNLPARDIS;
2895                 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
2896                 mb();
2897                 mtspr(SPRN_HID0, hid0);
2898                 isync();
2899                 for (;;) {
2900                         hid0 = mfspr(SPRN_HID0);
2901                         if (!(hid0 & stat_bit))
2902                                 break;
2903                         cpu_relax();
2904                         ++loops;
2905                 }
2906         } else if (hpt_on_radix) {
2907                 /* Wait for all threads to have seen final sync */
2908                 for (thr = 1; thr < controlled_threads; ++thr) {
2909                         while (paca[pcpu + thr].kvm_hstate.kvm_split_mode) {
2910                                 HMT_low();
2911                                 barrier();
2912                         }
2913                         HMT_medium();
2914                 }
2915         }
2916         split_info.do_nap = 0;
2917
2918         kvmppc_set_host_core(pcpu);
2919
2920         local_irq_enable();
2921
2922         /* Let secondaries go back to the offline loop */
2923         for (i = 0; i < controlled_threads; ++i) {
2924                 kvmppc_release_hwthread(pcpu + i);
2925                 if (sip && sip->napped[i])
2926                         kvmppc_ipi_thread(pcpu + i);
2927                 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
2928         }
2929
2930         spin_unlock(&vc->lock);
2931
2932         /* make sure updates to secondary vcpu structs are visible now */
2933         smp_mb();
2934
2935         for (sub = 0; sub < core_info.n_subcores; ++sub) {
2936                 pvc = core_info.vc[sub];
2937                 post_guest_process(pvc, pvc == vc);
2938         }
2939
2940         spin_lock(&vc->lock);
2941         preempt_enable();
2942
2943  out:
2944         vc->vcore_state = VCORE_INACTIVE;
2945         trace_kvmppc_run_core(vc, 1);
2946 }
2947
2948 /*
2949  * Wait for some other vcpu thread to execute us, and
2950  * wake us up when we need to handle something in the host.
2951  */
2952 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
2953                                  struct kvm_vcpu *vcpu, int wait_state)
2954 {
2955         DEFINE_WAIT(wait);
2956
2957         prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
2958         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2959                 spin_unlock(&vc->lock);
2960                 schedule();
2961                 spin_lock(&vc->lock);
2962         }
2963         finish_wait(&vcpu->arch.cpu_run, &wait);
2964 }
2965
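/*
 * Halt-polling heuristics: each vcore keeps its own poll interval in
 * vc->halt_poll_ns.  grow_halt_poll_ns() starts it at 10us (when a grow
 * factor is configured) and then multiplies it by the halt_poll_ns_grow
 * module parameter (e.g. with a grow factor of 2 it would go
 * 10us -> 20us -> 40us), while shrink_halt_poll_ns() divides it by
 * halt_poll_ns_shrink, or resets it to 0 if the shrink factor is 0.
 * kvmppc_vcore_blocked() below decides which of the two to apply and
 * caps the result at halt_poll_ns.
 */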
2966 static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
2967 {
2968         /* 10us base */
2969         if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
2970                 vc->halt_poll_ns = 10000;
2971         else
2972                 vc->halt_poll_ns *= halt_poll_ns_grow;
2973 }
2974
2975 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
2976 {
2977         if (halt_poll_ns_shrink == 0)
2978                 vc->halt_poll_ns = 0;
2979         else
2980                 vc->halt_poll_ns /= halt_poll_ns_shrink;
2981 }
2982
2983 #ifdef CONFIG_KVM_XICS
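/*
 * With XIVE, an interrupt is pending for the vcpu if the saved Pending
 * Interrupt Priority (PIPR) is more favoured (numerically lower) than
 * the vcpu's Current Processor Priority (CPPR).
 */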
2984 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
2985 {
2986         if (!xive_enabled())
2987                 return false;
2988         return vcpu->arch.xive_saved_state.pipr <
2989                 vcpu->arch.xive_saved_state.cppr;
2990 }
2991 #else
2992 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
2993 {
2994         return false;
2995 }
2996 #endif /* CONFIG_KVM_XICS */
2997
2998 static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
2999 {
3000         if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
3001             kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
3002                 return true;
3003
3004         return false;
3005 }
3006
3007 /*
3008  * Check to see if any of the runnable vcpus on the vcore have pending
3009  * exceptions or are no longer ceded
3010  */
3011 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
3012 {
3013         struct kvm_vcpu *vcpu;
3014         int i;
3015
3016         for_each_runnable_thread(i, vcpu, vc) {
3017                 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
3018                         return 1;
3019         }
3020
3021         return 0;
3022 }
3023
3024 /*
3025  * All the vcpus in this vcore are idle, so wait for a decrementer
3026  * or external interrupt to one of the vcpus.  vc->lock is held.
3027  */
3028 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
3029 {
3030         ktime_t cur, start_poll, start_wait;
3031         int do_sleep = 1;
3032         u64 block_ns;
3033         DECLARE_SWAITQUEUE(wait);
3034
3035         /* Poll for pending exceptions and ceded state */
3036         cur = start_poll = ktime_get();
3037         if (vc->halt_poll_ns) {
3038                 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
3039                 ++vc->runner->stat.halt_attempted_poll;
3040
3041                 vc->vcore_state = VCORE_POLLING;
3042                 spin_unlock(&vc->lock);
3043
3044                 do {
3045                         if (kvmppc_vcore_check_block(vc)) {
3046                                 do_sleep = 0;
3047                                 break;
3048                         }
3049                         cur = ktime_get();
3050                 } while (single_task_running() && ktime_before(cur, stop));
3051
3052                 spin_lock(&vc->lock);
3053                 vc->vcore_state = VCORE_INACTIVE;
3054
3055                 if (!do_sleep) {
3056                         ++vc->runner->stat.halt_successful_poll;
3057                         goto out;
3058                 }
3059         }
3060
3061         prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
3062
3063         if (kvmppc_vcore_check_block(vc)) {
3064                 finish_swait(&vc->wq, &wait);
3065                 do_sleep = 0;
3066                 /* If we polled, count this as a successful poll */
3067                 if (vc->halt_poll_ns)
3068                         ++vc->runner->stat.halt_successful_poll;
3069                 goto out;
3070         }
3071
3072         start_wait = ktime_get();
3073
3074         vc->vcore_state = VCORE_SLEEPING;
3075         trace_kvmppc_vcore_blocked(vc, 0);
3076         spin_unlock(&vc->lock);
3077         schedule();
3078         finish_swait(&vc->wq, &wait);
3079         spin_lock(&vc->lock);
3080         vc->vcore_state = VCORE_INACTIVE;
3081         trace_kvmppc_vcore_blocked(vc, 1);
3082         ++vc->runner->stat.halt_successful_wait;
3083
3084         cur = ktime_get();
3085
3086 out:
3087         block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
3088
3089         /* Attribute wait time */
3090         if (do_sleep) {
3091                 vc->runner->stat.halt_wait_ns +=
3092                         ktime_to_ns(cur) - ktime_to_ns(start_wait);
3093                 /* Attribute failed poll time */
3094                 if (vc->halt_poll_ns)
3095                         vc->runner->stat.halt_poll_fail_ns +=
3096                                 ktime_to_ns(start_wait) -
3097                                 ktime_to_ns(start_poll);
3098         } else {
3099                 /* Attribute successful poll time */
3100                 if (vc->halt_poll_ns)
3101                         vc->runner->stat.halt_poll_success_ns +=
3102                                 ktime_to_ns(cur) -
3103                                 ktime_to_ns(start_poll);
3104         }
3105
3106         /* Adjust poll time */
3107         if (halt_poll_ns) {
3108                 if (block_ns <= vc->halt_poll_ns)
3109                         ;
3110                 /* We slept and blocked for longer than the max halt poll time */
3111                 else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
3112                         shrink_halt_poll_ns(vc);
3113                 /* We slept and our poll time is too small */
3114                 else if (vc->halt_poll_ns < halt_poll_ns &&
3115                                 block_ns < halt_poll_ns)
3116                         grow_halt_poll_ns(vc);
3117                 if (vc->halt_poll_ns > halt_poll_ns)
3118                         vc->halt_poll_ns = halt_poll_ns;
3119         } else
3120                 vc->halt_poll_ns = 0;
3121
3122         trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
3123 }
3124
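/*
 * Lazily set up the guest MMU the first time a vcpu is run: allocate
 * the HPT and map the VRMA for a hash guest, and write the partition
 * table entry on POWER9.  Serialized against other vcpus and against
 * MMU reconfiguration by kvm->lock.
 */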
3125 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
3126 {
3127         int r = 0;
3128         struct kvm *kvm = vcpu->kvm;
3129
3130         mutex_lock(&kvm->lock);
3131         if (!kvm->arch.mmu_ready) {
3132                 if (!kvm_is_radix(kvm))
3133                         r = kvmppc_hv_setup_htab_rma(vcpu);
3134                 if (!r) {
3135                         if (cpu_has_feature(CPU_FTR_ARCH_300))
3136                                 kvmppc_setup_partition_table(kvm);
3137                         kvm->arch.mmu_ready = 1;
3138                 }
3139         }
3140         mutex_unlock(&kvm->lock);
3141         return r;
3142 }
3143
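/*
 * Body of the per-vcpu run loop.  Each vcpu task adds itself to its
 * vcore's runnable list; whichever task finds the vcore inactive
 * becomes the "runner" and calls kvmppc_run_core() on behalf of all
 * runnable vcpus, while the others sleep in kvmppc_wait_for_exec()
 * until the core exits or they need to handle something in the host.
 */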
3144 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3145 {
3146         int n_ceded, i, r;
3147         struct kvmppc_vcore *vc;
3148         struct kvm_vcpu *v;
3149
3150         trace_kvmppc_run_vcpu_enter(vcpu);
3151
3152         kvm_run->exit_reason = 0;
3153         vcpu->arch.ret = RESUME_GUEST;
3154         vcpu->arch.trap = 0;
3155         kvmppc_update_vpas(vcpu);
3156
3157         /*
3158          * Synchronize with other threads in this virtual core
3159          */
3160         vc = vcpu->arch.vcore;
3161         spin_lock(&vc->lock);
3162         vcpu->arch.ceded = 0;
3163         vcpu->arch.run_task = current;
3164         vcpu->arch.kvm_run = kvm_run;
3165         vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
3166         vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
3167         vcpu->arch.busy_preempt = TB_NIL;
3168         WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
3169         ++vc->n_runnable;
3170
3171         /*
3172          * This happens the first time this is called for a vcpu.
3173          * If the vcore is already running, we may be able to start
3174          * this thread straight away and have it join in.
3175          */
3176         if (!signal_pending(current)) {
3177                 if (vc->vcore_state == VCORE_PIGGYBACK) {
3178                         if (spin_trylock(&vc->lock)) {
3179                                 if (vc->vcore_state == VCORE_RUNNING &&
3180                                     !VCORE_IS_EXITING(vc)) {
3181                                         kvmppc_create_dtl_entry(vcpu, vc);
3182                                         kvmppc_start_thread(vcpu, vc);
3183                                         trace_kvm_guest_enter(vcpu);
3184                                 }
3185                                 spin_unlock(&vc->lock);
3186                         }
3187                 } else if (vc->vcore_state == VCORE_RUNNING &&
3188                            !VCORE_IS_EXITING(vc)) {
3189                         kvmppc_create_dtl_entry(vcpu, vc);
3190                         kvmppc_start_thread(vcpu, vc);
3191                         trace_kvm_guest_enter(vcpu);
3192                 } else if (vc->vcore_state == VCORE_SLEEPING) {
3193                         swake_up(&vc->wq);
3194                 }
3195
3196         }
3197
3198         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
3199                !signal_pending(current)) {
3200                 /* See if the MMU is ready to go */
3201                 if (!vcpu->kvm->arch.mmu_ready) {
3202                         spin_unlock(&vc->lock);
3203                         r = kvmhv_setup_mmu(vcpu);
3204                         spin_lock(&vc->lock);
3205                         if (r) {
3206                                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3207                                 kvm_run->fail_entry.
3208                                         hardware_entry_failure_reason = 0;
3209                                 vcpu->arch.ret = r;
3210                                 break;
3211                         }
3212                 }
3213
3214                 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
3215                         kvmppc_vcore_end_preempt(vc);
3216
3217                 if (vc->vcore_state != VCORE_INACTIVE) {
3218                         kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
3219                         continue;
3220                 }
3221                 for_each_runnable_thread(i, v, vc) {
3222                         kvmppc_core_prepare_to_enter(v);
3223                         if (signal_pending(v->arch.run_task)) {
3224                                 kvmppc_remove_runnable(vc, v);
3225                                 v->stat.signal_exits++;
3226                                 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
3227                                 v->arch.ret = -EINTR;
3228                                 wake_up(&v->arch.cpu_run);
3229                         }
3230                 }
3231                 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
3232                         break;
3233                 n_ceded = 0;
3234                 for_each_runnable_thread(i, v, vc) {
3235                         if (!kvmppc_vcpu_woken(v))
3236                                 n_ceded += v->arch.ceded;
3237                         else
3238                                 v->arch.ceded = 0;
3239                 }
3240                 vc->runner = vcpu;
3241                 if (n_ceded == vc->n_runnable) {
3242                         kvmppc_vcore_blocked(vc);
3243                 } else if (need_resched()) {
3244                         kvmppc_vcore_preempt(vc);
3245                         /* Let something else run */
3246                         cond_resched_lock(&vc->lock);
3247                         if (vc->vcore_state == VCORE_PREEMPT)
3248                                 kvmppc_vcore_end_preempt(vc);
3249                 } else {
3250                         kvmppc_run_core(vc);
3251                 }
3252                 vc->runner = NULL;
3253         }
3254
3255         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
3256                (vc->vcore_state == VCORE_RUNNING ||
3257                 vc->vcore_state == VCORE_EXITING ||
3258                 vc->vcore_state == VCORE_PIGGYBACK))
3259                 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
3260
3261         if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
3262                 kvmppc_vcore_end_preempt(vc);
3263
3264         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
3265                 kvmppc_remove_runnable(vc, vcpu);
3266                 vcpu->stat.signal_exits++;
3267                 kvm_run->exit_reason = KVM_EXIT_INTR;
3268                 vcpu->arch.ret = -EINTR;
3269         }
3270
3271         if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
3272                 /* Wake up some vcpu to run the core */
3273                 i = -1;
3274                 v = next_runnable_thread(vc, &i);
3275                 wake_up(&v->arch.cpu_run);
3276         }
3277
3278         trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
3279         spin_unlock(&vc->lock);
3280         return vcpu->arch.ret;
3281 }
3282
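/*
 * Top-level vcpu run entry point for HV KVM.  It refuses to enter the
 * guest with an active or suspended transaction, saves the user's EBB,
 * TAR and VRSAVE state, then loops calling kvmppc_run_vcpu(), handling
 * PAPR hcalls, guest page faults and passed-through interrupts until
 * the exit has to be delivered to userspace.
 */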
3283 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
3284 {
3285         int r;
3286         int srcu_idx;
3287         unsigned long ebb_regs[3] = {}; /* shut up GCC */
3288         unsigned long user_tar = 0;
3289         unsigned int user_vrsave;
3290         struct kvm *kvm;
3291
3292         if (!vcpu->arch.sane) {
3293                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3294                 return -EINVAL;
3295         }
3296
3297         /*
3298          * Don't allow entry with a suspended transaction, because
3299          * the guest entry/exit code will lose it.
3300          * If the guest has TM enabled, save away their TM-related SPRs
3301          * (they will get restored by the TM unavailable interrupt).
3302          */
3303 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3304         if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
3305             (current->thread.regs->msr & MSR_TM)) {
3306                 if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
3307                         run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3308                         run->fail_entry.hardware_entry_failure_reason = 0;
3309                         return -EINVAL;
3310                 }
3311                 /* Enable TM so we can read the TM SPRs */
3312                 mtmsr(mfmsr() | MSR_TM);
3313                 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
3314                 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
3315                 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
3316                 current->thread.regs->msr &= ~MSR_TM;
3317         }
3318 #endif
3319
3320         kvmppc_core_prepare_to_enter(vcpu);
3321
3322         /* No need to go into the guest when all we'll do is come back out */
3323         if (signal_pending(current)) {
3324                 run->exit_reason = KVM_EXIT_INTR;
3325                 return -EINTR;
3326         }
3327
3328         kvm = vcpu->kvm;
3329         atomic_inc(&kvm->arch.vcpus_running);
3330         /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
3331         smp_mb();
3332
3333         flush_all_to_thread(current);
3334
3335         /* Save userspace EBB and other register values */
3336         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
3337                 ebb_regs[0] = mfspr(SPRN_EBBHR);
3338                 ebb_regs[1] = mfspr(SPRN_EBBRR);
3339                 ebb_regs[2] = mfspr(SPRN_BESCR);
3340                 user_tar = mfspr(SPRN_TAR);
3341         }
3342         user_vrsave = mfspr(SPRN_VRSAVE);
3343
3344         vcpu->arch.wqp = &vcpu->arch.vcore->wq;
3345         vcpu->arch.pgdir = current->mm->pgd;
3346         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
3347
3348         do {
3349                 r = kvmppc_run_vcpu(run, vcpu);
3350
3351                 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
3352                     !(vcpu->arch.shregs.msr & MSR_PR)) {
3353                         trace_kvm_hcall_enter(vcpu);
3354                         r = kvmppc_pseries_do_hcall(vcpu);
3355                         trace_kvm_hcall_exit(vcpu, r);
3356                         kvmppc_core_prepare_to_enter(vcpu);
3357                 } else if (r == RESUME_PAGE_FAULT) {
3358                         srcu_idx = srcu_read_lock(&kvm->srcu);
3359                         r = kvmppc_book3s_hv_page_fault(run, vcpu,
3360                                 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
3361                         srcu_read_unlock(&kvm->srcu, srcu_idx);
3362                 } else if (r == RESUME_PASSTHROUGH) {
3363                         if (WARN_ON(xive_enabled()))
3364                                 r = H_SUCCESS;
3365                         else
3366                                 r = kvmppc_xics_rm_complete(vcpu, 0);
3367                 }
3368         } while (is_kvmppc_resume_guest(r));
3369
3370         /* Restore userspace EBB and other register values */
3371         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
3372                 mtspr(SPRN_EBBHR, ebb_regs[0]);
3373                 mtspr(SPRN_EBBRR, ebb_regs[1]);
3374                 mtspr(SPRN_BESCR, ebb_regs[2]);
3375                 mtspr(SPRN_TAR, user_tar);
3376                 mtspr(SPRN_FSCR, current->thread.fscr);
3377         }
3378         mtspr(SPRN_VRSAVE, user_vrsave);
3379
3380         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
3381         atomic_dec(&kvm->arch.vcpus_running);
3382         return r;
3383 }
3384
3385 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
3386                                      int shift, int sllp)
3387 {
3388         (*sps)->page_shift = shift;
3389         (*sps)->slb_enc = sllp;
3390         (*sps)->enc[0].page_shift = shift;
3391         (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift);
3392         /*
3393          * Add 16MB MPSS support (may get filtered out by userspace)
3394          */
3395         if (shift != 24) {
3396                 int penc = kvmppc_pgsize_lp_encoding(shift, 24);
3397                 if (penc != -1) {
3398                         (*sps)->enc[1].page_shift = 24;
3399                         (*sps)->enc[1].pte_enc = penc;
3400                 }
3401         }
3402         (*sps)++;
3403 }
3404
3405 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
3406                                          struct kvm_ppc_smmu_info *info)
3407 {
3408         struct kvm_ppc_one_seg_page_size *sps;
3409
3410         /*
3411          * POWER7, POWER8 and POWER9 all support 32 storage keys for data.
3412          * POWER7 doesn't support keys for instruction accesses, but
3413          * POWER8 and POWER9 do.
3414          */
3415         info->data_keys = 32;
3416         info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0;
3417
3418         /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */
3419         info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS;
3420         info->slb_size = 32;
3421
3422         /* We only support these sizes for now, and no multi-size segments */
3423         sps = &info->sps[0];
3424         kvmppc_add_seg_page_size(&sps, 12, 0);
3425         kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
3426         kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
3427
3428         return 0;
3429 }
3430
3431 /*
3432  * Get (and clear) the dirty memory log for a memory slot.
3433  */
3434 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
3435                                          struct kvm_dirty_log *log)
3436 {
3437         struct kvm_memslots *slots;
3438         struct kvm_memory_slot *memslot;
3439         int i, r;
3440         unsigned long n;
3441         unsigned long *buf, *p;
3442         struct kvm_vcpu *vcpu;
3443
3444         mutex_lock(&kvm->slots_lock);
3445
3446         r = -EINVAL;
3447         if (log->slot >= KVM_USER_MEM_SLOTS)
3448                 goto out;
3449
3450         slots = kvm_memslots(kvm);
3451         memslot = id_to_memslot(slots, log->slot);
3452         r = -ENOENT;
3453         if (!memslot->dirty_bitmap)
3454                 goto out;
3455
3456         /*
3457          * Use second half of bitmap area because both HPT and radix
3458          * accumulate bits in the first half.
3459          */
3460         n = kvm_dirty_bitmap_bytes(memslot);
3461         buf = memslot->dirty_bitmap + n / sizeof(long);
3462         memset(buf, 0, n);
3463
3464         if (kvm_is_radix(kvm))
3465                 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
3466         else
3467                 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
3468         if (r)
3469                 goto out;
3470
3471         /*
3472          * We accumulate dirty bits in the first half of the
3473          * memslot's dirty_bitmap area, for when pages are paged
3474          * out or modified by the host directly.  Pick up these
3475          * bits and add them to the map.
3476          */
3477         p = memslot->dirty_bitmap;
3478         for (i = 0; i < n / sizeof(long); ++i)
3479                 buf[i] |= xchg(&p[i], 0);
3480
3481         /* Harvest dirty bits from VPA and DTL updates */
3482         /* Note: we never modify the SLB shadow buffer areas */
3483         kvm_for_each_vcpu(i, vcpu, kvm) {
3484                 spin_lock(&vcpu->arch.vpa_update_lock);
3485                 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
3486                 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
3487                 spin_unlock(&vcpu->arch.vpa_update_lock);
3488         }
3489
3490         r = -EFAULT;
3491         if (copy_to_user(log->dirty_bitmap, buf, n))
3492                 goto out;
3493
3494         r = 0;
3495 out:
3496         mutex_unlock(&kvm->slots_lock);
3497         return r;
3498 }
3499
3500 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
3501                                         struct kvm_memory_slot *dont)
3502 {
3503         if (!dont || free->arch.rmap != dont->arch.rmap) {
3504                 vfree(free->arch.rmap);
3505                 free->arch.rmap = NULL;
3506         }
3507 }
3508
3509 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
3510                                          unsigned long npages)
3511 {
3512         slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
3513         if (!slot->arch.rmap)
3514                 return -ENOMEM;
3515
3516         return 0;
3517 }
3518
3519 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
3520                                         struct kvm_memory_slot *memslot,
3521                                         const struct kvm_userspace_memory_region *mem)
3522 {
3523         return 0;
3524 }
3525
3526 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
3527                                 const struct kvm_userspace_memory_region *mem,
3528                                 const struct kvm_memory_slot *old,
3529                                 const struct kvm_memory_slot *new)
3530 {
3531         unsigned long npages = mem->memory_size >> PAGE_SHIFT;
3532
3533         /*
3534          * If we are creating a new memslot, an address that was
3535          * previously cached as emulated MMIO may no longer be
3536          * emulated MMIO, so invalidate all cached emulated-MMIO
3537          * translations.
3538          */
3539         if (npages)
3540                 atomic64_inc(&kvm->arch.mmio_update);
3541 }
3542
3543 /*
3544  * Update LPCR values in kvm->arch and in vcores.
3545  * Caller must hold kvm->lock.
3546  */
3547 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
3548 {
3549         long int i;
3550         u32 cores_done = 0;
3551
3552         if ((kvm->arch.lpcr & mask) == lpcr)
3553                 return;
3554
3555         kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
3556
3557         for (i = 0; i < KVM_MAX_VCORES; ++i) {
3558                 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
3559                 if (!vc)
3560                         continue;
3561                 spin_lock(&vc->lock);
3562                 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
3563                 spin_unlock(&vc->lock);
3564                 if (++cores_done >= kvm->arch.online_vcores)
3565                         break;
3566         }
3567 }
3568
3569 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
3570 {
3571         return;
3572 }
3573
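/*
 * Write this guest's partition-table entry (used on POWER9 hosts).  For
 * a hash guest, dword 0 holds the VRMA page size plus HTABORG/HTABSIZE;
 * for a radix guest it holds the root of the guest's radix tree.
 * Dword 1 is the process table value supplied by userspace.
 */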
3574 void kvmppc_setup_partition_table(struct kvm *kvm)
3575 {
3576         unsigned long dw0, dw1;
3577
3578         if (!kvm_is_radix(kvm)) {
3579                 /* PS field - page size for VRMA */
3580                 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
3581                         ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
3582                 /* HTABSIZE and HTABORG fields */
3583                 dw0 |= kvm->arch.sdr1;
3584
3585                 /* Second dword as set by userspace */
3586                 dw1 = kvm->arch.process_table;
3587         } else {
3588                 dw0 = PATB_HR | radix__get_tree_size() |
3589                         __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
3590                 dw1 = PATB_GR | kvm->arch.process_table;
3591         }
3592
3593         mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
3594 }
3595
3596 /*
3597  * Set up HPT (hashed page table) and RMA (real-mode area).
3598  * Must be called with kvm->lock held.
3599  */
3600 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
3601 {
3602         int err = 0;
3603         struct kvm *kvm = vcpu->kvm;
3604         unsigned long hva;
3605         struct kvm_memory_slot *memslot;
3606         struct vm_area_struct *vma;
3607         unsigned long lpcr = 0, senc;
3608         unsigned long psize, porder;
3609         int srcu_idx;
3610
3611         /* Allocate hashed page table (if not done already) and reset it */
3612         if (!kvm->arch.hpt.virt) {
3613                 int order = KVM_DEFAULT_HPT_ORDER;
3614                 struct kvm_hpt_info info;
3615
3616                 err = kvmppc_allocate_hpt(&info, order);
3617                 /* If we get here, it means userspace didn't specify a
3618                  * size explicitly.  So, try successively smaller
3619                  * sizes if the default failed. */
3620                 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
3621                         err  = kvmppc_allocate_hpt(&info, order);
3622
3623                 if (err < 0) {
3624                         pr_err("KVM: Couldn't alloc HPT\n");
3625                         goto out;
3626                 }
3627
3628                 kvmppc_set_hpt(kvm, &info);
3629         }
3630
3631         /* Look up the memslot for guest physical address 0 */
3632         srcu_idx = srcu_read_lock(&kvm->srcu);
3633         memslot = gfn_to_memslot(kvm, 0);
3634
3635         /* We must have some memory at 0 by now */
3636         err = -EINVAL;
3637         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
3638                 goto out_srcu;
3639
3640         /* Look up the VMA for the start of this memory slot */
3641         hva = memslot->userspace_addr;
3642         down_read(&current->mm->mmap_sem);
3643         vma = find_vma(current->mm, hva);
3644         if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
3645                 goto up_out;
3646
3647         psize = vma_kernel_pagesize(vma);
3648         porder = __ilog2(psize);
3649
3650         up_read(&current->mm->mmap_sem);
3651
3652         /* We can handle 4k, 64k or 16M pages in the VRMA */
3653         err = -EINVAL;
3654         if (!(psize == 0x1000 || psize == 0x10000 ||
3655               psize == 0x1000000))
3656                 goto out_srcu;
3657
3658         senc = slb_pgsize_encoding(psize);
3659         kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
3660                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3661         /* Create HPTEs in the hash page table for the VRMA */
3662         kvmppc_map_vrma(vcpu, memslot, porder);
3663
3664         /* Update VRMASD field in the LPCR */
3665         if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
3666                 /* the -4 is to account for senc values starting at 0x10 */
3667                 lpcr = senc << (LPCR_VRMASD_SH - 4);
3668                 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
3669         }
3670
3671         /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
3672         smp_wmb();
3673         err = 0;
3674  out_srcu:
3675         srcu_read_unlock(&kvm->srcu, srcu_idx);
3676  out:
3677         return err;
3678
3679  up_out:
3680         up_read(&current->mm->mmap_sem);
3681         goto out_srcu;
3682 }
3683
3684 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
3685 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
3686 {
3687         kvmppc_free_radix(kvm);
3688         kvmppc_update_lpcr(kvm, LPCR_VPM1,
3689                            LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
3690         kvmppc_rmap_reset(kvm);
3691         kvm->arch.radix = 0;
3692         kvm->arch.process_table = 0;
3693         return 0;
3694 }
3695
3696 /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
3697 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
3698 {
3699         int err;
3700
3701         err = kvmppc_init_vm_radix(kvm);
3702         if (err)
3703                 return err;
3704
3705         kvmppc_free_hpt(&kvm->arch.hpt);
3706         kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
3707                            LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
3708         kvm->arch.radix = 1;
3709         return 0;
3710 }
3711
3712 #ifdef CONFIG_KVM_XICS
3713 /*
3714  * Allocate a per-core structure for managing state about which cores are
3715  * running in the host versus the guest and for exchanging data between
3716  * real mode KVM and CPU running in the host.
3717  * This is only done for the first VM.
3718  * The allocated structure stays even if all VMs have stopped.
3719  * It is only freed when the kvm-hv module is unloaded.
3720  * It's OK for this routine to fail; we just don't support host
3721  * core operations like redirecting H_IPI wakeups.
3722  */
3723 void kvmppc_alloc_host_rm_ops(void)
3724 {
3725         struct kvmppc_host_rm_ops *ops;
3726         unsigned long l_ops;
3727         int cpu, core;
3728         int size;
3729
3730         /* Not the first time here? */
3731         if (kvmppc_host_rm_ops_hv != NULL)
3732                 return;
3733
3734         ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
3735         if (!ops)
3736                 return;
3737
3738         size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
3739         ops->rm_core = kzalloc(size, GFP_KERNEL);
3740
3741         if (!ops->rm_core) {
3742                 kfree(ops);
3743                 return;
3744         }
3745
3746         cpus_read_lock();
3747
3748         for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
3749                 if (!cpu_online(cpu))
3750                         continue;
3751
3752                 core = cpu >> threads_shift;
3753                 ops->rm_core[core].rm_state.in_host = 1;
3754         }
3755
3756         ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
3757
3758         /*
3759          * Make the contents of the kvmppc_host_rm_ops structure visible
3760          * to other CPUs before we assign it to the global variable.
3761          * Do an atomic assignment (no locks used here), but if someone
3762          * beats us to it, just free our copy and return.
3763          */
3764         smp_wmb();
3765         l_ops = (unsigned long) ops;
3766
3767         if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
3768                 cpus_read_unlock();
3769                 kfree(ops->rm_core);
3770                 kfree(ops);
3771                 return;
3772         }
3773
3774         cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
3775                                              "ppc/kvm_book3s:prepare",
3776                                              kvmppc_set_host_core,
3777                                              kvmppc_clear_host_core);
3778         cpus_read_unlock();
3779 }
3780
3781 void kvmppc_free_host_rm_ops(void)
3782 {
3783         if (kvmppc_host_rm_ops_hv) {
3784                 cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
3785                 kfree(kvmppc_host_rm_ops_hv->rm_core);
3786                 kfree(kvmppc_host_rm_ops_hv);
3787                 kvmppc_host_rm_ops_hv = NULL;
3788         }
3789 }
3790 #endif
3791
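/*
 * VM creation for HV KVM: allocate an LPID, enable the default hcall
 * set, compute the initial LPCR value, and (if the host is running
 * radix) start the guest out as a radix guest with its page table and
 * partition-table entry already set up.
 */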
3792 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
3793 {
3794         unsigned long lpcr, lpid;
3795         char buf[32];
3796         int ret;
3797
3798         /* Allocate the guest's logical partition ID */
3799
3800         lpid = kvmppc_alloc_lpid();
3801         if ((long)lpid < 0)
3802                 return -ENOMEM;
3803         kvm->arch.lpid = lpid;
3804
3805         kvmppc_alloc_host_rm_ops();
3806
3807         /*
3808          * Since we don't flush the TLB when tearing down a VM,
3809          * and this lpid might have previously been used,
3810          * make sure we flush on each core before running the new VM.
3811          * On POWER9, the tlbie in mmu_partition_table_set_entry()
3812          * does this flush for us.
3813          */
3814         if (!cpu_has_feature(CPU_FTR_ARCH_300))
3815                 cpumask_setall(&kvm->arch.need_tlb_flush);
3816
3817         /* Start out with the default set of hcalls enabled */
3818         memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
3819                sizeof(kvm->arch.enabled_hcalls));
3820
3821         if (!cpu_has_feature(CPU_FTR_ARCH_300))
3822                 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
3823
3824         /* Init LPCR for virtual RMA mode */
3825         kvm->arch.host_lpid = mfspr(SPRN_LPID);
3826         kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
3827         lpcr &= LPCR_PECE | LPCR_LPES;
3828         lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
3829                 LPCR_VPM0 | LPCR_VPM1;
3830         kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
3831                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3832         /* On POWER8 turn on online bit to enable PURR/SPURR */
3833         if (cpu_has_feature(CPU_FTR_ARCH_207S))
3834                 lpcr |= LPCR_ONL;
3835         /*
3836          * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
3837          * Set HVICE bit to enable hypervisor virtualization interrupts.
3838          * Set HEIC to prevent OS interrupts from going to the hypervisor (should
3839          * be unnecessary but better safe than sorry in case we re-enable
3840          * EE in HV mode with this LPCR still set)
3841          */
3842         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
3843                 lpcr &= ~LPCR_VPM0;
3844                 lpcr |= LPCR_HVICE | LPCR_HEIC;
3845
3846                 /*
3847                  * If xive is enabled, we route 0x500 interrupts directly
3848                  * to the guest.
3849                  */
3850                 if (xive_enabled())
3851                         lpcr |= LPCR_LPES;
3852         }
3853
3854         /*
3855          * If the host uses radix, the guest starts out as radix.
3856          */
3857         if (radix_enabled()) {
3858                 kvm->arch.radix = 1;
3859                 kvm->arch.mmu_ready = 1;
3860                 lpcr &= ~LPCR_VPM1;
3861                 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
3862                 ret = kvmppc_init_vm_radix(kvm);
3863                 if (ret) {
3864                         kvmppc_free_lpid(kvm->arch.lpid);
3865                         return ret;
3866                 }
3867                 kvmppc_setup_partition_table(kvm);
3868         }
3869
3870         kvm->arch.lpcr = lpcr;
3871
3872         /* Initialization for future HPT resizes */
3873         kvm->arch.resize_hpt = NULL;
3874
3875         /*
3876          * Work out how many sets the TLB has, for the use of
3877          * the TLB invalidation loop in book3s_hv_rmhandlers.S.
3878          */
3879         if (radix_enabled())
3880                 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;     /* 128 */
3881         else if (cpu_has_feature(CPU_FTR_ARCH_300))
3882                 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;      /* 256 */
3883         else if (cpu_has_feature(CPU_FTR_ARCH_207S))
3884                 kvm->arch.tlb_sets = POWER8_TLB_SETS;           /* 512 */
3885         else
3886                 kvm->arch.tlb_sets = POWER7_TLB_SETS;           /* 128 */
3887
3888         /*
3889          * Track that we now have a HV mode VM active. This blocks secondary
3890          * CPU threads from coming online.
3891          * On POWER9, we only need to do this if the "indep_threads_mode"
3892          * module parameter has been set to N.
3893          */
3894         if (cpu_has_feature(CPU_FTR_ARCH_300))
3895                 kvm->arch.threads_indep = indep_threads_mode;
3896         if (!kvm->arch.threads_indep)
3897                 kvm_hv_vm_activated();
3898
3899         /*
3900          * Initialize smt_mode depending on processor.
3901          * POWER8 and earlier have to use "strict" threading, where
3902          * all vCPUs in a vcore have to run on the same (sub)core,
3903          * whereas on POWER9 the threads can each run a different
3904          * guest.
3905          */
3906         if (!cpu_has_feature(CPU_FTR_ARCH_300))
3907                 kvm->arch.smt_mode = threads_per_subcore;
3908         else
3909                 kvm->arch.smt_mode = 1;
3910         kvm->arch.emul_smt_mode = 1;
3911
3912         /*
3913          * Create a debugfs directory for the VM
3914          */
3915         snprintf(buf, sizeof(buf), "vm%d", current->pid);
3916         kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
3917         if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
3918                 kvmppc_mmu_debugfs_init(kvm);
3919
3920         return 0;
3921 }
3922
3923 static void kvmppc_free_vcores(struct kvm *kvm)
3924 {
3925         long int i;
3926
3927         for (i = 0; i < KVM_MAX_VCORES; ++i)
3928                 kfree(kvm->arch.vcores[i]);
3929         kvm->arch.online_vcores = 0;
3930 }
3931
3932 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
3933 {
3934         debugfs_remove_recursive(kvm->arch.debugfs_dir);
3935
3936         if (!kvm->arch.threads_indep)
3937                 kvm_hv_vm_deactivated();
3938
3939         kvmppc_free_vcores(kvm);
3940
3941         kvmppc_free_lpid(kvm->arch.lpid);
3942
3943         if (kvm_is_radix(kvm))
3944                 kvmppc_free_radix(kvm);
3945         else
3946                 kvmppc_free_hpt(&kvm->arch.hpt);
3947
3948         kvmppc_free_pimap(kvm);
3949 }
3950
3951 /* We don't need to emulate any privileged instructions or dcbz */
3952 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3953                                      unsigned int inst, int *advance)
3954 {
3955         return EMULATE_FAIL;
3956 }
3957
3958 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
3959                                         ulong spr_val)
3960 {
3961         return EMULATE_FAIL;
3962 }
3963
3964 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
3965                                         ulong *spr_val)
3966 {
3967         return EMULATE_FAIL;
3968 }
3969
3970 static int kvmppc_core_check_processor_compat_hv(void)
3971 {
3972         if (!cpu_has_feature(CPU_FTR_HVMODE) ||
3973             !cpu_has_feature(CPU_FTR_ARCH_206))
3974                 return -EIO;
3975
3976         return 0;
3977 }
3978
3979 #ifdef CONFIG_KVM_XICS
3980
3981 void kvmppc_free_pimap(struct kvm *kvm)
3982 {
3983         kfree(kvm->arch.pimap);
3984 }
3985
3986 static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
3987 {
3988         return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
3989 }
3990
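/*
 * Map a host interrupt to a guest interrupt source so that it can be
 * handled (in particular EOI'd) in KVM real mode where possible:
 * record the mapping in the per-VM pimap and tell the XICS or XIVE
 * code about it.
 */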
3991 static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
3992 {
3993         struct irq_desc *desc;
3994         struct kvmppc_irq_map *irq_map;
3995         struct kvmppc_passthru_irqmap *pimap;
3996         struct irq_chip *chip;
3997         int i, rc = 0;
3998
3999         if (!kvm_irq_bypass)
4000                 return 1;
4001
4002         desc = irq_to_desc(host_irq);
4003         if (!desc)
4004                 return -EIO;
4005
4006         mutex_lock(&kvm->lock);
4007
4008         pimap = kvm->arch.pimap;
4009         if (pimap == NULL) {
4010                 /* First call, allocate structure to hold IRQ map */
4011                 pimap = kvmppc_alloc_pimap();
4012                 if (pimap == NULL) {
4013                         mutex_unlock(&kvm->lock);
4014                         return -ENOMEM;
4015                 }
4016                 kvm->arch.pimap = pimap;
4017         }
4018
4019         /*
4020          * For now, we only support interrupts for which the EOI operation
4021          * is an OPAL call followed by a write to XIRR (since that's
4022          * what our real-mode EOI code does), or a XIVE interrupt.
4023          */
4024         chip = irq_data_get_irq_chip(&desc->irq_data);
4025         if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
4026                 pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
4027                         host_irq, guest_gsi);
4028                 mutex_unlock(&kvm->lock);
4029                 return -ENOENT;
4030         }
4031
4032         /*
4033          * See if we already have an entry for this guest IRQ number.
4034          * If it's mapped to a hardware IRQ number, that's an error,
4035          * otherwise re-use this entry.
4036          */
4037         for (i = 0; i < pimap->n_mapped; i++) {
4038                 if (guest_gsi == pimap->mapped[i].v_hwirq) {
4039                         if (pimap->mapped[i].r_hwirq) {
4040                                 mutex_unlock(&kvm->lock);
4041                                 return -EINVAL;
4042                         }
4043                         break;
4044                 }
4045         }
4046
4047         if (i == KVMPPC_PIRQ_MAPPED) {
4048                 mutex_unlock(&kvm->lock);
4049                 return -EAGAIN;         /* table is full */
4050         }
4051
4052         irq_map = &pimap->mapped[i];
4053
4054         irq_map->v_hwirq = guest_gsi;
4055         irq_map->desc = desc;
4056
4057         /*
4058          * Order the above two stores before the next to serialize with
4059          * the KVM real mode handler.
4060          */
4061         smp_wmb();
4062         irq_map->r_hwirq = desc->irq_data.hwirq;
4063
4064         if (i == pimap->n_mapped)
4065                 pimap->n_mapped++;
4066
4067         if (xive_enabled())
4068                 rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
4069         else
4070                 kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
4071         if (rc)
4072                 irq_map->r_hwirq = 0;
4073
4074         mutex_unlock(&kvm->lock);
4075
4076         return 0;
4077 }
4078
4079 static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
4080 {
4081         struct irq_desc *desc;
4082         struct kvmppc_passthru_irqmap *pimap;
4083         int i, rc = 0;
4084
4085         if (!kvm_irq_bypass)
4086                 return 0;
4087
4088         desc = irq_to_desc(host_irq);
4089         if (!desc)
4090                 return -EIO;
4091
4092         mutex_lock(&kvm->lock);
4093         if (!kvm->arch.pimap)
4094                 goto unlock;
4095
4096         pimap = kvm->arch.pimap;
4097
4098         for (i = 0; i < pimap->n_mapped; i++) {
4099                 if (guest_gsi == pimap->mapped[i].v_hwirq)
4100                         break;
4101         }
4102
4103         if (i == pimap->n_mapped) {
4104                 mutex_unlock(&kvm->lock);
4105                 return -ENODEV;
4106         }
4107
4108         if (xive_enabled())
4109                 rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
4110         else
4111                 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
4112
4113         /* invalidate the entry (what to do on error from the above?) */
4114         pimap->mapped[i].r_hwirq = 0;
4115
4116         /*
4117          * We don't free this structure even when the count goes to
4118          * zero. The structure is freed when we destroy the VM.
4119          */
4120  unlock:
4121         mutex_unlock(&kvm->lock);
4122         return rc;
4123 }
4124
4125 static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
4126                                              struct irq_bypass_producer *prod)
4127 {
4128         int ret = 0;
4129         struct kvm_kernel_irqfd *irqfd =
4130                 container_of(cons, struct kvm_kernel_irqfd, consumer);
4131
4132         irqfd->producer = prod;
4133
4134         ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
4135         if (ret)
4136                 pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
4137                         prod->irq, irqfd->gsi, ret);
4138
4139         return ret;
4140 }
4141
4142 static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
4143                                               struct irq_bypass_producer *prod)
4144 {
4145         int ret;
4146         struct kvm_kernel_irqfd *irqfd =
4147                 container_of(cons, struct kvm_kernel_irqfd, consumer);
4148
4149         irqfd->producer = NULL;
4150
4151         /*
4152          * When the producer for a consumer is unregistered, we change back
4153          * to the default external interrupt handling mode - KVM real mode
4154          * will switch interrupt handling back to the host.
4155          */
4156         ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
4157         if (ret)
4158                 pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
4159                         prod->irq, irqfd->gsi, ret);
4160 }
4161 #endif
4162
4163 static long kvm_arch_vm_ioctl_hv(struct file *filp,
4164                                  unsigned int ioctl, unsigned long arg)
4165 {
4166         struct kvm *kvm __maybe_unused = filp->private_data;
4167         void __user *argp = (void __user *)arg;
4168         long r;
4169
4170         switch (ioctl) {
4171
4172         case KVM_PPC_ALLOCATE_HTAB: {
4173                 u32 htab_order;
4174
4175                 r = -EFAULT;
4176                 if (get_user(htab_order, (u32 __user *)argp))
4177                         break;
4178                 r = kvmppc_alloc_reset_hpt(kvm, htab_order);
4179                 if (r)
4180                         break;
4181                 r = 0;
4182                 break;
4183         }
4184
4185         case KVM_PPC_GET_HTAB_FD: {
4186                 struct kvm_get_htab_fd ghf;
4187
4188                 r = -EFAULT;
4189                 if (copy_from_user(&ghf, argp, sizeof(ghf)))
4190                         break;
4191                 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
4192                 break;
4193         }
4194
4195         case KVM_PPC_RESIZE_HPT_PREPARE: {
4196                 struct kvm_ppc_resize_hpt rhpt;
4197
4198                 r = -EFAULT;
4199                 if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
4200                         break;
4201
4202                 r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
4203                 break;
4204         }
4205
4206         case KVM_PPC_RESIZE_HPT_COMMIT: {
4207                 struct kvm_ppc_resize_hpt rhpt;
4208
4209                 r = -EFAULT;
4210                 if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
4211                         break;
4212
4213                 r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
4214                 break;
4215         }
4216
4217         default:
4218                 r = -ENOTTY;
4219         }
4220
4221         return r;
4222 }
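
/*
 * A minimal userspace sketch (not part of this file) of how the
 * KVM_PPC_ALLOCATE_HTAB ioctl handled above might be driven, assuming
 * vm_fd is an open VM file descriptor and a 2^26-byte (64MB) HPT is
 * wanted:
 *
 *	__u32 htab_order = 26;
 *
 *	if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &htab_order) < 0)
 *		err(1, "KVM_PPC_ALLOCATE_HTAB");
 */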
4223
4224 /*
4225  * List of hcall numbers to enable by default.
4226  * For compatibility with old userspace, we enable by default
4227  * all hcalls that were implemented before the hcall-enabling
4228  * facility was added.  Note this list should not include H_RTAS.
4229  */
4230 static unsigned int default_hcall_list[] = {
4231         H_REMOVE,
4232         H_ENTER,
4233         H_READ,
4234         H_PROTECT,
4235         H_BULK_REMOVE,
4236         H_GET_TCE,
4237         H_PUT_TCE,
4238         H_SET_DABR,
4239         H_SET_XDABR,
4240         H_CEDE,
4241         H_PROD,
4242         H_CONFER,
4243         H_REGISTER_VPA,
4244 #ifdef CONFIG_KVM_XICS
4245         H_EOI,
4246         H_CPPR,
4247         H_IPI,
4248         H_IPOLL,
4249         H_XIRR,
4250         H_XIRR_X,
4251 #endif
4252         0
4253 };
4254
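/*
 * Hypercall numbers are all multiples of 4, which is why the
 * enabled-hcalls bitmap is indexed by hcall number divided by 4 below.
 */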
4255 static void init_default_hcalls(void)
4256 {
4257         int i;
4258         unsigned int hcall;
4259
4260         for (i = 0; default_hcall_list[i]; ++i) {
4261                 hcall = default_hcall_list[i];
4262                 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
4263                 __set_bit(hcall / 4, default_enabled_hcalls);
4264         }
4265 }
4266
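/*
 * Back-end for the KVM_PPC_CONFIGURE_V3_MMU VM ioctl (the configure_mmu
 * hook): validate the requested radix/hash mode and process-table
 * value, switch the guest's MMU mode if necessary (only when no vcpus
 * are running), and update the partition table and LPCR[GTSE].
 */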
4267 static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
4268 {
4269         unsigned long lpcr;
4270         int radix;
4271         int err;
4272
4273         /* If not on a POWER9, reject it */
4274         if (!cpu_has_feature(CPU_FTR_ARCH_300))
4275                 return -ENODEV;
4276
4277         /* If any unknown flags set, reject it */
4278         if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
4279                 return -EINVAL;
4280
4281         /* GR (guest radix) bit in process_table field must match */
4282         radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
4283         if (!!(cfg->process_table & PATB_GR) != radix)
4284                 return -EINVAL;
4285
4286         /* Process table size field must be reasonable, i.e. <= 24 */
4287         if ((cfg->process_table & PRTS_MASK) > 24)
4288                 return -EINVAL;
4289
4290         /* We can change a guest to/from radix now, if the host is radix */
4291         if (radix && !radix_enabled())
4292                 return -EINVAL;
4293
4294         mutex_lock(&kvm->lock);
4295         if (radix != kvm_is_radix(kvm)) {
4296                 if (kvm->arch.mmu_ready) {
4297                         kvm->arch.mmu_ready = 0;
4298                         /* order mmu_ready vs. vcpus_running */
4299                         smp_mb();
4300                         if (atomic_read(&kvm->arch.vcpus_running)) {
4301                                 kvm->arch.mmu_ready = 1;
4302                                 err = -EBUSY;
4303                                 goto out_unlock;
4304                         }
4305                 }
4306                 if (radix)
4307                         err = kvmppc_switch_mmu_to_radix(kvm);
4308                 else
4309                         err = kvmppc_switch_mmu_to_hpt(kvm);
4310                 if (err)
4311                         goto out_unlock;
4312         }
4313
4314         kvm->arch.process_table = cfg->process_table;
4315         kvmppc_setup_partition_table(kvm);
4316
4317         lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
4318         kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
4319         err = 0;
4320
4321  out_unlock:
4322         mutex_unlock(&kvm->lock);
4323         return err;
4324 }
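
/*
 * For illustration only (not part of the kernel build): userspace reaches
 * kvmhv_configure_mmu() through the KVM_PPC_CONFIGURE_V3_MMU vm ioctl.
 * A minimal sketch for selecting radix mode, assuming vmfd is a VM file
 * descriptor, prtb_gpa is the guest address of the process table and prts
 * its encoded size field (both illustrative names), and bit 63 is the
 * guest-radix bit checked as PATB_GR above:
 *
 *	struct kvm_ppc_mmuv3_cfg cfg = {
 *		.flags = KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE,
 *		.process_table = prtb_gpa | (1ULL << 63) | prts,
 *	};
 *	ioctl(vmfd, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
 */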
4325
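/*
 * HV-specific implementations of the kvmppc_ops callbacks; installed as
 * the active kvmppc_hv_ops in kvmppc_book3s_init_hv() below.
 */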
4326 static struct kvmppc_ops kvm_ops_hv = {
4327         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
4328         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
4329         .get_one_reg = kvmppc_get_one_reg_hv,
4330         .set_one_reg = kvmppc_set_one_reg_hv,
4331         .vcpu_load   = kvmppc_core_vcpu_load_hv,
4332         .vcpu_put    = kvmppc_core_vcpu_put_hv,
4333         .set_msr     = kvmppc_set_msr_hv,
4334         .vcpu_run    = kvmppc_vcpu_run_hv,
4335         .vcpu_create = kvmppc_core_vcpu_create_hv,
4336         .vcpu_free   = kvmppc_core_vcpu_free_hv,
4337         .check_requests = kvmppc_core_check_requests_hv,
4338         .get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
4339         .flush_memslot  = kvmppc_core_flush_memslot_hv,
4340         .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
4341         .commit_memory_region  = kvmppc_core_commit_memory_region_hv,
4342         .unmap_hva = kvm_unmap_hva_hv,
4343         .unmap_hva_range = kvm_unmap_hva_range_hv,
4344         .age_hva  = kvm_age_hva_hv,
4345         .test_age_hva = kvm_test_age_hva_hv,
4346         .set_spte_hva = kvm_set_spte_hva_hv,
4347         .mmu_destroy  = kvmppc_mmu_destroy_hv,
4348         .free_memslot = kvmppc_core_free_memslot_hv,
4349         .create_memslot = kvmppc_core_create_memslot_hv,
4350         .init_vm =  kvmppc_core_init_vm_hv,
4351         .destroy_vm = kvmppc_core_destroy_vm_hv,
4352         .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
4353         .emulate_op = kvmppc_core_emulate_op_hv,
4354         .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
4355         .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
4356         .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
4357         .arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
4358         .hcall_implemented = kvmppc_hcall_impl_hv,
4359 #ifdef CONFIG_KVM_XICS
4360         .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
4361         .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
4362 #endif
4363         .configure_mmu = kvmhv_configure_mmu,
4364         .get_rmmu_info = kvmhv_get_rmmu_info,
4365         .set_smt_mode = kvmhv_set_smt_mode,
4366 };
4367
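/*
 * Allocate one sibling_subcore_state per physical core and point every
 * thread's paca at it.  This shared state is used by the HMI (hypervisor
 * maintenance interrupt) handling code to coordinate the threads of a
 * core that has been split into subcores.
 */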
4368 static int kvm_init_subcore_bitmap(void)
4369 {
4370         int i, j;
4371         int nr_cores = cpu_nr_cores();
4372         struct sibling_subcore_state *sibling_subcore_state;
4373
4374         for (i = 0; i < nr_cores; i++) {
4375                 int first_cpu = i * threads_per_core;
4376                 int node = cpu_to_node(first_cpu);
4377
4378                 /* Ignore if it is already allocated. */
4379                 if (paca[first_cpu].sibling_subcore_state)
4380                         continue;
4381
4382                 sibling_subcore_state =
4383                         kzalloc_node(sizeof(struct sibling_subcore_state),
4384                                      GFP_KERNEL, node);
4385                 if (!sibling_subcore_state)
4386                         return -ENOMEM;
4390
4391                 for (j = 0; j < threads_per_core; j++) {
4392                         int cpu = first_cpu + j;
4393
4394                         paca[cpu].sibling_subcore_state = sibling_subcore_state;
4395                 }
4396         }
4397         return 0;
4398 }
4399
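/* Radix guests need a POWER9 (ISA v3.00) host running with the radix MMU */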
4400 static int kvmppc_radix_possible(void)
4401 {
4402         return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
4403 }
4404
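/*
 * Module initialization: check processor compatibility, set up the
 * per-core subcore state, make sure the interrupt controller is
 * reachable, register the HV ops and initialize the MMU support code.
 */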
4405 static int kvmppc_book3s_init_hv(void)
4406 {
4407         int r;
4408         /*
4409          * FIXME: Do we need to run this compatibility check on all CPUs?
4410          */
4411         r = kvmppc_core_check_processor_compat_hv();
4412         if (r < 0)
4413                 return -ENODEV;
4414
4415         r = kvm_init_subcore_bitmap();
4416         if (r)
4417                 return r;
4418
4419         /*
4420          * Unless the host is using the XIVE interrupt controller, we need
4421          * a way of accessing XICS: either directly, via
4422          * paca[cpu].kvm_hstate.xics_phys, or indirectly, via OPAL.
4423          */
4424 #ifdef CONFIG_SMP
4425         if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
4426                 struct device_node *np;
4427
4428                 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
4429                 if (!np) {
4430                         pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
4431                         return -ENODEV;
4432                 }
4433         }
4434 #endif
4435
4436         kvm_ops_hv.owner = THIS_MODULE;
4437         kvmppc_hv_ops = &kvm_ops_hv;
4438
4439         init_default_hcalls();
4440
4441         init_vcore_lists();
4442
4443         r = kvmppc_mmu_hv_init();
4444         if (r)
4445                 return r;
4446
4447         if (kvmppc_radix_possible())
4448                 r = kvmppc_radix_init();
4449         return r;
4450 }
4451
4452 static void kvmppc_book3s_exit_hv(void)
4453 {
4454         kvmppc_free_host_rm_ops();
4455         if (kvmppc_radix_possible())
4456                 kvmppc_radix_exit();
4457         kvmppc_hv_ops = NULL;
4458 }
4459
4460 module_init(kvmppc_book3s_init_hv);
4461 module_exit(kvmppc_book3s_exit_hv);
4462 MODULE_LICENSE("GPL");
4463 MODULE_ALIAS_MISCDEV(KVM_MINOR);
4464 MODULE_ALIAS("devname:kvm");