/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void check_if_tm_restore_required(struct task_struct *tsk)
{
        /*
         * If we are saving the current thread's registers, and the
         * thread is in a transactional state, set the TIF_RESTORE_TM
         * bit so that we know to restore the registers before
         * returning to userspace.
         */
        if (tsk == current && tsk->thread.regs &&
            MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
            !test_thread_flag(TIF_RESTORE_TM)) {
                tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
                set_thread_flag(TIF_RESTORE_TM);
        }
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
        strict_msr_control = true;
        pr_info("Enabling strict facility control\n");

        return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

void msr_check_and_set(unsigned long bits)
{
        unsigned long oldmsr = mfmsr();
        unsigned long newmsr;

        newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                newmsr |= MSR_VSX;
#endif

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);
}

void __msr_check_and_clear(unsigned long bits)
{
        unsigned long oldmsr = mfmsr();
        unsigned long newmsr;

        newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
                newmsr &= ~MSR_VSX;
#endif

        if (oldmsr != newmsr)
                mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
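
/*
 * Typical usage of the pair above (an illustrative sketch mirroring
 * what giveup_fpu() below does, not a separate API):
 *
 *      msr_check_and_set(MSR_FP);      // make the facility usable
 *      ...touch the FP register state...
 *      msr_check_and_clear(MSR_FP);    // turn it back off
 *
 * Note that with CONFIG_VSX, setting or clearing MSR_FP also sets or
 * clears MSR_VSX, since VSX instructions overlap the FP registers.
 */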

#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
        save_fpu(tsk);
        tsk->thread.regs->msr &= ~MSR_FP;
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_fpu(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_FP);
        __giveup_fpu(tsk);
        msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
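                /*
                 * Illustrative interleaving of that race (sketch, not
                 * from the original source):
                 *   1. MSR_FP is seen set, so we begin saving the FP regs
                 *   2. we are preempted mid-save; another task takes the
                 *      FPU and clobbers the not-yet-saved FP registers
                 *   3. we resume and store those clobbered values into
                 *      the thread_struct as if they were still ours
                 */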
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

        msr_check_and_set(MSR_FP);

        if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
                check_if_tm_restore_required(current);
                __giveup_fpu(current);
        }
}
EXPORT_SYMBOL(enable_kernel_fp);

static int restore_fp(struct task_struct *tsk)
{
        if (tsk->thread.load_fp) {
                load_fp_state(&current->thread.fp_state);
                current->thread.load_fp++;
                return 1;
        }
        return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
        save_altivec(tsk);
        tsk->thread.regs->msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_altivec(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_VEC);
        __giveup_altivec(tsk);
        msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

        msr_check_and_set(MSR_VEC);

        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
                check_if_tm_restore_required(current);
                __giveup_altivec(current);
        }
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
                        BUG_ON(tsk != current);
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
        if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
                load_vr_state(&tsk->thread.vr_state);
                tsk->thread.used_vr = 1;
                tsk->thread.load_vec++;

                return 1;
        }
        return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
        if (tsk->thread.regs->msr & MSR_FP)
                __giveup_fpu(tsk);
        if (tsk->thread.regs->msr & MSR_VEC)
                __giveup_altivec(tsk);
        tsk->thread.regs->msr &= ~MSR_VSX;
}

static void giveup_vsx(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        __giveup_vsx(tsk);
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

static void save_vsx(struct task_struct *tsk)
{
        if (tsk->thread.regs->msr & MSR_FP)
                save_fpu(tsk);
        if (tsk->thread.regs->msr & MSR_VEC)
                save_altivec(tsk);
}

void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

        msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
                check_if_tm_restore_required(current);
                if (current->thread.regs->msr & MSR_FP)
                        __giveup_fpu(current);
                if (current->thread.regs->msr & MSR_VEC)
                        __giveup_altivec(current);
                __giveup_vsx(current);
        }
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
                        BUG_ON(tsk != current);
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
        if (cpu_has_feature(CPU_FTR_VSX)) {
                tsk->thread.used_vsr = 1;
                return 1;
        }

        return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
static inline void save_vsx(struct task_struct *tsk) { }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
        check_if_tm_restore_required(tsk);

        msr_check_and_set(MSR_SPE);
        __giveup_spe(tsk);
        msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

        msr_check_and_set(MSR_SPE);

        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
                check_if_tm_restore_required(current);
                __giveup_spe(current);
        }
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
                        BUG_ON(tsk != current);
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
        msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
                msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
        if (cpu_has_feature(CPU_FTR_SPE))
                msr_all_available |= MSR_SPE;
#endif

        return 0;
}
early_initcall(init_msr_all_available);

void giveup_all(struct task_struct *tsk)
{
        unsigned long usermsr;

        if (!tsk->thread.regs)
                return;

        usermsr = tsk->thread.regs->msr;

        if ((usermsr & msr_all_available) == 0)
                return;

        msr_check_and_set(msr_all_available);

#ifdef CONFIG_PPC_FPU
        if (usermsr & MSR_FP)
                __giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
        if (usermsr & MSR_VEC)
                __giveup_altivec(tsk);
#endif
#ifdef CONFIG_VSX
        if (usermsr & MSR_VSX)
                __giveup_vsx(tsk);
#endif
#ifdef CONFIG_SPE
        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);
#endif

        msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

void restore_math(struct pt_regs *regs)
{
        unsigned long msr;

        if (!current->thread.load_fp && !loadvec(current->thread))
                return;

        msr = regs->msr;
        msr_check_and_set(msr_all_available);

        /*
         * Only reload if the bit is not set in the user MSR; the bit
         * being set indicates that the registers are hot.
         */
        if ((!(msr & MSR_FP)) && restore_fp(current))
                msr |= MSR_FP | current->thread.fpexc_mode;

        if ((!(msr & MSR_VEC)) && restore_altivec(current))
                msr |= MSR_VEC;

        if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
                        restore_vsx(current)) {
                msr |= MSR_VSX;
        }

        msr_check_and_clear(msr_all_available);

        regs->msr = msr;
}

void save_all(struct task_struct *tsk)
{
        unsigned long usermsr;

        if (!tsk->thread.regs)
                return;

        usermsr = tsk->thread.regs->msr;

        if ((usermsr & msr_all_available) == 0)
                return;

        msr_check_and_set(msr_all_available);

        /*
         * Given the way the register state is laid out in hardware,
         * save_vsx() boils down to a save_fpu() plus a save_altivec().
         */
        if (usermsr & MSR_VSX) {
                save_vsx(tsk);
        } else {
                if (usermsr & MSR_FP)
                        save_fpu(tsk);

                if (usermsr & MSR_VEC)
                        save_altivec(tsk);
        }

        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);

        msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                BUG_ON(tsk != current);
                save_all(tsk);

#ifdef CONFIG_SPE
                if (tsk->thread.regs->msr & MSR_SPE)
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

                preempt_enable();
        }
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
                    unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_break_match(regs))
                return;

        /* Clear the breakpoint */
        hw_breakpoint_disable();

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
        thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
        thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
         */
        thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
        /*
         * We could have inherited MSR_DE from userspace, since
         * it doesn't get cleared on exception entry.  Make sure
         * MSR_DE is clear before we enable any debug events.
         */
        mtmsr(mfmsr() & ~MSR_DE);

        mtspr(SPRN_IAC1, debug->iac1);
        mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, debug->iac3);
        mtspr(SPRN_IAC4, debug->iac4);
#endif
        mtspr(SPRN_DAC1, debug->dac1);
        mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, debug->dvc1);
        mtspr(SPRN_DVC2, debug->dvc2);
#endif
        mtspr(SPRN_DBCR0, debug->dbcr0);
        mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
        if ((current->thread.debug.dbcr0 & DBCR0_IDM)
                || (new_debug->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->hw_brk.address = 0;
        thread->hw_brk.type = 0;
        set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
        return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DABR, dabr);
        if (cpu_has_feature(CPU_FTR_DABRX))
                mtspr(SPRN_DABRX, dabrx);
        return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
        unsigned long dabr, dabrx;

        dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
        dabrx = ((brk->type >> 3) & 0x7);

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
        unsigned long dawr, dawrx, mrd;

        dawr = brk->address;

        dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
                                   << (63 - 58); /* read/write bits */
        dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
                                   << (63 - 59); /* translate */
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
                                   >> 3; /* PRIV bits */
        /*
         * The DAWR length is stored in field MRD, bits 48:53.  It matches
         * the range in doublewords (64 bits), biased by -1, e.g.
         * 0b000000 = 1 DW and 0b111111 = 64 DW.
         * brk->len is in bytes.
         * This aligns up to double word size, shifts and does the bias.
         */
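        /*
         * Worked example (illustrative, not from the original source):
         * brk->len = 24 bytes gives mrd = ((24 + 7) >> 3) - 1 = 2,
         * i.e. 0b000010, which encodes a 3-doubleword (24-byte) range.
         */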
        mrd = ((brk->len + 7) >> 3) - 1;
        dawrx |= (mrd & 0x3f) << (63 - 53);

        if (ppc_md.set_dawr)
                return ppc_md.set_dawr(dawr, dawrx);
        mtspr(SPRN_DAWR, dawr);
        mtspr(SPRN_DAWRX, dawrx);
        return 0;
}

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
        memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

        if (cpu_has_feature(CPU_FTR_DAWR))
                set_dawr(brk);
        else
                set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
        preempt_disable();
        __set_breakpoint(brk);
        preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                              struct arch_hw_breakpoint *b)
{
        if (a->address != b->address)
                return false;
        if (a->type != b->type)
                return false;
        if (a->len != b->len)
                return false;
        return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
                              struct thread_info *ti, uint8_t cause)
{
        unsigned long msr_diff = 0;

        /*
         * If FP/VSX registers have already been saved to the
         * thread_struct, move them to the transact_fp array.
         * We clear the TIF_RESTORE_TM bit since after the reclaim
         * the thread will no longer be transactional.
         */
        if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
                msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
                if (msr_diff & MSR_FP)
                        memcpy(&thr->transact_fp, &thr->fp_state,
                               sizeof(struct thread_fp_state));
                if (msr_diff & MSR_VEC)
                        memcpy(&thr->transact_vr, &thr->vr_state,
                               sizeof(struct thread_vr_state));
                clear_ti_thread_flag(ti, TIF_RESTORE_TM);
                msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
        }

        /*
         * Use the current MSR TM suspended bit to track if we have
         * checkpointed state outstanding.
         * On signal delivery, we'd normally reclaim the checkpointed
         * state to obtain stack pointer (see: get_tm_stackpointer()).
         * This will then directly return to userspace without going
         * through __switch_to(). However, if the stack frame is bad,
         * we need to exit this thread which calls __switch_to() which
         * will again attempt to reclaim the already saved tm state.
         * Hence we need to check that we've not already reclaimed
         * this state.
         * We do this using the current MSR, rather than tracking it in
         * some specific thread_struct bit, as it has the additional
         * benefit of checking for a potential TM bad thing exception.
         */
        if (!MSR_TM_SUSPENDED(mfmsr()))
                return;

        tm_reclaim(thr, thr->regs->msr, cause);

        /* Having done the reclaim, we now have the checkpointed
         * FP/VSX values in the registers.  These might be valid
         * even if we have previously called enable_kernel_fp() or
         * flush_fp_to_thread(), so update thr->regs->msr to
         * indicate their current validity.
         */
        thr->regs->msr |= msr_diff;
}

void tm_reclaim_current(uint8_t cause)
{
        tm_enable();
        tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
        /* We have to work out if we're switching from/to a task that's in the
         * middle of a transaction.
         *
         * In switching we need to maintain a 2nd register state as
         * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
         * checkpointed (tbegin) state in ckpt_regs and saves the transactional
         * (current) FPRs into oldtask->thread.transact_fpr[].
         *
         * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
         */
        struct thread_struct *thr = &tsk->thread;

        if (!thr->regs)
                return;

        if (!MSR_TM_ACTIVE(thr->regs->msr))
                goto out_and_saveregs;

        /* Stash the original thread MSR, as giveup_fpu et al will
         * modify it.  We hold onto it to see whether the task used
         * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
         * ckpt_regs.msr is already set.
         */
        if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
                thr->ckpt_regs.msr = thr->regs->msr;

        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
                 tsk->pid, thr->regs->nip,
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);

        tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);

out_and_saveregs:
        /* Always save the regs here, even if a transaction's not active.
         * This context-switches a thread's TM info SPRs.  We do it here to
         * be consistent with the restore path (in recheckpoint) which
         * cannot happen later in _switch().
         */
        tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread,
                              unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
                     unsigned long orig_msr)
{
        unsigned long flags;

        /* We really can't be interrupted here because the TEXASR registers
         * can't change and, later in the trecheckpoint code, we have a
         * userspace R1.  So let's hard disable over this region.
         */
        local_irq_save(flags);
        hard_irq_disable();

        /* The TM SPRs are restored here, so that TEXASR.FS can be set
         * before the trecheckpoint and no explosion occurs.
         */
        tm_restore_sprs(thread);

        __tm_recheckpoint(thread, orig_msr);

        local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
        unsigned long msr;

        if (!cpu_has_feature(CPU_FTR_TM))
                return;

        /* Recheckpoint the registers of the thread we're about to switch to.
         *
         * If the task was using FP, we non-lazily reload both the original and
         * the speculative FP register states.  This is because the kernel
         * doesn't see if/when a TM rollback occurs, so if we take an FP
         * unavailable exception later, we are unable to determine which set
         * of FP regs needs to be restored.
         */
        if (!new->thread.regs)
                return;

        if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
                tm_restore_sprs(&new->thread);
                return;
        }
        msr = new->thread.ckpt_regs.msr;
        /* Recheckpoint to restore original checkpointed register state. */
        TM_DEBUG("*** tm_recheckpoint of pid %d "
                 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
                 new->pid, new->thread.regs->msr, msr);

        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&new->thread, msr);

        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
                do_load_up_transact_fpu(&new->thread);
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
#endif
        /* We may as well turn on VSX too since all the state is restored now */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;

        TM_DEBUG("*** tm_recheckpoint of pid %d complete "
                 "(kernel msr 0x%lx)\n",
                 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
        if (cpu_has_feature(CPU_FTR_TM)) {
                tm_enable();
                tm_reclaim_task(prev);
        }
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded for as long as the
 * transaction continues.  The reason is that if we didn't, and
 * subsequently got an FP/VMX/VSX unavailable interrupt inside a
 * transaction, we don't know whether it's the same transaction,
 * and thus we don't know which of the checkpointed state and the
 * transactional state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
        unsigned long msr_diff;

        clear_thread_flag(TIF_RESTORE_TM);
        if (!MSR_TM_ACTIVE(regs->msr))
                return;

        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

        restore_math(regs);

        regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_DSCR))
                t->dscr = mfspr(SPRN_DSCR);

        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                t->bescr = mfspr(SPRN_BESCR);
                t->ebbhr = mfspr(SPRN_EBBHR);
                t->ebbrr = mfspr(SPRN_EBBRR);

                t->fscr = mfspr(SPRN_FSCR);

                /*
                 * Note that the TAR is not available for use in the kernel.
                 * (To provide this, the TAR should be backed up/restored on
                 * exception entry/exit instead, and be in pt_regs.  FIXME,
                 * this should be in pt_regs anyway (for debug).)
                 */
                t->tar = mfspr(SPRN_TAR);
        }
#endif
}

static inline void restore_sprs(struct thread_struct *old_thread,
                                struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
            old_thread->vrsave != new_thread->vrsave)
                mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                u64 dscr = get_paca()->dscr_default;
                u64 fscr = old_thread->fscr & ~FSCR_DSCR;

                if (new_thread->dscr_inherit) {
                        dscr = new_thread->dscr;
                        fscr |= FSCR_DSCR;
                }

                if (old_thread->dscr != dscr)
                        mtspr(SPRN_DSCR, dscr);

                if (old_thread->fscr != fscr)
                        mtspr(SPRN_FSCR, fscr);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
                if (old_thread->bescr != new_thread->bescr)
                        mtspr(SPRN_BESCR, new_thread->bescr);
                if (old_thread->ebbhr != new_thread->ebbhr)
                        mtspr(SPRN_EBBHR, new_thread->ebbhr);
                if (old_thread->ebbrr != new_thread->ebbrr)
                        mtspr(SPRN_EBBRR, new_thread->ebbrr);

                if (old_thread->tar != new_thread->tar)
                        mtspr(SPRN_TAR, new_thread->tar);
        }
#endif
}

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

        WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_STD_MMU_64
        batch = this_cpu_ptr(&ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, the hw-breakpoint interfaces are used to
 * schedule the DABR update.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
                __set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        /*
         * We need to save SPRs before treclaim/trecheckpoint as these will
         * change a number of them.
         */
        save_sprs(&prev->thread);

        __switch_to_tm(prev);

        /* Save FPU, Altivec, VSX and SPE state */
        giveup_all(prev);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();

        tm_recheckpoint_new_task(new);

        /*
         * Call restore_sprs() before calling _switch(). If we move it after
         * _switch() then we miss out on calling it for new tasks. The reason
         * for this is we manually create a stack frame for new tasks that
         * directly returns through ret_from_fork() or
         * ret_from_kernel_thread(). See copy_thread() for details.
         */
        restore_sprs(old_thread, new_thread);

        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_STD_MMU_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = this_cpu_ptr(&ppc64_tlb_batch);
                batch->active = 1;
        }

        if (current_thread_info()->task->thread.regs)
                restore_math(current_thread_info()->task->thread.regs);
#endif /* CONFIG_PPC_STD_MMU_64 */

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));
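        /*
         * With instructions_to_print == 16 this starts 12 instructions
         * (48 bytes) before regs->nip, so the dump shows the faulting
         * instruction with 12 instructions of context before it and
         * 3 after it.
         */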

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                if (!__kernel_text_address(pc) ||
                     probe_kernel_address((unsigned int __user *)pc, instr)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

struct regbit {
        unsigned long bit;
        const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
        const char *s = "";

        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", s, bits->name);
                        s = sep;
                }
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
        {MSR_TS_T,      "T"},
        {MSR_TS_S,      "S"},
        {MSR_TM,        "E"},
        {0,             NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled         (bit 32)
 *   S: Suspended       (bit 33)
 *   T: Transactional   (bit 34)
 */
        if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
                printk(",TM[");
                print_bits(val, msr_tm_bits, "");
                printk("]");
        }
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
        printk("<");
        print_bits(val, msr_bits, ",");
        print_tm_bits(val);
        printk(">");
}
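
/*
 * Example output (illustrative) for a typical 64-bit userspace MSR,
 * following the order of msr_bits[] above:
 * <SF,EE,PR,FP,ME,IR,DR,RI,LE>
 */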

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        show_regs_print_info(KERN_DEFAULT);

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        print_msr_bits(regs->msr);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG" ", regs->orig_gpr3);
        if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (MSR_TM_ACTIVE(regs->msr))
                printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

        for (i = 0;  i < 32;  i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP late so we have the best chance of getting
         * the above info out without failing.
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_all_to_thread(src);
        /*
         * Flush TM state out so we can copy it.  __switch_to_tm() does this
         * flush but it removes the checkpointed state from the current CPU and
         * transitions the CPU out of TM mode.  Hence we need to call
         * tm_recheckpoint_new_task() (on the same task) to restore the
         * checkpointed state back and the TM mode.
         */
        __switch_to_tm(src);
        tm_recheckpoint_new_task(src);

        *dst = *src;

        clear_task_ebb(dst);

        return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
        unsigned long sp_vsid;
        unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

        if (radix_enabled())
                return;

        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                        << SLB_VSID_SHIFT_1T;
        else
                sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                        << SLB_VSID_SHIFT;
        sp_vsid |= SLB_VSID_KERNEL | llp;
        p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
 */

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long kthread_arg, struct task_struct *p)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
        void (*f)(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        struct thread_info *ti = task_thread_info(p);

        klp_init_thread_info(ti);

        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                /* function */
                if (usp)
                        childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
                clear_tsk_thread_flag(p, TIF_32BIT);
                childregs->softe = 1;
#endif
                childregs->gpr[15] = kthread_arg;
                p->thread.regs = NULL;  /* no user register state */
                ti->flags |= _TIF_RESTOREALL;
                f = ret_from_kernel_thread;
        } else {
                /* user thread */
                struct pt_regs *regs = current_pt_regs();
                CHECK_FULL_REGS(regs);
                *childregs = *regs;
                if (usp)
                        childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                childregs->gpr[3] = 0;  /* Result from fork() */
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }

                f = ret_from_fork;
        }
        childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some housekeeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
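        /*
         * Resulting child stack layout (sketch; addresses grow downward
         * from the top of the stack page):
         *
         *      childregs               (user regs, or kernel thread setup)
         *      STACK_FRAME_OVERHEAD    (frame whose back chain is zeroed)
         *      kregs                   (pt_regs consumed by _switch())
         *      STACK_FRAME_OVERHEAD    (frame that p->thread.ksp points to)
         */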
        ((unsigned long *)sp)[0] = 0;
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
#ifdef CONFIG_PPC32
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
#endif

        p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
        p->thread.vr_save_area = NULL;
#endif

        setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = mfspr(SPRN_DSCR);
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
#endif
        kregs->nip = ppc_function_entry(f);
        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry;

                if (is_elf2_task()) {
                        /* Look ma, no function descriptors! */
                        entry = start;

                        /*
                         * Ulrich says:
                         *   The latest iteration of the ABI requires that when
                         *   calling a function (at its global entry point),
                         *   the caller must ensure r12 holds the entry point
                         *   address (so that the function can quickly
                         *   establish addressability).
                         */
                        regs->gpr[12] = start;
                        /* Make sure that's restored on entry to userspace. */
                        set_thread_flag(TIF_RESTOREALL);
                } else {
                        unsigned long toc;

                        /* start is a relocated pointer to the function
                         * descriptor for the elf _start routine.  The first
                         * entry in the function descriptor is the entry
                         * address of _start and the second entry is the TOC
                         * value we need to use.
                         */
                        __get_user(entry, (unsigned long __user *)start);
                        __get_user(toc, (unsigned long __user *)start+1);

                        /* Check whether the e_entry function descriptor entries
                         * need to be relocated before we can use them.
                         */
                        if (load_addr != 0) {
                                entry += load_addr;
                                toc   += load_addr;
                        }
                        regs->gpr[2] = toc;
                }
                regs->nip = entry;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
        current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
        memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
        current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (cpu_has_feature(CPU_FTR_TM))
                regs->msr |= MSR_TM;
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /*
         * This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded FP) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP.
         */
1617         if (val & PR_FP_EXC_SW_ENABLE) {
1618 #ifdef CONFIG_SPE
1619                 if (cpu_has_feature(CPU_FTR_SPE)) {
1620                         /*
1621                          * When the sticky exception bits are set
1622                          * directly by userspace, it must call prctl
1623                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1624                          * in the existing prctl settings) or
1625                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1626                          * the bits being set).  <fenv.h> functions
1627                          * saving and restoring the whole
1628                          * floating-point environment need to do so
1629                          * anyway to restore the prctl settings from
1630                          * the saved environment.
1631                          */
1632                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1633                         tsk->thread.fpexc_mode = val &
1634                                 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1635                         return 0;
1636                 } else {
1637                         return -EINVAL;
1638                 }
1639 #else
1640                 return -EINVAL;
1641 #endif
1642         }
1643
1644         /* On CONFIG_SPE implementations this does not hurt us.  The bits
1645          * that __pack_fe01 uses do not overlap with the bits used for
1646          * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits are
1647          * reserved on CONFIG_SPE implementations, so writing to them
1648          * does not change anything. */
1649         if (val > PR_FP_EXC_PRECISE)
1650                 return -EINVAL;
1651         tsk->thread.fpexc_mode = __pack_fe01(val);
1652         if (regs != NULL && (regs->msr & MSR_FP) != 0)
1653                 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1654                         | tsk->thread.fpexc_mode;
1655         return 0;
1656 }
1657
1658 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1659 {
1660         unsigned int val;
1661
1662         if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1663 #ifdef CONFIG_SPE
1664                 if (cpu_has_feature(CPU_FTR_SPE)) {
1665                         /*
1666                          * When the sticky exception bits are set
1667                          * directly by userspace, it must call prctl
1668                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1669                          * in the existing prctl settings) or
1670                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1671                          * the bits being set).  <fenv.h> functions
1672                          * saving and restoring the whole
1673                          * floating-point environment need to do so
1674                          * anyway to restore the prctl settings from
1675                          * the saved environment.
1676                          */
1677                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1678                         val = tsk->thread.fpexc_mode;
1679                 } else
1680                         return -EINVAL;
1681 #else
1682                 return -EINVAL;
1683 #endif
1684         else
1685                 val = __unpack_fe01(tsk->thread.fpexc_mode);
1686         return put_user(val, (unsigned int __user *) adr);
1687 }
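/*
 * Illustrative userspace view of set_fpexc_mode()/get_fpexc_mode() above
 * (a hypothetical sketch, not part of this file).  Selecting precise FP
 * exception mode and reading the setting back:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned int mode;
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 *
 * On SPE parts the same interface instead carries the sticky IEEE enable
 * flags, with PR_FP_EXC_SW_ENABLE set in the value.
 */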
1688
1689 int set_endian(struct task_struct *tsk, unsigned int val)
1690 {
1691         struct pt_regs *regs = tsk->thread.regs;
1692
1693         if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1694             (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1695                 return -EINVAL;
1696
1697         if (regs == NULL)
1698                 return -EINVAL;
1699
1700         if (val == PR_ENDIAN_BIG)
1701                 regs->msr &= ~MSR_LE;
1702         else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1703                 regs->msr |= MSR_LE;
1704         else
1705                 return -EINVAL;
1706
1707         return 0;
1708 }
1709
1710 int get_endian(struct task_struct *tsk, unsigned long adr)
1711 {
1712         struct pt_regs *regs = tsk->thread.regs;
1713         unsigned int val;
1714
1715         if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1716             !cpu_has_feature(CPU_FTR_REAL_LE))
1717                 return -EINVAL;
1718
1719         if (regs == NULL)
1720                 return -EINVAL;
1721
1722         if (regs->msr & MSR_LE) {
1723                 if (cpu_has_feature(CPU_FTR_REAL_LE))
1724                         val = PR_ENDIAN_LITTLE;
1725                 else
1726                         val = PR_ENDIAN_PPC_LITTLE;
1727         } else
1728                 val = PR_ENDIAN_BIG;
1729
1730         return put_user(val, (unsigned int __user *)adr);
1731 }
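/*
 * Illustrative userspace use of set_endian()/get_endian() above (a
 * hypothetical sketch, not part of this file):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned int cur;
 *
 *	prctl(PR_GET_ENDIAN, (unsigned long)&cur);
 *	if (cur != PR_ENDIAN_BIG)
 *		prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);
 *
 * Either call fails with -EINVAL when the CPU lacks the corresponding
 * CPU_FTR_REAL_LE/CPU_FTR_PPC_LE feature.
 */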
1732
1733 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1734 {
1735         tsk->thread.align_ctl = val;
1736         return 0;
1737 }
1738
1739 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1740 {
1741         return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1742 }
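/*
 * thread.align_ctl set here is meant to steer the alignment exception
 * handler.  A userspace sketch (hypothetical, not part of this file):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// SIGBUS instead of fixup
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);	// fix up silently
 */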
1743
1744 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1745                                   unsigned long nbytes)
1746 {
1747         unsigned long stack_page;
1748         unsigned long cpu = task_cpu(p);
1749
1750         /*
1751          * Avoid crashing if the stack has overflowed and corrupted
1752          * task_cpu(p), which is in the thread_info struct.
1753          */
1754         if (cpu < NR_CPUS && cpu_possible(cpu)) {
1755                 stack_page = (unsigned long) hardirq_ctx[cpu];
1756                 if (sp >= stack_page + sizeof(struct thread_struct)
1757                     && sp <= stack_page + THREAD_SIZE - nbytes)
1758                         return 1;
1759
1760                 stack_page = (unsigned long) softirq_ctx[cpu];
1761                 if (sp >= stack_page + sizeof(struct thread_struct)
1762                     && sp <= stack_page + THREAD_SIZE - nbytes)
1763                         return 1;
1764         }
1765         return 0;
1766 }
1767
1768 int validate_sp(unsigned long sp, struct task_struct *p,
1769                        unsigned long nbytes)
1770 {
1771         unsigned long stack_page = (unsigned long)task_stack_page(p);
1772
1773         if (sp >= stack_page + sizeof(struct thread_struct)
1774             && sp <= stack_page + THREAD_SIZE - nbytes)
1775                 return 1;
1776
1777         return valid_irq_stack(sp, p, nbytes);
1778 }
1779
1780 EXPORT_SYMBOL(validate_sp);
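/*
 * Worked example of the window validate_sp() accepts (numbers are
 * illustrative; THREAD_SIZE and the struct size depend on the config):
 *
 *	stack_page = 0xc000000012340000, THREAD_SIZE = 0x4000
 *	valid:	stack_page + sizeof(struct thread_struct) <= sp
 *		&& sp <= stack_page + 0x4000 - nbytes
 *
 * Anything outside that window is treated as a corrupt or foreign stack
 * pointer, so callers such as get_wchan() and show_stack() below stop
 * walking rather than dereference it.
 */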
1781
1782 unsigned long get_wchan(struct task_struct *p)
1783 {
1784         unsigned long ip, sp;
1785         int count = 0;
1786
1787         if (!p || p == current || p->state == TASK_RUNNING)
1788                 return 0;
1789
1790         sp = p->thread.ksp;
1791         if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1792                 return 0;
1793
1794         do {
1795                 sp = *(unsigned long *)sp;
1796                 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1797                         return 0;
1798                 if (count > 0) {
1799                         ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1800                         if (!in_sched_functions(ip))
1801                                 return ip;
1802                 }
1803         } while (count++ < 16);
1804         return 0;
1805 }
1806
1807 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1808
1809 void show_stack(struct task_struct *tsk, unsigned long *stack)
1810 {
1811         unsigned long sp, ip, lr, newsp;
1812         int count = 0;
1813         int firstframe = 1;
1814 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1815         int curr_frame = current->curr_ret_stack;
1816         extern void return_to_handler(void);
1817         unsigned long rth = (unsigned long)return_to_handler;
1818 #endif
1819
1820         sp = (unsigned long) stack;
1821         if (tsk == NULL)
1822                 tsk = current;
1823         if (sp == 0) {
1824                 if (tsk == current)
1825                         sp = current_stack_pointer();
1826                 else
1827                         sp = tsk->thread.ksp;
1828         }
1829
1830         lr = 0;
1831         printk("Call Trace:\n");
1832         do {
1833                 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1834                         return;
1835
1836                 stack = (unsigned long *) sp;
1837                 newsp = stack[0];
1838                 ip = stack[STACK_FRAME_LR_SAVE];
1839                 if (!firstframe || ip != lr) {
1840                         printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1842                         if ((ip == rth) && curr_frame >= 0) {
1843                                 printk(" (%pS)",
1844                                        (void *)current->ret_stack[curr_frame].ret);
1845                                 curr_frame--;
1846                         }
1847 #endif
1848                         if (firstframe)
1849                                 printk(" (unreliable)");
1850                         printk("\n");
1851                 }
1852                 firstframe = 0;
1853
1854                 /*
1855                  * See if this is an exception frame.
1856                  * We look for the "regshere" marker in the current frame.
1857                  */
1858                 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1859                     && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1860                         struct pt_regs *regs = (struct pt_regs *)
1861                                 (sp + STACK_FRAME_OVERHEAD);
1862                         lr = regs->link;
1863                         printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
1864                                regs->trap, (void *)regs->nip, (void *)lr);
1865                         firstframe = 1;
1866                 }
1867
1868                 sp = newsp;
1869         } while (count++ < kstack_depth_to_print);
1870 }
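/*
 * Sketch of the ppc64 frame layout show_stack() relies on (offsets are
 * indicative; ppc32 differs, which is what the STACK_FRAME_* constants
 * abstract away):
 *
 *	sp ---> +--------------------+
 *	        | back chain         |  stack[0], i.e. newsp
 *	        | CR save            |
 *	        | LR save            |  stack[STACK_FRAME_LR_SAVE]
 *	        | ...                |
 *	        | "regshere" marker  |  stack[STACK_FRAME_MARKER], exception
 *	        | struct pt_regs     |  frames only, at sp + STACK_FRAME_OVERHEAD
 *	        +--------------------+
 */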
1871
1872 #ifdef CONFIG_PPC64
1873 /* Called with hard IRQs off */
1874 void notrace __ppc64_runlatch_on(void)
1875 {
1876         struct thread_info *ti = current_thread_info();
1877         unsigned long ctrl;
1878
1879         ctrl = mfspr(SPRN_CTRLF);
1880         ctrl |= CTRL_RUNLATCH;
1881         mtspr(SPRN_CTRLT, ctrl);
1882
1883         ti->local_flags |= _TLF_RUNLATCH;
1884 }
1885
1886 /* Called with hard IRQs off */
1887 void notrace __ppc64_runlatch_off(void)
1888 {
1889         struct thread_info *ti = current_thread_info();
1890         unsigned long ctrl;
1891
1892         ti->local_flags &= ~_TLF_RUNLATCH;
1893
1894         ctrl = mfspr(SPRN_CTRLF);
1895         ctrl &= ~CTRL_RUNLATCH;
1896         mtspr(SPRN_CTRLT, ctrl);
1897 }
1898 #endif /* CONFIG_PPC64 */
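/*
 * These helpers are normally reached via the ppc64_runlatch_on()/off()
 * wrappers in asm/runlatch.h, which (roughly -- a sketch, not the real
 * macro bodies) avoid the SPR accesses when nothing would change:
 *
 *	if (cpu_has_feature(CPU_FTR_CTRL) &&
 *	    !test_thread_local_flags(_TLF_RUNLATCH))
 *		__ppc64_runlatch_on();
 *
 * Note the SPR pairing in the functions themselves: the current value is
 * read from SPRN_CTRLF and the update is written to SPRN_CTRLT.
 */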
1899
1900 unsigned long arch_align_stack(unsigned long sp)
1901 {
1902         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1903                 sp -= get_random_int() & ~PAGE_MASK;
1904         return sp & ~0xf;
1905 }
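/*
 * Worked example for arch_align_stack(), assuming 4K pages so that
 * ~PAGE_MASK == 0xfff: with sp = 0x7fffffffe000 and get_random_int()
 * returning 0x12345a35,
 *
 *	sp -= 0x12345a35 & 0xfff;	// sp -= 0xa35  -> 0x7fffffffd5cb
 *	sp &= ~0xf;			//              -> 0x7fffffffd5c0
 *
 * i.e. the stack start moves down by less than a page and ends up
 * 16-byte aligned.
 */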
1906
1907 static inline unsigned long brk_rnd(void)
1908 {
1909         unsigned long rnd = 0;
1910
1911         /* 8MB for 32bit, 1GB for 64bit */
1912         if (is_32bit_task())
1913                 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
1914         else
1915                 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
1916
1917         return rnd << PAGE_SHIFT;
1918 }
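/*
 * Worked example for brk_rnd(), assuming 4K pages (PAGE_SHIFT = 12):
 *
 *	32-bit task: rnd is in [0, 1 << 11) pages, i.e. up to 8MB
 *	64-bit task: rnd is in [0, 1 << 18) pages, i.e. up to 1GB
 *
 * The final "rnd << PAGE_SHIFT" converts the page count into the byte
 * offset that arch_randomize_brk() below adds to the heap base.
 */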
1919
1920 unsigned long arch_randomize_brk(struct mm_struct *mm)
1921 {
1922         unsigned long base = mm->brk;
1923         unsigned long ret;
1924
1925 #ifdef CONFIG_PPC_STD_MMU_64
1926         /*
1927          * If we are using 1TB segments and we are allowed to randomise
1928          * the heap, we can put it above 1TB so it is backed by a 1TB
1929          * segment. Otherwise the heap will be in the bottom 1TB
1930          * which always uses 256MB segments and this may result in a
1931          * performance penalty.  We don't need to worry about radix,
1932          * where mmu_highuser_ssize remains unchanged at 256MB.
1933          */
1934         if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1935                 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1936 #endif
1937
1938         ret = PAGE_ALIGN(base + brk_rnd());
1939
1940         if (ret < mm->brk)
1941                 return mm->brk;
1942
1943         return ret;
1944 }
1945