arch/s390/kernel/nmi.c
/*
 *   Machine check handler
 *
 *    Copyright IBM Corp. 2000, 2009
 *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *               Cornelia Huck <cornelia.huck@de.ibm.com>,
 *               Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/time.h>
#include <linux/module.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/etr.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>

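/*
 * Per-CPU accumulator for machine check conditions that cannot be handled
 * directly in the NMI-context interrupt handler. s390_do_machine_check()
 * records what happened here and sets CIF_MCCK_PENDING; s390_handle_mcck()
 * later consumes and clears this state with interrupts and machine checks
 * disabled.
 */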
struct mcck_struct {
        int kill_task;
        int channel_report;
        int warning;
        unsigned long long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);

/* Stop all other CPUs and put this CPU into disabled wait; never returns. */
static void s390_handle_damage(char *msg)
{
        smp_send_stop();
        disabled_wait((unsigned long) __builtin_return_address(0));
        while (1);
}

/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 */
void s390_handle_mcck(void)
{
        unsigned long flags;
        struct mcck_struct mcck;

        /*
         * Disable machine checks and get the current state of accumulated
         * machine checks. Afterwards delete the old state and enable machine
         * checks again.
         */
        local_irq_save(flags);
        local_mcck_disable();
        mcck = __get_cpu_var(cpu_mcck);
        memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
        clear_cpu_flag(CIF_MCCK_PENDING);
        local_mcck_enable();
        local_irq_restore(flags);

        if (mcck.channel_report)
                crw_handle_channel_report();
        /*
         * A warning may remain pending for a prolonged period on the bare
         * iron (in fact until the machine is powered off or the problem goes
         * away), so we simply stop listening for the WARNING MCH to avoid
         * being interrupted continuously. One caveat, however, is that we
         * must do this per processor and cannot use the smp version of
         * ctl_clear_bit(). On VM we only get one interrupt per virtually
         * presented machine check. Although one would suffice, we may get
         * one interrupt per (virtual) cpu.
         */
        if (mcck.warning) {     /* WARNING pending ? */
                static int mchchk_wng_posted = 0;

                /* Use single cpu clear, as we cannot handle smp here. */
                __ctl_clear_bit(14, 24);        /* Disable WARNING MCH */
                if (xchg(&mchchk_wng_posted, 1) == 0)
                        kill_cad_pid(SIGPWR, 1);
        }
        if (mcck.kill_task) {
                local_irq_enable();
                printk(KERN_EMERG "mcck: Terminating task because of machine "
                       "malfunction (code 0x%016llx).\n", mcck.mcck_code);
                printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
                       current->comm, current->pid);
                do_exit(SIGSEGV);
        }
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);

/*
 * Revalidate the registers by reloading them from the save areas in the
 * lowcore; the validity bits of the machine check interruption code tell us
 * which saved contents can be trusted.
 * Returns 0 if all registers could be validated, 1 otherwise.
 */
static int notrace s390_revalidate_registers(struct mci *mci)
{
        int kill_task;
        u64 zero;
        void *fpt_save_area, *fpt_creg_save_area;

        kill_task = 0;
        zero = 0;

        if (!mci->gr) {
                /*
                 * General purpose registers couldn't be restored and have
                 * unknown contents. Process needs to be terminated.
                 */
                kill_task = 1;
        }
        if (!mci->fp) {
                /*
                 * Floating point registers can't be restored and
                 * therefore the process needs to be terminated.
                 */
                kill_task = 1;
        }
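        /*
         * Without CONFIG_64BIT only the four original floating point
         * registers (0, 2, 4 and 6) are architecturally guaranteed, so they
         * are reloaded here from the lowcore save area. If the machine has
         * the IEEE facility, all 16 registers and the floating point control
         * register are reloaded from the appropriate save area below.
         */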
#ifndef CONFIG_64BIT
        asm volatile(
                "       ld      0,0(%0)\n"
                "       ld      2,8(%0)\n"
                "       ld      4,16(%0)\n"
                "       ld      6,24(%0)"
                : : "a" (&S390_lowcore.floating_pt_save_area));
#endif

        if (MACHINE_HAS_IEEE) {
#ifdef CONFIG_64BIT
                fpt_save_area = &S390_lowcore.floating_pt_save_area;
                fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
#else
                fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
                fpt_creg_save_area = fpt_save_area + 128;
#endif
                if (!mci->fc) {
                        /*
                         * Floating point control register can't be restored.
                         * Load a defined (zero) value nevertheless and
                         * terminate the task.
                         */
                        asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
                        kill_task = 1;

                } else
                        asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));

                asm volatile(
                        "       ld      0,0(%0)\n"
                        "       ld      1,8(%0)\n"
                        "       ld      2,16(%0)\n"
                        "       ld      3,24(%0)\n"
                        "       ld      4,32(%0)\n"
                        "       ld      5,40(%0)\n"
                        "       ld      6,48(%0)\n"
                        "       ld      7,56(%0)\n"
                        "       ld      8,64(%0)\n"
                        "       ld      9,72(%0)\n"
                        "       ld      10,80(%0)\n"
                        "       ld      11,88(%0)\n"
                        "       ld      12,96(%0)\n"
                        "       ld      13,104(%0)\n"
                        "       ld      14,112(%0)\n"
                        "       ld      15,120(%0)\n"
                        : : "a" (fpt_save_area));
        }

#ifdef CONFIG_64BIT
        /* Revalidate vector registers */
        if (MACHINE_HAS_VX && current->thread.vxrs) {
                if (!mci->vr) {
                        /*
                         * Vector registers can't be restored and therefore
                         * the process needs to be terminated.
                         */
                        kill_task = 1;
                }
                restore_vx_regs((__vector128 *)
                                S390_lowcore.vector_save_area_addr);
        }
#endif
        /* Revalidate access registers */
        asm volatile(
                "       lam     0,15,0(%0)"
                : : "a" (&S390_lowcore.access_regs_save_area));
        if (!mci->ar) {
                /*
                 * Access registers have unknown contents.
                 * Terminating task.
                 */
                kill_task = 1;
        }
        /* Revalidate control registers */
        if (!mci->cr) {
                /*
                 * Control registers have unknown contents.
                 * Can't recover and therefore stopping machine.
                 */
                s390_handle_damage("invalid control registers.");
        } else {
#ifdef CONFIG_64BIT
                asm volatile(
                        "       lctlg   0,15,0(%0)"
                        : : "a" (&S390_lowcore.cregs_save_area));
#else
                asm volatile(
                        "       lctl    0,15,0(%0)"
                        : : "a" (&S390_lowcore.cregs_save_area));
#endif
        }
        /*
         * We don't even try to revalidate the TOD register, since we simply
         * can't write something sensible into that register.
         */
#ifdef CONFIG_64BIT
        /*
         * See if we can revalidate the TOD programmable register with its
         * old contents (should be zero) otherwise set it to zero.
         */
        if (!mci->pr)
                asm volatile(
                        "       sr      0,0\n"
                        "       sckpf"
                        : : : "0", "cc");
        else
                asm volatile(
                        "       l       0,0(%0)\n"
                        "       sckpf"
                        : : "a" (&S390_lowcore.tod_progreg_save_area)
                        : "0", "cc");
#endif
        /* Revalidate clock comparator register */
        set_clock_comparator(S390_lowcore.clock_comparator);
        /* Check if old PSW is valid */
        if (!mci->wp)
                /*
                 * Can't tell if we come from user or kernel mode
                 * -> stopping machine.
                 */
                s390_handle_damage("old psw invalid.");

        /* PSW mask and key, program mask or instruction address invalid */
        if (!mci->ms || !mci->pm || !mci->ia)
                kill_task = 1;

        return kill_task;
}

#define MAX_IPD_COUNT   29
#define MAX_IPD_TIME    (5 * 60 * USEC_PER_SEC) /* 5 minutes */
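
/*
 * Instruction processing damage (IPD) throttling: an IPD machine check with
 * backup is retried, but the machine is stopped once MAX_IPD_COUNT such
 * machine checks have occurred, each within MAX_IPD_TIME (microseconds) of
 * the previous one. s390_do_machine_check() shifts the TOD clock delta right
 * by 12 bits to convert it to microseconds before the comparison.
 */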

#define ED_STP_ISLAND   6       /* External damage STP island check */
#define ED_STP_SYNC     7       /* External damage STP sync check */
#define ED_ETR_SYNC     12      /* External damage ETR sync check */
#define ED_ETR_SWITCH   13      /* External damage ETR switch to local */

/*
 * Machine check interrupt handler. Runs in NMI context; anything that cannot
 * be handled here is recorded in the per-CPU mcck_struct and flagged via
 * CIF_MCCK_PENDING so that s390_handle_mcck() deals with it later.
 */
void notrace s390_do_machine_check(struct pt_regs *regs)
{
        static int ipd_count;
        static DEFINE_SPINLOCK(ipd_lock);
        static unsigned long long last_ipd;
        struct mcck_struct *mcck;
        unsigned long long tmp;
        struct mci *mci;
        int umode;

        nmi_enter();
        inc_irq_stat(NMI_NMI);
        mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
        mcck = &__get_cpu_var(cpu_mcck);
        umode = user_mode(regs);

        if (mci->sd) {
                /* System damage -> stopping machine */
                s390_handle_damage("received system damage machine check.");
        }
        if (mci->pd) {
                if (mci->b) {
                        /* Processing backup -> verify if we can survive this */
                        u64 z_mcic, o_mcic, t_mcic;
#ifdef CONFIG_64BIT
                        z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
                        o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
                                  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
                                  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
                                  1ULL<<16);
#else
                        z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
                                  1ULL<<29);
                        o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
                                  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
                                  1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
#endif
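                        /*
                         * The machine check is survivable only if none of
                         * the "must be zero" bits (z_mcic) are set in the
                         * machine check interruption code and all of the
                         * "must be one" bits (o_mcic) are set. Otherwise
                         * stop the machine.
                         */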
                        t_mcic = *(u64 *)mci;

                        if (((t_mcic & z_mcic) != 0) ||
                            ((t_mcic & o_mcic) != o_mcic)) {
                                s390_handle_damage("processing backup machine "
                                                   "check with damage.");
                        }

                        /*
                         * The exigent condition is nullifying, therefore we
                         * might retry the instruction. Count these retries
                         * and stop the machine if too many of them occur in
                         * close succession.
                         */
                        spin_lock(&ipd_lock);
                        tmp = get_tod_clock();
                        if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
                                ipd_count++;
                        else
                                ipd_count = 1;
                        last_ipd = tmp;
                        if (ipd_count == MAX_IPD_COUNT)
                                s390_handle_damage("too many ipd retries.");
                        spin_unlock(&ipd_lock);
                } else {
                        /* Processing damage -> stopping machine */
                        s390_handle_damage("received instruction processing "
                                           "damage machine check.");
                }
        }
        if (s390_revalidate_registers(mci)) {
                if (umode) {
                        /*
                         * Couldn't restore all register contents while in
                         * user mode -> mark task for termination.
                         */
                        mcck->kill_task = 1;
                        mcck->mcck_code = *(unsigned long long *) mci;
                        set_cpu_flag(CIF_MCCK_PENDING);
                } else {
                        /*
                         * Couldn't restore all register contents while in
                         * kernel mode -> stopping machine.
                         */
                        s390_handle_damage("unable to revalidate registers.");
                }
        }
        if (mci->cd) {
                /* Timing facility damage */
                s390_handle_damage("TOD clock damaged");
        }
        if (mci->ed && mci->ec) {
                /* External damage */
                if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
                        etr_sync_check();
                if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
                        etr_switch_to_local();
                if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
                        stp_sync_check();
                if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
                        stp_island_check();
        }
        if (mci->se)
                /* Storage error uncorrected */
                s390_handle_damage("received storage error uncorrected "
                                   "machine check.");
        if (mci->ke)
                /* Storage key-error uncorrected */
                s390_handle_damage("received storage key-error uncorrected "
                                   "machine check.");
        if (mci->ds && mci->fa)
                /* Storage degradation */
                s390_handle_damage("received storage degradation machine "
                                   "check.");
        if (mci->cp) {
                /* Channel report word pending */
                mcck->channel_report = 1;
                set_cpu_flag(CIF_MCCK_PENDING);
        }
        if (mci->w) {
                /* Warning pending */
                mcck->warning = 1;
                set_cpu_flag(CIF_MCCK_PENDING);
        }
        nmi_exit();
}

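/*
 * Enable delivery of the machine check subclasses this handler cares about
 * by setting the corresponding subclass mask bits in control register 14.
 * Runs once during boot as an arch_initcall.
 */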
static int __init machine_check_init(void)
{
        ctl_set_bit(14, 25);    /* enable external damage MCH */
        ctl_set_bit(14, 27);    /* enable system recovery MCH */
        ctl_set_bit(14, 24);    /* enable warning MCH */
        return 0;
}
arch_initcall(machine_check_init);