arch/powerpc/platforms/pseries/setup.c
/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
#include <asm/pmc.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#include "plpar_wrappers.h"
#include "ras.h"
#include "firmware.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void find_udbg_vterm(void);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

static void pseries_shared_idle(void);
static void pseries_dedicated_idle(void);

struct mpic *pSeries_mpic;

static void pSeries_show_cpuinfo(struct seq_file *m)
{
        struct device_node *root;
        const char *model = "";

        root = of_find_node_by_path("/");
        if (root)
                model = get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: CHRP %s\n", model);
        of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
        unsigned long system_reset_addr, machine_check_addr;

        int ibm_nmi_register = rtas_token("ibm,nmi-register");
        if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
                return;

        /* If the kernel's not linked at zero we point the firmware at low
         * addresses anyway, and use a trampoline to get to the real code. */
        system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
        machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

        if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
                                machine_check_addr))
                fwnmi_active = 1;
}

static void __init pSeries_init_mpic(void)
{
        unsigned int *addrp;
        struct device_node *np;
        unsigned long intack = 0;

        /* All ISUs are setup, complete initialization */
        mpic_init(pSeries_mpic);

        /* Check what kind of cascade ACK we have */
        if (!(np = of_find_node_by_name(NULL, "pci"))
            || !(addrp = (unsigned int *)
                 get_property(np, "8259-interrupt-acknowledge", NULL)))
                printk(KERN_ERR "Cannot find pci to get ack address\n");
        else
                intack = addrp[prom_n_addr_cells(np)-1];
        of_node_put(np);

        /* Setup the legacy interrupts & controller */
        i8259_init(intack, 0);

        /* Hook cascade to mpic */
        mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
}

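/*
 * Find the OpenPIC: read its physical address from the root node's
 * "platform-open-pic" property and allocate the MPIC driver for it.
 * The first NUM_ISA_INTERRUPTS interrupt numbers are left for the
 * cascaded i8259 and the top four for IPIs.
 */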
static void __init pSeries_setup_mpic(void)
{
        unsigned int *opprop;
        unsigned long openpic_addr = 0;
        unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
        struct device_node *root;
        int irq_count;

        /* Find the Open PIC if present */
        root = of_find_node_by_path("/");
        opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
        if (opprop != 0) {
                int n = prom_n_addr_cells(root);

                for (openpic_addr = 0; n > 0; --n)
                        openpic_addr = (openpic_addr << 32) + *opprop++;
                printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
        }
        of_node_put(root);

        BUG_ON(openpic_addr == 0);

        /* Get the sense values from OF */
        prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);

        /* Setup the openpic driver */
        irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
        pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
                                  16, 16, irq_count, /* isu size, irq offset, irq count */
                                  NR_IRQS - 4, /* ipi offset */
                                  senses, irq_count, /* sense & sense size */
                                  " MPIC     ");
}

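/*
 * Turn on the performance monitor counters when running under a
 * hypervisor: the H_PERFMON hcall sets the PMU-enable bit for this
 * partition, and on shared-processor LPARs the lppaca is marked so the
 * hypervisor knows the PMC registers are in use.
 */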
static void pseries_lpar_enable_pmcs(void)
{
        unsigned long set, reset;

        power4_enable_pmcs();

        set = 1UL << 63;
        reset = 0;
        plpar_hcall_norets(H_PERFMON, set, reset);

        /* instruct hypervisor to maintain PMCs */
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                get_lppaca()->pmcregs_in_use = 1;
}

static void __init pSeries_setup_arch(void)
{
        /* Fixup ppc_md depending on the type of interrupt controller */
        if (ppc64_interrupt_controller == IC_OPEN_PIC) {
                ppc_md.init_IRQ       = pSeries_init_mpic;
                ppc_md.get_irq        = mpic_get_irq;
                /* Allocate the mpic now, so that find_and_init_phbs() can
                 * fill the ISUs */
                pSeries_setup_mpic();
        } else {
                ppc_md.init_IRQ       = xics_init_IRQ;
                ppc_md.get_irq        = xics_get_irq;
        }

#ifdef CONFIG_SMP
        smp_init_pSeries();
#endif
        /* openpic global configuration register (64-bit format). */
        /* openpic Interrupt Source Unit pointer (64-bit format). */
        /* python0 facility area (mmio) (64-bit format) REAL address. */

        /* init to some ~sane value until calibrate_delay() runs */
        loops_per_jiffy = 50000000;

        if (ROOT_DEV == 0) {
                printk("No ramdisk, default root is /dev/sda2\n");
                ROOT_DEV = Root_SDA2;
        }

        fwnmi_init();

        /* Find and initialize PCI host bridges */
        init_pci_config_tokens();
        find_and_init_phbs();
        eeh_init();

        pSeries_nvram_init();

        /* Choose an idle loop */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
                if (get_lppaca()->shared_proc) {
                        printk(KERN_INFO "Using shared processor idle loop\n");
                        ppc_md.idle_loop = pseries_shared_idle;
                } else {
                        printk(KERN_INFO "Using dedicated idle loop\n");
                        ppc_md.idle_loop = pseries_dedicated_idle;
                }
        } else {
                printk(KERN_INFO "Using default idle loop\n");
                ppc_md.idle_loop = default_idle;
        }

        if (firmware_has_feature(FW_FEATURE_LPAR))
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
        else
                ppc_md.enable_pmcs = power4_enable_pmcs;
}

static int __init pSeries_init_panel(void)
{
        /* Manually leave the kernel version on the panel. */
        ppc_md.progress("Linux ppc64\n", 0);
        ppc_md.progress(system_utsname.version, 0);

        return 0;
}
arch_initcall(pSeries_init_panel);

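/*
 * Scan the device tree for the interrupt controller type (OpenPIC vs.
 * XICS) so that pSeries_setup_arch() can install the matching ppc_md
 * interrupt hooks.
 */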
static void __init pSeries_discover_pic(void)
{
        struct device_node *np;
        char *typep;

        /*
         * Setup interrupt mapping options that are needed for finish_device_tree
         * to properly parse the OF interrupt tree & do the virtual irq mapping
         */
        __irq_offset_value = NUM_ISA_INTERRUPTS;
        ppc64_interrupt_controller = IC_INVALID;
        for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
                typep = (char *)get_property(np, "compatible", NULL);
                if (strstr(typep, "open-pic")) {
                        ppc64_interrupt_controller = IC_OPEN_PIC;
                        break;
                } else if (strstr(typep, "ppc-xicp")) {
                        ppc64_interrupt_controller = IC_PPC_XIC;
                        break;
                }
        }
        if (ppc64_interrupt_controller == IC_INVALID)
                printk("pSeries_discover_pic: failed to recognize"
                        " interrupt-controller\n");
}

static void pSeries_mach_cpu_die(void)
{
        local_irq_disable();
        idle_task_exit();
        /* Some hardware requires clearing the CPPR, while other hardware does
         * not; it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
        for(;;);
}

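/*
 * Under an LPAR the DABR (data address breakpoint register) is set via
 * an hcall; pSeries_init_early() picks whichever of the two calls the
 * firmware advertises.
 */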
static int pseries_set_dabr(unsigned long dabr)
{
        return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr)
{
        /* We want to catch accesses from kernel and userspace */
        return plpar_hcall_norets(H_SET_XDABR, dabr,
                        H_DABRX_KERNEL | H_DABRX_USER);
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pSeries_init_early(void)
{
        DBG(" -> pSeries_init_early()\n");

        fw_feature_init();

        if (firmware_has_feature(FW_FEATURE_LPAR))
                hpte_init_lpar();
        else
                hpte_init_native();

        if (firmware_has_feature(FW_FEATURE_LPAR))
                find_udbg_vterm();

        if (firmware_has_feature(FW_FEATURE_DABR))
                ppc_md.set_dabr = pseries_set_dabr;
        else if (firmware_has_feature(FW_FEATURE_XDABR))
                ppc_md.set_dabr = pseries_set_xdabr;

        iommu_init_early_pSeries();

        pSeries_discover_pic();

        DBG(" <- pSeries_init_early()\n");
}

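/*
 * Report whether a legacy I/O port is actually backed by hardware, by
 * checking for the corresponding node (8042 keyboard controller or
 * floppy controller) in the device tree.
 */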
static int pSeries_check_legacy_ioport(unsigned int baseport)
{
        struct device_node *np;

#define I8042_DATA_REG  0x60
#define FDC_BASE        0x3f0

        switch(baseport) {
        case I8042_DATA_REG:
                np = of_find_node_by_type(NULL, "8042");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        case FDC_BASE:
                np = of_find_node_by_type(NULL, "fdc");
                if (np == NULL)
                        return -ENODEV;
                of_node_put(np);
                break;
        }
        return 0;
}

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
extern struct machdep_calls pSeries_md;

static int __init pSeries_probe(int platform)
{
        if (platform != PLATFORM_PSERIES &&
            platform != PLATFORM_PSERIES_LPAR)
                return 0;

        /* if we have some ppc_md fixups for LPAR to do, do
         * it here ...
         */

        if (platform == PLATFORM_PSERIES_LPAR)
                ppc64_firmware_features |= FW_FEATURE_LPAR;

        return 1;
}

DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

static inline void dedicated_idle_sleep(unsigned int cpu)
{
        struct lppaca *plppaca = &lppaca[cpu ^ 1];

        /* Only sleep if the other thread is not idle */
        if (!(plppaca->idle)) {
                local_irq_disable();

                /*
                 * We are about to sleep the thread and so won't be polling any
                 * more.
                 */
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();

                /*
                 * SMT dynamic mode. Cede will result in this thread going
                 * dormant, if the partner thread is still doing work.  Thread
                 * wakes up if partner goes idle, an interrupt is presented, or
                 * a prod occurs.  Returning from the cede enables external
                 * interrupts.
                 */
                if (!need_resched())
                        cede_processor();
                else
                        local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                /*
                 * Give the HV an opportunity at the processor, since we are
                 * not doing any work.
                 */
                poll_pending();
        }
}

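/*
 * Idle loop for dedicated-processor partitions: spin at low thread
 * priority ("snooze") for up to smt_snooze_delay microseconds, then let
 * dedicated_idle_sleep() cede the thread to the hypervisor while the
 * sibling thread is still busy.
 */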
static void pseries_dedicated_idle(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
        set_thread_flag(TIF_POLLING_NRFLAG);

        while (1) {
                /*
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
                get_lppaca()->idle = 1;

                if (!need_resched()) {
                        start_snooze = get_tb() +
                                *smt_snooze_delay * tb_ticks_per_usec;

                        while (!need_resched() && !cpu_is_offline(cpu)) {
                                ppc64_runlatch_off();

                                /*
                                 * Go into low thread priority and possibly
                                 * low power mode.
                                 */
                                HMT_low();
                                HMT_very_low();

                                if (*smt_snooze_delay != 0 &&
                                    get_tb() > start_snooze) {
                                        HMT_medium();
                                        dedicated_idle_sleep(cpu);
                                }
                        }

                        HMT_medium();
                }

                get_lppaca()->idle = 0;
                ppc64_runlatch_on();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();

                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
}

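/*
 * Idle loop for shared-processor partitions: cede the virtual processor
 * whenever there is no work, so the hypervisor can dispatch it
 * elsewhere.
 */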
static void pseries_shared_idle(void)
{
        unsigned int cpu = smp_processor_id();

        while (1) {
                /*
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
                get_lppaca()->idle = 1;

                while (!need_resched() && !cpu_is_offline(cpu)) {
                        local_irq_disable();
                        ppc64_runlatch_off();

                        /*
                         * Yield the processor to the hypervisor.  We return if
                         * an external interrupt occurs (external interrupts
                         * are driven prior to returning here) or if a prod
                         * occurs from another processor. When returning here,
                         * external interrupts are enabled.
                         *
                         * Check need_resched() again with interrupts disabled
                         * to avoid a race.
                         */
                        if (!need_resched())
                                cede_processor();
                        else
                                local_irq_enable();

                        HMT_medium();
                }

                get_lppaca()->idle = 0;
                ppc64_runlatch_on();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();

                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
}

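/*
 * On LPAR the PCI buses are enumerated from the firmware-supplied
 * device tree instead of by probing config space directly.
 */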
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                return PCI_PROBE_DEVTREE;
        return PCI_PROBE_NORMAL;
}

#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
        /* Don't risk a hypervisor call if we're crashing */
        if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
                unsigned long vpa = __pa(get_lppaca());

                if (unregister_vpa(hard_smp_processor_id(), vpa)) {
                        printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
                                        "failed\n", smp_processor_id(),
                                        hard_smp_processor_id());
                }
        }

        if (ppc64_interrupt_controller == IC_OPEN_PIC)
                mpic_teardown_this_cpu(secondary);
        else
                xics_teardown_cpu(secondary);
}
#endif

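/* Machine dependent entry points (ppc_md) for pSeries platforms. */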
struct machdep_calls __initdata pSeries_md = {
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_early             = pSeries_init_early,
        .show_cpuinfo           = pSeries_show_cpuinfo,
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .pci_probe_mode         = pSeries_pci_probe_mode,
        .irq_bus_setup          = pSeries_irq_bus_setup,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
        .panic                  = rtas_os_term,
        .cpu_die                = pSeries_mach_cpu_die,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = rtas_progress,
        .check_legacy_ioport    = pSeries_check_legacy_ioport,
        .system_reset_exception = pSeries_system_reset_exception,
        .machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
        .kexec_cpu_down         = pseries_kexec_cpu_down,
        .machine_kexec          = default_machine_kexec,
        .machine_kexec_prepare  = default_machine_kexec_prepare,
        .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};