arch/powerpc/mm/hash_utils_64.c

/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

hpte_t *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
/* One byte per linear-map page: 0x80 means mapped, low 7 bits hold the
 * hidx of the bolted HPTE (see kernel_map_linear_page below). */
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are definitions of page size arrays to be used when none is
 * provided by the firmware.
 */

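/*
 * Quick reference for the mmu_psize_def fields, paraphrasing how the
 * device-tree scan below fills them in:
 *   shift  - log2 of the page size
 *   sllp   - SLB L/LP encoding used when mapping a segment of this size
 *   penc   - HPTE LP encoding for this size within a base page size
 *   avpnm  - low AVPN bits that are not significant for this size
 *   tlbiel - non-zero if tlbiel can be used to invalidate this size
 */
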
/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long mode, int psize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        unsigned long tmp_mode;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr);
                unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

                tmp_mode = mode;

                /* Make non-kernel text non-executable */
                if (!in_kernel_text(vaddr))
                        tmp_mode = mode | HPTE_R_N;

                hash = hpt_hash(va, shift);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, va, paddr,
                                tmp_mode, HPTE_V_BOLTED, psize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}

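/*
 * Worked example for the loop above: bolting a 32MB region with 16M
 * pages has shift = 24, so the loop takes two 16MB steps; each step
 * derives the kernel VSID for the virtual address, forms the 64-bit VA
 * as (vsid << 28) | segment offset, hashes it to pick a PTEG, and
 * inserts a bolted HPTE into that group.
 */
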
static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while (size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while (size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch (shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}

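/*
 * Illustrative layout of "ibm,segment-page-sizes", as parsed above:
 * each entry is { shift, slbenc, count } followed by count pairs of
 * { page-shift, penc }.  A hypothetical CPU supporting 4K and 16M
 * pages might encode it as:
 *   0x0c 0x000 0x1  0x0c 0x0      (4K segment, 4K pages, penc 0)
 *   0x18 0x100 0x1  0x18 0x0      (16M segment, 16M pages, penc 0)
 */
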
static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree, let's fall back on the known size
         * list for 16M capable GP & GR
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only
         * support 16M, 1M and 4K, which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
                        mmu_io_psize = MMU_PAGE_64K;
                else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
         * on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_huge_psize = MMU_PAGE_16M;
        /* With 4k/4level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_huge_psize = MMU_PAGE_1M;

        /* Calculate HPAGE_SHIFT and sanity check it */
        if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
            mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
                HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
        else
                HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}

static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = lmb_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

        return pteg_count << 7;
}

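/*
 * Sizing example for the fallback above: with 1GB (2^30) of RAM,
 * pteg_count = max(2^30 >> 13, 2^11) = 2^17 PTEGs.  Each PTEG is 128
 * bytes (eight 16-byte HPTEs), so the table is 2^17 << 7 = 16MB, i.e.
 * 1/64th of RAM, with a floor of 2^11 << 7 = 256KB.
 */
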
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
                mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

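/*
 * Notes on make_bl, for reference: on 64-bit PowerPC (ELFv1 ABI) a
 * function pointer refers to a descriptor, so dereferencing func yields
 * the actual entry point.  0x48000001 is a "bl" instruction (primary
 * opcode 18 with LK=1); the word-aligned signed displacement occupies
 * the bits masked by 0x03fffffc, giving a reach of +/-32MB.
 * htab_finish_init below uses this to patch the call sites in the
 * low-level assembler hash code so they branch directly to the
 * platform's HPTE routines.
 */
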
static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long mode_rw;
        unsigned long base = 0, size = 0;
        int i;

        extern unsigned long tce_alloc_start, tce_alloc_end;

        DBG(" -> htab_initialize()\n");

        /* Initialize page sizes */
        htab_init_page_sizes();

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;
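                /* e.g. a 16MB table has 2^17 PTEGs, so HTABSIZE is
                 * 17 - 11 = 6; the table is size-aligned by the
                 * lmb_alloc above, so adding the encoding into the
                 * low bits of the base address is safe. */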

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
                                                    1, lmb.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* create the bolted linear mapping in the hash table */
        for (i = 0; i < lmb.memory.cnt; i++) {
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two lmb regions and
                 * will fit within a single 16Mb page.
                 * The DART space is assumed to be a full 16Mb region even if
                 * we only use 2Mb of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16Mb large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                         __pa(base), mode_rw,
                                                         mmu_linear_psize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_table_end,
                                                         base + size,
                                                         __pa(dart_table_end),
                                                         mode_rw,
                                                         mmu_linear_psize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                         mode_rw, mmu_linear_psize));
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), mode_rw,
                                         mmu_linear_psize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void htab_initialize_secondary(void)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by the asm code in hashtable.S to do a lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty: the icache may be stale for it */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        /* instruction access fault: flush and mark clean */
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_PPC_64K_PAGES
        if (mm->context.user_psize == MMU_PAGE_4K)
                return;
        mm->context.user_psize = MMU_PAGE_4K;
        mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
        get_paca()->context = mm->context;
        slb_flush_and_rebolt();
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
#endif
}

EXPORT_SYMBOL_GPL(demote_segment_4k);

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;
        int psize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

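        /*
         * REGION_ID() is the top nibble of the effective address: 0 for
         * user space, 0xc for the kernel linear mapping and 0xd for the
         * vmalloc/ioremap region (the standard 64-bit layout).
         */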
        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
                vsid = get_vsid(mm->context.id, ea);
                psize = mm->context.user_psize;
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

        /* Handle hugepage regions */
        if (unlikely(in_hugepage_area(mm->context, ea))) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

        /* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
        rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if (pte_val(*ptep) & _PAGE_4K_PFN) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        if (mmu_ci_restrictions) {
                /* If this PTE is non-cacheable, switch to 4k */
                if (psize == MMU_PAGE_64K &&
                    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                        if (user_region) {
                                demote_segment_4k(mm, ea);
                                psize = MMU_PAGE_4K;
                        } else if (ea < VMALLOC_END) {
                                /*
                                 * some driver did a non-cacheable mapping
                                 * in vmalloc space, so switch vmalloc
                                 * to 4k pages
                                 */
                                printk(KERN_ALERT "Reducing vmalloc segment "
                                       "to 4kB pages because of "
                                       "non-cacheable mapping\n");
                                psize = mmu_vmalloc_psize = MMU_PAGE_4K;
                        }
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
                if (user_region) {
                        if (psize != get_paca()->context.user_psize) {
                                get_paca()->context = mm->context;
                                slb_flush_and_rebolt();
                        }
                } else if (get_paca()->vmalloc_sllp !=
                           mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                        get_paca()->vmalloc_sllp =
                                mmu_psize_defs[mmu_vmalloc_psize].sllp;
                        slb_flush_and_rebolt();
                }
        }
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        cpumask_t mask;
        unsigned long flags;
        int local = 0;

        /* We don't want huge pages prefaulted for now
         */
        if (unlikely(in_hugepage_area(mm->context, ea)))
                return;

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx)\n", mm, mm->pgd, ea, access, trap);

        /* Get PTE, VSID, access mask */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;
        vsid = get_vsid(mm->context.id, ea);

        /* Hash it in */
        local_irq_save(flags);
        mask = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(mm->cpu_vm_mask, mask))
                local = 1;
#ifndef CONFIG_PPC_64K_PAGES
        __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
        if (mmu_ci_restrictions) {
                /* If this PTE is non-cacheable, switch to 4k */
                if (mm->context.user_psize == MMU_PAGE_64K &&
                    (pte_val(*ptep) & _PAGE_NO_CACHE))
                        demote_segment_4k(mm, ea);
        }
        if (mm->context.user_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local);
        else
                __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
        local_irq_restore(flags);
}

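/*
 * Each subpage's hidx records where its HPTE went: _PTEIDX_SECONDARY
 * set means the entry lives in the secondary bucket (~hash), and
 * _PTEIDX_GROUP_IX is the slot within the 8-entry PTEG.
 */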
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, local);
        } pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, local);
        }
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                siginfo_t info;

                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
        unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
        unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
                _PAGE_COHERENT | PP_RWXX | HPTE_R_N;
        int ret;

        hash = hpt_hash(va, PAGE_SHIFT);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
                                 mode, HPTE_V_BOLTED, mmu_linear_psize);
        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
        unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

        hash = hpt_hash(va, PAGE_SHIFT);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
922 #endif /* CONFIG_DEBUG_PAGEALLOC */