flush icache before set_pte() on ia64: flush icache at set_pte
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2da841110727b223c4e9c4720a6351a94c92a7c1..a096b30734f3dc3db88857fd5889512718ad6c6e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,9 +39,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
-
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
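
The hunk above drops the ia64-private per-CPU quicklist bookkeeping; the generic quicklist allocator takes over that role, so the arch no longer tracks its own cache of free page-table pages. The next hunk below also removes the trimming policy (check_pgt_cache() and its helpers) that went with it. As a rough illustration of that policy only, here is a self-contained userspace toy; every name in it (toy_cache, toy_trim, the constants) is hypothetical and none of it is kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the removed trimming policy: keep a cache of free
 * pages, trim only when it exceeds a memory-derived cap, and free at
 * most a fixed batch per pass so no single pass runs for too long. */
#define MIN_CACHED_PAGES   25L   /* floor, like MIN_PGT_PAGES */
#define MAX_FREES_PER_PASS 16L   /* batch bound, like MAX_PGT_FREES_PER_PASS */

struct toy_cache {
	void *pages[1024];
	long size;
};

static void *toy_pop(struct toy_cache *c)
{
	return c->pages[--c->size];
}

static void toy_trim(struct toy_cache *c, long max_pages)
{
	while (c->size > MIN_CACHED_PAGES && c->size > max_pages) {
		long batch = c->size - max_pages;

		if (batch > MAX_FREES_PER_PASS)
			batch = MAX_FREES_PER_PASS;
		while (batch--)
			free(toy_pop(c));
		/* the kernel re-enabled preemption between batches here */
	}
}

int main(void)
{
	struct toy_cache c = { .size = 0 };

	while (c.size < 100)
		c.pages[c.size++] = malloc(64);
	toy_trim(&c, 40);	/* pretend 40 is the memory-derived cap */
	printf("cached pages after trim: %ld\n", c.size);
	while (c.size > 0)
		free(toy_pop(&c));
	return 0;
}

The batch bound mirrors MAX_PGT_FREES_PER_PASS: the kernel version dropped and re-took preempt_disable() between batches so trimming never monopolized the CPU.
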
@@ -56,64 +53,13 @@ EXPORT_SYMBOL(vmem_map);
 struct page *zero_page_memmap_ptr;     /* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
-#define MIN_PGT_PAGES                  25UL
-#define MAX_PGT_FREES_PER_PASS         16L
-#define PGT_FRACTION_OF_NODE_MEM       16
-
-static inline long
-max_pgt_pages(void)
-{
-       u64 node_free_pages, max_pgt_pages;
-
-#ifndef        CONFIG_NUMA
-       node_free_pages = nr_free_pages();
-#else
-       node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
-#endif
-       max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
-       max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
-       return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
-       long pages_to_free;
-
-       pages_to_free = pgtable_quicklist_size - max_pgt_pages();
-       pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
-       return pages_to_free;
-}
-
 void
-check_pgt_cache(void)
-{
-       long pages_to_free;
-
-       if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
-               return;
-
-       preempt_disable();
-       while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
-               while (pages_to_free--) {
-                       free_page((unsigned long)pgtable_quicklist_alloc());
-               }
-               preempt_enable();
-               preempt_disable();
-       }
-       preempt_enable();
-}
-
-void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
 {
        unsigned long addr;
        struct page *page;
        unsigned long order;
 
-       if (!pte_exec(pte))
-               return;                         /* not an executable page... */
-
        page = pte_page(pte);
        addr = (unsigned long) page_address(page);
 
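This hunk renames lazy_mmu_prot_update() to __ia64_sync_icache_dcache() and deletes the pte_exec() early return, which implies the executable-page filtering moves to the set_pte() caller in the ia64 headers, matching the commit title. A hedged sketch of what that caller-side check could look like; the real caller lives in include/asm-ia64/pgtable.h, not in this file, and may differ in detail:

/*
 * Sketch only, not the verbatim commit: flush via
 * __ia64_sync_icache_dcache() when a present, executable mapping is
 * installed for a new physical frame, then publish the PTE.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	if (pte_exec(pteval) &&			/* executable page... */
	    (!pte_present(*ptep) ||		/* ...being mapped in... */
	     pte_pfn(*ptep) != pte_pfn(pteval)))/* ...or remapped */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;				/* then publish the PTE */
}

Flushing before the PTE is published is the point of the change: once the mapping is visible, another CPU could fetch stale icache contents for the page.
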
@@ -355,7 +301,7 @@ setup_gate (void)
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-       unsigned long psr, pta, impl_va_bits;
+       unsigned long pta, impl_va_bits;
        extern void __devinit tlb_init (void);
 
 #ifdef CONFIG_DISABLE_VHPT
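
The final hunk removes the explicit translation-register pin of the per-CPU area from ia64_mmu_init(); with it gone, the per-CPU mapping is left to the normal dynamically-inserted TLB path. For reference, the deleted sequence did roughly the following (the fragment is the removed code with comments added, not standalone buildable):

psr = ia64_clear_ic();		/* psr.ic = 0: disable interruption
				   collection around the TR insert */
ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,	/* 0x2 = data TR */
	 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
	 PERCPU_PAGE_SHIFT);	/* pin one percpu-granule page */
ia64_set_psr(psr);		/* restore interruption collection */
ia64_srlz_i();			/* serialize the instruction stream */
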
@@ -364,15 +310,6 @@ ia64_mmu_init (void *my_cpu_data)
 #      define VHPT_ENABLE_BIT  1
 #endif
 
-       /* Pin mapping for percpu area into TLB */
-       psr = ia64_clear_ic();
-       ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-                pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-                PERCPU_PAGE_SHIFT);
-
-       ia64_set_psr(psr);
-       ia64_srlz_i();
-
        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of