void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 0);
- free_cold_page(page);
+ init_page_count(page);
+ __free_page(page);
totalram_pages++;
num_physpages++;
}
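
The swap from set_page_count(page, 0)/free_cold_page() to
init_page_count()/__free_page() follows the generic-mm rule that a fresh
page's refcount is initialized to one and then dropped by the free path.
For reference, init_page_count() in this era's include/linux/mm.h is
essentially the one-liner below (a sketch for illustration, not part of
this patch):

static inline void init_page_count(struct page *page)
{
	/* give the newly onlined page a single reference */
	atomic_set(&page->_count, 1);
}

__free_page() then drops that single reference via put_page_testzero()
and hands the page to the buddy allocator, which is exactly what
onlining wants.
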
-/*
- * This works only for the non-NUMA case. Later, we'll need a lookup
- * to convert from real physical addresses to nid, that doesn't use
- * pfn_to_nid().
- */
int __devinit add_memory(u64 start, u64 size)
{
- struct pglist_data *pgdata = NODE_DATA(0);
+ struct pglist_data *pgdata;
struct zone *zone;
+ int nid;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- start += KERNELBASE;
+ nid = hot_add_scn_to_nid(start);
+ pgdata = NODE_DATA(nid);
+
+ start = (unsigned long)__va(start);
create_section_mapping(start, start + size);
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;

return __add_pages(zone, start_pfn, nr_pages);
}
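
With the nid lookup moved inside add_memory(), callers can stay
node-agnostic. A hedged sketch of how the generic sysfs probe path
drives this interface (modelled on memory_probe_store() in
drivers/base/memory.c; the body there may differ in detail):

static ssize_t memory_probe_store(struct class *class, const char *buf,
				  size_t count)
{
	u64 phys_addr = simple_strtoull(buf, NULL, 0);
	int ret;

	/* add_memory() now resolves the node via hot_add_scn_to_nid() */
	ret = add_memory(phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);

	return ret ? ret : count;
}
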
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
- BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
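
Dropping BUG_ON(!start) only makes sense if lmb_alloc() can no longer
return zero, i.e. the allocator now panics internally on failure. A
hypothetical sketch of that internal check (the wording and placement
are illustrative, not quoted from the lmb code):

unsigned long lmb_alloc(unsigned long size, unsigned long align)
{
	unsigned long addr = lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);

	/* illustrative: panic here instead of returning 0 to every caller */
	if (!addr)
		panic("lmb_alloc of %lu bytes failed", size);
	return addr;
}
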
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
/*
* We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero-filled pages are icache clean)
* - Anton
*/
-
- /* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &pg->flags))
- clear_bit(PG_arch_1, &pg->flags);
+ flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
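
The removed fast path is not lost: it is what flush_dcache_page() does
on this platform, so clear_user_page() and copy_user_page() now share a
single copy of the logic. A sketch of powerpc's flush_dcache_page() of
this era, reconstructed from the lines removed above:

void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
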
return;
#endif
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
-
- /* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &pg->flags))
- clear_bit(PG_arch_1, &pg->flags);
+ flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
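
The comment change tracks the split page table lock work: the lock
protecting a PTE may now be per page-table page rather than
mm->page_table_lock. A hedged sketch of the calling convention the new
comment describes (example_set_pte() is illustrative, not a kernel
function):

static void example_set_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr, pmd_t *pmd, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);

	set_pte_at(mm, addr, ptep, entry);
	update_mmu_cache(vma, addr, entry);	/* pte lock still held */
	pte_unmap_unlock(ptep, ptl);
}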