mm: make lowmem_page_address() use PFN_PHYS() for improved portability
author Chris Metcalf <cmetcalf@tilera.com>
Mon, 24 May 2010 21:32:53 +0000 (14:32 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 25 May 2010 15:07:02 +0000 (08:07 -0700)
This ensures that platforms with lowmem physical addresses (PAs) above 32 bits work
correctly by avoiding truncation of the PA during the left shift.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: Barry Song <21cnbao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h

index 963f908af9d055d09ab0d83b7d66db933f9150b8..b969efb03787ee69546995ad378ac54b6bc37e3b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/pfn.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -595,7 +596,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 
 static __always_inline void *lowmem_page_address(struct page *page)
 {
-       return __va(page_to_pfn(page) << PAGE_SHIFT);
+       return __va(PFN_PHYS(page_to_pfn(page)));
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
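
For illustration only, here is a minimal userspace sketch of the truncation the patch
avoids, assuming a configuration where unsigned long is 32 bits but physical addresses
are 64 bits wide. The type aliases, the PAGE_SHIFT value, and the pfn_phys() helper are
stand-ins for this example, not the kernel's actual definitions; PFN_PHYS() in
<linux/pfn.h> achieves the same effect by widening the pfn before shifting.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for a 32-bit arch with 64-bit physical addresses (assumptions). */
	typedef uint32_t kernel_ulong;   /* plays the role of unsigned long */
	typedef uint64_t phys_addr;      /* plays the role of phys_addr_t */

	#define PAGE_SHIFT 12

	/* Mirrors the idea of PFN_PHYS(): widen the pfn, then shift. */
	static inline phys_addr pfn_phys(kernel_ulong pfn)
	{
		return (phys_addr)pfn << PAGE_SHIFT;
	}

	int main(void)
	{
		/* pfn 0x100000 corresponds to PA 0x1_0000_0000, which needs >32 bits. */
		kernel_ulong pfn = 0x100000;

		/* Old-style expression: the shift is evaluated in 32 bits and wraps to 0. */
		phys_addr truncated = (phys_addr)(pfn << PAGE_SHIFT);

		/* PFN_PHYS()-style expression: the shift is evaluated in 64 bits. */
		phys_addr widened = pfn_phys(pfn);

		printf("truncated: %#llx\n", (unsigned long long)truncated);
		printf("widened:   %#llx\n", (unsigned long long)widened);
		return 0;
	}

On such a configuration the first expression prints 0 while the second prints
0x100000000, which is why lowmem_page_address() now goes through PFN_PHYS() before
handing the result to __va().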