// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */
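
/*
 * Illustrative sketch only (not part of the original file): the entry
 * points exported below are used roughly like this, e.g. by the mspec
 * driver:
 *
 *	unsigned long uc_addr;
 *
 *	uc_addr = uncached_alloc_page(numa_node_id(), 1);
 *	if (uc_addr == 0)
 *		return -ENOMEM;		// no uncached memory available
 *	...				// use the uncached pages
 *	uncached_free_page(uc_addr, 1);
 */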

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};
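
/*
 * Cap on how many cached granules may be converted to uncached per
 * node at run time; beyond this, allocations must be satisfied from
 * the EFI spill pages added at boot.
 */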
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];
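
/*
 * The two IPI handlers below re-issue the corresponding PAL call on
 * the other CPUs, since PAL operates on the local processor only; any
 * failure is recorded in uc_pool->status for the initiator to check.
 */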
static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add the new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
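
	/*
	 * c_addr is in the cached identity-mapped kernel region
	 * (PAGE_OFFSET); rebasing it into __IA64_UNCACHED_OFFSET gives
	 * an uncached alias of the same physical granule.
	 */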

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();
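
	/*
	 * Make sure no CPU still has memory transactions in flight for
	 * the granule before it is added to the uncached pool.
	 */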
	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
			/* pool is empty: try to convert another granule */
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL is passed in by efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);