diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index b87a7582853dd34a91b8d007a9a146574a4ca674..302517929932bb52405d5396c57f084691090e5e 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -102,7 +102,7 @@ static bool memmap_too_large;
 
 
 /* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-unsigned long long mem_limit = ULLONG_MAX;
+static unsigned long long mem_limit = ULLONG_MAX;
 
 
 enum mem_avoid_index {
@@ -215,7 +215,36 @@ static void mem_avoid_memmap(char *str)
                memmap_too_large = true;
 }
 
-static int handle_mem_memmap(void)
+/* Store the number of 1GB huge pages that the user specified: */
+static unsigned long max_gb_huge_pages;
+
+static void parse_gb_huge_pages(char *param, char *val)
+{
+       static bool gbpage_sz;
+       char *p;
+
+       if (!strcmp(param, "hugepagesz")) {
+               p = val;
+               if (memparse(p, &p) != PUD_SIZE) {
+                       gbpage_sz = false;
+                       return;
+               }
+
+               if (gbpage_sz)
+                       warn("hugeTLB page size of 1G set more than once!\n");
+               gbpage_sz = true;
+               return;
+       }
+
+       if (!strcmp(param, "hugepages") && gbpage_sz) {
+               p = val;
+               max_gb_huge_pages = simple_strtoull(p, &p, 0);
+               return;
+       }
+}
+
+
+static int handle_mem_options(void)
 {
        char *args = (char *)get_cmd_line_ptr();
        size_t len = strlen((char *)args);
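A rough illustration of the two-step parser added above (illustration only, not part of the patch; it assumes PUD_SIZE is 1 GiB, as on x86_64). The static gbpage_sz flag is what ties the two options together even though they arrive as separate param/val pairs:

/*
 *   "hugepagesz=1G hugepages=4"   -> max_gb_huge_pages = 4
 *   "hugepagesz=2M hugepages=512" -> max_gb_huge_pages stays 0, because the
 *                                    preceding page size is not 1G
 *   "hugepages=8"                 -> max_gb_huge_pages stays 0 unless an
 *                                    earlier "hugepagesz=1G" set gbpage_sz
 */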
@@ -223,7 +252,8 @@ static int handle_mem_memmap(void)
        char *param, *val;
        u64 mem_size;
 
-       if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+       if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+               !strstr(args, "hugepages"))
                return 0;
 
        tmp_cmdline = malloc(len + 1);
@@ -248,6 +278,8 @@ static int handle_mem_memmap(void)
 
                if (!strcmp(param, "memmap")) {
                        mem_avoid_memmap(val);
+               } else if (strstr(param, "hugepages")) {
+                       parse_gb_huge_pages(param, val);
                } else if (!strcmp(param, "mem")) {
                        char *p = val;
 
@@ -387,7 +419,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
        /* We don't need to set a mapping for setup_data. */
 
        /* Mark the memmap regions we need to avoid */
-       handle_mem_memmap();
+       handle_mem_options();
 
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
        /* Make sure video RAM can be used. */
@@ -466,6 +498,60 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
        }
 }
 
+/*
+ * Skip as many 1GB huge pages as possible in the passed region,
+ * up to the number that the user specified:
+ */
+static void
+process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
+{
+       unsigned long addr, size = 0;
+       struct mem_vector tmp;
+       int i = 0;
+
+       if (!max_gb_huge_pages) {
+               store_slot_info(region, image_size);
+               return;
+       }
+
+       addr = ALIGN(region->start, PUD_SIZE);
+       /* If aligning did not push the address past the region, compute the remaining size: */
+       if (addr < region->start + region->size)
+               size = region->size - (addr - region->start);
+
+       /* Check how many 1GB huge pages can be filtered out: */
+       while (size > PUD_SIZE && max_gb_huge_pages) {
+               size -= PUD_SIZE;
+               max_gb_huge_pages--;
+               i++;
+       }
+
+       /* No good 1GB huge pages found: */
+       if (!i) {
+               store_slot_info(region, image_size);
+               return;
+       }
+
+       /*
+        * Skip those 'i' 1GB huge pages and check whether the remaining
+        * head and/or tail parts of the passed region are still large
+        * enough to hold the image; if so, store them as slots.
+        */
+
+       if (addr >= region->start + image_size) {
+               tmp.start = region->start;
+               tmp.size = addr - region->start;
+               store_slot_info(&tmp, image_size);
+       }
+
+       size = region->size - (addr - region->start) - i * PUD_SIZE;
+       if (size >= image_size) {
+               tmp.start = addr + i * PUD_SIZE;
+               tmp.size = size;
+               store_slot_info(&tmp, image_size);
+       }
+}
+
 static unsigned long slots_fetch_random(void)
 {
        unsigned long slot;
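A worked example of process_gb_huge_pages() with made-up numbers (illustration only; assumes PUD_SIZE == 1 GiB, max_gb_huge_pages == 2 and image_size <= 256 MiB):

/*
 *   region = [0x30000000, 0x260000000)   start 768M, size 8.75G
 *   addr   = ALIGN(0x30000000, 1G) = 0x40000000
 *   size   = 0x220000000 of 1G-aligned space, so the loop sets i = 2
 *
 *   head   = [0x30000000, 0x40000000)  -> stored as KASLR slots
 *   skip   = [0x40000000, 0xC0000000)  -> left free for two 1G huge pages
 *   tail   = [0xC0000000, 0x260000000) -> stored as KASLR slots
 */

Because max_gb_huge_pages is a file-scope budget that is decremented in the loop, later regions only skip whatever is left of the user's requested count.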
@@ -546,7 +632,7 @@ static void process_mem_region(struct mem_vector *entry,
 
                /* If nothing overlaps, store the region and return. */
                if (!mem_avoid_overlap(&region, &overlap)) {
-                       store_slot_info(&region, image_size);
+                       process_gb_huge_pages(&region, image_size);
                        return;
                }
 
@@ -556,7 +642,7 @@ static void process_mem_region(struct mem_vector *entry,
 
                        beginning.start = region.start;
                        beginning.size = overlap.start - region.start;
-                       store_slot_info(&beginning, image_size);
+                       process_gb_huge_pages(&beginning, image_size);
                }
 
                /* Return if overlap extends to or past end of region. */