mm/vmacache, sched/headers: Introduce 'struct vmacache' and move it from <linux/sched.h> to <linux/mm_types.h>
authorIngo Molnar <mingo@kernel.org>
Fri, 3 Feb 2017 10:03:31 +0000 (11:03 +0100)
committerIngo Molnar <mingo@kernel.org>
Thu, 2 Mar 2017 07:42:25 +0000 (08:42 +0100)
The <linux/sched.h> header includes various vmacache related defines,
which are arguably misplaced.

Move them to mm_types.h and minimize the sched.h impact by putting
all task vmacache state into a new 'struct vmacache' structure.

No change in functionality.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/mm_types.h
include/linux/sched.h
include/linux/vmacache.h
kernel/debug/debug_core.c
mm/nommu.c
mm/vmacache.c

index 4f6d440ad78551e919cf3988d436de1b9b2a80c2..137797cd7b5052313d684f874ed6814e8518ab10 100644 (file)
@@ -360,6 +360,18 @@ struct vm_area_struct {
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 };
 
+/*
+ * The per task VMA cache array:
+ */
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+struct vmacache {
+       u32 seqnum;
+       struct vm_area_struct *vmas[VMACACHE_SIZE];
+};
+
 struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
index 3f61baac928b036c488070ebe9d39c60b8ce5cd0..e87c97e1a94778d7be38303a3f0d9c576bfac6dd 100644 (file)
@@ -134,10 +134,6 @@ struct blk_plug;
 struct filename;
 struct nameidata;
 
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
 /*
  * These are the constant used to fake the fixed-point load-average
  * counting. Some notes:
@@ -1550,9 +1546,10 @@ struct task_struct {
 #endif
 
        struct mm_struct *mm, *active_mm;
-       /* per-thread vma caching */
-       u32 vmacache_seqnum;
-       struct vm_area_struct *vmacache[VMACACHE_SIZE];
+
+       /* Per-thread vma caching: */
+       struct vmacache vmacache;
+
 #if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
 #endif
index c3fa0fd43949952957603b35e28b26ddc53fb0d3..1081db987391d24d7385dc7550fa137163284dd2 100644 (file)
@@ -12,7 +12,7 @@
 
 static inline void vmacache_flush(struct task_struct *tsk)
 {
-       memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+       memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }
 
 extern void vmacache_flush_all(struct mm_struct *mm);
index 79517e5549f119aff663ff5572507c70671a2b41..a603ef28f70c98fb2a967abc0d66fe317195bf0c 100644 (file)
@@ -232,9 +232,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
                int i;
 
                for (i = 0; i < VMACACHE_SIZE; i++) {
-                       if (!current->vmacache[i])
+                       if (!current->vmacache.vmas[i])
                                continue;
-                       flush_cache_range(current->vmacache[i],
+                       flush_cache_range(current->vmacache.vmas[i],
                                          addr, addr + BREAK_INSTR_SIZE);
                }
        }
index fe9f4fa4a7a7415df8dd750aeec99c52fd53830e..aae06e854552dd765231e204039e4ba97de46204 100644 (file)
@@ -757,7 +757,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        mm->map_count--;
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
-               if (curr->vmacache[i] == vma) {
+               if (curr->vmacache.vmas[i] == vma) {
                        vmacache_invalidate(mm);
                        break;
                }
index 035fdeb35b43b936a0e247f3e7b658caf55e9404..7c233f8e20eef9ccf0d75a73d7ee8f96a42ef68f 100644 (file)
@@ -60,7 +60,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm)
 void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
 {
        if (vmacache_valid_mm(newvma->vm_mm))
-               current->vmacache[VMACACHE_HASH(addr)] = newvma;
+               current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
 }
 
 static bool vmacache_valid(struct mm_struct *mm)
@@ -71,12 +71,12 @@ static bool vmacache_valid(struct mm_struct *mm)
                return false;
 
        curr = current;
-       if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
+       if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
                /*
                 * First attempt will always be invalid, initialize
                 * the new cache for this task here.
                 */
-               curr->vmacache_seqnum = mm->vmacache_seqnum;
+               curr->vmacache.seqnum = mm->vmacache_seqnum;
                vmacache_flush(curr);
                return false;
        }
@@ -93,7 +93,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
                return NULL;
 
        for (i = 0; i < VMACACHE_SIZE; i++) {
-               struct vm_area_struct *vma = current->vmacache[i];
+               struct vm_area_struct *vma = current->vmacache.vmas[i];
 
                if (!vma)
                        continue;
@@ -121,7 +121,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
                return NULL;
 
        for (i = 0; i < VMACACHE_SIZE; i++) {
-               struct vm_area_struct *vma = current->vmacache[i];
+               struct vm_area_struct *vma = current->vmacache.vmas[i];
 
                if (vma && vma->vm_start == start && vma->vm_end == end) {
                        count_vm_vmacache_event(VMACACHE_FIND_HITS);