sched/headers: Move the NUMA balancing interfaces from <linux/sched.h> to <linux/sched/numa_balancing.h>
author Ingo Molnar <mingo@kernel.org>
Thu, 2 Feb 2017 11:39:17 +0000 (12:39 +0100)
committer Ingo Molnar <mingo@kernel.org>
Fri, 3 Mar 2017 00:43:40 +0000 (01:43 +0100)
Split out the interface between the scheduler and the MM that deals
with page-fault-driven NUMA balancing into the new
<linux/sched/numa_balancing.h> header.
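
For illustration only, not part of this commit: an MM-side call site
that needs these hooks would now pull them in from the split-out
header directly rather than via all of <linux/sched.h>. The helper
below is hypothetical and only shows the intended include plus a
typical call into the interface:

    #include <linux/sched/numa_balancing.h>

    /* Hypothetical MM-side helper, shown only to illustrate the new include. */
    static void example_record_numa_fault(int last_nid, int nid, int nr_pages,
                                          bool migrated)
    {
            int flags = migrated ? TNF_MIGRATED : 0;

            /* Feed the hinting fault into the scheduler's NUMA-balancing state. */
            task_numa_fault(last_nid, nid, nr_pages, flags);
    }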

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
include/linux/sched/numa_balancing.h

include/linux/sched.h
index aa60812b4b7a643bb6ef09ac1c43f855c1b6cb16..fcaea1e7b08a1d08fd68952ed15c5b917a91e4f3 100644
@@ -1236,41 +1236,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 }
 #endif
 
-#define TNF_MIGRATED   0x01
-#define TNF_NO_GROUP   0x02
-#define TNF_SHARED     0x04
-#define TNF_FAULT_LOCAL        0x08
-#define TNF_MIGRATE_FAIL 0x10
-
-#ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int last_node, int node, int pages, int flags);
-extern pid_t task_numa_group_id(struct task_struct *p);
-extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
-                                       int src_nid, int dst_cpu);
-#else
-static inline void task_numa_fault(int last_node, int node, int pages,
-                                  int flags)
-{
-}
-static inline pid_t task_numa_group_id(struct task_struct *p)
-{
-       return 0;
-}
-static inline void set_numabalancing_state(bool enabled)
-{
-}
-static inline void task_numa_free(struct task_struct *p)
-{
-}
-static inline bool should_numa_migrate_memory(struct task_struct *p,
-                               struct page *page, int src_nid, int dst_cpu)
-{
-       return true;
-}
-#endif
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
        return task->pids[PIDTYPE_PID].pid;
include/linux/sched/numa_balancing.h
index 999182279f785e1cab380b6a7c9305739b9b8232..35d5fc77b4be9b2cec488d57fcbe00f27f2f4db1 100644
@@ -1,6 +1,46 @@
 #ifndef _LINUX_SCHED_NUMA_BALANCING_H
 #define _LINUX_SCHED_NUMA_BALANCING_H
 
+/*
+ * This is the interface between the scheduler and the MM that
+ * implements memory access pattern based NUMA-balancing:
+ */
+
 #include <linux/sched.h>
 
+#define TNF_MIGRATED   0x01
+#define TNF_NO_GROUP   0x02
+#define TNF_SHARED     0x04
+#define TNF_FAULT_LOCAL        0x08
+#define TNF_MIGRATE_FAIL 0x10
+
+#ifdef CONFIG_NUMA_BALANCING
+extern void task_numa_fault(int last_node, int node, int pages, int flags);
+extern pid_t task_numa_group_id(struct task_struct *p);
+extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+                                       int src_nid, int dst_cpu);
+#else
+static inline void task_numa_fault(int last_node, int node, int pages,
+                                  int flags)
+{
+}
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+       return 0;
+}
+static inline void set_numabalancing_state(bool enabled)
+{
+}
+static inline void task_numa_free(struct task_struct *p)
+{
+}
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+                               struct page *page, int src_nid, int dst_cpu)
+{
+       return true;
+}
+#endif
+
 #endif /* _LINUX_SCHED_NUMA_BALANCING_H */