/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent pmem driver flush operation will drain posted write queues.
 */
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
	int rem;

	/*
	 * We are copying between two kernel buffers, if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault) we would have already reported a general protection fault
	 * before the WARN+BUG.
	 */
	rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
	if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, rem))
		BUG();
}
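
/*
 * Example (illustrative sketch, not from the original header): a pmem
 * block driver write path would copy through the non-temporal path
 * above and count on a later flush to drain posted-write queues, e.g.:
 *
 *	arch_memcpy_to_pmem(pmem_addr, mem, len);
 *	...
 *	nvdimm_flush(nd_region);
 */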

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	/* round the start address down to a cache line boundary */
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
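
/*
 * Worked example (added note): with 64-byte cache lines, addr = 0x1038
 * and size = 16 give vend = 0x1048; the loop rounds addr down to 0x1000
 * and issues CLWB on the lines at 0x1000 and 0x1040, covering the whole
 * [addr, addr + size) range.
 */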

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 */
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
		struct iov_iter *i)
{
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(addr, bytes, i);

	/*
	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
	 * non-temporal stores for the bulk of the transfer, but we need
	 * to manually flush if the transfer is unaligned. A cached
	 * memory copy is used when destination or size is not naturally
	 * aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 *
	 * In the non-iovec case the entire destination needs to be
	 * flushed.
	 */
	if (iter_is_iovec(i)) {
		unsigned long flushed, dest = (unsigned long) addr;

		if (bytes < 8) {
			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
				arch_wb_cache_pmem(addr, bytes);
		} else {
			/* flush the cache line holding an unaligned head */
			if (!IS_ALIGNED(dest, 8)) {
				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
				arch_wb_cache_pmem(addr, 1);
			}

			/* flush the cache line holding an unaligned tail */
			flushed = dest - (unsigned long) addr;
			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
				arch_wb_cache_pmem(addr + bytes - 1, 1);
		}
	} else
		arch_wb_cache_pmem(addr, bytes);

	return len;
}
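
/*
 * Worked example (added note): in the iovec case with 64-byte cache
 * lines, addr = 0x1003 and bytes = 64 take the bytes >= 8 branch with
 * an unaligned head, so the line at 0x1000 is flushed and dest rounds
 * up to 0x1040 (flushed = 61); the remaining 3 bytes are not 8-byte
 * aligned either, so the line holding the final byte (0x1042) is
 * flushed as well.
 */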

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 */
static inline void arch_clear_pmem(void *addr, size_t size)
{
	memset(addr, 0, size);
	arch_wb_cache_pmem(addr, size);
}
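
/**
 * arch_invalidate_pmem - invalidate cached copies of a PMEM range
 * @addr: virtual start address
 * @size: number of bytes to invalidate
 *
 * Flush and invalidate the cache lines covering the range so that
 * subsequent reads see the current media contents, e.g. after the
 * media has been modified underneath the CPU cache.
 */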
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */

#endif /* __ASM_X86_PMEM_H__ */