// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
			   unsigned long fd, off_t offset,
			   unsigned long page_shift_offset)
{
	/* Reject offsets that aren't aligned to a full page. */
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
}
#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags,
	unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE.
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif
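/*
 * A worked example of the shift handling above (illustrative, not ABI
 * documentation): mmap2's offset argument is always in 4096-byte units.
 * With 4 KiB pages (PAGE_SHIFT == 12) the call above shifts by
 * 12 - 12 == 0, so an offset of 3 is passed through as page number 3
 * (file byte 3 * 4096 == 12288).  Plain mmap takes a byte offset and
 * shifts by the full PAGE_SHIFT to get the page number that
 * ksys_mmap_pgoff() expects.
 */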
/*
 * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart.  There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know
 * the thread->hart mappings), so we've defined a RISC-V specific system call
 * to flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller.  We don't currently do anything with the address range, that's just
 * in there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
	uintptr_t, flags)
{
	/* Check the reserved flags. */
	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
		return -EINVAL;

	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

	return 0;
}
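/*
 * A minimal userspace sketch of the call above, for illustration only
 * ('buf' and 'len' are hypothetical; glibc wraps this syscall as
 * __riscv_flush_icache() in <sys/cachectl.h>, which is what portable code
 * should use):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// After a JIT writes instructions into [buf, buf + len), make them
 *	// visible to every thread in the process.  flags == 0 requests a
 *	// process-wide (not thread-local) flush.
 *	syscall(__NR_riscv_flush_icache, buf, buf + len, 0UL);
 */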
/*
 * The hwprobe interface, which allows userspace to probe for the features
 * supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}
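/*
 * For example (with a hypothetical mask): if the cpumask spans harts whose
 * mvendorid values differ, a RISCV_HWPROBE_KEY_MVENDORID query reports -1,
 * telling userspace the answer isn't uniform across the chosen CPUs.
 */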
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

		if (riscv_isa_extension_available(isainfo->isa, ZBA))
			pair->value |= RISCV_HWPROBE_EXT_ZBA;
		else
			missing |= RISCV_HWPROBE_EXT_ZBA;

		if (riscv_isa_extension_available(isainfo->isa, ZBB))
			pair->value |= RISCV_HWPROBE_EXT_ZBB;
		else
			missing |= RISCV_HWPROBE_EXT_ZBB;

		if (riscv_isa_extension_available(isainfo->isa, ZBS))
			pair->value |= RISCV_HWPROBE_EXT_ZBS;
		else
			missing |= RISCV_HWPROBE_EXT_ZBS;
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}
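/*
 * As a concrete (hypothetical) example of the masking above: if every CPU
 * in the mask has Zbb but only some have Zba, the reported value includes
 * RISCV_HWPROBE_EXT_ZBB while RISCV_HWPROBE_EXT_ZBA lands in 'missing' and
 * is cleared, so userspace only sees extensions the whole mask shares.
 */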
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}
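/*
 * For example: if one CPU in the mask reports RISCV_HWPROBE_MISALIGNED_FAST
 * and another RISCV_HWPROBE_MISALIGNED_SLOW, the mask as a whole reports
 * RISCV_HWPROBE_MISALIGNED_UNKNOWN, since no single answer holds everywhere.
 */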
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0,
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpu_count,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpu_count && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpu_count > cpumask_size())
			cpu_count = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpu_count);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * that there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
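/*
 * The consumer of this data is the vDSO (arch/riscv/kernel/vdso/hwprobe.c):
 * __vdso_riscv_hwprobe() answers "all online CPUs" queries straight from
 * all_cpu_hwprobe_values[], and when homogeneous_cpus is set it can do the
 * same for arbitrary cpumasks; everything else falls back to the syscall.
 */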
#endif /* CONFIG_MMU */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpu_count, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpu_count,
				cpus, flags);
}
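/*
 * A minimal userspace sketch of this syscall, for illustration (newer glibc
 * exposes a __riscv_hwprobe() wrapper that can take the vDSO fast path
 * described above); use_zbb_path() is a hypothetical stand-in:
 *
 *	#include <asm/hwprobe.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// cpu_count == 0 with a NULL mask means "all online CPUs";
 *	// flags must be 0.
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_EXT_ZBB))
 *		use_zbb_path();	// every online hart implements Zbb
 */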
/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
{
	return -ENOSYS;
}