1 /* SPDX-License-Identifier: GPL-2.0 */
/*
3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
5 #ifndef _ASM_PROCESSOR_H
6 #define _ASM_PROCESSOR_H
8 #include <linux/atomic.h>
9 #include <linux/cpumask.h>
10 #include <linux/sizes.h>
13 #include <asm/cpu-info.h>
14 #include <asm/hw_breakpoint.h>
15 #include <asm/loongarch.h>
16 #include <asm/vdso/processor.h>
17 #include <uapi/asm/ptrace.h>
18 #include <uapi/asm/sigcontext.h>
/*
 * User address-space limits.
 *
 * NOTE(review): the first group (2 GiB TASK_SIZE) and the second group
 * (TASK_SIZE32/TASK_SIZE64) are normally selected by CONFIG_32BIT /
 * CONFIG_64BIT preprocessor conditionals; the #ifdef/#else/#endif lines
 * appear to be missing from this extract -- confirm against the full
 * header before relying on this text.
 */
22 #define TASK_SIZE 0x80000000UL
23 #define TASK_SIZE_MIN TASK_SIZE
24 #define STACK_TOP_MAX TASK_SIZE
/* On a pure 32-bit configuration every task uses 32-bit addressing. */
26 #define TASK_IS_32BIT_ADDR 1
/* 64-bit kernel: 4 GiB address-space limit for 32-bit (compat) tasks. */
32 #define TASK_SIZE32 0x100000000UL
/*
 * 64-bit task size is capped by both the kernel's VA_BITS and the CPU's
 * implemented virtual-address width (cpu_vabits), whichever is smaller.
 */
33 #define TASK_SIZE64 (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
/* Per-task limit: selected at run time by the TIF_32BIT_ADDR thread flag. */
35 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
36 #define TASK_SIZE_MIN TASK_SIZE32
37 #define STACK_TOP_MAX TASK_SIZE64
/* Same as TASK_SIZE but queries an arbitrary task, not 'current'. */
39 #define TASK_SIZE_OF(tsk) \
40 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
42 #define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
/* vDSO placement randomization window: 1 MiB (32-bit) / 64 MiB (64-bit). */
46 #define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
/* Run-time user stack top (defined elsewhere; may include randomization --
 * NOTE(review): confirm against the implementation, it is not visible here). */
48 unsigned long stack_top(void);
49 #define STACK_TOP stack_top()
/*
52 * This decides where the kernel will search for a free chunk of vm
53 * space during mmap's.
 */
55 #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
 * FPU register file: each register is FPU_REG_WIDTH (256) bits wide and
 * 32-byte aligned.  NOTE(review): presumably sized for the widest vector
 * extension -- confirm against the architecture manual.
 */
57 #define FPU_REG_WIDTH 256
58 #define FPU_ALIGN __attribute__((aligned(32)))
/*
 * Overlapping 32-bit and 64-bit element views of one FPU register.
 * NOTE(review): the "union fpureg {" opening line (and its closer) are
 * missing from this extract -- these members belong to that union.
 */
61 __u32 val32[FPU_REG_WIDTH / 32];
62 __u64 val64[FPU_REG_WIDTH / 64];
/* Element index within a register view; identity mapping here. */
65 #define FPR_IDX(width, idx) (idx)
/*
 * Generate typed get_fpr<width>()/set_fpr<width>() accessors for a single
 * FPU register.  NOTE(review): several continuation lines (function braces,
 * the setter's second parameter line) are missing from this extract.
 */
67 #define BUILD_FPR_ACCESS(width) \
68 static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
70 return fpr->val##width[FPR_IDX(width, idx)]; \
73 static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
76 fpr->val##width[FPR_IDX(width, idx)] = val; \
/*
 * Per-task FPU context, saved/restored on context switch.
 * NOTE(review): at least one member (fcsr) and the closing "};" are
 * missing from this extract.
 */
82 struct loongarch_fpu {
/* Eight 8-bit condition-code registers packed into one 64-bit word. */
84 uint64_t fcc; /* 8x8 */
85 union fpureg fpr[NUM_FPU_REGS];
/*
 * NOTE(review): INIT_CPUMASK's macro body continuation lines are missing
 * from this extract; only the opening line survives below.
 */
88 #define INIT_CPUMASK { \
92 #define ARCH_MIN_TASKALIGN 32
/* Forward declaration only; full definition lives in the vDSO code. */
94 struct loongarch_vdso_info;
/*
 * Per-task CPU state saved across context switches.
97 * If you change thread_struct remember to change the #defines below too!
 */
99 struct thread_struct {
100 /* Main processor registers. */
101 unsigned long reg01, reg03, reg22; /* ra sp fp */
102 unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
103 unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
105 /* __schedule() return address / call frame address */
106 unsigned long sched_ra;
107 unsigned long sched_cfa;
/* Saved control/status register (CSR) state for the task. */
110 unsigned long csr_prmd;
111 unsigned long csr_crmd;
112 unsigned long csr_euen;
113 unsigned long csr_ecfg;
114 unsigned long csr_badvaddr; /* Last user fault */
/* NOTE(review): the scratch-register members that follow this comment
 * are missing from this extract. */
116 /* Scratch registers */
122 /* Eflags register */
123 unsigned long eflags;
125 /* Other stuff associated with the thread. */
126 unsigned long trap_nr;
127 unsigned long error_code;
128 unsigned long single_step; /* Used by PTRACE_SINGLESTEP */
129 struct loongarch_vdso_info *vdso;
/*
132 * FPU & vector registers, must be at the last of inherited
133 * context because they are conditionally copied at fork().
 */
135 struct loongarch_fpu fpu FPU_ALIGN;
137 /* Hardware breakpoints pinned to this task. */
138 struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
139 struct perf_event *hbp_watch[LOONGARCH_MAX_WRP];
/* NOTE(review): the struct's closing "};" is missing from this extract. */
/* Saved return address / frame of a descheduled task (stack unwinding). */
142 #define thread_saved_ra(tsk) (tsk->thread.sched_ra)
143 #define thread_saved_fp(tsk) (tsk->thread.sched_cfa)
/*
 * Static initializer for thread_struct.
 * NOTE(review): almost the entire macro body (initializer lines and the
 * closing brace) is missing from this extract; only the opening line and
 * three section-comment continuations survive below.
 */
145 #define INIT_THREAD { \
147 * Main processor registers \
169 * Other stuff associated with the process \
174 * FPU & vector registers \
/* Boot-time override of the idle loop, set via kernel command line. */
187 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};
189 extern unsigned long boot_option_idle_override;
/*
191 * Do necessary setup to start up a newly executed thread.
 */
193 extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);
/* Where task @p is blocked; backs the generic get_wchan() interface. */
195 unsigned long __get_wchan(struct task_struct *p);
/* Top of kernel stack == start of the task's saved user pt_regs frame. */
197 #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
198 THREAD_SIZE - sizeof(struct pt_regs))
199 #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
/* Accessors into the saved user register frame: PC, SP, EUEN, ECFG. */
200 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
201 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
202 #define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
203 #define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
/*
 * The empty asm clobbering $1 (ra) forces the compiler to materialize the
 * return address here rather than optimize around __builtin_return_address.
 * NOTE(review): intent inferred from the clobber list -- confirm.
 */
205 #define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
207 #ifdef CONFIG_CPU_HAS_PREFETCH
/* Read prefetch, low temporal locality (locality hint 1). */
209 #define ARCH_HAS_PREFETCH
210 #define prefetch(x) __builtin_prefetch((x), 0, 1)
/* Write prefetch, low temporal locality (locality hint 1). */
212 #define ARCH_HAS_PREFETCHW
213 #define prefetchw(x) __builtin_prefetch((x), 1, 1)
/* NOTE(review): the #endif closing CONFIG_CPU_HAS_PREFETCH is missing from
 * this extract; only the header-guard #endif below is visible. */
217 #endif /* _ASM_PROCESSOR_H */