arch/sh/include/asm/atomic-llsc.h
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

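/*
 * movli.l/movco.l are SH-4A's load-locked/store-conditional pair:
 * movli.l pulls the counter into r0 and opens a reservation, movco.l
 * writes r0 back only while that reservation still holds and reports
 * the outcome in the T bit, and "bf 1b" (branch if T is false) restarts
 * the whole read-modify-write sequence whenever the store did not land.
 */
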
#define ATOMIC_OP(op)                                                   \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%2, %0         ! atomic_" #op "\n"                     \
"       " #op " %1, %0                          \n"                     \
"       movco.l %0, @%2                         \n"                     \
"       bf      1b                              \n"                     \
        : "=&z" (tmp)                                                   \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
}
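
/*
 * Illustrative expansion (a sketch, not code generated in this file):
 * ATOMIC_OP(add) produces roughly the function below.  The "=&z"
 * constraint pins the temporary to r0, which the movli.l/movco.l
 * encodings require, and "t" tells the compiler the T bit is clobbered.
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add %1, %0			\n"
 *	"	movco.l %0, @%2			\n"
 *	"	bf	1b			\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 */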

#define ATOMIC_OP_RETURN(op)                                            \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long temp;                                             \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%2, %0         ! atomic_" #op "_return \n"             \
"       " #op " %1, %0                                  \n"             \
"       movco.l %0, @%2                                 \n"             \
"       bf      1b                                      \n"             \
"       synco                                           \n"             \
        : "=&z" (temp)                                                  \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
        return temp;                                                    \
}
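
/*
 * The _return variants reuse the same retry loop but hand back the
 * freshly computed value (already sitting in r0, as the note at the top
 * of the file explains) and append a synco, the SH-4A synchronization
 * instruction, so the value-returning forms also act as a memory
 * barrier, as the kernel expects of them.
 */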

#define ATOMIC_FETCH_OP(op)                                             \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        unsigned long res, temp;                                        \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%3, %0         ! atomic_fetch_" #op "  \n"             \
"       mov %0, %1                                      \n"             \
"       " #op " %2, %0                                  \n"             \
"       movco.l %0, @%3                                 \n"             \
"       bf      1b                                      \n"             \
"       synco                                           \n"             \
        : "=&z" (temp), "=&r" (res)                                     \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
        return res;                                                     \
}
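
/*
 * atomic_fetch_##op differs from the _return variant only in what it
 * returns: "mov %0, %1" snapshots the counter into a second register
 * before the operation is applied, so the caller gets the value the
 * counter held *before* the update rather than after it.
 */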

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

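/*
 * Usage sketch (illustrative only, not part of this header): the
 * instantiations above give the arithmetic ops all three flavours,
 * while the bitwise ops get no _return form, e.g.
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &v);                     no return value
 *	int new = atomic_sub_return(1, &v);    returns the updated value
 *	int old = atomic_fetch_or(0x4, &v);    returns the previous value
 */
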
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */