arch/sh/include/asm/atomic-grb.h
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H

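/*
 * Atomic operations for SH parts using the gUSA roll-back ("gRB")
 * scheme. A sketch of the mechanism, as reflected in the sequences
 * below: mova points r0 at the end label ("1:"), r1 saves the real
 * stack pointer, and r15 is loaded with the negated byte size of the
 * critical section (-6 here: three 16-bit instructions). While r15 is
 * negative, the kernel's gUSA roll-back handling (outside this file)
 * treats the region as atomic: if the sequence is interrupted, the PC
 * is rolled back to its start (r0 + r15) instead of being resumed
 * mid-way, so the load-modify-store executes atomically on UP without
 * masking interrupts. Restoring r15 from r1 at the end label leaves
 * the critical section ("LOGOUT").
 */
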
#define ATOMIC_OP(op)                                                   \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        int tmp;                                                        \
                                                                        \
        __asm__ __volatile__ (                                          \
                "   .align 2              \n\t"                         \
                "   mova    1f,   r0      \n\t" /* r0 = end point */    \
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */     \
                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = -size */\
                "   mov.l  @%1,   %0      \n\t" /* load  old value */   \
                " " #op "   %2,   %0      \n\t" /* the op */            \
                "   mov.l   %0,   @%1     \n\t" /* store new value */   \
                "1: mov     r1,   r15     \n\t" /* LOGOUT */            \
                : "=&r" (tmp),                                          \
                  "+r"  (v)                                             \
                : "r"   (i)                                             \
                : "memory" , "r0", "r1");                               \
}

#define ATOMIC_OP_RETURN(op)                                            \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        int tmp;                                                        \
                                                                        \
        __asm__ __volatile__ (                                          \
                "   .align 2              \n\t"                         \
                "   mova    1f,   r0      \n\t" /* r0 = end point */    \
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */     \
                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = -size */\
                "   mov.l  @%1,   %0      \n\t" /* load  old value */   \
                " " #op "   %2,   %0      \n\t" /* the op */            \
                "   mov.l   %0,   @%1     \n\t" /* store new value */   \
                "1: mov     r1,   r15     \n\t" /* LOGOUT */            \
                : "=&r" (tmp),                                          \
                  "+r"  (v)                                             \
                : "r"   (i)                                             \
                : "memory" , "r0", "r1");                               \
                                                                        \
        return tmp;                                                     \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
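/*
 * Illustrative note (not part of the generated code): ATOMIC_OPS(add)
 * expands to atomic_add() and atomic_add_return(), with the "add"
 * mnemonic spliced into the asm template by the #op stringification;
 * ATOMIC_OPS(sub) likewise yields atomic_sub() and atomic_sub_return().
 */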

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        int tmp;
        unsigned int _mask = ~mask;  /* clear bits by ANDing the complement */

        __asm__ __volatile__ (
                "   .align 2              \n\t"
                "   mova    1f,   r0      \n\t" /* r0 = end point */
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = -size */
                "   mov.l  @%1,   %0      \n\t" /* load  old value */
                "   and     %2,   %0      \n\t" /* and */
                "   mov.l   %0,   @%1     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
                : "=&r" (tmp),
                  "+r"  (v)
                : "r"   (_mask)
                : "memory" , "r0", "r1");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        int tmp;

        __asm__ __volatile__ (
                "   .align 2              \n\t"
                "   mova    1f,   r0      \n\t" /* r0 = end point */
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = -size */
                "   mov.l  @%1,   %0      \n\t" /* load  old value */
                "   or      %2,   %0      \n\t" /* or */
                "   mov.l   %0,   @%1     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
                : "=&r" (tmp),
                  "+r"  (v)
                : "r"   (mask)
                : "memory" , "r0", "r1");
}

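/*
 * Usage sketch (illustrative only; callers go through the generic
 * atomic_t API, which these helpers back on gRB-capable parts):
 *
 *	atomic_t cnt = ATOMIC_INIT(4);
 *
 *	atomic_add(2, &cnt);
 *	atomic_sub(1, &cnt);
 *	BUG_ON(atomic_sub_return(5, &cnt) != 0);
 */
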
#endif /* __ASM_SH_ATOMIC_GRB_H */