Revert "ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock"
author	Vineet Gupta <vgupta@synopsys.com>
	Wed, 29 Jul 2015 13:50:58 +0000 (19:20 +0530)
committer	Vineet Gupta <vgupta@synopsys.com>
	Tue, 4 Aug 2015 03:56:31 +0000 (09:26 +0530)
Extended testing of the quad-core configuration revealed that this fix was
insufficient. Specifically, the LTP Open POSIX test shm_op/23-1 would cause a
hardware livelock in the llock/scond loop in update_cpu_load_active().

So remove this and make way for a proper workaround.

This reverts commit a5c8b52abe677977883655166796f167ef1e0084.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
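
For context, the loop that livelocks is the bare LLOCK/SCOND retry sequence
that this revert restores (the PREFETCHW being removed had been prepended to
each retry iteration). A minimal sketch of that pattern follows; the operand
constraints and return plumbing are assumed to mirror the surrounding
ATOMIC_OP macro rather than copied from this hunk:

/*
 * Minimal sketch of the plain LLOCK/SCOND retry loop restored by this
 * revert (simplified from ATOMIC_OP; constraints assumed, not from the diff).
 */
static inline void atomic_add_sketch(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"	/* load-locked: read counter, open exclusive reservation */
	"	add     %0, %0, %2	\n"	/* compute the new value in a temp register */
	"	scond   %0, [%1]	\n"	/* store-conditional: succeeds only if reservation still held */
	"	bnz     1b		\n"	/* reservation lost (scond failed): retry from llock */
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");
}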
arch/arc/include/asm/atomic.h

index 03484cb4d16d2eb4fada0095ee427726c23bd2e1..20b7dc17979ea25c1b19e8513f66738fad380adc 100644
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW      "       prefetchw   [%1]        \n"
-#else
-#define PREFETCHW
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        unsigned int temp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
+       "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
@@ -58,9 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)            \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
+       "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \