Merge tag 'arc-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Oct 2014 14:50:02 +0000 (07:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Oct 2014 14:50:02 +0000 (07:50 -0700)
Pull ARC updates from Vineet Gupta:
 "Sorry for the late pull request.  Current stuff was ready for a while
  but I was hoping to squeeze in support for almost ready ARC SDP
  platform (and avoid a 2nd pull request), however it seems there are
  still some loose ends which warrant more time.

   - Platform code reduction/moving-up (TB10X no longer needs any
     callbacks)
   - updated boot printing
   - kgdb update for arc gdb 7.5
   - bug fixes (some marked for stable)
   - more code refactoring/consolidation"

* tag 'arc-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: boot: cpu feature print enhancements
  ARC: boot: consolidate cross-checking of h/w and s/w
  ARC: unbork FPU save/restore
  ARC: remove extraneous __KERNEL__ guards
  ARC: Update order of registers in KGDB to match GDB 7.5
  ARC: Remove unneeded Kconfig entry NO_DMA
  ARC: BUG() dumps stack after @msg (@msg now same as in generic BUG())
  ARC: refactoring: reduce the scope of some local vars
  ARC: remove gcc mpy heuristics
  ARC: RIP @running_on_hw
  ARC: Update comments about uncached address space
  ARC: rename kconfig option for unaligned emulation
  ARC: [nsimosci] Allow "headless" models to boot
  ARC: [arcfpga] Get rid of ARC_BOARD_ANGEL4 and ARC_BOARD_ML509
  ARC: [arcfpga] Remove more dead code
  ARC: [plat*] move code out of .init_machine into common
  ARC: [arcfpga] consolidate machine description, DT
  ARC: Allow SMP kernel to build/boot on UP-only infrastructure
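
The atomic.h change below (the "code refactoring/consolidation" item)
folds five near-identical open-coded functions into ATOMIC_OP /
ATOMIC_OP_RETURN generator macros, so each operation becomes a one-line
ATOMIC_OPS() instantiation.  As a rough sketch of the same pattern --
not the kernel code, and retargeted at the GCC/Clang __atomic builtins
instead of ARC llock/scond so that it compiles and runs in userspace:

	#include <stdio.h>

	typedef struct { int counter; } atomic_t;

	/* generate the void flavour of an op, e.g. atomic_add() */
	#define ATOMIC_OP(op)						\
	static inline void atomic_##op(int i, atomic_t *v)		\
	{								\
		(void)__atomic_##op##_fetch(&v->counter, i,		\
					    __ATOMIC_SEQ_CST);		\
	}

	/* generate the value-returning flavour, e.g. atomic_add_return() */
	#define ATOMIC_OP_RETURN(op)					\
	static inline int atomic_##op##_return(int i, atomic_t *v)	\
	{								\
		return __atomic_##op##_fetch(&v->counter, i,		\
					     __ATOMIC_SEQ_CST);		\
	}

	#define ATOMIC_OPS(op)						\
		ATOMIC_OP(op)						\
		ATOMIC_OP_RETURN(op)

	ATOMIC_OPS(add)		/* atomic_add() + atomic_add_return() */
	ATOMIC_OPS(sub)		/* atomic_sub() + atomic_sub_return() */
	ATOMIC_OP(and)		/* void flavour only, as in the patch  */

	int main(void)
	{
		atomic_t v = { .counter = 10 };

		atomic_add(5, &v);				/* -> 15 */
		printf("%d\n", atomic_sub_return(3, &v));	/* 12 */
		atomic_and(~8, &v);				/* clear bit 3 */
		printf("%d\n", v.counter);			/* 4 */
		return 0;
	}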

arch/arc/include/asm/atomic.h

index 173f303a868f20854cbdcc598cf991e4f6d6d0e5,6638a0392f4e345b215b504dee1e608ee2b15ae2..067551b6920af99fe733f1f13d4aee8b1903a77b
@@@ -9,8 -9,6 +9,6 @@@
  #ifndef _ASM_ARC_ATOMIC_H
  #define _ASM_ARC_ATOMIC_H
  
- #ifdef __KERNEL__
  #ifndef __ASSEMBLY__
  
  #include <linux/types.h>
  
  #define atomic_set(v, i) (((v)->counter) = (i))
  
 -static inline void atomic_add(int i, atomic_t *v)
 -{
 -      unsigned int temp;
 -
 -      __asm__ __volatile__(
 -      "1:     llock   %0, [%1]        \n"
 -      "       add     %0, %0, %2      \n"
 -      "       scond   %0, [%1]        \n"
 -      "       bnz     1b              \n"
 -      : "=&r"(temp)   /* Early clobber, to prevent reg reuse */
 -      : "r"(&v->counter), "ir"(i)
 -      : "cc");
 -}
 -
 -static inline void atomic_sub(int i, atomic_t *v)
 -{
 -      unsigned int temp;
 -
 -      __asm__ __volatile__(
 -      "1:     llock   %0, [%1]        \n"
 -      "       sub     %0, %0, %2      \n"
 -      "       scond   %0, [%1]        \n"
 -      "       bnz     1b              \n"
 -      : "=&r"(temp)
 -      : "r"(&v->counter), "ir"(i)
 -      : "cc");
 -}
 -
 -/* add and also return the new value */
 -static inline int atomic_add_return(int i, atomic_t *v)
 -{
 -      unsigned int temp;
 -
 -      __asm__ __volatile__(
 -      "1:     llock   %0, [%1]        \n"
 -      "       add     %0, %0, %2      \n"
 -      "       scond   %0, [%1]        \n"
 -      "       bnz     1b              \n"
 -      : "=&r"(temp)
 -      : "r"(&v->counter), "ir"(i)
 -      : "cc");
 -
 -      return temp;
 -}
 -
 -static inline int atomic_sub_return(int i, atomic_t *v)
 -{
 -      unsigned int temp;
 -
 -      __asm__ __volatile__(
 -      "1:     llock   %0, [%1]        \n"
 -      "       sub     %0, %0, %2      \n"
 -      "       scond   %0, [%1]        \n"
 -      "       bnz     1b              \n"
 -      : "=&r"(temp)
 -      : "r"(&v->counter), "ir"(i)
 -      : "cc");
 -
 -      return temp;
 -}
 -
 -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 -{
 -      unsigned int temp;
 -
 -      __asm__ __volatile__(
 -      "1:     llock   %0, [%1]        \n"
 -      "       bic     %0, %0, %2      \n"
 -      "       scond   %0, [%1]        \n"
 -      "       bnz     1b              \n"
 -      : "=&r"(temp)
 -      : "r"(addr), "ir"(mask)
 -      : "cc");
 +#define ATOMIC_OP(op, c_op, asm_op)                                   \
 +static inline void atomic_##op(int i, atomic_t *v)                    \
 +{                                                                     \
 +      unsigned int temp;                                              \
 +                                                                      \
 +      __asm__ __volatile__(                                           \
 +      "1:     llock   %0, [%1]        \n"                             \
 +      "       " #asm_op " %0, %0, %2  \n"                             \
 +      "       scond   %0, [%1]        \n"                             \
 +      "       bnz     1b              \n"                             \
 +      : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
 +      : "r"(&v->counter), "ir"(i)                                     \
 +      : "cc");                                                        \
 +}
 +
 +#define ATOMIC_OP_RETURN(op, c_op, asm_op)                            \
 +static inline int atomic_##op##_return(int i, atomic_t *v)            \
 +{                                                                     \
 +      unsigned int temp;                                              \
 +                                                                      \
 +      __asm__ __volatile__(                                           \
 +      "1:     llock   %0, [%1]        \n"                             \
 +      "       " #asm_op " %0, %0, %2  \n"                             \
 +      "       scond   %0, [%1]        \n"                             \
 +      "       bnz     1b              \n"                             \
 +      : "=&r"(temp)                                                   \
 +      : "r"(&v->counter), "ir"(i)                                     \
 +      : "cc");                                                        \
 +                                                                      \
 +      return temp;                                                    \
  }
  
  #else /* !CONFIG_ARC_HAS_LLSC */
@@@ -83,7 -124,6 +81,7 @@@ static inline void atomic_set(atomic_t 
        v->counter = i;
        atomic_ops_unlock(flags);
  }
 +
  #endif
  
  /*
   * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
   */
  
 -static inline void atomic_add(int i, atomic_t *v)
 -{
 -      unsigned long flags;
 -
 -      atomic_ops_lock(flags);
 -      v->counter += i;
 -      atomic_ops_unlock(flags);
 +#define ATOMIC_OP(op, c_op, asm_op)                                   \
 +static inline void atomic_##op(int i, atomic_t *v)                    \
 +{                                                                     \
 +      unsigned long flags;                                            \
 +                                                                      \
 +      atomic_ops_lock(flags);                                         \
 +      v->counter c_op i;                                              \
 +      atomic_ops_unlock(flags);                                       \
  }
  
 -static inline void atomic_sub(int i, atomic_t *v)
 -{
 -      unsigned long flags;
 -
 -      atomic_ops_lock(flags);
 -      v->counter -= i;
 -      atomic_ops_unlock(flags);
 +#define ATOMIC_OP_RETURN(op, c_op, asm_op)                            \
 +static inline int atomic_##op##_return(int i, atomic_t *v)            \
 +{                                                                     \
 +      unsigned long flags;                                            \
 +      unsigned long temp;                                             \
 +                                                                      \
 +      atomic_ops_lock(flags);                                         \
 +      temp = v->counter;                                              \
 +      temp c_op i;                                                    \
 +      v->counter = temp;                                              \
 +      atomic_ops_unlock(flags);                                       \
 +                                                                      \
 +      return temp;                                                    \
  }
  
 -static inline int atomic_add_return(int i, atomic_t *v)
 -{
 -      unsigned long flags;
 -      unsigned long temp;
 -
 -      atomic_ops_lock(flags);
 -      temp = v->counter;
 -      temp += i;
 -      v->counter = temp;
 -      atomic_ops_unlock(flags);
 -
 -      return temp;
 -}
 -
 -static inline int atomic_sub_return(int i, atomic_t *v)
 -{
 -      unsigned long flags;
 -      unsigned long temp;
 -
 -      atomic_ops_lock(flags);
 -      temp = v->counter;
 -      temp -= i;
 -      v->counter = temp;
 -      atomic_ops_unlock(flags);
 +#endif /* !CONFIG_ARC_HAS_LLSC */
  
 -      return temp;
 -}
 +#define ATOMIC_OPS(op, c_op, asm_op)                                  \
 +      ATOMIC_OP(op, c_op, asm_op)                                     \
 +      ATOMIC_OP_RETURN(op, c_op, asm_op)
  
 -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 -{
 -      unsigned long flags;
 +ATOMIC_OPS(add, +=, add)
 +ATOMIC_OPS(sub, -=, sub)
 +ATOMIC_OP(and, &=, and)
  
 -      atomic_ops_lock(flags);
 -      *addr &= ~mask;
 -      atomic_ops_unlock(flags);
 -}
 +#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
  
 -#endif /* !CONFIG_ARC_HAS_LLSC */
 +#undef ATOMIC_OPS
 +#undef ATOMIC_OP_RETURN
 +#undef ATOMIC_OP
  
  /**
   * __atomic_add_unless - add unless the number is a given value
  #endif
  
  #endif
- #endif
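
For reference, hand-expanding ATOMIC_OPS(add, +=, add) with the new
CONFIG_ARC_HAS_LLSC macros reproduces exactly the open-coded
atomic_add() deleted above, which is what makes the refactor
behaviour-neutral.  Annotated expansion (illustrative; the body is
lifted straight from the macro in the diff):

	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned int temp;

		__asm__ __volatile__(
		"1:	llock   %0, [%1]	\n"	/* locked load of counter     */
		"	add     %0, %0, %2	\n"	/* #asm_op pasted in as "add" */
		"	scond   %0, [%1]	\n"	/* store iff still exclusive  */
		"	bnz     1b		\n"	/* raced with a writer: retry */
		: "=&r"(temp)	/* early clobber, to prevent reg reuse */
		: "r"(&v->counter), "ir"(i)
		: "cc");
	}

atomic_clear_mask() is likewise no longer open-coded: it now expands to
atomic_and(~(mask), (v)), generated by the same machinery (note that its
second argument is now an atomic_t *, not a bare unsigned long *).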