From c6470150dff9aff682063890c9b8eac71b695def Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 26 Mar 2014 18:12:45 +0100
Subject: locking,arch,sh: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: linux-sh@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.770036493@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/sh/include/asm/atomic-grb.h  | 119 ++++++++++++++------------------
 arch/sh/include/asm/atomic-irq.h  |  62 +++++++++-----------
 arch/sh/include/asm/atomic-llsc.h | 101 +++++++++++++-------------
 3 files changed, 112 insertions(+), 170 deletions(-)

(limited to 'arch/sh')

diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index a273c88578fc..97a5fda83450 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -1,85 +1,56 @@
 #ifndef __ASM_SH_ATOMIC_GRB_H
 #define __ASM_SH_ATOMIC_GRB_H

-static inline void atomic_add(int i, atomic_t *v)
-{
-	int tmp;
-
-	__asm__ __volatile__ (
-		" .align 2 \n\t"
-		" mova 1f, r0 \n\t" /* r0 = end point */
-		" mov r15, r1 \n\t" /* r1 = saved sp */
-		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-		" mov.l @%1, %0 \n\t" /* load old value */
-		" add %2, %0 \n\t" /* add */
-		" mov.l %0, @%1 \n\t" /* store new value */
-		"1: mov r1, r15 \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r" (v)
-		: "r" (i)
-		: "memory" , "r0", "r1");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	int tmp;
-
-	__asm__ __volatile__ (
-		" .align 2 \n\t"
-		" mova 1f, r0 \n\t" /* r0 = end point */
-		" mov r15, r1 \n\t" /* r1 = saved sp */
-		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-		" mov.l @%1, %0 \n\t" /* load old value */
-		" sub %2, %0 \n\t" /* sub */
-		" mov.l %0, @%1 \n\t" /* store new value */
-		"1: mov r1, r15 \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r" (v)
-		: "r" (i)
-		: "memory" , "r0", "r1");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int tmp;
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	int tmp; \
+	\
+	__asm__ __volatile__ ( \
+		" .align 2 \n\t" \
+		" mova 1f, r0 \n\t" /* r0 = end point */ \
+		" mov r15, r1 \n\t" /* r1 = saved sp */ \
+		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+		" mov.l @%1, %0 \n\t" /* load old value */ \
+		" " #op " %2, %0 \n\t" /* $op */ \
+		" mov.l %0, @%1 \n\t" /* store new value */ \
+		"1: mov r1, r15 \n\t" /* LOGOUT */ \
+		: "=&r" (tmp), \
+		  "+r" (v) \
+		: "r" (i) \
+		: "memory" , "r0", "r1"); \
+} \

-	__asm__ __volatile__ (
-		" .align 2 \n\t"
-		" mova 1f, r0 \n\t" /* r0 = end point */
-		" mov r15, r1 \n\t" /* r1 = saved sp */
-		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-		" mov.l @%1, %0 \n\t" /* load old value */
-		" add %2, %0 \n\t" /* add */
-		" mov.l %0, @%1 \n\t" /* store new value */
-		"1: mov r1, r15 \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r" (v)
-		: "r" (i)
-		: "memory" , "r0", "r1");
-
-	return tmp;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	int tmp; \
+	\
+	__asm__ __volatile__ ( \
+		" .align 2 \n\t" \
+		" mova 1f, r0 \n\t" /* r0 = end point */ \
+		" mov r15, r1 \n\t" /* r1 = saved sp */ \
+		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+		" mov.l @%1, %0 \n\t" /* load old value */ \
+		" " #op " %2, %0 \n\t" /* $op */ \
+		" mov.l %0, @%1 \n\t" /* store new value */ \
+		"1: mov r1, r15 \n\t" /* LOGOUT */ \
+		: "=&r" (tmp), \
+		  "+r" (v) \
+		: "r" (i) \
+		: "memory" , "r0", "r1"); \
+	\
+	return tmp; \
 }

-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int tmp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

-	__asm__ __volatile__ (
-		" .align 2 \n\t"
-		" mova 1f, r0 \n\t" /* r0 = end point */
-		" mov r15, r1 \n\t" /* r1 = saved sp */
-		" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
-		" mov.l @%1, %0 \n\t" /* load old value */
-		" sub %2, %0 \n\t" /* sub */
-		" mov.l %0, @%1 \n\t" /* store new value */
-		"1: mov r1, r15 \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r" (v)
-		: "r" (i)
-		: "memory", "r0", "r1");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)

-	return tmp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP

 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 9f7c56609e53..61d107523f06 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -8,49 +8,39 @@
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter += i;
-	raw_local_irq_restore(flags);
-}

-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter -= i;
-	raw_local_irq_restore(flags);
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	\
+	raw_local_irq_save(flags); \
+	v->counter c_op i; \
+	raw_local_irq_restore(flags); \
 }

-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long temp, flags;
-
-	raw_local_irq_save(flags);
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	unsigned long temp, flags; \
+	\
+	raw_local_irq_save(flags); \
+	temp = v->counter; \
+	temp c_op i; \
+	v->counter = temp; \
+	raw_local_irq_restore(flags); \
+	\
+	return temp; \
 }

-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long temp, flags;
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

-	raw_local_irq_save(flags);
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)

-	return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP

 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..8575dccb9ef7 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -1,39 +1,6 @@
 #ifndef __ASM_SH_ATOMIC_LLSC_H
 #define __ASM_SH_ATOMIC_LLSC_H

-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-	: "=&z" (tmp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-}
-
 /*
  * SH-4A note:
  *
@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
  * encoding, so the retval is automatically set without having to
  * do any special work.
  */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long temp;
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */

-	__asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	unsigned long tmp; \
+	\
+	__asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "\n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+	: "=&z" (tmp) \
+	: "r" (i), "r" (&v->counter) \
+	: "t"); \
+}

-	return temp;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+	unsigned long temp; \
+	\
+	__asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+" synco \n" \
+	: "=&z" (temp) \
+	: "r" (i), "r" (&v->counter) \
+	: "t"); \
+	\
+	return temp; \
 }

-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long temp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

-	__asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
-	: "=&z" (temp)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)

-	return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP

 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-- cgit v1.2.3

From 2291059c852706c6f5ffb400366042b7625066cd Mon Sep 17 00:00:00 2001
From: Pranith Kumar
Date: Tue, 23 Sep 2014 10:29:50 -0400
Subject: locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()

Use the much more reader friendly ACCESS_ONCE() instead of the cast
to volatile. This is purely a stylistic change.
Signed-off-by: Pranith Kumar
Acked-by: Jesper Nilsson
Acked-by: Hans-Christian Egtvedt
Acked-by: Max Filippov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/1411482607-20948-1-git-send-email-bobby.prani@gmail.com
Signed-off-by: Ingo Molnar
---
 arch/alpha/include/asm/atomic.h    | 4 ++--
 arch/arm/include/asm/atomic.h      | 2 +-
 arch/arm64/include/asm/atomic.h    | 4 ++--
 arch/avr32/include/asm/atomic.h    | 2 +-
 arch/cris/include/asm/atomic.h     | 2 +-
 arch/frv/include/asm/atomic.h      | 2 +-
 arch/ia64/include/asm/atomic.h     | 4 ++--
 arch/m32r/include/asm/atomic.h     | 2 +-
 arch/m68k/include/asm/atomic.h     | 2 +-
 arch/mips/include/asm/atomic.h     | 4 ++--
 arch/parisc/include/asm/atomic.h   | 4 ++--
 arch/sh/include/asm/atomic.h       | 2 +-
 arch/sparc/include/asm/atomic_32.h | 2 +-
 arch/sparc/include/asm/atomic_64.h | 4 ++--
 arch/x86/include/asm/atomic.h      | 2 +-
 arch/x86/include/asm/atomic64_64.h | 2 +-
 arch/xtensa/include/asm/atomic.h   | 2 +-
 include/asm-generic/atomic.h       | 2 +-
 18 files changed, 24 insertions(+), 24 deletions(-)

(limited to 'arch/sh')

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 6fbb53a13049..8f8eafbedd7c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

 #define atomic_set(v,i)		((v)->counter = (i))
 #define atomic64_set(v,i)	((v)->counter = (i))
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 832f1cdfcd6a..e22c11970b7b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,7 +27,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))

 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index b83c325e587f..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))

 /*
@@ -139,7 +139,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define ATOMIC64_INIT(i) { (i) }

-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))

 #define ATOMIC64_OP(op, asm_op) \
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 83e980a4e483..2d07ce1c5327 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,7 +19,7 @@

 #define ATOMIC_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)

 #define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index 0033f9dfea24..279766a70664 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -17,7 +17,7 @@

 #define ATOMIC_INIT(i)	{ (i) }

-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))

 /* These should be written in asm but we do it in C for now. */
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index f6c3a1690101..102190a61d65 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -31,7 +31,7 @@
  */
 #define ATOMIC_INIT(i)		{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))

 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 42919a831c6c..0bf03501fe5c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,8 +21,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

 #define atomic_set(v,i)		(((v)->counter) = (i))
 #define atomic64_set(v,i)	(((v)->counter) = (i))
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 3946b2c8d971..31bb74adba08 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -28,7 +28,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)

 /**
  * atomic_set - set atomic variable
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 663d4ba2462c..e85f047fb072 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,7 +17,7 @@

 #define ATOMIC_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)

 /*
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index f3ee721fe61d..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)

 /*
  * atomic_set - set atomic variable
@@ -306,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 * @v: pointer of type atomic64_t
 *
 */
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

 /*
  * atomic64_set - set atomic variable
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 219750bb4ae7..226f8ca993f6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -67,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)

 static __inline__ int atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }

 /* exported interface */
@@ -204,7 +204,7 @@ atomic64_set(atomic64_t *v, s64 i)
 static __inline__ s64
 atomic64_read(const atomic64_t *v)
 {
-	return (*(volatile long *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }

 #define atomic64_inc(v)	(atomic64_add(   1,(v)))
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f57b8a6743b3..05b9f74ce2d5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -14,7 +14,7 @@

 #define ATOMIC_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))

 #if defined(CONFIG_GUSA_RB)
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7b024f02a7ce..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -26,7 +26,7 @@ int atomic_cmpxchg(atomic_t *, int, int);
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)

 #define atomic_add(i, v)	((void)atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v)	((void)atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 7e4ca1e73cd9..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,8 +14,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }

-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

 #define atomic_set(v, i)	(((v)->counter) = i)
 #define atomic64_set(v, i)	(((v)->counter) = i)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index bf20c817ed34..5e5cd123fdfb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }

 /**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 46e9052bbd28..f8d273e18516 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
 */
 static inline long atomic64_read(const atomic64_t *v)
 {
-	return (*(volatile long *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }

 /**
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 626676660b80..00b7d46b35b8 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)

 /**
  * atomic_set - set atomic variable
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 56d4d36e1531..1973ad2b13f4 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -126,7 +126,7 @@ ATOMIC_OP(or, |)
 * Atomically reads the value of @v.
 */
 #ifndef atomic_read
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #endif

 /**
-- cgit v1.2.3
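
The folding technique in the first patch is plain C preprocessor token pasting, so it can be tried outside the kernel. The following is a minimal user-space sketch of the same ATOMIC_OP()/ATOMIC_OP_RETURN()/ATOMIC_OPS() pattern; it uses C11 <stdatomic.h> in place of the SH gUSA/LLSC assembly, and every my_* name is illustrative rather than kernel API.

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } my_atomic_t;

/* One template generates the void op for a given operation name. */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	atomic_fetch_##op##_explicit(&v->counter, i, memory_order_relaxed); \
}

/* A second template generates the value-returning variant: the fetch
 * primitive returns the old value, so apply c_op to get the new one. */
#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	return atomic_fetch_##op##_explicit(&v->counter, i,		\
					    memory_order_relaxed) c_op i; \
}

#define MY_ATOMIC_OPS(op, c_op) MY_ATOMIC_OP(op, c_op) MY_ATOMIC_OP_RETURN(op, c_op)

MY_ATOMIC_OPS(add, +)	/* emits my_atomic_add() and my_atomic_add_return() */
MY_ATOMIC_OPS(sub, -)	/* emits my_atomic_sub() and my_atomic_sub_return() */

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

int main(void)
{
	my_atomic_t v = { 0 };

	my_atomic_add(3, &v);
	printf("%d\n", my_atomic_sub_return(1, &v));	/* prints 2 */
	return 0;
}

As in the patch, adding a new operation (say, and/or/xor) then costs one ATOMIC_OPS() line instead of two hand-written functions per operation.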
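
The second patch reads naturally once you recall that ACCESS_ONCE() expands to the same kind of volatile access the per-architecture code had been open-coding. A small user-space sketch of that equivalence follows; the ACCESS_ONCE() definition below mirrors the kernel's <linux/compiler.h> macro of that era only roughly, and the my_* names are made up for illustration.

#include <stdio.h>

/* Approximation of the kernel macro: a type-generic volatile access that
 * forces the compiler to emit exactly one load (or store) for x. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

typedef struct { int counter; } my_atomic_t;

/* Before the patch: each architecture open-coded the volatile cast. */
#define my_atomic_read_old(v)	(*(volatile int *)&(v)->counter)
/* After the patch: same single-load semantics, clearer intent. */
#define my_atomic_read_new(v)	ACCESS_ONCE((v)->counter)

int main(void)
{
	my_atomic_t v = { 42 };

	printf("%d %d\n", my_atomic_read_old(&v), my_atomic_read_new(&v));
	return 0;
}

Both macros expand to a dereference of a pointer-to-volatile, which is why the commit message can call the conversion purely stylistic.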