FRV: Implement atomic64_t

Implement atomic64_t and its ops for FRV. Tested with the following patch:

diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 55e4fab..086d50d 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -746,6 +746,52 @@ static void __init parse_cmdline_early(char *cmdline)

} /* end parse_cmdline_early() */

+static atomic64_t xxx;
+
+static void test_atomic64(void)
+{
+	atomic64_set(&xxx, 0x12300000023LL);
+
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0x12300000023LL);
+	mb();
+	if (atomic64_inc_return(&xxx) != 0x12300000024LL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0x12300000024LL);
+	mb();
+	if (atomic64_sub_return(0x36900000050LL, &xxx) != -0x2460000002cLL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != -0x2460000002cLL);
+	mb();
+	if (atomic64_dec_return(&xxx) != -0x2460000002dLL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != -0x2460000002dLL);
+	mb();
+	if (atomic64_add_return(0x36800000001LL, &xxx) != 0x121ffffffd4LL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
+	mb();
+	if (atomic64_cmpxchg(&xxx, 0x123456789abcdefLL, 0x121ffffffd4LL) != 0x121ffffffd4LL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
+	mb();
+	if (atomic64_cmpxchg(&xxx, 0x121ffffffd4LL, 0x123456789abcdefLL) != 0x121ffffffd4LL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0x123456789abcdefLL);
+	mb();
+	if (atomic64_xchg(&xxx, 0xabcdef123456789LL) != 0x123456789abcdefLL)
+		BUG();
+	mb();
+	BUG_ON(atomic64_read(&xxx) != 0xabcdef123456789LL);
+	mb();
+}
+
/*****************************************************************************/
/*
*
@@ -845,6 +891,8 @@ void __init setup_arch(char **cmdline_p)
// asm volatile("movgs %0,timerd" :: "r"(10000000));
// __set_HSR(0, __get_HSR(0) | HSR0_ETMD);

+	test_atomic64();
+
} /* end setup_arch() */

#if 0

Note that this doesn't cover all the trivial wrappers, but does cover all the
substantial implementations.
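
For reference, the remaining trivial wrappers could be exercised in the same style with something like the following (a sketch only, not part of the tested patch, though it uses only operations defined by this commit):

static void test_atomic64_wrappers(void)
{
	atomic64_set(&xxx, 2LL);

	mb();
	atomic64_add(3LL, &xxx);	/* void wrapper around atomic64_add_return() */
	mb();
	BUG_ON(atomic64_read(&xxx) != 5LL);
	mb();
	atomic64_sub(2LL, &xxx);
	mb();
	atomic64_dec(&xxx);
	mb();
	atomic64_inc(&xxx);
	mb();
	BUG_ON(atomic64_read(&xxx) != 3LL);
	mb();
	BUG_ON(!atomic64_sub_and_test(3LL, &xxx));	/* 3 - 3 == 0 */
	mb();
	BUG_ON(!atomic64_add_negative(-1LL, &xxx));	/* 0 - 1 < 0 */
	mb();
	BUG_ON(!atomic64_inc_and_test(&xxx));		/* -1 + 1 == 0 */
	mb();
}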

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>


---
 arch/frv/include/asm/atomic.h |  68 +++++++++++++++++++++++++++++++++++++++-
 arch/frv/include/asm/system.h |   2 ++
 arch/frv/kernel/frv_ksyms.c   |   4 +++
 arch/frv/lib/Makefile         |   2 +-
 arch/frv/lib/atomic-ops.S     |   3 +--
 arch/frv/lib/atomic64-ops.S   | 162 ++++++++++++++++++++++++++++++++++++++++
 6 files changed, 236 insertions(+), 5 deletions(-)

diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -121,10 +121,72 @@
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
 
+/*
+ * 64-bit atomic ops
+ */
+typedef struct {
+	volatile long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+static inline long long atomic64_read(atomic64_t *v)
+{
+	long long counter;
+
+	asm("ldd%I1 %M1,%0"
+	    : "=e"(counter)
+	    : "m"(v->counter));
+	return counter;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	asm volatile("std%I0 %1,%M0"
+		     : "=m"(v->counter)
+		     : "e"(i));
+}
+
+extern long long atomic64_inc_return(atomic64_t *v);
+extern long long atomic64_dec_return(atomic64_t *v);
+extern long long atomic64_add_return(long long i, atomic64_t *v);
+extern long long atomic64_sub_return(long long i, atomic64_t *v);
+
+static inline long long atomic64_add_negative(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+	atomic64_add_return(i, v);
+}
+
+static inline void atomic64_sub(long long i, atomic64_t *v)
+{
+	atomic64_sub_return(i, v);
+}
+
+static inline void atomic64_inc(atomic64_t *v)
+{
+	atomic64_inc_return(v);
+}
+
+static inline void atomic64_dec(atomic64_t *v)
+{
+	atomic64_dec_return(v);
+}
+
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
+
 /*****************************************************************************/
 /*
  * exchange value with memory
  */
+extern uint64_t __xchg_64(uint64_t i, volatile void *v);
+
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 
 #define xchg(ptr, x)						\
@@ -174,8 +236,10 @@
 
 #define tas(ptr)		(xchg((ptr), 1))
 
-#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
+#define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
+#define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
+#define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
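
The header changes above give FRV the conventional atomic64_t API: atomic64_read() and atomic64_set() compile to single LDD/STD instructions (which is what makes them atomic without a retry loop), while the arithmetic operations are implemented out of line in assembly. A minimal usage sketch (hypothetical driver fragment, not from this commit):

#include <asm/atomic.h>

static atomic64_t bytes_done = ATOMIC64_INIT(0);

static void account(unsigned long n)
{
	/* out-of-line LDD.P/CSTD.P retry loop in atomic64-ops.S */
	atomic64_add(n, &bytes_done);
}

static long long total_bytes(void)
{
	/* single LDD instruction; no locking needed */
	return atomic64_read(&bytes_done);
}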
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
--- a/arch/frv/include/asm/system.h
+++ b/arch/frv/include/asm/system.h
@@ -208,6 +208,8 @@
  * - if (*ptr == test) then orig = *ptr; *ptr = test;
  * - if (*ptr != test) then orig = *ptr;
  */
+extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 
 #define cmpxchg(ptr, test, new)					\
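
With __cmpxchg_64() and the atomic64_cmpxchg() wrapper in place, 64-bit versions of the cmpxchg-based helpers can be built the same way as the 32-bit atomic_add_unless() shown in the atomic.h context above. A sketch of what such a helper could look like (hypothetical; not part of this commit):

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race; retry from the value we actually saw */
	return c != u;
}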
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -67,6 +67,10 @@
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif
+EXPORT_SYMBOL(atomic64_add_return);
+EXPORT_SYMBOL(atomic64_sub_return);
+EXPORT_SYMBOL(__xchg_64);
+EXPORT_SYMBOL(__cmpxchg_64);
 
 EXPORT_SYMBOL(__debug_bug_printk);
 EXPORT_SYMBOL(__delay_loops_MHz);
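
The new EXPORT_SYMBOL() lines are needed because the substantial operations live out of line in arch/frv/lib rather than in the header, so modules using the atomic64 wrappers must be able to link against them. A hypothetical module fragment (illustrative only, not from this commit):

#include <linux/module.h>
#include <asm/atomic.h>

static atomic64_t mod_counter = ATOMIC64_INIT(0);

static int __init example_init(void)
{
	/* resolves to the exported atomic64_add_return() */
	atomic64_add(1LL, &mod_counter);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");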
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -4,5 +4,5 @@
 
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
-	checksum.o memcpy.o memset.o atomic-ops.o \
+	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
 	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -163,11 +163,10 @@
 	ld.p		@(gr11,gr0),gr8
 	orcr		cc7,cc7,cc3
 	subcc		gr8,gr9,gr7,icc0
-	bne		icc0,#0,1f
+	bnelr		icc0,#0
 	cst.p		gr10,@(gr11,gr0)	,cc3,#1
 	corcc		gr29,gr29,gr0		,cc3,#1
 	beq		icc3,#0,0b
-1:
 	bralr
 
 	.size		__cmpxchg_32, .-__cmpxchg_32

(The __cmpxchg_32 change replaces the branch to the local "1:" label with BNELR, a conditional return, so a failed comparison returns directly; __cmpxchg_64 below uses the same early-return form.)
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
new file mode 100644
--- /dev/null
+++ b/arch/frv/lib/atomic64-ops.S
@@ -0,0 +1,162 @@
+/* kernel atomic64 operations
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ * Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr-regs.h>
+
+	.text
+	.balign 4
+
+
+###############################################################################
+#
+# long long atomic64_inc_return(atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_inc_return
+	.type		atomic64_inc_return,@function
+atomic64_inc_return:
+	or.p		gr8,gr8,gr10
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	addicc		gr9,#1,gr9,icc0
+	addxi		gr8,#0,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_inc_return, .-atomic64_inc_return
+
+###############################################################################
+#
+# long long atomic64_dec_return(atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_dec_return
+	.type		atomic64_dec_return,@function
+atomic64_dec_return:
+	or.p		gr8,gr8,gr10
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	subicc		gr9,#1,gr9,icc0
+	subxi		gr8,#0,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_dec_return, .-atomic64_dec_return
+
+###############################################################################
+#
+# long long atomic64_add_return(long long i, atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_add_return
+	.type		atomic64_add_return,@function
+atomic64_add_return:
+	or.p		gr8,gr8,gr4
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	addcc		gr9,gr5,gr9,icc0
+	addx		gr8,gr4,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_add_return, .-atomic64_add_return
+
+###############################################################################
+#
+# long long atomic64_sub_return(long long i, atomic64_t *v)
+#
+###############################################################################
+	.globl		atomic64_sub_return
+	.type		atomic64_sub_return,@function
+atomic64_sub_return:
+	or.p		gr8,gr8,gr4
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	subcc		gr9,gr5,gr9,icc0
+	subx		gr8,gr4,gr8,icc0
+	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		atomic64_sub_return, .-atomic64_sub_return
+
+###############################################################################
+#
+# uint64_t __xchg_64(uint64_t i, uint64_t *v)
+#
+###############################################################################
+	.globl		__xchg_64
+	.type		__xchg_64,@function
+__xchg_64:
+	or.p		gr8,gr8,gr4
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3			/* set CC3 to true */
+	cstd.p		gr4,@(gr10,gr0)		,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		__xchg_64, .-__xchg_64
+
+###############################################################################
+#
+# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
+#
+###############################################################################
+	.globl		__cmpxchg_64
+	.type		__cmpxchg_64,@function
+__cmpxchg_64:
+	or.p		gr8,gr8,gr4
+	or		gr9,gr9,gr5
+0:
+	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
+	ckeq		icc3,cc7
+	ldd.p		@(gr12,gr0),gr8			/* LDD.P/ORCR must be atomic */
+	orcr		cc7,cc7,cc3
+	subcc		gr8,gr4,gr0,icc0
+	subcc.p		gr9,gr5,gr0,icc1
+	bnelr		icc0,#0
+	bnelr		icc1,#0
+	cstd.p		gr10,@(gr12,gr0)	,cc3,#1
+	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
+	beq		icc3,#0,0b
+	bralr
+
+	.size		__cmpxchg_64, .-__cmpxchg_64
+
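
In rough C terms, each routine above is the same pattern: an atomic 64-bit load, a computation on the two 32-bit halves, and a conditional 64-bit store that only lands if nothing intervened, retried until it does. An illustrative sketch of atomic64_add_return() (pseudocode only; conditional_store_64() is a made-up stand-in for the ORCC/CKEQ/ORCR/CSTD.P/CORCC/BEQ machinery explained in Documentation/frv/atomic-ops.txt):

long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = v->counter;	/* LDD.P: atomic 64-bit load */
		new = old + i;		/* ADDCC/ADDX: add low and high
					 * 32-bit halves with carry */
	} while (!conditional_store_64(&v->counter, new));	/* CSTD.P */

	return new;
}

__cmpxchg_64() differs only in that it bails out early (the BNELR instructions) and returns the value it loaded when either half of the comparison fails, leaving memory untouched.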