Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

MIPS: atomic.h: Reformat to fit in 79 columns

Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8484/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by Maciej W. Rozycki, committed by Ralf Baechle
ddb3108e 0e525e48

+181 -180
arch/mips/include/asm/atomic.h
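The change is mechanical: some lines of the ATOMIC_OP-family macro definitions in this header no longer fit in 79 columns, so the patch re-wraps operand lists and realigns the trailing continuation backslashes without altering the generated code. A hypothetical before/after of a single such line, only to show the shape of the change (the column positions here are illustrative and not copied from the patch):

-#define ATOMIC_OP(op, c_op, asm_op)                                            \
+#define ATOMIC_OP(op, c_op, asm_op)                                       \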
The diff is almost entirely whitespace. In the first hunk (file lines 41-137) the ATOMIC_OP(), ATOMIC_OP_RETURN() and ATOMIC_OPS() macro bodies are removed and re-added with their continuation backslashes and operand lists re-wrapped so that every line fits in 79 columns, and the stray backslash that used to follow the closing brace of ATOMIC_OP() is dropped. The reformatted definitions read as follows (trailing backslashes are shown here with a single preceding space; in the committed file they are aligned in a column within the 79-character limit):

#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
		 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	ll	%0, %1		# atomic_" #op "	\n" \
		"	" #asm_op " %0, %2			\n" \
		"	sc	%0, %1				\n" \
		"	beqzl	%0, 1b				\n" \
		"	.set	mips0				\n" \
		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
		 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	ll	%0, %1	# atomic_" #op "\n" \
			"	" #asm_op " %0, %2		\n" \
			"	sc	%0, %1			\n" \
			"	.set	mips0			\n" \
			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!temp)); \
	} else { \
		unsigned long flags; \
		 \
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
{ \
	int result; \
	 \
	smp_mb__before_llsc(); \
	 \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
		 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	ll	%1, %2	# atomic_" #op "_return	\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	sc	%0, %2				\n" \
		"	beqzl	%0, 1b				\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), \
		  "+" GCC_OFF12_ASM() (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
		 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	ll	%1, %2	# atomic_" #op "_return	\n" \
			"	" #asm_op " %0, %1, %3		\n" \
			"	sc	%0, %2			\n" \
			"	.set	mips0			\n" \
			: "=&r" (result), "=&r" (temp), \
			  "+" GCC_OFF12_ASM() (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!result)); \
		 \
		result = temp; result c_op i; \
	} else { \
		unsigned long flags; \
		 \
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
	 \
	smp_llsc_mb(); \
	 \
	return result; \
}

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)

The second hunk (file lines 320-417) applies the same re-indentation to ATOMIC64_OP(), ATOMIC64_OP_RETURN() and ATOMIC64_OPS(). Their bodies parallel the 32-bit versions above, using lld/scd in place of ll/sc and instantiated as ATOMIC64_OPS(add, +=, daddu); the only other difference visible in the hunk is that the plain LL/SC branch of ATOMIC64_OP_RETURN() uses an "=" GCC_OFF12_ASM() (v->counter) output constraint together with a GCC_OFF12_ASM() (v->counter) input and a "memory" clobber, rather than the "+" constraint.

The final hunk (file lines 422-428) wraps one over-long comment line:

 #undef ATOMIC64_OP

 /*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+ * variable
  * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
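As a reading aid (not part of the patch): the sketch below shows what the generating macros produce for the "add" case on the fallback path, i.e. the else branch taken when kernel_uses_llsc is false. It is expanded by hand from ATOMIC_OPS(add, +=, addu) using the macro bodies in the diff, and assumes the kernel's usual definitions of atomic_t, raw_local_irq_save()/raw_local_irq_restore(), smp_mb__before_llsc() and smp_llsc_mb(); the real expansion additionally contains the two LL/SC inline-assembly branches shown above.

/* Hand expansion of ATOMIC_OPS(add, +=, addu), fallback branch only. */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* serialise the read-modify-write */
	v->counter += i;		/* c_op was "+=" */
	raw_local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	int result;
	unsigned long flags;

	smp_mb__before_llsc();		/* barrier before the update */

	raw_local_irq_save(flags);
	result = v->counter;
	result += i;			/* c_op again */
	v->counter = result;
	raw_local_irq_restore(flags);

	smp_llsc_mb();			/* barrier after the update */

	return result;			/* *_return yields the new value */
}

A caller would then write, for example, atomic_set(&cnt, 0); atomic_add(5, &cnt); followed by atomic_add_return(3, &cnt), which returns 8 and leaves cnt.counter == 8.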