Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.17, 138 lines, 4.2 kB
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#include <asm/alternative.h>

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)				\
do {									\
	unsigned int dummy;						\
									\
	typecheck(atomic_t *, count);					\
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
									\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   decl (%%eax)	\n"			\
			"   js 2f		\n"			\
			"1:			\n"			\
									\
		LOCK_SECTION_START("")					\
		"2: call "#fail_fn"	\n"				\
		"   jmp 1b		\n"				\
		LOCK_SECTION_END					\
									\
		: "=a" (dummy)						\
		: "a" (count)						\
		: "memory", "ecx", "edx");				\
} while (0)


/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
			     int fastcall (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)				\
do {									\
	unsigned int dummy;						\
									\
	typecheck(atomic_t *, count);					\
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
									\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   incl (%%eax)	\n"			\
			"   jle 2f		\n"			\
			"1:			\n"			\
									\
		LOCK_SECTION_START("")					\
		"2: call "#fail_fn"	\n"				\
		"   jmp 1b		\n"				\
		LOCK_SECTION_END					\
									\
		: "=a" (dummy)						\
		: "a" (count)						\
		: "memory", "ecx", "edx");				\
} while (0)

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}

#endif
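For context: these per-architecture helpers are not called directly by users; the generic mutex core (kernel/mutex.c in the same release) supplies the fail_fn arguments, passing its out-of-line contention handlers __mutex_lock_slowpath and __mutex_unlock_slowpath into __mutex_fastpath_lock() and __mutex_fastpath_unlock(). The sketch below is a stand-alone, user-space illustration of the same counting protocol (1 = unlocked, 0 = locked, < 0 = possibly contended) written with C11 atomics; everything named demo_* is hypothetical, and a spin loop stands in for the kernel's sleeping slowpath, so this only approximates the shape of the fastpath/slowpath split.

/*
 * Stand-alone illustration (NOT kernel code) of the count protocol used
 * by the header above: 1 = unlocked, 0 = locked with no waiters,
 * < 0 = locked with possible waiters. All demo_* names are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, <0: contended */
};

/* Hypothetical slowpath: a real kernel slowpath sleeps on a wait queue;
 * here we simply spin, keeping the count below 1 while waiting, just as
 * the fastpath contract above requires. */
static void demo_lock_slowpath(atomic_int *count)
{
	while (atomic_exchange(count, -1) != 1)
		;	/* spin until the 1 ('unlocked') value reappears */
}

static void demo_lock(struct demo_mutex *m)
{
	/* Fastpath: the 1 -> 0 transition. The decrement happens
	 * unconditionally, so the count is left below 1 even on failure. */
	if (atomic_fetch_sub(&m->count, 1) != 1)
		demo_lock_slowpath(&m->count);
}

static void demo_unlock(struct demo_mutex *m)
{
	/* Fastpath: the 0 -> 1 transition. If the old value was not 0,
	 * waiters may exist; hand the lock back so they can retry. */
	if (atomic_fetch_add(&m->count, 1) != 0)
		atomic_store(&m->count, 1);
}

int main(void)
{
	struct demo_mutex m = { .count = 1 };

	demo_lock(&m);
	printf("locked, count = %d\n", atomic_load(&m.count));
	demo_unlock(&m);
	printf("unlocked, count = %d\n", atomic_load(&m.count));
	return 0;
}

This makes the invariant the header's comments insist on visible: the lock fastpath must leave the count below 1 even when acquisition fails, so that the unlock path's increment can detect possible waiters (a pre-increment value other than 0) and divert into its own slowpath.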