/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE	0x01	/* Disable interrupts, save state */
#define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
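/*
 * For illustration only: a hedged sketch of how board code might hand out
 * base ids on a board with two hwspinlock banks of 8 locks each, so that
 * the second bank's locks are numbered 8..15 system-wide. The device name
 * used here is hypothetical, and <linux/platform_device.h> is assumed:
 *
 *	static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *	static struct hwspinlock_pdata bank1_pdata = { .base_id = 8 };
 *
 *	static struct platform_device hwlock_bank1 = {
 *		.name			= "example-hwspinlock",
 *		.id			= 1,
 *		.dev.platform_data	= &bank1_pdata,
 *	};
 */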
#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
		unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, user code will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}
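/*
 * A minimal usage sketch for the trylock path, assuming a hypothetical
 * shared resource guarded by lock id 0. Note that when the framework is
 * built in, hwspin_lock_request_specific() returns NULL on error, hence
 * the NULL check below. trylock never busy-waits, so the caller decides
 * how to back off on -EBUSY:
 *
 *	struct hwspinlock *hwlock;
 *	unsigned long flags;
 *	int ret;
 *
 *	hwlock = hwspin_lock_request_specific(0);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	ret = hwspin_trylock_irqsave(hwlock, &flags);
 *	if (!ret) {
 *		... briefly touch the shared resource, without sleeping ...
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */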
/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
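/*
 * A hedged sketch of the timeout path, reusing the hypothetical "hwlock"
 * from the sketch above with an assumed 100 msec budget. Unlike the
 * trylock variants, this busy-waits until the lock is free or the
 * timeout expires (ret is -ETIMEDOUT in the latter case):
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *	if (ret)
 *		return ret;
 *
 *	... update the shared state, without sleeping ...
 *
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */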
/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */
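/*
 * Putting it together: a hedged end-to-end sketch for a driver that looks
 * up its lock id from the device tree via the standard "hwlocks" consumer
 * property. The node pointer "np" and index 0 are illustrative assumptions:
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	id = of_hwspin_lock_get_id(np, 0);
 *	if (id < 0)
 *		return id;
 *
 *	hwlock = hwspin_lock_request_specific(id);
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	... take and release the lock as sketched above ...
 *
 *	hwspin_lock_free(hwlock);
 */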