/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)		__builtin_expect(!!(x), 1)
# define unlikely(x)		__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif
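
/*
 * A minimal usage sketch for the annotations above; the names
 * my_driver_read(), do_read(), buf and count are hypothetical.
 * likely()/unlikely() only pass a branch-outcome hint to
 * __builtin_expect() (and, with branch profiling enabled, record
 * hit/miss statistics); they do not change what the condition
 * evaluates to.
 *
 *	static ssize_t my_driver_read(char *buf, size_t count)
 *	{
 *		if (unlikely(!buf))		// error path, expected cold
 *			return -EINVAL;
 *		if (likely(count > 0))		// common case, expected hot
 *			return do_read(buf, count);
 *		return 0;
 *	}
 */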

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version is used, e.g., to prevent dead-store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped
 * from there, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
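
/*
 * A sketch of the classic use case for barrier_data(), with hypothetical
 * names scrub_secret() and use_key(); the kernel's memzero_explicit()
 * follows the same pattern. When a local buffer is cleared just before it
 * goes out of scope, the compiler may prove the cleared bytes are never
 * read again and drop the memset() as a dead store. Passing the pointer
 * into the asm as an input, in addition to the "memory" clobber, forces
 * the compiler to assume the asm may inspect the buffer, so the clearing
 * stores must be kept.
 *
 *	static void scrub_secret(void)
 *	{
 *		char key[32];
 *
 *		use_key(key);
 *		memset(key, 0, sizeof(key));	// must not be elided
 *		barrier_data(key);		// keep the clearing stores
 *	}
 */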

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

/*
 * Mark a position in code as unreachable. This can be used to
 * suppress control flow warnings after asm blocks that transfer
 * control elsewhere.
 */
#define unreachable() do {		\
	barrier_before_unreachable();	\
	__builtin_unreachable();	\
} while (0)

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))	\
	= (unsigned long)&sym;
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
	({ unsigned long __ptr;					\
	   __ptr = (unsigned long) (ptr);			\
	   (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

/* Format: __UNIQUE_ID_<name>_<__COUNTER__> */
#define __UNIQUE_ID(name)					\
	__PASTE(__UNIQUE_ID_,					\
		__PASTE(name,					\
			__PASTE(_, __COUNTER__)))

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 * For example, if accesses to a given variable are protected by a lock,
 * except for diagnostic code, then the accesses under the lock should
 * be plain C-language accesses and those in the diagnostic code should
 * use data_race(). This way, KCSAN will complain if buggy lockless
 * accesses to that variable are introduced, even if the buggy accesses
 * are protected by READ_ONCE() or WRITE_ONCE().
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored. If the access must
 * be atomic *and* KCSAN should ignore the access, use both data_race()
 * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
 */
#define data_race(expr)						\
({								\
	__kcsan_disable_current();				\
	auto __v = (expr);					\
	__kcsan_enable_current();				\
	__v;							\
})
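
/*
 * A brief sketch of the usage described above, with hypothetical names
 * stats_lock, nr_events, stats_update() and stats_snapshot(). Updates hold
 * the lock and use plain accesses; the lockless diagnostic read is wrapped
 * in data_race(), so KCSAN tolerates this intentional race but still
 * reports any other unmarked lockless access to nr_events.
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long nr_events;
 *
 *	static void stats_update(void)
 *	{
 *		spin_lock(&stats_lock);
 *		nr_events++;			// plain access under the lock
 *		spin_unlock(&stats_lock);
 *	}
 *
 *	static unsigned long stats_snapshot(void)
 *	{
 *		return data_race(nr_events);	// tolerated racy read
 *	}
 */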

#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
#else /* __CHECKER__ */
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
#endif /* __CHECKER__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __is_array(a)		(!__same_type((a), &(a)[0]))
#define __must_be_array(a)	__BUILD_BUG_ON_ZERO_MSG(!__is_array(a),	\
							"must be array")

#define __is_byte_array(a)	(__is_array(a) && sizeof((a)[0]) == 1)
#define __must_be_byte_array(a)	__BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
							"must be byte array")

/*
 * If the "nonstring" attribute isn't available, we have to return true
 * so the __must_*() checks pass when "nonstring" isn't supported.
 */
#if __has_attribute(__nonstring__) && defined(__annotated)
#define __is_cstr(a)		(!__annotated(a, nonstring))
#define __is_noncstr(a)		(__annotated(a, nonstring))
#else
#define __is_cstr(a)		(true)
#define __is_noncstr(a)		(true)
#endif

/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
#define __must_be_cstr(p)					\
	__BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p),			\
				"must be C-string (NUL-terminated)")
#define __must_be_noncstr(p)					\
	__BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p),		\
				"must be non-C-string (not NUL-terminated)")

/*
 * Use __typeof_unqual__() when available.
 *
 * XXX: Remove test for __CHECKER__ once
 * sparse learns about __typeof_unqual__().
 */
#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
# define USE_TYPEOF_UNQUAL	1
#endif

/*
 * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as the typeof
 * operator when available, to return the unqualified type of the
 * expression.
 */
#if defined(USE_TYPEOF_UNQUAL)
# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
#else
# define TYPEOF_UNQUAL(exp) __typeof__(exp)
#endif

#endif /* __KERNEL__ */

#if defined(CONFIG_CFI) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
 * Force a reference to the external symbol so the compiler generates
 * __kcfi_typeid.
 */
#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
#else
#define KCFI_REFERENCE(sym)
#endif

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

#endif /* __ASSEMBLY__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define ___ADDRESSABLE(sym, __attrs)					\
	static void * __used __attrs					\
	__UNIQUE_ID(__PASTE(addressable_, sym)) = (void *)(uintptr_t)&sym;

#define __ADDRESSABLE(sym)						\
	___ADDRESSABLE(sym, __section(".discard.addressable"))

/*
 * This returns a constant expression while determining if an argument is
 * a constant expression, most importantly without evaluating the argument.
 * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
 *
 * Details:
 * - sizeof() returns an integer constant expression, and does not evaluate
 *   the value of its operand; it only examines the type of its operand.
 * - The result of comparing two integer constant expressions is also
 *   an integer constant expression.
 * - The first literal "8" isn't important. It could be any literal value.
 * - The second literal "8" is to avoid warnings about unaligned pointers;
 *   this could otherwise just be "1".
 * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
 *   architectures.
 * - The C Standard defines "null pointer constant", "(void *)0", as
 *   distinct from other void pointers.
 * - If (x) is an integer constant expression, then the "* 0l" resolves
 *   it into an integer constant expression of value 0. Since it is cast to
 *   "void *", this makes the second operand a null pointer constant.
 * - If (x) is not an integer constant expression, then the second operand
 *   resolves to a void pointer (but not a null pointer constant: the value
 *   is not an integer constant 0).
 * - The conditional operator's third operand, "(int *)8", is an object
 *   pointer (to type "int").
 * - The behavior (including the return type) of the conditional operator
 *   ("operand1 ? operand2 : operand3") depends on the kind of expressions
 *   given for the second and third operands. This is the central mechanism
 *   of the macro:
 *   - When one operand is a null pointer constant (i.e. when x is an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns the type of the
 *     object pointer operand (i.e. "int *"). Here, within the sizeof(), we
 *     would then get:
 *       sizeof(*((int *)(...))) == sizeof(int) == 4
 *   - When one operand is a void pointer (i.e. when x is not an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns a "void *" type.
 *     Here, within the sizeof(), we would then get:
 *       sizeof(*((void *)(...))) == sizeof(void) == 1
 * - The equality comparison to "sizeof(int)" therefore depends on (x):
 *     sizeof(int) == sizeof(int)	(x) was a constant expression
 *     sizeof(int) != sizeof(void)	(x) was not a constant expression
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
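
/*
 * A worked illustration of __is_constexpr(); the variable name n is
 * hypothetical. With a constant argument the conditional operator picks
 * the "int *" arm and the sizeof comparison holds; with a runtime value
 * the second operand is a plain "void *" and the comparison fails.
 *
 *	__is_constexpr(42)		// 1: "(void *)0" is a null pointer
 *					//    constant, result type is int *
 *	int n = 42;
 *	__is_constexpr(n)		// 0: result type is void *,
 *					//    sizeof(void) != sizeof(int)
 *	__is_constexpr(sizeof(n))	// 1: sizeof yields an integer
 *					//    constant expression
 */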

/*
 * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)
#define is_unsigned_type(type) (!is_signed_type(type))

/*
 * Useful shorthand for "is this condition known at compile-time?"
 *
 * Note that the condition may involve non-constant values,
 * but the compiler may know enough about the details of the
 * values to determine that the condition is statically true.
 */
#define statically_true(x) (__builtin_constant_p(x) && (x))

/*
 * Similar to statically_true() but produces a constant expression
 *
 * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
 * which require their input to be a constant expression and for which
 * statically_true() would otherwise fail.
 *
 * This is a trade-off: const_true() requires all its operands to be
 * compile-time constants. Otherwise, it always returns false, even in
 * the most trivial cases like:
 *
 *	true || non_const_var
 *
 * By contrast, statically_true() is able to fold more complex
 * tautologies and will return true on expressions such as:
 *
 *	!(non_const_var * 8 % 4)
 *
 * For the general case, statically_true() is better.
 */
#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */
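
/*
 * An illustrative contrast between statically_true() and const_true(),
 * following the comments above; runtime_val and get_value() are
 * hypothetical. With a constant argument both agree. With a runtime
 * argument const_true() simply yields the constant false (so it remains
 * usable in BUILD_BUG_ON_ZERO()-style checks), while statically_true()
 * lets the optimizer fold tautologies that still contain non-constant
 * operands.
 *
 *	int runtime_val = get_value();
 *
 *	statically_true(8 % 4 == 0)		// 1, folded at compile time
 *	const_true(8 % 4 == 0)			// 1, and a constant expression
 *
 *	statically_true(!(runtime_val * 8 % 4))	// may still fold to 1
 *	const_true(!(runtime_val * 8 % 4))	// 0: not a constant expression
 */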