/* linux/compiler.h (Linux v4.12) */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# ifdef STRUCTLEAK_PLUGIN
#  define __user __attribute__((user))
# else
#  define __user
# endif
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler also defines __GNUC__, so overwrite here the
 * implementations coming from the header files above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* Clang also defines __GNUC__, so overwrite here the implementations
 * coming from the header files above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment.  Actual compiler/compiler-version specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

struct ftrace_likely_data {
	struct ftrace_branch_data	data;
	unsigned long			constant;
};
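/*
 * Example: a minimal sketch of how the sparse annotations at the top of
 * this file are typically used.  The function names are hypothetical;
 * only the annotations themselves come from this header:
 *
 *	// __user data must be copied, never dereferenced directly;
 *	// sparse flags a direct dereference.
 *	long example_read(char __user *buf, size_t len);
 *
 *	// __acquires()/__releases() let sparse check lock balance
 *	// across function boundaries.
 *	void example_lock(spinlock_t *lock) __acquires(lock);
 *	void example_unlock(spinlock_t *lock) __releases(lock);
 */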
/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly.  For example,
 * interrupt vector handlers, or functions in the kernel image that are
 * found programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls.
 * Those are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP()
 * in the linker script.  For example, an architecture could KEEP() its
 * entire boot/exception vector code rather than annotate each function
 * and data item.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
     (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
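/*
 * Example: typical use of the branch annotations defined above
 * (illustrative only; example_process() is hypothetical).  These are
 * hints for block placement and branch prediction; correctness must
 * never depend on them:
 *
 *	if (unlikely(!buf))
 *		return -EINVAL;		// error paths are rarely taken
 *	if (likely(len > 0))
 *		example_process(buf, len);
 */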
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining.  Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() fall back to memcpy().  There are at
 * least two memcpy()s: the __builtin_memcpy() itself, plus the copy into
 * '__u', the variable the macro allocates on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
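/*
 * Example: a minimal polling loop, for illustration only ('stop' here
 * is a hypothetical flag set from an interrupt handler).  Without
 * READ_ONCE() the compiler may hoist the load out of the loop and
 * spin on a stale register value:
 *
 *	static int stop;
 *
 *	while (!READ_ONCE(stop))
 *		cpu_relax();
 */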
/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide a memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) };	\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif
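/*
 * Example: illustrative declarations using the attributes above (all
 * function names are hypothetical, following the 'Usage is' pattern
 * shown for __deprecated):
 *
 *	int __must_check example_claim(void);	// callers must test the result
 *	int __deprecated example_old_api(void);	// every caller gets a warning
 *	static int __used example_asm_helper(void);  // referenced only from asm
 */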
/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */
#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) \
	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert.  Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
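/*
 * Example: illustrative use of the assertions above.  The condition
 * must be a compile-time constant; a false condition breaks the build
 * with the given message:
 *
 *	compiletime_assert(sizeof(long) >= 4,
 *			   "need at least 32-bit longs");
 *	compiletime_assert_atomic_type(jiffies);	// jiffies: unsigned long
 */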
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types.  For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same
 * CPU, and (2) Ensuring that the compiler does not fold, spindle, or
 * otherwise mutilate accesses that either do not require ordering or that
 * interact with an explicit memory barrier or atomic instruction that
 * provides the required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

#endif /* __LINUX_COMPILER_H */
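/*
 * Example: publishing a pointer for lockless readers (illustrative only;
 * 'gp', 'p', 'q', struct foo and example_consume() are hypothetical):
 *
 *	struct foo *gp;
 *
 *	// publisher: order initialization before the pointer is visible
 *	p->data = 42;
 *	smp_wmb();
 *	WRITE_ONCE(gp, p);
 *
 *	// reader: lockless_dereference() preserves dependency ordering,
 *	// so q->data is read after the READ_ONCE(gp) load
 *	q = lockless_dereference(gp);
 *	if (q)
 *		example_consume(q->data);
 */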