/* include/linux/compiler.h, Linux v4.4-rc2 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __pmem		__attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/*
 * The Intel compiler defines __GNUC__, so the implementations coming
 * from the header files above are overridden here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * The clang compiler likewise defines __GNUC__, so the implementations
 * coming from the header files above are overridden here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * builds go below this comment.  Actual compiler/compiler-version
 * specific implementations come from the header files above.
 */

struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
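/*
 * Illustrative sketch, not part of the original header: the likely()
 * and unlikely() macros defined below annotate conditions whose outcome
 * is strongly biased one way, e.g.:
 *
 *        buf = kmalloc(len, GFP_KERNEL);
 *        if (unlikely(!buf))
 *                return -ENOMEM;
 *
 * With branch profiling enabled, each use additionally records how often
 * the prediction was right in a struct ftrace_branch_data entry.
 */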
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
# define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
# define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p((cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
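/*
 * Illustrative sketch, not part of the original header: for the sizes
 * handled by the switch in __READ_ONCE_SIZE, the helper performs a
 * single volatile load of the full width; any other size degrades to a
 * byte-wise memcpy() bracketed by barrier(), which the compiler cannot
 * merge or refetch but which may still tear:
 *
 *        __u64 snapshot;
 *        __read_once_size(&shared_counter, &snapshot, sizeof(snapshot));
 *        // compiles to one 64-bit volatile load of shared_counter
 */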
#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining.  Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) };	\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */
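/*
 * Illustrative sketch, not part of the original header: a flag shared
 * between process context and an interrupt handler on the same CPU can
 * be accessed with READ_ONCE()/WRITE_ONCE() so that the compiler neither
 * caches, refetches, nor tears the access:
 *
 *        static int wakeup_pending;
 *
 *        void irq_handler(void)  { WRITE_ONCE(wakeup_pending, 1); }
 *        int  poll_wakeup(void)  { return READ_ONCE(wakeup_pending); }
 */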
#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the functions' removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */
#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif
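/*
 * Illustrative sketch, not part of the original header: __section()
 * stringifies its argument via #S, so a named-section placement can be
 * written without quotes:
 *
 *        static int boot_flag __section(.data.boot);
 *
 * which expands to __attribute__((__section__(".data.boot"))).
 */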
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert.  Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
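/*
 * Illustrative sketch, not part of the original header: when the
 * condition is false, compiletime_assert() emits a call to the
 * undefined function __compiletime_assert_<line>(), which carries the
 * __compiletime_error() attribute, so the build fails with "msg":
 *
 *        compiletime_assert(sizeof(long) >= 4, "long is too small");
 *
 * When the condition is true, the dead call is optimized away and no
 * code is generated.
 */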
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types.  For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */
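/*
 * Illustrative sketch, not part of the original header:
 * lockless_dereference() above is meant for loading a pointer that was
 * published with a write barrier, when the object's lifetime is managed
 * by something other than RCU, e.g. a reference count:
 *
 *        struct foo *f = lockless_dereference(global_foo);
 *        if (f)
 *                use(f->field);
 *
 * The smp_read_barrier_depends() inside orders the later dereference of
 * f->field after the load of the pointer on architectures (such as
 * Alpha) that do not honor data dependencies.
 */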