Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.13-rc3
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <stdbool.h>

extern int param;

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	asm volatile("0: brctg %0,0b" : : "d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
#include <stdlib.h>	/* for _Exit() in the unsupported-arch fallback */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call();
bool used_empty();
bool enable_call();
void kick_available();
/* host side */
void disable_kick();
bool avail_empty();
bool enable_kick();
bool use_buf(unsigned *, void **);
void call_used();

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#include <assert.h>	/* the fallback below traps via assert() */
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

#endif /* MAIN_H */
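
/*
 * Example (a minimal sketch, not part of the kernel file): how a ring
 * implementation built on this header might pair smp_release() with
 * smp_acquire() in a single-producer/single-consumer queue, so that a
 * consumer that observes the new tail index also observes the slot data
 * written before it. Everything named demo_* below is hypothetical; only
 * busy_wait(), smp_release() and smp_acquire() come from the header above.
 */
#define DEMO_RING_SIZE 256	/* assumed power of two */

struct demo_ring {
	void *data[DEMO_RING_SIZE];
	unsigned head;	/* advanced only by the consumer */
	unsigned tail;	/* advanced only by the producer */
};

/* Producer: publish one buffer; returns false if the ring is full. */
static inline bool demo_push(struct demo_ring *r, void *buf)
{
	if (r->tail - r->head == DEMO_RING_SIZE)
		return false;
	r->data[r->tail & (DEMO_RING_SIZE - 1)] = buf;
	/* Order the slot write before the index store that publishes it. */
	smp_release();
	r->tail++;
	return true;
}

/* Consumer: spin with busy_wait() until a buffer is published. */
static inline void *demo_pop(struct demo_ring *r)
{
	void *buf;

	while (r->head == r->tail)
		busy_wait();	/* its barrier() forces the indexes to be reloaded */
	/* Pairs with smp_release(): see the slot write, not just the index. */
	smp_acquire();
	buf = r->data[r->head & (DEMO_RING_SIZE - 1)];
	r->head++;
	return buf;
}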
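
/*
 * Example (an assumption about intended use, not code from the tree): why
 * smp_mb() above is a full barrier rather than a release/acquire pair. An
 * event-suppression scheme stores a "please notify me" flag and then
 * re-checks the ring; that store->load sequence is exactly what release and
 * acquire fences do not order, so without a full fence the producer could
 * miss the flag while the consumer misses the buffer, and both would sleep.
 * demo_call_needed and demo_enable_call() are hypothetical names.
 */
extern volatile bool demo_call_needed;

static inline bool demo_enable_call(void)
{
	demo_call_needed = true;
	/* Full fence: the flag store must not be reordered past the re-check. */
	smp_mb();
	return used_empty();	/* true: still empty, safe to sleep */
}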
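
/*
 * Example (a plausible consumer loop, inferred from the declarations above
 * rather than taken from the benchmark's main.c): guest-side processing
 * with call suppression. Buffers are drained with calls disabled;
 * enable_call() is assumed to return true when the used ring is still
 * empty, i.e. when it is safe to sleep until the host's call().
 */
static inline void demo_guest_drain(void)
{
	unsigned len;
	void *buf;

	for (;;) {
		disable_call();
		/* Drain completed buffers without taking notifications. */
		while (!used_empty()) {
			if (!get_buf(&len, &buf))
				break;
			/* ... consume buf ... */
		}
		/* Re-arm; sleep only if nothing slipped in meanwhile. */
		if (enable_call())
			wait_for_call();
	}
}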