/* tools/virtio/ringtest ptr_ring test harness — from Linux v4.15-rc2 */
1// SPDX-License-Identifier: GPL-2.0 2#define _GNU_SOURCE 3#include "main.h" 4#include <stdlib.h> 5#include <stdio.h> 6#include <string.h> 7#include <pthread.h> 8#include <malloc.h> 9#include <assert.h> 10#include <errno.h> 11#include <limits.h> 12 13#define SMP_CACHE_BYTES 64 14#define cache_line_size() SMP_CACHE_BYTES 15#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES))) 16#define unlikely(x) (__builtin_expect(!!(x), 0)) 17#define likely(x) (__builtin_expect(!!(x), 1)) 18#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) 19typedef pthread_spinlock_t spinlock_t; 20 21typedef int gfp_t; 22static void *kmalloc(unsigned size, gfp_t gfp) 23{ 24 return memalign(64, size); 25} 26 27static void *kzalloc(unsigned size, gfp_t gfp) 28{ 29 void *p = memalign(64, size); 30 if (!p) 31 return p; 32 memset(p, 0, size); 33 34 return p; 35} 36 37static void kfree(void *p) 38{ 39 if (p) 40 free(p); 41} 42 43static void spin_lock_init(spinlock_t *lock) 44{ 45 int r = pthread_spin_init(lock, 0); 46 assert(!r); 47} 48 49static void spin_lock(spinlock_t *lock) 50{ 51 int ret = pthread_spin_lock(lock); 52 assert(!ret); 53} 54 55static void spin_unlock(spinlock_t *lock) 56{ 57 int ret = pthread_spin_unlock(lock); 58 assert(!ret); 59} 60 61static void spin_lock_bh(spinlock_t *lock) 62{ 63 spin_lock(lock); 64} 65 66static void spin_unlock_bh(spinlock_t *lock) 67{ 68 spin_unlock(lock); 69} 70 71static void spin_lock_irq(spinlock_t *lock) 72{ 73 spin_lock(lock); 74} 75 76static void spin_unlock_irq(spinlock_t *lock) 77{ 78 spin_unlock(lock); 79} 80 81static void spin_lock_irqsave(spinlock_t *lock, unsigned long f) 82{ 83 spin_lock(lock); 84} 85 86static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f) 87{ 88 spin_unlock(lock); 89} 90 91#include "../../../include/linux/ptr_ring.h" 92 93static unsigned long long headcnt, tailcnt; 94static struct ptr_ring array ____cacheline_aligned_in_smp; 95 96/* implemented by ring */ 97void alloc_ring(void) 98{ 99 
int ret = ptr_ring_init(&array, ring_size, 0); 100 assert(!ret); 101 /* Hacky way to poke at ring internals. Useful for testing though. */ 102 if (param) 103 array.batch = param; 104} 105 106/* guest side */ 107int add_inbuf(unsigned len, void *buf, void *datap) 108{ 109 int ret; 110 111 ret = __ptr_ring_produce(&array, buf); 112 if (ret >= 0) { 113 ret = 0; 114 headcnt++; 115 } 116 117 return ret; 118} 119 120/* 121 * ptr_ring API provides no way for producer to find out whether a given 122 * buffer was consumed. Our tests merely require that a successful get_buf 123 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed, 124 * fake it accordingly. 125 */ 126void *get_buf(unsigned *lenp, void **bufp) 127{ 128 void *datap; 129 130 if (tailcnt == headcnt || __ptr_ring_full(&array)) 131 datap = NULL; 132 else { 133 datap = "Buffer\n"; 134 ++tailcnt; 135 } 136 137 return datap; 138} 139 140bool used_empty() 141{ 142 return (tailcnt == headcnt || __ptr_ring_full(&array)); 143} 144 145void disable_call() 146{ 147 assert(0); 148} 149 150bool enable_call() 151{ 152 assert(0); 153} 154 155void kick_available(void) 156{ 157 assert(0); 158} 159 160/* host side */ 161void disable_kick() 162{ 163 assert(0); 164} 165 166bool enable_kick() 167{ 168 assert(0); 169} 170 171bool avail_empty() 172{ 173 return !__ptr_ring_peek(&array); 174} 175 176bool use_buf(unsigned *lenp, void **bufp) 177{ 178 void *ptr; 179 180 ptr = __ptr_ring_consume(&array); 181 182 return ptr; 183} 184 185void call_used(void) 186{ 187 assert(0); 188}