// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)

typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

/* Userspace stand-ins for the kernel allocation and locking APIs
 * that ptr_ring.h expects.
 */
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

/* The _bh/_irq/_irqsave variants have nothing extra to disable in
 * userspace, so they all reduce to plain lock/unlock.
 */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed. Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed; fake it accordingly.
 */
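/*
 * Consequently get_buf() succeeds only while at least one produced buffer
 * is outstanding (tailcnt < headcnt) and the ring still has free slots:
 * a success both proves a past add_inbuf() and guarantees that the next
 * add_inbuf() cannot fail.
 */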
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

/* Notification callbacks are not implemented by this backend: the
 * harness is expected to poll, so reaching any of these is a bug.
 */
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
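/*
 * Usage sketch (assumptions: ring_size and param are globals supplied by
 * the ringtest harness via main.h, and main.c provides the driver loop
 * that calls back into this file; the exact build line may differ):
 *
 *	gcc -O2 -pthread -o ptr_ring ptr_ring.c main.c
 *
 * One legal callback sequence on a single thread:
 *
 *	alloc_ring();
 *	assert(!add_inbuf(0x80, "Buffer\n", NULL));	// guest: produce
 *	assert(!avail_empty());				// host: work pending
 *	assert(use_buf(NULL, NULL));			// host: consume
 *	assert(get_buf(NULL, NULL));			// guest: reclaim
 */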