/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield, November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declarations upfront:
 * - standard integer types (uint8_t, uint16_t, etc.)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */

#include <xen/interface/grant_table.h>

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
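
/*
 * A worked example (not part of the original header): each __RDn() step
 * tests the upper half of an n-bit value and recurses into whichever half
 * holds the highest set bit, so the result is the largest power of two
 * not exceeding the input. For instance, 100 (0x64) rounds down to 64,
 * while 4096 is already a power of two and is returned unchanged. A
 * hypothetical compile-time check (BUILD_BUG_ON() is the usual Linux
 * static assert; any static assertion would do):
 *
 *     BUILD_BUG_ON(__RD32(100)  != 64);
 *     BUILD_BUG_ON(__RD32(4096) != 4096);
 */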

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
            sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
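
/*
 * A worked example (not part of the original header), assuming a
 * hypothetical "mytag" ring whose union entry happens to be 64 bytes:
 * the shared ring header (two index pairs plus 48 bytes of padding) takes
 * 64 bytes, so a single 4 KiB page leaves (4096 - 64) / 64 = 63 entries,
 * which __RD32() rounds down to a ring of 32 entries:
 *
 *     __CONST_RING_SIZE(mytag, 4096) == 32
 */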

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t and response_t, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * This expands out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     mytag_front_ring_t front_ring;
 *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */
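
/*
 * A minimal sketch of the prerequisite message types (illustrative only;
 * the names and fields below are hypothetical, not mandated by this
 * header). A protocol would define its request/response structs and then
 * instantiate the ring types from them:
 *
 *     struct mytag_request {
 *         uint32_t id;        (request identifier, echoed back)
 *         uint32_t opcode;    (protocol-specific operation)
 *     };
 *     struct mytag_response {
 *         uint32_t id;        (matches the request's id)
 *         int32_t  status;    (result code)
 *     };
 *     DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);
 */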

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
 \
/* Shared ring entry */ \
union __name##_sring_entry { \
    __req_t req; \
    __rsp_t rsp; \
}; \
 \
/* Shared ring page */ \
struct __name##_sring { \
    RING_IDX req_prod, req_event; \
    RING_IDX rsp_prod, rsp_event; \
    uint8_t __pad[48]; \
    union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
 \
/* "Front" end's private variables */ \
struct __name##_front_ring { \
    RING_IDX req_prod_pvt; \
    RING_IDX rsp_cons; \
    unsigned int nr_ents; \
    struct __name##_sring *sring; \
}; \
 \
/* "Back" end's private variables */ \
struct __name##_back_ring { \
    RING_IDX rsp_prod_pvt; \
    RING_IDX req_cons; \
    unsigned int nr_ents; \
    struct __name##_sring *sring; \
};

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE() requests
 * outstanding.
 */

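/*
 * A usage sketch (not part of the original header): a front end fills the
 * next free slot, advances its private producer index, then pushes the
 * batch. prepare_request() and notify_remote() are hypothetical stand-ins
 * for protocol-specific marshalling and event-channel notification:
 *
 *     struct mytag_request *req;
 *     int notify;
 *
 *     if (!RING_FULL(&front_ring)) {
 *         req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *         prepare_request(req);
 *         front_ring.req_prod_pvt++;
 *         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *         if (notify)
 *             notify_remote();
 *     }
 */
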
/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do { \
    (_s)->req_prod = (_s)->rsp_prod = 0; \
    (_s)->req_event = (_s)->rsp_event = 1; \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while (0)

#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
    (_r)->req_prod_pvt = (_i); \
    (_r)->rsp_cons = (_i); \
    (_r)->nr_ents = __RING_SIZE(_s, __size); \
    (_r)->sring = (_s); \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
    (_r)->rsp_prod_pvt = (_i); \
    (_r)->req_cons = (_i); \
    (_r)->nr_ents = __RING_SIZE(_s, __size); \
    (_r)->sring = (_s); \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r) \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r) \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/*
 * Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r) \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
    unsigned int rsp = RING_SIZE(_r) - \
                       ((_r)->req_cons - (_r)->rsp_prod_pvt); \
    req < rsp ? req : rsp; \
})

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx) \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_(type, r, idx, dest) do { \
    /* Use volatile to force the copy into dest. */ \
    *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)

#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)

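/*
 * A usage sketch (not part of the original header): a back end snapshots
 * each request into a local copy before acting on it, so a misbehaving
 * front end cannot change the request mid-validation. handle_request() is
 * a hypothetical protocol handler. (Real consumers also issue a read
 * barrier, e.g. virt_rmb(), between observing req_prod and reading the
 * payload.)
 *
 *     struct mytag_request req;
 *
 *     while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *         RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *         back_ring.req_cons++;
 *         handle_request(&req);
 *     }
 */
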
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

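/*
 * A sketch (not from the original header) of how a back end might use the
 * producer-overflow check to detect a misbehaving front end before
 * consuming anything; disconnect_frontend() is hypothetical:
 *
 *     RING_IDX prod = back_ring.sring->req_prod;
 *
 *     if (RING_REQUEST_PROD_OVERFLOW(&back_ring, prod)) {
 *         disconnect_frontend();
 *         return;
 *     }
 */
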
#define RING_PUSH_REQUESTS(_r) do { \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)

#define RING_PUSH_RESPONSES(_r) do { \
    virt_wmb(); /* front sees resps /before/ updated producer index */ \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
    RING_IDX __old = (_r)->sring->req_prod; \
    RING_IDX __new = (_r)->req_prod_pvt; \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new; \
    virt_mb(); /* back sees new requests /before/ we check req_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
                 (RING_IDX)(__new - __old)); \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
    RING_IDX __old = (_r)->sring->rsp_prod; \
    RING_IDX __new = (_r)->rsp_prod_pvt; \
    virt_wmb(); /* front sees resps /before/ updated producer index */ \
    (_r)->sring->rsp_prod = __new; \
    virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
                 (RING_IDX)(__new - __old)); \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
    if (_work_to_do) break; \
    (_r)->sring->req_event = (_r)->req_cons + 1; \
    virt_mb(); \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
    if (_work_to_do) break; \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
    virt_mb(); \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)

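/*
 * A sketch (not from the original header) of the consumer side of the
 * hold-off scheme: drain the ring, then use the FINAL_CHECK macro to
 * re-arm req_event and close the race before sleeping. process() is a
 * hypothetical handler:
 *
 *     int work_to_do;
 *
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             process(RING_GET_REQUEST(&back_ring, back_ring.req_cons));
 *             back_ring.req_cons++;
 *         }
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *     } while (work_to_do);
 */
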
/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two unidirectional rings and
 * functions to check if there is data on a ring, and to read from and
 * write to it.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   within the range [0, size).
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer to read/write to the
 *   ring at the right location.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/*
 * The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and of the page granularity chosen
 * by the operating system.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order) \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))

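/*
 * A worked example (not part of the original header): an allocation of
 * 2^order 4 KiB Xen pages is split evenly between the "in" and "out"
 * rings, so each ring gets half the total:
 *
 *     XEN_FLEX_RING_SIZE(1) == 4096   (two pages total, 4 KiB per ring)
 *     XEN_FLEX_RING_SIZE(2) == 8192   (four pages total, 8 KiB per ring)
 */
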
#define DEFINE_XEN_FLEX_RING(name) \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
{ \
    return idx & (ring_size - 1); \
} \
 \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
                                                 RING_IDX idx, \
                                                 RING_IDX ring_size) \
{ \
    return buf + name##_mask(idx, ring_size); \
} \
 \
static inline void name##_read_packet(void *opaque, \
                                      const unsigned char *buf, \
                                      size_t size, \
                                      RING_IDX masked_prod, \
                                      RING_IDX *masked_cons, \
                                      RING_IDX ring_size) \
{ \
    if (*masked_cons < masked_prod || \
        size <= ring_size - *masked_cons) { \
        memcpy(opaque, buf + *masked_cons, size); \
    } else { \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
               size - (ring_size - *masked_cons)); \
    } \
    *masked_cons = name##_mask(*masked_cons + size, ring_size); \
} \
 \
static inline void name##_write_packet(unsigned char *buf, \
                                       const void *opaque, \
                                       size_t size, \
                                       RING_IDX *masked_prod, \
                                       RING_IDX masked_cons, \
                                       RING_IDX ring_size) \
{ \
    if (*masked_prod < masked_cons || \
        size <= ring_size - *masked_prod) { \
        memcpy(buf + *masked_prod, opaque, size); \
    } else { \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
               size - (ring_size - *masked_prod)); \
    } \
    *masked_prod = name##_mask(*masked_prod + size, ring_size); \
} \
 \
static inline RING_IDX name##_queued(RING_IDX prod, \
                                     RING_IDX cons, \
                                     RING_IDX ring_size) \
{ \
    RING_IDX size; \
 \
    if (prod == cons) \
        return 0; \
 \
    prod = name##_mask(prod, ring_size); \
    cons = name##_mask(cons, ring_size); \
 \
    if (prod == cons) \
        return ring_size; \
 \
    if (prod > cons) \
        size = prod - cons; \
    else \
        size = ring_size - (cons - prod); \
    return size; \
} \
 \
struct name##_data { \
    unsigned char *in;  /* half of the allocation */ \
    unsigned char *out; /* half of the allocation */ \
}

#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
struct name##_data_intf { \
    RING_IDX in_cons, in_prod; \
 \
    uint8_t pad1[56]; \
 \
    RING_IDX out_cons, out_prod; \
 \
    uint8_t pad2[56]; \
 \
    RING_IDX ring_order; \
    grant_ref_t ref[]; \
}; \
DEFINE_XEN_FLEX_RING(name)

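/*
 * A hedged usage sketch (not from the original header): a protocol header
 * would typically instantiate the helpers once, e.g.
 *
 *     DEFINE_XEN_FLEX_RING_AND_INTF(myflex);
 *
 * and a reader might then drain its "in" ring along these lines, where
 * "intf" points at a struct myflex_data_intf and "data.in" at the mapped
 * ring buffer (both hypothetical names), with a read barrier between
 * observing in_prod and copying the payload:
 *
 *     RING_IDX cons = intf->in_cons, prod = intf->in_prod;
 *     RING_IDX ring_size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *     RING_IDX avail = myflex_queued(prod, cons, ring_size);
 *     RING_IDX masked_prod = myflex_mask(prod, ring_size);
 *     RING_IDX masked_cons = myflex_mask(cons, ring_size);
 *
 *     virt_rmb();
 *     myflex_read_packet(dst, data.in, avail, masked_prod, &masked_cons,
 *                        ring_size);
 *     intf->in_cons = cons + avail;
 */
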
#endif /* __XEN_PUBLIC_IO_RING_H__ */