/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_ASM_GENERIC_IO_H
#define _TOOLS_ASM_GENERIC_IO_H

#include <asm/barrier.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#ifndef mmiowb_set_pending
#define mmiowb_set_pending() do { } while (0)
#endif

#ifndef __io_br
#define __io_br() barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v) rmb()
#else
#define __io_ar(v) barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw() wmb()
#else
#define __io_bw() barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw() mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw() __io_bw()
#endif

#ifndef __io_paw
#define __io_paw() __io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr() __io_br()
#endif

#ifndef __io_par
#define __io_par(v) __io_ar(v)
#endif

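/*
 * Usage sketch (illustrative addition, not from the upstream header): the
 * hooks above are what give writel() and friends their ordering guarantees
 * against coherent-DMA memory.  A driver that fills a DMA descriptor and
 * then rings a doorbell leans on __io_bw() inside writel().  The
 * example_desc layout and the 0x20 doorbell offset below are hypothetical.
 */
#if 0
struct example_desc {
	u32 len;
	u32 flags;
};

static void example_post_descriptor(struct example_desc *desc,
				    void __iomem *regs, u32 tail)
{
	desc->len = 64;			/* plain stores to coherent memory */
	desc->flags = 0x1;		/* hypothetical DESC_READY flag */
	/* __io_bw() in writel() orders the stores before the doorbell */
	writel(tail, regs + 0x20);	/* hypothetical DOORBELL register */
}
#endif
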
#ifndef _THIS_IP_
#define _THIS_IP_ 0
#endif

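/*
 * No-op stubs for the kernel's MMIO tracing hooks; in the kernel proper
 * these feed the rwmmio tracepoints, but tracing is not available in the
 * tools/ build.
 */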
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr, unsigned long caller_addr0) {}

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

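/*
 * Usage sketch (illustrative addition): the __raw_*() accessors skip both
 * the barriers and the endian conversion, so they fit native-endian
 * on-device memory where the caller orders accesses itself.  The "sram"
 * name and the 0x100 offset are hypothetical.
 */
#if 0
static u32 example_peek_sram(const void __iomem *sram)
{
	/* native-endian, unordered load; caller supplies any barriers */
	return __raw_readl(sram + 0x100);
}
#endif
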
#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

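/*
 * Usage sketch (illustrative addition): a typical register poke through the
 * little-endian accessors.  The 0x04/0x08 offsets and the bit layout are
 * hypothetical device details.
 */
#if 0
static void example_start_device(void __iomem *regs)
{
	u32 status = readl(regs + 0x04);	/* hypothetical STATUS */

	if (status & 0x1)			/* hypothetical READY bit */
		writel(0x1, regs + 0x08);	/* hypothetical GO register */
}
#endif
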
#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
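
/*
 * Usage sketch (illustrative addition): writel() finishes with __io_aw(),
 * mmiowb_set_pending() by default, so an MMIO write issued under a spinlock
 * is ordered against the eventual unlock on architectures that need it.
 * The lock and the 0x00 doorbell offset are hypothetical, and spinlock_t
 * is assumed to be available to the caller.
 */
#if 0
static void example_kick_hw(void __iomem *regs, spinlock_t *lock, u32 seq)
{
	spin_lock(lock);
	writel(seq, regs + 0x00);	/* ordered against spin_unlock() */
	spin_unlock(lock);
}
#endif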

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
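
/*
 * Usage sketch (illustrative addition): the _relaxed() forms drop the
 * per-access barriers, which can matter when one register is read in a
 * tight loop.  The polling loop and the 0x0c offset are hypothetical.
 */
#if 0
static u32 example_spin_until_nonzero(const void __iomem *regs)
{
	u32 v;

	/* no implied ordering against DMA; add barriers where needed */
	do {
		v = readl_relaxed(regs + 0x0c);
	} while (!v);

	return v;
}
#endif
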
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	val = __raw_readb(addr);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__raw_writeb(value, addr);
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
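
/*
 * Usage sketch (illustrative addition): the string accessors drain or fill
 * a FIFO exposed at a single MMIO address.  The 0x40 FIFO offset is
 * hypothetical.
 */
#if 0
static void example_drain_fifo(const void __iomem *regs, u32 *dst,
			       unsigned int words)
{
	/* @words u32 loads from one address, native endianness, no barriers */
	readsl(regs + 0x40, dst, words);
}
#endif
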
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
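
/*
 * Usage sketch (illustrative addition): the write-side string accessors
 * push a buffer out through a single FIFO register; the 0x44 offset is
 * hypothetical.  No endian swap and no barriers are applied.
 */
#if 0
static void example_fill_fifo(void __iomem *regs, const u32 *src,
			      unsigned int words)
{
	writesl(regs + 0x44, src, words);
}
#endif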

#endif /* _TOOLS_ASM_GENERIC_IO_H */