diff --git a/src/dbinc/atomic.h b/src/dbinc/atomic.h
index e4420aa..4799b5f 100644
--- a/src/dbinc/atomic.h
+++ b/src/dbinc/atomic.h
@@ -70,7 +70,7 @@ typedef struct {
  * These have no memory barriers; the caller must include them when necessary.
  */
 #define atomic_read(p)		((p)->value)
-#define atomic_init(p, val)	((p)->value = (val))
+#define atomic_init_db(p, val)	((p)->value = (val))
 
 #ifdef HAVE_ATOMIC_SUPPORT
 
@@ -225,7 +225,7 @@ static inline int __atomic_compare_exchange_int(
 #define atomic_dec(env, p)	(--(p)->value)
 #define atomic_compare_exchange(env, p, oldval, newval)		\
	(DB_ASSERT(env, atomic_read(p) == (oldval)),			\
-	atomic_init(p, (newval)), 1)
+	atomic_init_db(p, (newval)), 1)
 #else
 #define atomic_inc(env, p)	__atomic_inc_int(env, p)
 #define atomic_dec(env, p)	__atomic_dec_int(env, p)
diff --git a/src/mp/mp_fget.c b/src/mp/mp_fget.c
index 59fe9fe..fa4ced7 100644
--- a/src/mp/mp_fget.c
+++ b/src/mp/mp_fget.c
@@ -654,7 +654,7 @@ alloc: /* Allocate a new buffer header and data space. */
 
 	/* Initialize enough so we can call __memp_bhfree. */
 	alloc_bhp->flags = 0;
-	atomic_init(&alloc_bhp->ref, 1);
+	atomic_init_db(&alloc_bhp->ref, 1);
 #ifdef DIAGNOSTIC
 	if ((uintptr_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
 		__db_errx(env, DB_STR("3025",
@@ -969,7 +969,7 @@ alloc: /* Allocate a new buffer header and data space. */
 		MVCC_MPROTECT(bhp->buf, mfp->pagesize,
 		    PROT_READ);
 
-		atomic_init(&alloc_bhp->ref, 1);
+		atomic_init_db(&alloc_bhp->ref, 1);
 		MUTEX_LOCK(env, alloc_bhp->mtx_buf);
 		alloc_bhp->priority = bhp->priority;
 		alloc_bhp->pgno = bhp->pgno;
diff --git a/src/mp/mp_mvcc.c b/src/mp/mp_mvcc.c
index 83c4d72..0a47202 100644
--- a/src/mp/mp_mvcc.c
+++ b/src/mp/mp_mvcc.c
@@ -281,7 +281,7 @@ __memp_bh_freeze(dbmp, infop, hp, bhp, need_frozenp)
 #else
 	memcpy(frozen_bhp, bhp, SSZA(BH, buf));
 #endif
-	atomic_init(&frozen_bhp->ref, 0);
+	atomic_init_db(&frozen_bhp->ref, 0);
 	if (mutex != MUTEX_INVALID)
 		frozen_bhp->mtx_buf = mutex;
 	else if ((ret = __mutex_alloc(env, MTX_MPOOL_BH,
@@ -440,7 +440,7 @@ __memp_bh_thaw(dbmp, infop, hp, frozen_bhp, alloc_bhp)
 #endif
 		alloc_bhp->mtx_buf = mutex;
 		MUTEX_LOCK(env, alloc_bhp->mtx_buf);
-		atomic_init(&alloc_bhp->ref, 1);
+		atomic_init_db(&alloc_bhp->ref, 1);
 		F_CLR(alloc_bhp, BH_FROZEN);
 	}
 
diff --git a/src/mp/mp_region.c b/src/mp/mp_region.c
index 4d95e4f..e97459c 100644
--- a/src/mp/mp_region.c
+++ b/src/mp/mp_region.c
@@ -278,7 +278,7 @@ __memp_init(env, dbmp, reginfo_off, htab_buckets, max_nreg)
 		    MTX_MPOOL_FILE_BUCKET, 0, &htab[i].mtx_hash)) != 0)
 			return (ret);
 		SH_TAILQ_INIT(&htab[i].hash_bucket);
-		atomic_init(&htab[i].hash_page_dirty, 0);
+		atomic_init_db(&htab[i].hash_page_dirty, 0);
 	}
 
 	mtx_base = mtx_prev = MUTEX_INVALID;
@@ -332,7 +332,7 @@ no_prealloc:
 		    DB_MUTEX_SHARED, &hp->mtx_hash)) != 0)
 			return (ret);
 		SH_TAILQ_INIT(&hp->hash_bucket);
-		atomic_init(&hp->hash_page_dirty, 0);
+		atomic_init_db(&hp->hash_page_dirty, 0);
 #ifdef HAVE_STATISTICS
 		hp->hash_io_wait = 0;
 		hp->hash_frozen = hp->hash_thawed = hp->hash_frozen_freed = 0;
diff --git a/src/mutex/mut_method.c b/src/mutex/mut_method.c
index 72b34de..a9f9868 100644
--- a/src/mutex/mut_method.c
+++ b/src/mutex/mut_method.c
@@ -501,7 +501,7 @@ __atomic_compare_exchange_int(env, v, oldval, newval)
 	MUTEX_LOCK(env, mtx);
 	ret = atomic_read(v) == oldval;
 	if (ret)
-		atomic_init(v, newval);
+		atomic_init_db(v, newval);
 	MUTEX_UNLOCK(env, mtx);
 
 	return (ret);
diff --git a/src/mutex/mut_tas.c b/src/mutex/mut_tas.c
index 7899c4b..d9420fa 100644
--- a/src/mutex/mut_tas.c
+++ b/src/mutex/mut_tas.c
@@ -47,7 +47,7 @@ __db_tas_mutex_init(env, mutex, flags)
 
 #ifdef HAVE_SHARED_LATCHES
 	if (F_ISSET(mutexp, DB_MUTEX_SHARED))
-		atomic_init(&mutexp->sharecount, 0);
+		atomic_init_db(&mutexp->sharecount, 0);
 	else
 #endif
 	if (MUTEX_INIT(&mutexp->tas)) {
@@ -643,7 +643,7 @@ was_not_locked:
 		F_CLR(mutexp, DB_MUTEX_LOCKED);
 		/* Flush flag update before zeroing count */
 		MEMBAR_EXIT();
-		atomic_init(&mutexp->sharecount, 0);
+		atomic_init_db(&mutexp->sharecount, 0);
 	} else {
 		DB_ASSERT(env, sharecount > 0);
 		MEMBAR_EXIT();
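
The change above is a mechanical rename with no behavioral effect: the barrier-free
initializer macro defined in src/dbinc/atomic.h becomes atomic_init_db, and every call
site in the mpool and mutex code is updated to match. The snippet below is a minimal
standalone sketch of what the renamed macro pair does; the struct layouts and the name
bh_example are simplified stand-ins invented for illustration, not the actual Berkeley DB
definitions (the real atomic value field is not a plain int).

/*
 * Minimal standalone sketch, not the Berkeley DB headers. As the comment
 * in atomic.h says, these macros are plain loads/stores with no memory
 * barriers; callers add barriers (e.g. MEMBAR_EXIT) themselves when needed.
 */
#include <stdio.h>

typedef struct {
	volatile int value;		/* simplified stand-in for the real field */
} db_atomic_t;

#define atomic_read(p)		((p)->value)
#define atomic_init_db(p, val)	((p)->value = (val))

struct bh_example {			/* hypothetical stand-in for a buffer header */
	db_atomic_t ref;		/* reference count, as at the patched call sites */
};

int
main(void)
{
	struct bh_example bh;

	atomic_init_db(&bh.ref, 1);	/* mirrors atomic_init_db(&alloc_bhp->ref, 1) */
	printf("ref = %d\n", atomic_read(&bh.ref));
	return (0);
}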