// Serenity Operating System
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <Kernel/Process.h>
#include <Kernel/SharedBuffer.h>

namespace Kernel {

32Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>& shared_buffers()
33{
34 static Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>* map;
35 if (!map)
36 map = new Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>;
37 return *map;
38}
39
40void SharedBuffer::sanity_check(const char* what)
41{
42 LOCKER(shared_buffers().lock());
43
44 unsigned found_refs = 0;
45 for (const auto& ref : m_refs)
46 found_refs += ref.count;
47
48 if (found_refs != m_total_refs) {
49 dbg() << what << " sanity -- SharedBuffer{" << this << "} id: " << m_shbuf_id << " has total refs " << m_total_refs << " but we found " << found_refs;
50 for (const auto& ref : m_refs) {
51 dbg() << " ref from pid " << ref.pid << ": refcnt " << ref.count;
52 }
53 ASSERT_NOT_REACHED();
54 }
55}
56
57bool SharedBuffer::is_shared_with(pid_t peer_pid)
58{
59 LOCKER(shared_buffers().lock());
60 if (m_global)
61 return true;
62 for (auto& ref : m_refs) {
63 if (ref.pid == peer_pid) {
64 return true;
65 }
66 }
67
68 return false;
69}
70
71void* SharedBuffer::ref_for_process_and_get_address(Process& process)
72{
73 LOCKER(shared_buffers().lock());
74 ASSERT(is_shared_with(process.pid()));
75 if (m_global) {
76 bool found = false;
77 for (auto& ref : m_refs) {
78 if (ref.pid == process.pid()) {
79 found = true;
80 break;
81 }
82 }
83 if (!found)
84 m_refs.append(Reference(process.pid()));
85 }
86
87 for (auto& ref : m_refs) {
88 if (ref.pid == process.pid()) {
89 if (!ref.region) {
90 auto* region = process.allocate_region_with_vmobject(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
91 if (!region)
92 return (void*)-ENOMEM;
93 ref.region = region->make_weak_ptr();
94 ref.region->set_shared(true);
95 }
96 ref.count++;
97 m_total_refs++;
98 sanity_check("ref_for_process_and_get_address");
99 return ref.region->vaddr().as_ptr();
100 }
101 }
102 ASSERT_NOT_REACHED();
103}
104
105void SharedBuffer::share_with(pid_t peer_pid)
106{
107 LOCKER(shared_buffers().lock());
108 if (m_global)
109 return;
110 for (auto& ref : m_refs) {
111 if (ref.pid == peer_pid) {
112 // don't increment the reference count yet; let them shbuf_get it first.
113 sanity_check("share_with (old ref)");
114 return;
115 }
116 }
117
118 m_refs.append(Reference(peer_pid));
119 sanity_check("share_with (new ref)");
120}
121
122void SharedBuffer::deref_for_process(Process& process)
123{
124 LOCKER(shared_buffers().lock());
125 for (size_t i = 0; i < m_refs.size(); ++i) {
126 auto& ref = m_refs[i];
127 if (ref.pid == process.pid()) {
128 ref.count--;
129 m_total_refs--;
130 if (ref.count == 0) {
131#ifdef SHARED_BUFFER_DEBUG
132 dbg() << "Releasing shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid();
133#endif
134 process.deallocate_region(*ref.region);
135 m_refs.unstable_remove(i);
136#ifdef SHARED_BUFFER_DEBUG
137 dbg() << "Released shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid();
138#endif
139 sanity_check("deref_for_process");
140 destroy_if_unused();
141 return;
142 }
143 return;
144 }
145 }
146
147 ASSERT_NOT_REACHED();
148}
149
150void SharedBuffer::disown(pid_t pid)
151{
152 LOCKER(shared_buffers().lock());
153 for (size_t i = 0; i < m_refs.size(); ++i) {
154 auto& ref = m_refs[i];
155 if (ref.pid == pid) {
156#ifdef SHARED_BUFFER_DEBUG
157 dbg() << "Disowning shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid;
158#endif
159 m_total_refs -= ref.count;
160 m_refs.unstable_remove(i);
161#ifdef SHARED_BUFFER_DEBUG
162 dbg() << "Disowned shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid;
163#endif
164 destroy_if_unused();
165 return;
166 }
167 }
168}
169
170void SharedBuffer::destroy_if_unused()
171{
172 LOCKER(shared_buffers().lock());
173 sanity_check("destroy_if_unused");
174 if (m_total_refs == 0) {
175#ifdef SHARED_BUFFER_DEBUG
176 dbg() << "Destroying unused SharedBuffer{" << this << "} id: " << m_shbuf_id;
177#endif
178 auto count_before = shared_buffers().resource().size();
179 shared_buffers().resource().remove(m_shbuf_id);
180 ASSERT(count_before != shared_buffers().resource().size());
181 }
182}
183
184void SharedBuffer::seal()
185{
186 LOCKER(shared_buffers().lock());
187 m_writable = false;
188 for (auto& ref : m_refs) {
189 if (ref.region) {
190 ref.region->set_writable(false);
191 ref.region->remap();
192 }
193 }
194}
195
}