// Serenity Operating System
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26
27#include <Kernel/FileSystem/Inode.h>
28#include <Kernel/VM/InodeVMObject.h>
29#include <Kernel/VM/MemoryManager.h>
30#include <Kernel/VM/Region.h>
31
32namespace Kernel {
33
34NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
35{
36 size_t size = inode.size();
37 if (inode.vmobject())
38 return *inode.vmobject();
39 auto vmobject = adopt(*new InodeVMObject(inode, size));
40 vmobject->inode().set_vmobject(*vmobject);
41 return vmobject;
42}
43
44NonnullRefPtr<VMObject> InodeVMObject::clone()
45{
46 return adopt(*new InodeVMObject(*this));
47}
48
// Constructs a VMObject covering `size` bytes of `inode`'s contents.
// The dirty-page bitmap gets one bit per page, all initially clear (clean).
InodeVMObject::InodeVMObject(Inode& inode, size_t size)
    : VMObject(size)
    , m_inode(inode)
    , m_dirty_pages(page_count(), false)
{
}
55
56InodeVMObject::InodeVMObject(const InodeVMObject& other)
57 : VMObject(other)
58 , m_inode(other.m_inode)
59{
60}
61
InodeVMObject::~InodeVMObject()
{
    // Sanity check: the inode should still be pointing back at us.
    // NOTE(review): the inode's vmobject reference is not cleared here —
    // presumably Inode holds a weak/raw reference invalidated elsewhere;
    // confirm against Inode::set_vmobject and the Inode destructor.
    ASSERT(inode().vmobject() == this);
}
66
67size_t InodeVMObject::amount_clean() const
68{
69 size_t count = 0;
70 ASSERT(page_count() == (size_t)m_dirty_pages.size());
71 for (size_t i = 0; i < page_count(); ++i) {
72 if (!m_dirty_pages.get(i) && m_physical_pages[i])
73 ++count;
74 }
75 return count * PAGE_SIZE;
76}
77
78size_t InodeVMObject::amount_dirty() const
79{
80 size_t count = 0;
81 for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
82 if (m_dirty_pages.get(i))
83 ++count;
84 }
85 return count * PAGE_SIZE;
86}
87
// Called by the Inode (Badge-protected) when its on-disk size changes.
// Resizes the physical page list and dirty bitmap to match, then remaps
// every region so stale mappings fault back in through the new layout.
void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    // Run with interrupts off so the page tables and our bookkeeping
    // can't be observed half-updated.
    InterruptDisabler disabler;

    // New page count, rounding the byte size up to a whole page.
    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
    m_physical_pages.resize(new_page_count);

    // NOTE(review): Bitmap::grow only ever grows; on a shrinking inode the
    // dirty bitmap presumably keeps stale trailing bits — confirm whether
    // anything relies on m_dirty_pages.size() == page_count() after shrink.
    m_dirty_pages.grow(new_page_count, false);

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
106
// Called by the Inode (Badge-protected) after its contents change on disk.
// Current strategy: drop ALL cached physical pages and remap every region,
// so the next access faults and re-reads fresh data from the inode.
// The finer-grained in-place update below is disabled (#if 0).
void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    // Unused while the whole-object invalidation path is in effect.
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

    // Disabled: copy the changed bytes directly into any already-cached
    // pages instead of discarding them.
    // NOTE(review): if revived, this code advances `data` in places where it
    // reads from `data_ptr` — the two pointers would drift apart; fix before
    // enabling.
#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [] (size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    if (current_offset & PAGE_MASK) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data += bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](auto& region) {
        region.remap();
    });
}
157
// Releases every resident, non-dirty page. Returns the number of pages freed.
// Takes the paging lock so we don't race a concurrent page-in, then defers
// to the unlocked implementation.
int InodeVMObject::release_all_clean_pages()
{
    LOCKER(m_paging_lock);
    return release_all_clean_pages_impl();
}
163
164int InodeVMObject::release_all_clean_pages_impl()
165{
166 int count = 0;
167 InterruptDisabler disabler;
168 for (size_t i = 0; i < page_count(); ++i) {
169 if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
170 m_physical_pages[i] = nullptr;
171 ++count;
172 }
173 }
174 for_each_region([](auto& region) {
175 region.remap();
176 });
177 return count;
178}
179
180u32 InodeVMObject::writable_mappings() const
181{
182 u32 count = 0;
183 const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
184 if (region.is_writable())
185 ++count;
186 });
187 return count;
188}
189
190u32 InodeVMObject::executable_mappings() const
191{
192 u32 count = 0;
193 const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
194 if (region.is_executable())
195 ++count;
196 });
197 return count;
198}
199
200}