// Serenity Operating System
1/*
2 * Copyright (c) 2019-2020, Sergey Bugaev <bugaevc@serenityos.org>
3 * Copyright (c) 2022-2023, Liav A. <liavalb@hotmail.co.il>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include <Kernel/FileSystem/RAMFS/Inode.h>
9#include <Kernel/Process.h>
10
11namespace Kernel {
12
// Regular-inode constructor: takes the next free inode index from the
// filesystem and stores the caller-supplied metadata and parent link.
RAMFSInode::RAMFSInode(RAMFS& fs, InodeMetadata const& metadata, LockWeakPtr<RAMFSInode> parent)
    : Inode(fs, fs.next_inode_index())
    , m_metadata(metadata)
    , m_parent(move(parent))
{
    // The caller cannot know our inode identifier in advance; patch it in now.
    m_metadata.inode = identifier();
}
20
21RAMFSInode::RAMFSInode(RAMFS& fs)
22 : Inode(fs, 1)
23 , m_root_directory_inode(true)
24{
25 auto now = kgettimeofday();
26 m_metadata.inode = identifier();
27 m_metadata.atime = now;
28 m_metadata.ctime = now;
29 m_metadata.mtime = now;
30 m_metadata.mode = S_IFDIR | 0755;
31}
32
// Defaulted; Child entries are allocated/freed in add_child()/remove_child().
RAMFSInode::~RAMFSInode() = default;
34
35ErrorOr<NonnullRefPtr<RAMFSInode>> RAMFSInode::try_create(RAMFS& fs, InodeMetadata const& metadata, LockWeakPtr<RAMFSInode> parent)
36{
37 return adopt_nonnull_ref_or_enomem(new (nothrow) RAMFSInode(fs, metadata, move(parent)));
38}
39
40ErrorOr<NonnullRefPtr<RAMFSInode>> RAMFSInode::try_create_root(RAMFS& fs)
41{
42 return adopt_nonnull_ref_or_enomem(new (nothrow) RAMFSInode(fs));
43}
44
45InodeMetadata RAMFSInode::metadata() const
46{
47 MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
48
49 return m_metadata;
50}
51
52ErrorOr<void> RAMFSInode::traverse_as_directory(Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const
53{
54 MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
55
56 if (!is_directory())
57 return ENOTDIR;
58
59 TRY(callback({ "."sv, identifier(), 0 }));
60 if (m_root_directory_inode) {
61 TRY(callback({ ".."sv, identifier(), 0 }));
62 } else if (auto parent = m_parent.strong_ref()) {
63 TRY(callback({ ".."sv, parent->identifier(), 0 }));
64 }
65
66 for (auto& child : m_children) {
67 TRY(callback({ child.name->view(), child.inode->identifier(), 0 }));
68 }
69 return {};
70}
71
// Repoints the directory entry `name` at `new_child`, keeping the entry (and
// its name) in place. Returns ENOENT if no such entry exists.
ErrorOr<void> RAMFSInode::replace_child(StringView name, Inode& new_child)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());
    // Cross-filesystem entries are not allowed.
    VERIFY(new_child.fsid() == fsid());

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;

    // Hold a strong ref to the outgoing inode so we can notify it below,
    // after the entry has already been repointed at the replacement.
    auto old_child = child->inode;
    child->inode = static_cast<RAMFSInode&>(new_child);

    old_child->did_delete_self();

    // TODO: Emit a did_replace_child event.

    return {};
}
91
92ErrorOr<NonnullOwnPtr<RAMFSInode::DataBlock>> RAMFSInode::DataBlock::create()
93{
94 auto data_block_buffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(DataBlock::block_size, AllocationStrategy::AllocateNow));
95 return TRY(adopt_nonnull_own_or_enomem(new (nothrow) DataBlock(move(data_block_buffer_vmobject))));
96}
97
// Ensures every data block overlapped by [offset, offset + io_size) exists,
// allocating any missing ones. On failure, all blocks allocated by this call
// are released again, leaving the inode exactly as it was.
ErrorOr<void> RAMFSInode::ensure_allocated_blocks(size_t offset, size_t io_size)
{
    VERIFY(m_inode_lock.is_locked());
    size_t block_start_index = offset / DataBlock::block_size;
    // One past the last block touched (ceiling division of the end offset).
    size_t block_last_index = ((offset + io_size) / DataBlock::block_size) + (((offset + io_size) % DataBlock::block_size) == 0 ? 0 : 1);
    VERIFY(block_start_index <= block_last_index);

    size_t original_size = m_blocks.size();
    Vector<size_t> allocated_block_indices;
    // Failure rollback: drop each block we allocated and restore the original
    // vector length. Shrinking a Vector cannot fail, hence MUST().
    ArmedScopeGuard clean_allocated_blocks_on_failure([&] {
        for (auto index : allocated_block_indices)
            m_blocks[index].clear();
        MUST(m_blocks.try_resize(original_size));
    });

    if (m_blocks.size() < (block_last_index))
        TRY(m_blocks.try_resize(block_last_index));

    for (size_t block_index = block_start_index; block_index < block_last_index; block_index++) {
        // A null entry is a hole in a sparse file; only fill the ones we need.
        if (!m_blocks[block_index]) {
            // Record the index before allocating, so the rollback guard also
            // covers a DataBlock::create() failure mid-loop.
            TRY(allocated_block_indices.try_append(block_index));
            m_blocks[block_index] = TRY(DataBlock::create());
        }
    }
    clean_allocated_blocks_on_failure.disarm();
    return {};
}
125
// Reads up to io_size bytes starting at offset into `buffer`.
// Returns 0 (EOF) when offset is at or past the recorded file size.
ErrorOr<size_t> RAMFSInode::read_bytes_from_content_space(size_t offset, size_t io_size, UserOrKernelBuffer& buffer) const
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(m_metadata.size >= 0);
    if (offset >= static_cast<size_t>(m_metadata.size))
        return 0;
    // One-block scratch region that do_io_on_content_space() remaps over each
    // data block in turn; Reserve avoids committing physical pages up front.
    auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Read, AllocationStrategy::Reserve));
    // const_cast is needed because do_io_on_content_space() is non-const; with
    // write == false it only reads the inode's blocks.
    return const_cast<RAMFSInode&>(*this).do_io_on_content_space(*mapping_region, offset, io_size, buffer, false);
}
135
// Inode interface entry point for reads; caller already holds m_inode_lock.
ErrorOr<size_t> RAMFSInode::read_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer& buffer, OpenFileDescription*) const
{
    VERIFY(m_inode_lock.is_locked());
    // Directory contents are enumerated via traverse_as_directory(), never read as bytes.
    VERIFY(!is_directory());
    return read_bytes_from_content_space(offset, size, buffer);
}
142
143ErrorOr<size_t> RAMFSInode::write_bytes_to_content_space(size_t offset, size_t io_size, UserOrKernelBuffer const& buffer)
144{
145 VERIFY(m_inode_lock.is_locked());
146 auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
147 return do_io_on_content_space(*mapping_region, offset, io_size, const_cast<UserOrKernelBuffer&>(buffer), true);
148}
149
// Inode interface entry point for writes; caller already holds m_inode_lock.
// Allocates any missing blocks, copies the data in, then grows the recorded
// file size if the write extended past the previous end.
ErrorOr<size_t> RAMFSInode::write_bytes_locked(off_t offset, size_t size, UserOrKernelBuffer const& buffer, OpenFileDescription*)
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(!is_directory());
    VERIFY(offset >= 0);

    TRY(ensure_allocated_blocks(offset, size));
    auto nwritten = TRY(write_bytes_to_content_space(offset, size, buffer));

    off_t old_size = m_metadata.size;
    off_t new_size = m_metadata.size;
    if (static_cast<off_t>(offset + size) > new_size)
        new_size = offset + size;

    // Writes only ever grow the file here; shrinking happens in truncate().
    if (new_size > old_size) {
        m_metadata.size = new_size;
        set_metadata_dirty(true);
    }
    did_modify_contents();
    return nwritten;
}
171
// Shared implementation behind both the read and write paths. Walks the run of
// blocks covered by [offset, offset + io_size), remapping `mapping_region`
// over each block's VM object in turn and copying through that mapping.
// Returns the number of bytes actually transferred.
ErrorOr<size_t> RAMFSInode::do_io_on_content_space(Memory::Region& mapping_region, size_t offset, size_t io_size, UserOrKernelBuffer& buffer, bool write)
{
    VERIFY(m_inode_lock.is_locked());
    size_t remaining_bytes = 0;
    if (!write) {
        // Note: For read operations, only perform read until the last byte.
        // If we are beyond the last byte, return 0 to indicate EOF.
        remaining_bytes = min(io_size, m_metadata.size - offset);
        if (remaining_bytes == 0)
            return 0;
    } else {
        remaining_bytes = io_size;
    }
    VERIFY(remaining_bytes != 0);

    UserOrKernelBuffer current_buffer = buffer.offset(0);
    auto block_start_index = offset / DataBlock::block_size;
    auto offset_in_block = offset % DataBlock::block_size;
    u64 block_index = block_start_index;
    size_t nio = 0;
    while (remaining_bytes > 0) {
        // Transfer at most up to the end of the current block.
        size_t current_io_size = min(DataBlock::block_size - offset_in_block, remaining_bytes);
        auto& block = m_blocks[block_index];
        if (!block && !write) {
            // Note: If the block does not exist then it's just a gap in the file,
            // so the buffer should be placed with zeroes in that section.
            TRY(current_buffer.memset(0, 0, current_io_size));
            remaining_bytes -= current_io_size;
            current_buffer = current_buffer.offset(current_io_size);
            nio += current_io_size;
            block_index++;
            // Note: Clear offset_in_block to zero to ensure that if we started from a middle of
            // a block, then next writes are just going to happen from the start of each block until the end.
            offset_in_block = 0;
            continue;
        } else if (!block) {
            // A hole on the write path is unexpected — the caller should have
            // run ensure_allocated_blocks() first.
            return Error::from_errno(EIO);
        }

        // Point the scratch mapping at this block's backing memory.
        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = block->vmobject();
        mapping_region.set_vmobject(block_vmobject);
        mapping_region.remap();
        if (write)
            TRY(current_buffer.read(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        else
            TRY(current_buffer.write(mapping_region.vaddr().offset(offset_in_block).as_ptr(), 0, current_io_size));
        current_buffer = current_buffer.offset(current_io_size);
        nio += current_io_size;
        remaining_bytes -= current_io_size;
        block_index++;
        // Note: Clear offset_in_block to zero to ensure that if we started from a middle of
        // a block, then next writes are just going to happen from the start of each block until the end.
        offset_in_block = 0;
    }
    VERIFY(nio <= io_size);
    return nio;
}
229
230ErrorOr<void> RAMFSInode::truncate_to_block_index(size_t block_index)
231{
232 VERIFY(m_inode_lock.is_locked());
233 TRY(m_blocks.try_resize(block_index));
234 return {};
235}
236
237ErrorOr<NonnullRefPtr<Inode>> RAMFSInode::lookup(StringView name)
238{
239 MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
240 VERIFY(is_directory());
241
242 if (name == ".")
243 return *this;
244 if (name == "..") {
245 if (auto parent = m_parent.strong_ref())
246 return *parent;
247 return ENOENT;
248 }
249
250 auto* child = find_child_by_name(name);
251 if (!child)
252 return ENOENT;
253 return child->inode;
254}
255
256RAMFSInode::Child* RAMFSInode::find_child_by_name(StringView name)
257{
258 for (auto& child : m_children) {
259 if (child.name->view() == name)
260 return &child;
261 }
262 return nullptr;
263}
264
265ErrorOr<void> RAMFSInode::flush_metadata()
266{
267 // We don't really have any metadata that could become dirty.
268 // The only reason we even call set_metadata_dirty() is
269 // to let the watchers know we have updates. Once that is
270 // switched to a different mechanism, we can stop ever marking
271 // our metadata as dirty at all.
272 set_metadata_dirty(false);
273 return {};
274}
275
276ErrorOr<void> RAMFSInode::chmod(mode_t mode)
277{
278 MutexLocker locker(m_inode_lock);
279
280 m_metadata.mode = mode;
281 set_metadata_dirty(true);
282 return {};
283}
284
285ErrorOr<void> RAMFSInode::chown(UserID uid, GroupID gid)
286{
287 MutexLocker locker(m_inode_lock);
288
289 m_metadata.uid = uid;
290 m_metadata.gid = gid;
291 set_metadata_dirty(true);
292 return {};
293}
294
295ErrorOr<NonnullRefPtr<Inode>> RAMFSInode::create_child(StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
296{
297 MutexLocker locker(m_inode_lock);
298 auto now = kgettimeofday();
299
300 InodeMetadata metadata;
301 metadata.mode = mode;
302 metadata.uid = uid;
303 metadata.gid = gid;
304 metadata.atime = now;
305 metadata.ctime = now;
306 metadata.mtime = now;
307 metadata.major_device = major_from_encoded_device(dev);
308 metadata.minor_device = minor_from_encoded_device(dev);
309
310 auto child = TRY(RAMFSInode::try_create(fs(), metadata, *this));
311 TRY(add_child(*child, name, mode));
312 return child;
313}
314
315ErrorOr<void> RAMFSInode::add_child(Inode& child, StringView name, mode_t)
316{
317 VERIFY(is_directory());
318 VERIFY(child.fsid() == fsid());
319
320 if (name.length() > NAME_MAX)
321 return ENAMETOOLONG;
322
323 MutexLocker locker(m_inode_lock);
324 for (auto const& existing_child : m_children) {
325 if (existing_child.name->view() == name)
326 return EEXIST;
327 }
328
329 auto name_kstring = TRY(KString::try_create(name));
330 // Balanced by `delete` in remove_child()
331
332 auto* child_entry = new (nothrow) Child { move(name_kstring), static_cast<RAMFSInode&>(child) };
333 if (!child_entry)
334 return ENOMEM;
335
336 m_children.append(*child_entry);
337 did_add_child(child.identifier(), name);
338 return {};
339}
340
// Removes the directory entry `name`. "." and ".." are silently ignored;
// a missing entry yields ENOENT.
ErrorOr<void> RAMFSInode::remove_child(StringView name)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());

    if (name == "." || name == "..")
        return {};

    auto* child = find_child_by_name(name);
    if (!child)
        return ENOENT;

    // Capture the identifier and notify the child of its unlinking before
    // detaching the entry; the Child struct is freed only after the list and
    // watchers no longer reference it.
    auto child_id = child->inode->identifier();
    child->inode->did_delete_self();
    m_children.remove(*child);
    did_remove_child(child_id, name);
    // Balanced by `new` in add_child()
    delete child;
    return {};
}
361
// Resizes the file to `size` bytes. Blocks wholly past the new end are freed;
// if the new end falls inside an allocated block, that block's tail is zeroed
// so a later re-extension reads zeroes instead of stale data.
ErrorOr<void> RAMFSInode::truncate(u64 size)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(!is_directory());

    // Number of blocks needed to cover `size` bytes (ceiling division).
    u64 block_index = size / DataBlock::block_size + ((size % DataBlock::block_size == 0) ? 0 : 1);
    TRY(truncate_to_block_index(block_index));

    u64 last_possible_block_index = size / DataBlock::block_size;
    // Zero the partial tail of the now-last block, if it is allocated at all
    // (a null entry is a sparse hole and already reads as zeroes).
    if ((size % DataBlock::block_size != 0) && m_blocks[last_possible_block_index]) {
        auto mapping_region = TRY(MM.allocate_kernel_region(DataBlock::block_size, "RAMFSInode Mapping Region"sv, Memory::Region::Access::Write, AllocationStrategy::Reserve));
        VERIFY(m_blocks[last_possible_block_index]);
        NonnullLockRefPtr<Memory::AnonymousVMObject> block_vmobject = m_blocks[last_possible_block_index]->vmobject();
        mapping_region->set_vmobject(block_vmobject);
        mapping_region->remap();
        memset(mapping_region->vaddr().offset(size % DataBlock::block_size).as_ptr(), 0, DataBlock::block_size - (size % DataBlock::block_size));
    }
    m_metadata.size = size;
    set_metadata_dirty(true);
    return {};
}
383
384ErrorOr<void> RAMFSInode::update_timestamps(Optional<Time> atime, Optional<Time> ctime, Optional<Time> mtime)
385{
386 MutexLocker locker(m_inode_lock);
387
388 if (atime.has_value())
389 m_metadata.atime = atime.value();
390 if (ctime.has_value())
391 m_metadata.ctime = ctime.value();
392 if (mtime.has_value())
393 m_metadata.mtime = mtime.value();
394 set_metadata_dirty(true);
395 return {};
396}
397
398}