Serenity Operating System
at master 1084 lines 47 kB view raw
1/* 2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org> 3 * Copyright (c) 2021, sin-ack <sin-ack@protonmail.com> 4 * 5 * SPDX-License-Identifier: BSD-2-Clause 6 */ 7 8#include <AK/MemoryStream.h> 9#include <Kernel/API/POSIX/errno.h> 10#include <Kernel/Debug.h> 11#include <Kernel/FileSystem/Ext2FS/Inode.h> 12#include <Kernel/FileSystem/InodeMetadata.h> 13#include <Kernel/UnixTypes.h> 14 15namespace Kernel { 16 17static constexpr size_t max_inline_symlink_length = 60; 18 19static u8 to_ext2_file_type(mode_t mode) 20{ 21 if (is_regular_file(mode)) 22 return EXT2_FT_REG_FILE; 23 if (is_directory(mode)) 24 return EXT2_FT_DIR; 25 if (is_character_device(mode)) 26 return EXT2_FT_CHRDEV; 27 if (is_block_device(mode)) 28 return EXT2_FT_BLKDEV; 29 if (is_fifo(mode)) 30 return EXT2_FT_FIFO; 31 if (is_socket(mode)) 32 return EXT2_FT_SOCK; 33 if (is_symlink(mode)) 34 return EXT2_FT_SYMLINK; 35 return EXT2_FT_UNKNOWN; 36} 37 38ErrorOr<void> Ext2FSInode::write_indirect_block(BlockBasedFileSystem::BlockIndex block, Span<BlockBasedFileSystem::BlockIndex> blocks_indices) 39{ 40 auto const entries_per_block = EXT2_ADDR_PER_BLOCK(&fs().super_block()); 41 VERIFY(blocks_indices.size() <= entries_per_block); 42 43 auto block_contents = TRY(ByteBuffer::create_zeroed(fs().block_size())); 44 FixedMemoryStream stream { block_contents.bytes() }; 45 auto buffer = UserOrKernelBuffer::for_kernel_buffer(block_contents.data()); 46 47 VERIFY(blocks_indices.size() <= EXT2_ADDR_PER_BLOCK(&fs().super_block())); 48 for (unsigned i = 0; i < blocks_indices.size(); ++i) 49 MUST(stream.write_value<u32>(blocks_indices[i].value())); 50 51 return fs().write_block(block, buffer, block_contents.size()); 52} 53 54ErrorOr<void> Ext2FSInode::grow_doubly_indirect_block(BlockBasedFileSystem::BlockIndex block, size_t old_blocks_length, Span<BlockBasedFileSystem::BlockIndex> blocks_indices, Vector<Ext2FS::BlockIndex>& new_meta_blocks, unsigned& meta_blocks) 55{ 56 auto const entries_per_block = 
EXT2_ADDR_PER_BLOCK(&fs().super_block()); 57 auto const entries_per_doubly_indirect_block = entries_per_block * entries_per_block; 58 auto const old_indirect_blocks_length = ceil_div(old_blocks_length, entries_per_block); 59 auto const new_indirect_blocks_length = ceil_div(blocks_indices.size(), entries_per_block); 60 VERIFY(blocks_indices.size() > 0); 61 VERIFY(blocks_indices.size() > old_blocks_length); 62 VERIFY(blocks_indices.size() <= entries_per_doubly_indirect_block); 63 64 auto block_contents = TRY(ByteBuffer::create_zeroed(fs().block_size())); 65 auto* block_as_pointers = (unsigned*)block_contents.data(); 66 FixedMemoryStream stream { block_contents.bytes() }; 67 auto buffer = UserOrKernelBuffer::for_kernel_buffer(block_contents.data()); 68 69 if (old_blocks_length > 0) { 70 TRY(fs().read_block(block, &buffer, fs().block_size())); 71 } 72 73 // Grow the doubly indirect block. 74 for (unsigned i = 0; i < old_indirect_blocks_length; i++) 75 MUST(stream.write_value<u32>(block_as_pointers[i])); 76 for (unsigned i = old_indirect_blocks_length; i < new_indirect_blocks_length; i++) { 77 auto new_block = new_meta_blocks.take_last().value(); 78 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::grow_doubly_indirect_block(): Allocating indirect block {} at index {}", identifier(), new_block, i); 79 MUST(stream.write_value<u32>(new_block)); 80 meta_blocks++; 81 } 82 83 // Write out the indirect blocks. 84 for (unsigned i = old_blocks_length / entries_per_block; i < new_indirect_blocks_length; i++) { 85 auto const offset_block = i * entries_per_block; 86 TRY(write_indirect_block(block_as_pointers[i], blocks_indices.slice(offset_block, min(blocks_indices.size() - offset_block, entries_per_block)))); 87 } 88 89 // Write out the doubly indirect block. 
90 return fs().write_block(block, buffer, block_contents.size()); 91} 92 93ErrorOr<void> Ext2FSInode::shrink_doubly_indirect_block(BlockBasedFileSystem::BlockIndex block, size_t old_blocks_length, size_t new_blocks_length, unsigned& meta_blocks) 94{ 95 auto const entries_per_block = EXT2_ADDR_PER_BLOCK(&fs().super_block()); 96 auto const entries_per_doubly_indirect_block = entries_per_block * entries_per_block; 97 auto const old_indirect_blocks_length = ceil_div(old_blocks_length, entries_per_block); 98 auto const new_indirect_blocks_length = ceil_div(new_blocks_length, entries_per_block); 99 VERIFY(old_blocks_length > 0); 100 VERIFY(old_blocks_length >= new_blocks_length); 101 VERIFY(new_blocks_length <= entries_per_doubly_indirect_block); 102 103 auto block_contents = TRY(ByteBuffer::create_uninitialized(fs().block_size())); 104 auto* block_as_pointers = (unsigned*)block_contents.data(); 105 auto buffer = UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(block_as_pointers)); 106 TRY(fs().read_block(block, &buffer, fs().block_size())); 107 108 // Free the unused indirect blocks. 109 for (unsigned i = new_indirect_blocks_length; i < old_indirect_blocks_length; i++) { 110 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::shrink_doubly_indirect_block(): Freeing indirect block {} at index {}", identifier(), block_as_pointers[i], i); 111 TRY(fs().set_block_allocation_state(block_as_pointers[i], false)); 112 meta_blocks--; 113 } 114 115 // Free the doubly indirect block if no longer needed. 
116 if (new_blocks_length == 0) { 117 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::shrink_doubly_indirect_block(): Freeing doubly indirect block {}", identifier(), block); 118 TRY(fs().set_block_allocation_state(block, false)); 119 meta_blocks--; 120 } 121 122 return {}; 123} 124 125ErrorOr<void> Ext2FSInode::grow_triply_indirect_block(BlockBasedFileSystem::BlockIndex block, size_t old_blocks_length, Span<BlockBasedFileSystem::BlockIndex> blocks_indices, Vector<Ext2FS::BlockIndex>& new_meta_blocks, unsigned& meta_blocks) 126{ 127 auto const entries_per_block = EXT2_ADDR_PER_BLOCK(&fs().super_block()); 128 auto const entries_per_doubly_indirect_block = entries_per_block * entries_per_block; 129 auto const entries_per_triply_indirect_block = entries_per_block * entries_per_block; 130 auto const old_doubly_indirect_blocks_length = ceil_div(old_blocks_length, entries_per_doubly_indirect_block); 131 auto const new_doubly_indirect_blocks_length = ceil_div(blocks_indices.size(), entries_per_doubly_indirect_block); 132 VERIFY(blocks_indices.size() > 0); 133 VERIFY(blocks_indices.size() > old_blocks_length); 134 VERIFY(blocks_indices.size() <= entries_per_triply_indirect_block); 135 136 auto block_contents = TRY(ByteBuffer::create_zeroed(fs().block_size())); 137 auto* block_as_pointers = (unsigned*)block_contents.data(); 138 FixedMemoryStream stream { block_contents.bytes() }; 139 auto buffer = UserOrKernelBuffer::for_kernel_buffer(block_contents.data()); 140 141 if (old_blocks_length > 0) { 142 TRY(fs().read_block(block, &buffer, fs().block_size())); 143 } 144 145 // Grow the triply indirect block. 
146 for (unsigned i = 0; i < old_doubly_indirect_blocks_length; i++) 147 MUST(stream.write_value<u32>(block_as_pointers[i])); 148 for (unsigned i = old_doubly_indirect_blocks_length; i < new_doubly_indirect_blocks_length; i++) { 149 auto new_block = new_meta_blocks.take_last().value(); 150 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::grow_triply_indirect_block(): Allocating doubly indirect block {} at index {}", identifier(), new_block, i); 151 MUST(stream.write_value<u32>(new_block)); 152 meta_blocks++; 153 } 154 155 // Write out the doubly indirect blocks. 156 for (unsigned i = old_blocks_length / entries_per_doubly_indirect_block; i < new_doubly_indirect_blocks_length; i++) { 157 auto const processed_blocks = i * entries_per_doubly_indirect_block; 158 auto const old_doubly_indirect_blocks_length = min(old_blocks_length > processed_blocks ? old_blocks_length - processed_blocks : 0, entries_per_doubly_indirect_block); 159 auto const new_doubly_indirect_blocks_length = min(blocks_indices.size() > processed_blocks ? blocks_indices.size() - processed_blocks : 0, entries_per_doubly_indirect_block); 160 TRY(grow_doubly_indirect_block(block_as_pointers[i], old_doubly_indirect_blocks_length, blocks_indices.slice(processed_blocks, new_doubly_indirect_blocks_length), new_meta_blocks, meta_blocks)); 161 } 162 163 // Write out the triply indirect block. 
164 return fs().write_block(block, buffer, block_contents.size()); 165} 166 167ErrorOr<void> Ext2FSInode::shrink_triply_indirect_block(BlockBasedFileSystem::BlockIndex block, size_t old_blocks_length, size_t new_blocks_length, unsigned& meta_blocks) 168{ 169 auto const entries_per_block = EXT2_ADDR_PER_BLOCK(&fs().super_block()); 170 auto const entries_per_doubly_indirect_block = entries_per_block * entries_per_block; 171 auto const entries_per_triply_indirect_block = entries_per_doubly_indirect_block * entries_per_block; 172 auto const old_triply_indirect_blocks_length = ceil_div(old_blocks_length, entries_per_doubly_indirect_block); 173 auto const new_triply_indirect_blocks_length = new_blocks_length / entries_per_doubly_indirect_block; 174 VERIFY(old_blocks_length > 0); 175 VERIFY(old_blocks_length >= new_blocks_length); 176 VERIFY(new_blocks_length <= entries_per_triply_indirect_block); 177 178 auto block_contents = TRY(ByteBuffer::create_uninitialized(fs().block_size())); 179 auto* block_as_pointers = (unsigned*)block_contents.data(); 180 auto buffer = UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(block_as_pointers)); 181 TRY(fs().read_block(block, &buffer, fs().block_size())); 182 183 // Shrink the doubly indirect blocks. 184 for (unsigned i = new_triply_indirect_blocks_length; i < old_triply_indirect_blocks_length; i++) { 185 auto const processed_blocks = i * entries_per_doubly_indirect_block; 186 auto const old_doubly_indirect_blocks_length = min(old_blocks_length > processed_blocks ? old_blocks_length - processed_blocks : 0, entries_per_doubly_indirect_block); 187 auto const new_doubly_indirect_blocks_length = min(new_blocks_length > processed_blocks ? 
new_blocks_length - processed_blocks : 0, entries_per_doubly_indirect_block); 188 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::shrink_triply_indirect_block(): Shrinking doubly indirect block {} at index {}", identifier(), block_as_pointers[i], i); 189 TRY(shrink_doubly_indirect_block(block_as_pointers[i], old_doubly_indirect_blocks_length, new_doubly_indirect_blocks_length, meta_blocks)); 190 } 191 192 // Free the triply indirect block if no longer needed. 193 if (new_blocks_length == 0) { 194 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::shrink_triply_indirect_block(): Freeing triply indirect block {}", identifier(), block); 195 TRY(fs().set_block_allocation_state(block, false)); 196 meta_blocks--; 197 } 198 199 return {}; 200} 201 202ErrorOr<void> Ext2FSInode::flush_block_list() 203{ 204 MutexLocker locker(m_inode_lock); 205 206 if (m_block_list.is_empty()) { 207 m_raw_inode.i_blocks = 0; 208 memset(m_raw_inode.i_block, 0, sizeof(m_raw_inode.i_block)); 209 set_metadata_dirty(true); 210 return {}; 211 } 212 213 // NOTE: There is a mismatch between i_blocks and blocks.size() since i_blocks includes meta blocks and blocks.size() does not. 
214 auto const old_block_count = ceil_div(size(), static_cast<u64>(fs().block_size())); 215 216 auto old_shape = fs().compute_block_list_shape(old_block_count); 217 auto const new_shape = fs().compute_block_list_shape(m_block_list.size()); 218 219 Vector<Ext2FS::BlockIndex> new_meta_blocks; 220 if (new_shape.meta_blocks > old_shape.meta_blocks) { 221 new_meta_blocks = TRY(fs().allocate_blocks(fs().group_index_from_inode(index()), new_shape.meta_blocks - old_shape.meta_blocks)); 222 } 223 224 m_raw_inode.i_blocks = (m_block_list.size() + new_shape.meta_blocks) * (fs().block_size() / 512); 225 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): Old shape=({};{};{};{}:{}), new shape=({};{};{};{}:{})", identifier(), old_shape.direct_blocks, old_shape.indirect_blocks, old_shape.doubly_indirect_blocks, old_shape.triply_indirect_blocks, old_shape.meta_blocks, new_shape.direct_blocks, new_shape.indirect_blocks, new_shape.doubly_indirect_blocks, new_shape.triply_indirect_blocks, new_shape.meta_blocks); 226 227 unsigned output_block_index = 0; 228 unsigned remaining_blocks = m_block_list.size(); 229 230 // Deal with direct blocks. 231 bool inode_dirty = false; 232 VERIFY(new_shape.direct_blocks <= EXT2_NDIR_BLOCKS); 233 for (unsigned i = 0; i < new_shape.direct_blocks; ++i) { 234 if (BlockBasedFileSystem::BlockIndex(m_raw_inode.i_block[i]) != m_block_list[output_block_index]) 235 inode_dirty = true; 236 m_raw_inode.i_block[i] = m_block_list[output_block_index].value(); 237 ++output_block_index; 238 --remaining_blocks; 239 } 240 // e2fsck considers all blocks reachable through any of the pointers in 241 // m_raw_inode.i_block as part of this inode regardless of the value in 242 // m_raw_inode.i_size. When it finds more blocks than the amount that 243 // is indicated by i_size or i_blocks it offers to repair the filesystem 244 // by changing those values. That will actually cause further corruption. 
245 // So we must zero all pointers to blocks that are now unused. 246 for (unsigned i = new_shape.direct_blocks; i < EXT2_NDIR_BLOCKS; ++i) { 247 m_raw_inode.i_block[i] = 0; 248 } 249 if (inode_dirty) { 250 if constexpr (EXT2_DEBUG) { 251 dbgln("Ext2FSInode[{}]::flush_block_list(): Writing {} direct block(s) to i_block array of inode {}", identifier(), min((size_t)EXT2_NDIR_BLOCKS, m_block_list.size()), index()); 252 for (size_t i = 0; i < min((size_t)EXT2_NDIR_BLOCKS, m_block_list.size()); ++i) 253 dbgln(" + {}", m_block_list[i]); 254 } 255 set_metadata_dirty(true); 256 } 257 258 // Deal with indirect blocks. 259 if (old_shape.indirect_blocks != new_shape.indirect_blocks) { 260 if (new_shape.indirect_blocks > old_shape.indirect_blocks) { 261 // Write out the indirect block. 262 if (old_shape.indirect_blocks == 0) { 263 auto new_block = new_meta_blocks.take_last().value(); 264 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): Allocating indirect block: {}", identifier(), new_block); 265 m_raw_inode.i_block[EXT2_IND_BLOCK] = new_block; 266 set_metadata_dirty(true); 267 old_shape.meta_blocks++; 268 } 269 270 TRY(write_indirect_block(m_raw_inode.i_block[EXT2_IND_BLOCK], m_block_list.span().slice(output_block_index, new_shape.indirect_blocks))); 271 } else if ((new_shape.indirect_blocks == 0) && (old_shape.indirect_blocks != 0)) { 272 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): Freeing indirect block: {}", identifier(), m_raw_inode.i_block[EXT2_IND_BLOCK]); 273 TRY(fs().set_block_allocation_state(m_raw_inode.i_block[EXT2_IND_BLOCK], false)); 274 old_shape.meta_blocks--; 275 m_raw_inode.i_block[EXT2_IND_BLOCK] = 0; 276 } 277 } 278 279 remaining_blocks -= new_shape.indirect_blocks; 280 output_block_index += new_shape.indirect_blocks; 281 282 if (old_shape.doubly_indirect_blocks != new_shape.doubly_indirect_blocks) { 283 // Write out the doubly indirect block. 
284 if (new_shape.doubly_indirect_blocks > old_shape.doubly_indirect_blocks) { 285 if (old_shape.doubly_indirect_blocks == 0) { 286 auto new_block = new_meta_blocks.take_last().value(); 287 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): Allocating doubly indirect block: {}", identifier(), new_block); 288 m_raw_inode.i_block[EXT2_DIND_BLOCK] = new_block; 289 set_metadata_dirty(true); 290 old_shape.meta_blocks++; 291 } 292 TRY(grow_doubly_indirect_block(m_raw_inode.i_block[EXT2_DIND_BLOCK], old_shape.doubly_indirect_blocks, m_block_list.span().slice(output_block_index, new_shape.doubly_indirect_blocks), new_meta_blocks, old_shape.meta_blocks)); 293 } else { 294 TRY(shrink_doubly_indirect_block(m_raw_inode.i_block[EXT2_DIND_BLOCK], old_shape.doubly_indirect_blocks, new_shape.doubly_indirect_blocks, old_shape.meta_blocks)); 295 if (new_shape.doubly_indirect_blocks == 0) 296 m_raw_inode.i_block[EXT2_DIND_BLOCK] = 0; 297 } 298 } 299 300 remaining_blocks -= new_shape.doubly_indirect_blocks; 301 output_block_index += new_shape.doubly_indirect_blocks; 302 303 if (old_shape.triply_indirect_blocks != new_shape.triply_indirect_blocks) { 304 // Write out the triply indirect block. 
305 if (new_shape.triply_indirect_blocks > old_shape.triply_indirect_blocks) { 306 if (old_shape.triply_indirect_blocks == 0) { 307 auto new_block = new_meta_blocks.take_last().value(); 308 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): Allocating triply indirect block: {}", identifier(), new_block); 309 m_raw_inode.i_block[EXT2_TIND_BLOCK] = new_block; 310 set_metadata_dirty(true); 311 old_shape.meta_blocks++; 312 } 313 TRY(grow_triply_indirect_block(m_raw_inode.i_block[EXT2_TIND_BLOCK], old_shape.triply_indirect_blocks, m_block_list.span().slice(output_block_index, new_shape.triply_indirect_blocks), new_meta_blocks, old_shape.meta_blocks)); 314 } else { 315 TRY(shrink_triply_indirect_block(m_raw_inode.i_block[EXT2_TIND_BLOCK], old_shape.triply_indirect_blocks, new_shape.triply_indirect_blocks, old_shape.meta_blocks)); 316 if (new_shape.triply_indirect_blocks == 0) 317 m_raw_inode.i_block[EXT2_TIND_BLOCK] = 0; 318 } 319 } 320 321 remaining_blocks -= new_shape.triply_indirect_blocks; 322 output_block_index += new_shape.triply_indirect_blocks; 323 324 dbgln_if(EXT2_BLOCKLIST_DEBUG, "Ext2FSInode[{}]::flush_block_list(): New meta blocks count at {}, expecting {}", identifier(), old_shape.meta_blocks, new_shape.meta_blocks); 325 VERIFY(new_meta_blocks.size() == 0); 326 VERIFY(old_shape.meta_blocks == new_shape.meta_blocks); 327 if (!remaining_blocks) 328 return {}; 329 330 dbgln("we don't know how to write qind ext2fs blocks, they don't exist anyway!"); 331 VERIFY_NOT_REACHED(); 332} 333 334ErrorOr<Vector<Ext2FS::BlockIndex>> Ext2FSInode::compute_block_list() const 335{ 336 return compute_block_list_impl(false); 337} 338 339ErrorOr<Vector<Ext2FS::BlockIndex>> Ext2FSInode::compute_block_list_with_meta_blocks() const 340{ 341 return compute_block_list_impl(true); 342} 343 344ErrorOr<Vector<Ext2FS::BlockIndex>> Ext2FSInode::compute_block_list_impl(bool include_block_list_blocks) const 345{ 346 // FIXME: This is really awkwardly factored.. 
foo_impl_internal :| 347 auto block_list = TRY(compute_block_list_impl_internal(m_raw_inode, include_block_list_blocks)); 348 while (!block_list.is_empty() && block_list.last() == 0) 349 block_list.take_last(); 350 return block_list; 351} 352 353ErrorOr<Vector<Ext2FS::BlockIndex>> Ext2FSInode::compute_block_list_impl_internal(ext2_inode const& e2inode, bool include_block_list_blocks) const 354{ 355 unsigned entries_per_block = EXT2_ADDR_PER_BLOCK(&fs().super_block()); 356 357 unsigned block_count = ceil_div(size(), static_cast<u64>(fs().block_size())); 358 359 // If we are handling a symbolic link, the path is stored in the 60 bytes in 360 // the inode that are used for the 12 direct and 3 indirect block pointers, 361 // If the path is longer than 60 characters, a block is allocated, and the 362 // block contains the destination path. The file size corresponds to the 363 // path length of the destination. 364 if (Kernel::is_symlink(e2inode.i_mode) && e2inode.i_blocks == 0) 365 block_count = 0; 366 367 dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::block_list_for_inode(): i_size={}, i_blocks={}, block_count={}", identifier(), e2inode.i_size, e2inode.i_blocks, block_count); 368 369 unsigned blocks_remaining = block_count; 370 371 if (include_block_list_blocks) { 372 auto shape = fs().compute_block_list_shape(block_count); 373 blocks_remaining += shape.meta_blocks; 374 } 375 376 Vector<Ext2FS::BlockIndex> list; 377 378 auto add_block = [&](auto bi) -> ErrorOr<void> { 379 if (blocks_remaining) { 380 TRY(list.try_append(bi)); 381 --blocks_remaining; 382 } 383 return {}; 384 }; 385 386 if (include_block_list_blocks) { 387 // This seems like an excessive over-estimate but w/e. 
388 TRY(list.try_ensure_capacity(blocks_remaining * 2)); 389 } else { 390 TRY(list.try_ensure_capacity(blocks_remaining)); 391 } 392 393 unsigned direct_count = min(block_count, (unsigned)EXT2_NDIR_BLOCKS); 394 for (unsigned i = 0; i < direct_count; ++i) { 395 auto block_index = e2inode.i_block[i]; 396 TRY(add_block(block_index)); 397 } 398 399 if (!blocks_remaining) 400 return list; 401 402 // Don't need to make copy of add_block, since this capture will only 403 // be called before compute_block_list_impl_internal finishes. 404 auto process_block_array = [&](auto array_block_index, auto&& callback) -> ErrorOr<void> { 405 if (include_block_list_blocks) 406 TRY(add_block(array_block_index)); 407 auto count = min(blocks_remaining, entries_per_block); 408 if (!count) 409 return {}; 410 size_t read_size = count * sizeof(u32); 411 auto array_storage = TRY(ByteBuffer::create_uninitialized(read_size)); 412 auto* array = (u32*)array_storage.data(); 413 auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)array); 414 TRY(fs().read_block(array_block_index, &buffer, read_size, 0)); 415 for (unsigned i = 0; i < count; ++i) 416 TRY(callback(Ext2FS::BlockIndex(array[i]))); 417 return {}; 418 }; 419 420 TRY(process_block_array(e2inode.i_block[EXT2_IND_BLOCK], [&](auto block_index) -> ErrorOr<void> { 421 return add_block(block_index); 422 })); 423 424 if (!blocks_remaining) 425 return list; 426 427 TRY(process_block_array(e2inode.i_block[EXT2_DIND_BLOCK], [&](auto block_index) -> ErrorOr<void> { 428 return process_block_array(block_index, [&](auto block_index2) -> ErrorOr<void> { 429 return add_block(block_index2); 430 }); 431 })); 432 433 if (!blocks_remaining) 434 return list; 435 436 TRY(process_block_array(e2inode.i_block[EXT2_TIND_BLOCK], [&](auto block_index) -> ErrorOr<void> { 437 return process_block_array(block_index, [&](auto block_index2) -> ErrorOr<void> { 438 return process_block_array(block_index2, [&](auto block_index3) -> ErrorOr<void> { 439 return 
add_block(block_index3); 440 }); 441 }); 442 })); 443 444 return list; 445} 446 447Ext2FSInode::Ext2FSInode(Ext2FS& fs, InodeIndex index) 448 : Inode(fs, index) 449{ 450} 451 452Ext2FSInode::~Ext2FSInode() 453{ 454 if (m_raw_inode.i_links_count == 0) { 455 // Alas, we have nowhere to propagate any errors that occur here. 456 (void)fs().free_inode(*this); 457 } 458} 459 460u64 Ext2FSInode::size() const 461{ 462 if (Kernel::is_regular_file(m_raw_inode.i_mode) && ((u32)fs().get_features_readonly() & (u32)Ext2FS::FeaturesReadOnly::FileSize64bits)) 463 return static_cast<u64>(m_raw_inode.i_dir_acl) << 32 | m_raw_inode.i_size; 464 return m_raw_inode.i_size; 465} 466 467InodeMetadata Ext2FSInode::metadata() const 468{ 469 MutexLocker locker(m_inode_lock); 470 InodeMetadata metadata; 471 metadata.inode = identifier(); 472 metadata.size = size(); 473 metadata.mode = m_raw_inode.i_mode; 474 metadata.uid = m_raw_inode.i_uid; 475 metadata.gid = m_raw_inode.i_gid; 476 metadata.link_count = m_raw_inode.i_links_count; 477 metadata.atime = Time::from_timespec({ m_raw_inode.i_atime, 0 }); 478 metadata.ctime = Time::from_timespec({ m_raw_inode.i_ctime, 0 }); 479 metadata.mtime = Time::from_timespec({ m_raw_inode.i_mtime, 0 }); 480 metadata.dtime = Time::from_timespec({ m_raw_inode.i_dtime, 0 }); 481 metadata.block_size = fs().block_size(); 482 metadata.block_count = m_raw_inode.i_blocks; 483 484 if (Kernel::is_character_device(m_raw_inode.i_mode) || Kernel::is_block_device(m_raw_inode.i_mode)) { 485 unsigned dev = m_raw_inode.i_block[0]; 486 if (!dev) 487 dev = m_raw_inode.i_block[1]; 488 metadata.major_device = (dev & 0xfff00) >> 8; 489 metadata.minor_device = (dev & 0xff) | ((dev >> 12) & 0xfff00); 490 } 491 return metadata; 492} 493 494ErrorOr<void> Ext2FSInode::flush_metadata() 495{ 496 MutexLocker locker(m_inode_lock); 497 dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::flush_metadata(): Flushing inode", identifier()); 498 TRY(fs().write_ext2_inode(index(), m_raw_inode)); 499 if 
(is_directory()) { 500 // Unless we're about to go away permanently, invalidate the lookup cache. 501 if (m_raw_inode.i_links_count != 0) { 502 // FIXME: This invalidation is way too hardcore. It's sad to throw away the whole cache. 503 m_lookup_cache.clear(); 504 } 505 } 506 set_metadata_dirty(false); 507 return {}; 508} 509 510ErrorOr<void> Ext2FSInode::compute_block_list_with_exclusive_locking() 511{ 512 // Note: We verify that the inode mutex is being held locked. Because only the read_bytes_locked() 513 // method uses this method and the mutex can be locked in shared mode when reading the Inode if 514 // it is an ext2 regular file, but also in exclusive mode, when the Inode is an ext2 directory and being 515 // traversed, we use another exclusive lock to ensure we always mutate the block list safely. 516 VERIFY(m_inode_lock.is_locked()); 517 MutexLocker block_list_locker(m_block_list_lock); 518 if (m_block_list.is_empty()) 519 m_block_list = TRY(compute_block_list()); 520 return {}; 521} 522 523ErrorOr<size_t> Ext2FSInode::read_bytes_locked(off_t offset, size_t count, UserOrKernelBuffer& buffer, OpenFileDescription* description) const 524{ 525 VERIFY(m_inode_lock.is_locked()); 526 VERIFY(offset >= 0); 527 if (m_raw_inode.i_size == 0) 528 return 0; 529 530 if (static_cast<u64>(offset) >= size()) 531 return 0; 532 533 // Symbolic links shorter than 60 characters are store inline inside the i_block array. 534 // This avoids wasting an entire block on short links. (Most links are short.) 535 if (is_symlink() && size() < max_inline_symlink_length) { 536 VERIFY(offset == 0); 537 size_t nread = min((off_t)size() - offset, static_cast<off_t>(count)); 538 TRY(buffer.write(((u8 const*)m_raw_inode.i_block) + offset, nread)); 539 return nread; 540 } 541 542 // Note: We bypass the const declaration of this method, but this is a strong 543 // requirement to be able to accomplish the read operation successfully. 
544 // We call this special method because it locks a separate mutex to ensure we 545 // update the block list of the inode safely, as the m_inode_lock is locked in 546 // shared mode. 547 TRY(const_cast<Ext2FSInode&>(*this).compute_block_list_with_exclusive_locking()); 548 549 if (m_block_list.is_empty()) { 550 dmesgln("Ext2FSInode[{}]::read_bytes(): Empty block list", identifier()); 551 return EIO; 552 } 553 554 bool allow_cache = !description || !description->is_direct(); 555 556 int const block_size = fs().block_size(); 557 558 BlockBasedFileSystem::BlockIndex first_block_logical_index = offset / block_size; 559 BlockBasedFileSystem::BlockIndex last_block_logical_index = (offset + count) / block_size; 560 if (last_block_logical_index >= m_block_list.size()) 561 last_block_logical_index = m_block_list.size() - 1; 562 563 int offset_into_first_block = offset % block_size; 564 565 size_t nread = 0; 566 auto remaining_count = min((off_t)count, (off_t)size() - offset); 567 568 dbgln_if(EXT2_VERY_DEBUG, "Ext2FSInode[{}]::read_bytes(): Reading up to {} bytes, {} bytes into inode to {}", identifier(), count, offset, buffer.user_or_kernel_ptr()); 569 570 for (auto bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; bi = bi.value() + 1) { 571 auto block_index = m_block_list[bi.value()]; 572 size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0; 573 size_t num_bytes_to_copy = min((size_t)block_size - offset_into_block, (size_t)remaining_count); 574 auto buffer_offset = buffer.offset(nread); 575 if (block_index.value() == 0) { 576 // This is a hole, act as if it's filled with zeroes. 
577 TRY(buffer_offset.memset(0, num_bytes_to_copy)); 578 } else { 579 if (auto result = fs().read_block(block_index, &buffer_offset, num_bytes_to_copy, offset_into_block, allow_cache); result.is_error()) { 580 dmesgln("Ext2FSInode[{}]::read_bytes(): Failed to read block {} (index {})", identifier(), block_index.value(), bi); 581 return result.release_error(); 582 } 583 } 584 remaining_count -= num_bytes_to_copy; 585 nread += num_bytes_to_copy; 586 } 587 588 return nread; 589} 590 591ErrorOr<void> Ext2FSInode::resize(u64 new_size) 592{ 593 auto old_size = size(); 594 if (old_size == new_size) 595 return {}; 596 597 if (!((u32)fs().get_features_readonly() & (u32)Ext2FS::FeaturesReadOnly::FileSize64bits) && (new_size >= static_cast<u32>(-1))) 598 return ENOSPC; 599 600 u64 block_size = fs().block_size(); 601 auto blocks_needed_before = ceil_div(old_size, block_size); 602 auto blocks_needed_after = ceil_div(new_size, block_size); 603 604 if constexpr (EXT2_DEBUG) { 605 dbgln("Ext2FSInode[{}]::resize(): Blocks needed before (size was {}): {}", identifier(), old_size, blocks_needed_before); 606 dbgln("Ext2FSInode[{}]::resize(): Blocks needed after (size is {}): {}", identifier(), new_size, blocks_needed_after); 607 } 608 609 if (blocks_needed_after > blocks_needed_before) { 610 auto additional_blocks_needed = blocks_needed_after - blocks_needed_before; 611 if (additional_blocks_needed > fs().super_block().s_free_blocks_count) 612 return ENOSPC; 613 } 614 615 if (m_block_list.is_empty()) 616 m_block_list = TRY(compute_block_list()); 617 618 if (blocks_needed_after > blocks_needed_before) { 619 auto blocks = TRY(fs().allocate_blocks(fs().group_index_from_inode(index()), blocks_needed_after - blocks_needed_before)); 620 TRY(m_block_list.try_extend(move(blocks))); 621 } else if (blocks_needed_after < blocks_needed_before) { 622 if constexpr (EXT2_VERY_DEBUG) { 623 dbgln("Ext2FSInode[{}]::resize(): Shrinking inode, old block list is {} entries:", identifier(), 
m_block_list.size());
            for (auto block_index : m_block_list) {
                dbgln(" # {}", block_index);
            }
        }
        // Shrinking: pop blocks off the end of the logical block list until it
        // has the post-resize length, returning each to the allocator.
        while (m_block_list.size() != blocks_needed_after) {
            auto block_index = m_block_list.take_last();
            // A zero block index denotes a hole (never-allocated block);
            // only genuinely allocated blocks are freed.
            if (block_index.value()) {
                if (auto result = fs().set_block_allocation_state(block_index, false); result.is_error()) {
                    dbgln("Ext2FSInode[{}]::resize(): Failed to free block {}: {}", identifier(), block_index, result.error());
                    return result;
                }
            }
        }
    }

    // Persist the updated direct/indirect block pointers to disk.
    TRY(flush_block_list());

    m_raw_inode.i_size = new_size;
    // For regular files, i_dir_acl doubles as the upper 32 bits of the size.
    if (Kernel::is_regular_file(m_raw_inode.i_mode))
        m_raw_inode.i_dir_acl = new_size >> 32;

    set_metadata_dirty(true);

    if (new_size > old_size) {
        // If we're growing the inode, make sure we zero out all the new space.
        // FIXME: There are definitely more efficient ways to achieve this.
        auto bytes_to_clear = new_size - old_size;
        auto clear_from = old_size;
        u8 zero_buffer[PAGE_SIZE] {};
        while (bytes_to_clear) {
            auto nwritten = TRY(write_bytes(clear_from, min(static_cast<u64>(sizeof(zero_buffer)), bytes_to_clear), UserOrKernelBuffer::for_kernel_buffer(zero_buffer), nullptr));
            // write_bytes() must make progress here, or we'd loop forever.
            VERIFY(nwritten != 0);
            bytes_to_clear -= nwritten;
            clear_from += nwritten;
        }
    }

    return {};
}

// Writes `count` bytes from `data` into this inode starting at `offset`.
// Caller must already hold m_inode_lock. Returns the number of bytes written.
// Grows the inode (via resize) as needed before copying.
ErrorOr<size_t> Ext2FSInode::write_bytes_locked(off_t offset, size_t count, UserOrKernelBuffer const& data, OpenFileDescription* description)
{
    VERIFY(m_inode_lock.is_locked());
    VERIFY(offset >= 0);

    if (count == 0)
        return 0;

    if (is_symlink()) {
        VERIFY(offset == 0);
        // Short symlink targets are stored inline in the i_block array
        // instead of occupying a data block.
        if (max((size_t)(offset + count), (size_t)m_raw_inode.i_size) < max_inline_symlink_length) {
            dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::write_bytes_locked(): Poking into i_block array for inline symlink ({} bytes)", identifier(), count);
            TRY(data.read(((u8*)m_raw_inode.i_block) + offset, count));
            if ((size_t)(offset + count) > (size_t)m_raw_inode.i_size)
                m_raw_inode.i_size = offset + count;
            set_metadata_dirty(true);
            return count;
        }
    }

    // Direct-I/O file descriptions bypass the block cache.
    bool allow_cache = !description || !description->is_direct();

    auto const block_size = fs().block_size();
    auto new_size = max(static_cast<u64>(offset) + count, size());

    // Grow the inode first so every block we are about to write exists.
    TRY(resize(new_size));

    if (m_block_list.is_empty())
        m_block_list = TRY(compute_block_list());

    if (m_block_list.is_empty()) {
        dbgln("Ext2FSInode[{}]::write_bytes(): Empty block list", identifier());
        return EIO;
    }

    BlockBasedFileSystem::BlockIndex first_block_logical_index = offset / block_size;
    BlockBasedFileSystem::BlockIndex last_block_logical_index = (offset + count) / block_size;
    if (last_block_logical_index >= m_block_list.size())
        last_block_logical_index = m_block_list.size() - 1;

    size_t offset_into_first_block = offset % block_size;

    size_t nwritten = 0;
    auto remaining_count = min((off_t)count, (off_t)new_size - offset);

    dbgln_if(EXT2_VERY_DEBUG, "Ext2FSInode[{}]::write_bytes_locked(): Writing {} bytes, {} bytes into inode from {}", identifier(), count, offset, data.user_or_kernel_ptr());

    // Copy block by block; only the first block may start at a non-zero
    // intra-block offset.
    for (auto bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; bi = bi.value() + 1) {
        size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0;
        size_t num_bytes_to_copy = min((size_t)block_size - offset_into_block, (size_t)remaining_count);
        dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::write_bytes_locked(): Writing block {} (offset_into_block: {})", identifier(), m_block_list[bi.value()], offset_into_block);
        if (auto result = fs().write_block(m_block_list[bi.value()], data.offset(nwritten), num_bytes_to_copy, offset_into_block, allow_cache); result.is_error()) {
            dbgln("Ext2FSInode[{}]::write_bytes_locked(): Failed to write block {} (index {})", identifier(), m_block_list[bi.value()], bi);
            return result.release_error();
        }
        remaining_count -= num_bytes_to_copy;
        nwritten += num_bytes_to_copy;
    }

    did_modify_contents();

    dbgln_if(EXT2_VERY_DEBUG, "Ext2FSInode[{}]::write_bytes_locked(): After write, i_size={}, i_blocks={} ({} blocks in list)", identifier(), size(), m_raw_inode.i_blocks, m_block_list.size());
    return nwritten;
}

// Invokes `callback` for every in-use directory entry (inode != 0),
// reading the directory one block at a time.
ErrorOr<void> Ext2FSInode::traverse_as_directory(Function<ErrorOr<void>(FileSystem::DirectoryEntryView const&)> callback) const
{
    VERIFY(is_directory());

    u8 buffer[max_block_size];
    auto buf = UserOrKernelBuffer::for_kernel_buffer(buffer);

    auto block_size = fs().block_size();
    auto file_size = size();

    // Directory entries are guaranteed not to span multiple blocks,
    // so we can iterate over blocks separately.

    for (u64 offset = 0; offset < file_size; offset += block_size) {
        TRY(read_bytes(offset, block_size, buf, nullptr));

        using ext2_extended_dir_entry = ext2_dir_entry_2;
        auto* entry = reinterpret_cast<ext2_extended_dir_entry*>(buffer);
        auto* entries_end = reinterpret_cast<ext2_extended_dir_entry*>(buffer + block_size);
        while (entry < entries_end) {
            // Entries with inode 0 are unused slots and are skipped.
            if (entry->inode != 0) {
                dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::traverse_as_directory(): inode {}, name_len: {}, rec_len: {}, file_type: {}, name: {}", identifier(), entry->inode, entry->name_len, entry->rec_len, entry->file_type, StringView(entry->name, entry->name_len));
                TRY(callback({ { entry->name, entry->name_len }, { fsid(), entry->inode }, entry->file_type }));
            }
            // rec_len chains each entry to the next one within the block.
            entry = (ext2_extended_dir_entry*)((char*)entry + entry->rec_len);
        }
    }

    return {};
}

// Serializes `entries` into the on-disk ext2 directory format and writes the
// result over the entire directory file. Mutates each entry's record_length.
ErrorOr<void> Ext2FSInode::write_directory(Vector<Ext2FSDirectoryEntry>& entries)
{
    MutexLocker locker(m_inode_lock);
    auto block_size = fs().block_size();

    // Calculate directory size and record length of entries so that
    // the following constraints are met:
    // - All used blocks must be entirely filled.
    // - Entries are aligned on a 4-byte boundary.
    // - No entry may span multiple blocks.
    size_t directory_size = 0;
    size_t space_in_block = block_size;
    for (size_t i = 0; i < entries.size(); ++i) {
        auto& entry = entries[i];
        entry.record_length = EXT2_DIR_REC_LEN(entry.name->length());
        space_in_block -= entry.record_length;
        if (i + 1 < entries.size()) {
            // If the next entry wouldn't fit in what's left of this block,
            // pad this entry's record length out to the block boundary.
            if (EXT2_DIR_REC_LEN(entries[i + 1].name->length()) > space_in_block) {
                entry.record_length += space_in_block;
                space_in_block = block_size;
            }
        } else {
            // The final entry always absorbs the rest of its block.
            entry.record_length += space_in_block;
        }
        directory_size += entry.record_length;
    }

    dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::write_directory(): New directory contents to write (size {}):", identifier(), directory_size);

    auto directory_data = TRY(ByteBuffer::create_uninitialized(directory_size));
    FixedMemoryStream stream { directory_data.bytes() };

    for (auto& entry : entries) {
        dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::write_directory(): Writing inode: {}, name_len: {}, rec_len: {}, file_type: {}, name: {}", identifier(), entry.inode_index, u16(entry.name->length()), u16(entry.record_length), u8(entry.file_type), entry.name);

        // On-disk entry layout: inode (u32), rec_len (u16), name_len (u8),
        // file_type (u8), then the name bytes.
        MUST(stream.write_value<u32>(entry.inode_index.value()));
        MUST(stream.write_value<u16>(entry.record_length));
        MUST(stream.write_value<u8>(entry.name->length()));
        MUST(stream.write_value<u8>(entry.file_type));
        MUST(stream.write_until_depleted(entry.name->bytes()));
        // Zero-fill the slack between the end of the name and rec_len;
        // 8 is the size of the fixed-width header written above.
        int padding = entry.record_length - entry.name->length() - 8;
        for (int j = 0; j < padding; ++j)
            MUST(stream.write_value<u8>(0));
    }

    auto serialized_bytes_count = TRY(stream.tell());
    VERIFY(serialized_bytes_count == directory_size);

    TRY(resize(serialized_bytes_count));

    auto buffer = UserOrKernelBuffer::for_kernel_buffer(directory_data.data());
    auto nwritten = TRY(write_bytes(0, serialized_bytes_count, buffer, nullptr));
    set_metadata_dirty(true);
    if (nwritten != directory_data.size())
        return EIO;
    return
{};
}

// Creates a new file or directory inode inside this directory.
ErrorOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(StringView name, mode_t mode, dev_t dev, UserID uid, GroupID gid)
{
    if (Kernel::is_directory(mode))
        return fs().create_directory(*this, name, mode, uid, gid);
    return fs().create_inode(*this, name, mode, dev, uid, gid);
}

// Links an existing inode into this directory under `name`,
// bumping the child's link count.
ErrorOr<void> Ext2FSInode::add_child(Inode& child, StringView name, mode_t mode)
{
    MutexLocker locker(m_inode_lock);
    VERIFY(is_directory());

    if (name.length() > EXT2_NAME_LEN)
        return ENAMETOOLONG;

    dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::add_child(): Adding inode {} with name '{}' and mode {:o} to directory {}", identifier(), child.index(), name, mode, index());

    // Collect the existing entries, rejecting a duplicate name along the way.
    Vector<Ext2FSDirectoryEntry> entries;
    TRY(traverse_as_directory([&](auto& entry) -> ErrorOr<void> {
        if (name == entry.name)
            return EEXIST;
        auto entry_name = TRY(KString::try_create(entry.name));
        TRY(entries.try_append({ move(entry_name), entry.inode.index(), entry.file_type }));
        return {};
    }));

    TRY(child.increment_link_count());

    auto entry_name = TRY(KString::try_create(name));
    TRY(entries.try_empend(move(entry_name), child.index(), to_ext2_file_type(mode)));

    // Rewrite the whole directory with the new entry appended.
    TRY(write_directory(entries));
    TRY(populate_lookup_cache());

    auto cache_entry_name = TRY(KString::try_create(name));
    TRY(m_lookup_cache.try_set(move(cache_entry_name), child.index()));
    did_add_child(child.identifier(), name);
    return {};
}

// Unlinks `name` from this directory and drops the target's link count.
ErrorOr<void> Ext2FSInode::remove_child(StringView name)
{
    MutexLocker locker(m_inode_lock);
    dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::remove_child(): Removing '{}'", identifier(), name);
    VERIFY(is_directory());

    TRY(populate_lookup_cache());

    auto it = m_lookup_cache.find(name);
    if (it == m_lookup_cache.end())
        return ENOENT;
    auto child_inode_index = (*it).value;

    InodeIdentifier child_id { fsid(), child_inode_index };

    // Rewrite the directory keeping every entry except the one being removed.
    Vector<Ext2FSDirectoryEntry> entries;
    TRY(traverse_as_directory([&](auto& entry) -> ErrorOr<void> {
        if (name != entry.name) {
            auto entry_name = TRY(KString::try_create(entry.name));
            TRY(entries.try_append({ move(entry_name), entry.inode.index(), entry.file_type }));
        }
        return {};
    }));

    TRY(write_directory(entries));

    m_lookup_cache.remove(it);

    auto child_inode = TRY(fs().get_inode(child_id));
    TRY(child_inode->decrement_link_count());

    did_remove_child(child_id, name);
    return {};
}

// Replaces the inode that `name` refers to with `child`, adjusting link
// counts on both the old and new inodes.
ErrorOr<void> Ext2FSInode::replace_child(StringView name, Inode& child)
{
    MutexLocker locker(m_inode_lock);
    dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::replace_child(): Replacing '{}' with inode {}", identifier(), name, child.index());
    VERIFY(is_directory());

    TRY(populate_lookup_cache());

    if (name.length() > EXT2_NAME_LEN)
        return ENAMETOOLONG;

    Vector<Ext2FSDirectoryEntry> entries;

    // Rebuild the entry list, swapping in the new child's index (and file
    // type) where the name matches, and remember the inode it displaced.
    Optional<InodeIndex> old_child_index;
    TRY(traverse_as_directory([&](auto& entry) -> ErrorOr<void> {
        auto is_replacing_this_inode = name == entry.name;
        auto inode_index = is_replacing_this_inode ? child.index() : entry.inode.index();

        auto entry_name = TRY(KString::try_create(entry.name));
        TRY(entries.try_empend(move(entry_name), inode_index, to_ext2_file_type(child.mode())));
        if (is_replacing_this_inode)
            old_child_index = entry.inode.index();

        return {};
    }));

    if (!old_child_index.has_value())
        return ENOENT;

    auto old_child = TRY(fs().get_inode({ fsid(), *old_child_index }));

    auto old_index_it = m_lookup_cache.find(name);
    VERIFY(old_index_it != m_lookup_cache.end());
    old_index_it->value = child.index();

    // NOTE: Between this line and the write_directory line, all operations must
    // be atomic. Any changes made should be reverted.
    TRY(child.increment_link_count());

    auto maybe_decrement_error = old_child->decrement_link_count();
    if (maybe_decrement_error.is_error()) {
        // Roll back the lookup-cache update and the link-count bump above.
        old_index_it->value = *old_child_index;
        MUST(child.decrement_link_count());
        return maybe_decrement_error;
    }

    // FIXME: The filesystem is left in an inconsistent state if this fails.
    // Revert the changes made above if we can't write_directory.
    // Ideally, decrement should be the last operation, but we currently
    // can't "un-write" a directory entry list.
    TRY(write_directory(entries));

    // TODO: Emit a did_replace_child event.

    return {};
}

// Builds m_lookup_cache (name -> inode index) from the on-disk directory
// contents. No-op if the cache is already populated.
// Caller must hold m_inode_lock exclusively.
ErrorOr<void> Ext2FSInode::populate_lookup_cache()
{
    VERIFY(m_inode_lock.is_exclusively_locked_by_current_thread());
    if (!m_lookup_cache.is_empty())
        return {};
    // Build into a temporary map so a traversal failure leaves the
    // (empty) cache untouched.
    HashMap<NonnullOwnPtr<KString>, InodeIndex> children;

    TRY(traverse_as_directory([&children](auto& entry) -> ErrorOr<void> {
        auto entry_name = TRY(KString::try_create(entry.name));
        TRY(children.try_set(move(entry_name), entry.inode.index()));
        return {};
    }));

    VERIFY(m_lookup_cache.is_empty());
    m_lookup_cache = move(children);
    return {};
}

// Resolves `name` in this directory via the lookup cache.
ErrorOr<NonnullRefPtr<Inode>> Ext2FSInode::lookup(StringView name)
{
    VERIFY(is_directory());
    dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]:lookup(): Looking up '{}'", identifier(), name);

    InodeIndex inode_index;
    {
        // Hold the inode lock only while consulting the cache; the
        // fs().get_inode() call below happens after it is released.
        MutexLocker locker(m_inode_lock);
        TRY(populate_lookup_cache());
        auto it = m_lookup_cache.find(name);
        if (it == m_lookup_cache.end()) {
            dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]:lookup(): '{}' not found", identifier(), name);
            return ENOENT;
        }
        inode_index = it->value;
    }

    return fs().get_inode({ fsid(), inode_index });
}

// Updates any subset of atime/ctime/mtime. The raw inode stores 32-bit
// second counts, so values whose tv_sec exceeds i32 range are rejected.
ErrorOr<void> Ext2FSInode::update_timestamps(Optional<Time> atime, Optional<Time> ctime, Optional<Time> mtime)
{
    MutexLocker locker(m_inode_lock);
    if (fs().is_readonly())
        return EROFS;
    // Validate all requested timestamps up front so we either apply the
    // whole update or none of it.
    if (atime.value_or({}).to_timespec().tv_sec > NumericLimits<i32>::max())
        return EINVAL;
    if (ctime.value_or({}).to_timespec().tv_sec > NumericLimits<i32>::max())
        return EINVAL;
    if (mtime.value_or({}).to_timespec().tv_sec > NumericLimits<i32>::max())
        return EINVAL;
    if (atime.has_value())
        m_raw_inode.i_atime = atime.value().to_timespec().tv_sec;
    if (ctime.has_value())
        m_raw_inode.i_ctime = ctime.value().to_timespec().tv_sec;
    if (mtime.has_value())
        m_raw_inode.i_mtime = mtime.value().to_timespec().tv_sec;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> Ext2FSInode::increment_link_count()
{
    MutexLocker locker(m_inode_lock);
    if (fs().is_readonly())
        return EROFS;
    // 65535 is the ceiling of the on-disk link count field.
    constexpr size_t max_link_count = 65535;
    if (m_raw_inode.i_links_count == max_link_count)
        return EMLINK;
    ++m_raw_inode.i_links_count;
    set_metadata_dirty(true);
    return {};
}

ErrorOr<void> Ext2FSInode::decrement_link_count()
{
    MutexLocker locker(m_inode_lock);
    if (fs().is_readonly())
        return EROFS;
    // Decrementing below zero would indicate a link-accounting bug.
    VERIFY(m_raw_inode.i_links_count);

    --m_raw_inode.i_links_count;
    set_metadata_dirty(true);
    if (m_raw_inode.i_links_count == 0)
        did_delete_self();

    // If we hold the last reference and no links remain, evict the inode
    // from the filesystem's inode cache.
    if (ref_count() == 1 && m_raw_inode.i_links_count == 0)
        fs().uncache_inode(index());

    return {};
}

// Sets the inode's mode bits. NOTE(review): unlike the timestamp/link-count
// mutators above, this does not check fs().is_readonly() — presumably the
// read-only check happens at a higher layer; confirm against callers.
ErrorOr<void> Ext2FSInode::chmod(mode_t mode)
{
    MutexLocker locker(m_inode_lock);
    if (m_raw_inode.i_mode == mode)
        return {};
    m_raw_inode.i_mode = mode;
    set_metadata_dirty(true);
    return {};
}

// Sets the inode's owner and group; no-op if both already match.
ErrorOr<void> Ext2FSInode::chown(UserID uid, GroupID gid)
{
    MutexLocker locker(m_inode_lock);
    if (m_raw_inode.i_uid == uid && m_raw_inode.i_gid == gid)
        return {};
    m_raw_inode.i_uid = uid.value();
    m_raw_inode.i_gid = gid.value();
    set_metadata_dirty(true);
    return {};
}

// Sets the file size to exactly `size`, allocating or freeing blocks via
// resize(). No-op if the size is unchanged.
ErrorOr<void> Ext2FSInode::truncate(u64 size)
{
    MutexLocker locker(m_inode_lock);
    if (static_cast<u64>(m_raw_inode.i_size) == size)
        return {};
    TRY(resize(size));
    set_metadata_dirty(true);
    return {};
}

// Returns the on-disk block number backing logical block `index`,
// or 0 if the index is out of range.
ErrorOr<int> Ext2FSInode::get_block_address(int index)
{
    MutexLocker locker(m_inode_lock);

    if (m_block_list.is_empty())
        m_block_list = TRY(compute_block_list());

    if (index < 0 || (size_t)index >= m_block_list.size())
        return 0;

    return m_block_list[index].value();
}

}