/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Atomic.h>
#include <Kernel/Bus/VirtIO/Queue.h>

namespace Kernel::VirtIO {

ErrorOr<NonnullOwnPtr<Queue>> Queue::try_create(u16 queue_size, u16 notify_offset)
{
    size_t size_of_descriptors = sizeof(QueueDescriptor) * queue_size;
    size_t size_of_driver = sizeof(QueueDriver) + queue_size * sizeof(u16);
    size_t size_of_device = sizeof(QueueDevice) + queue_size * sizeof(QueueDeviceItem);
    auto queue_region_size = TRY(Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device));
    OwnPtr<Memory::Region> queue_region;
    // A single page is always physically contiguous, so only multi-page queues
    // need an explicitly contiguous allocation for the device to address them.
    if (queue_region_size <= PAGE_SIZE)
        queue_region = TRY(MM.allocate_kernel_region(queue_region_size, "VirtIO Queue"sv, Memory::Region::Access::ReadWrite));
    else
        queue_region = TRY(MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue"sv, Memory::Region::Access::ReadWrite));
    return adopt_nonnull_own_or_enomem(new (nothrow) Queue(queue_region.release_nonnull(), queue_size, notify_offset));
}

Queue::Queue(NonnullOwnPtr<Memory::Region> queue_region, u16 queue_size, u16 notify_offset)
    : m_queue_size(queue_size)
    , m_notify_offset(notify_offset)
    , m_free_buffers(queue_size)
    , m_queue_region(move(queue_region))
{
    size_t size_of_descriptors = sizeof(QueueDescriptor) * queue_size;
    size_t size_of_driver = sizeof(QueueDriver) + queue_size * sizeof(u16);
    // TODO: ensure alignment!!!
    // The region is laid out as: descriptor table, then the driver (available)
    // ring, then the device (used) ring.
    u8* ptr = m_queue_region->vaddr().as_ptr();
    memset(ptr, 0, m_queue_region->size());
    m_descriptors = reinterpret_cast<QueueDescriptor*>(ptr);
    m_driver = reinterpret_cast<QueueDriver*>(ptr + size_of_descriptors);
    m_device = reinterpret_cast<QueueDevice*>(ptr + size_of_descriptors + size_of_driver);

    for (auto i = 0; i + 1 < queue_size; i++)
        m_descriptors[i].next = i + 1; // link all descriptors into one free chain

    enable_interrupts();
}

Queue::~Queue() = default;

void Queue::enable_interrupts()
{
    SpinlockLocker lock(m_lock);
    // Clear the "no interrupt" hint so the device sends used-buffer notifications.
    m_driver->flags = 0;
}

void Queue::disable_interrupts()
{
    SpinlockLocker lock(m_lock);
    // Set the "no interrupt" hint (VIRTQ_AVAIL_F_NO_INTERRUPT) to suppress used-buffer notifications.
    m_driver->flags = 1;
}

bool Queue::new_data_available() const
{
    auto const index = AK::atomic_load(&m_device->index, AK::MemoryOrder::memory_order_relaxed);
    auto const used_tail = AK::atomic_load(&m_used_tail, AK::MemoryOrder::memory_order_relaxed);
    return index != used_tail;
}

QueueChain Queue::pop_used_buffer_chain(size_t& used)
{
    VERIFY(m_lock.is_locked());
    if (!new_data_available()) {
        used = 0;
        return QueueChain(*this);
    }

    // Make sure we read the used ring entry only after observing the device's index update.
    full_memory_barrier();

    // Determine used length
    used = m_device->rings[m_used_tail % m_queue_size].length;

    // Determine start, end and number of nodes in chain
    auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
    size_t length_of_chain = 1;
    auto last_index = descriptor_index;
    while (m_descriptors[last_index].flags & VIRTQ_DESC_F_NEXT) {
        ++length_of_chain;
        last_index = m_descriptors[last_index].next;
    }

    // We are now done with this buffer chain
    m_used_tail++;

    return QueueChain(*this, descriptor_index, last_index, length_of_chain);
}

void Queue::discard_used_buffers()
{
    VERIFY(m_lock.is_locked());
    size_t used;
    for (auto buffer = pop_used_buffer_chain(used); !buffer.is_empty(); buffer = pop_used_buffer_chain(used)) {
        buffer.release_buffer_slots_to_queue();
    }
}

void Queue::reclaim_buffer_chain(u16 chain_start_index, u16 chain_end_index, size_t length_of_chain)
{
    VERIFY(m_lock.is_locked());
    // Prepend the chain to the free list by linking its tail to the old free head.
    m_descriptors[chain_end_index].next = m_free_head;
    m_free_head = chain_start_index;
    m_free_buffers += length_of_chain;
}

bool Queue::has_free_slots() const
{
    auto const free_buffers = AK::atomic_load(&m_free_buffers, AK::MemoryOrder::memory_order_relaxed);
    return free_buffers > 0;
}

Optional<u16> Queue::take_free_slot()
{
    VERIFY(m_lock.is_locked());
    if (has_free_slots()) {
        auto descriptor_index = m_free_head;
        m_free_head = m_descriptors[descriptor_index].next;
        --m_free_buffers;
        return descriptor_index;
    }

    return {};
}

bool Queue::should_notify() const
{
    VERIFY(m_lock.is_locked());
    // The device sets VIRTQ_USED_F_NO_NOTIFY when it does not need a doorbell write.
    auto device_flags = m_device->flags;
    return !(device_flags & VIRTQ_USED_F_NO_NOTIFY);
}

bool QueueChain::add_buffer_to_chain(PhysicalAddress buffer_start, size_t buffer_length, BufferType buffer_type)
{
    VERIFY(m_queue.lock().is_locked());

    // Ensure that no readable pages will be inserted after a writable one, as required by the VirtIO spec
    VERIFY(buffer_type == BufferType::DeviceWritable || !m_chain_has_writable_pages);
    m_chain_has_writable_pages |= (buffer_type == BufferType::DeviceWritable);

    // Take a free slot from the queue
    auto descriptor_index = m_queue.take_free_slot();
    if (!descriptor_index.has_value())
        return false;

    if (!m_start_of_chain_index.has_value()) {
        // Set start of chain if it hasn't been set
        m_start_of_chain_index = descriptor_index.value();
    } else {
        // Link from previous element in QueueChain
        m_queue.m_descriptors[m_end_of_chain_index.value()].flags |= VIRTQ_DESC_F_NEXT;
        m_queue.m_descriptors[m_end_of_chain_index.value()].next = descriptor_index.value();
    }

    // Update end of chain
    m_end_of_chain_index = descriptor_index.value();
    ++m_chain_length;

    // Populate buffer info; the descriptor's length field is only 32 bits wide.
    VERIFY(buffer_length <= NumericLimits<u32>::max());
    m_queue.m_descriptors[descriptor_index.value()].address = static_cast<u64>(buffer_start.get());
    m_queue.m_descriptors[descriptor_index.value()].flags = static_cast<u16>(buffer_type);
    m_queue.m_descriptors[descriptor_index.value()].length = static_cast<u32>(buffer_length);

    return true;
}

void QueueChain::submit_to_queue()
{
    VERIFY(m_queue.lock().is_locked());
    VERIFY(m_start_of_chain_index.has_value());

    auto next_index = m_queue.m_driver_index_shadow % m_queue.m_queue_size;
    m_queue.m_driver->rings[next_index] = m_start_of_chain_index.value();
    m_queue.m_driver_index_shadow++;
    // Make the ring entry visible to the device before publishing the new index.
    full_memory_barrier();
    m_queue.m_driver->index = m_queue.m_driver_index_shadow;

    // Reset internal chain state
    m_start_of_chain_index = m_end_of_chain_index = {};
    m_chain_has_writable_pages = false;
    m_chain_length = 0;
}

void QueueChain::release_buffer_slots_to_queue()
{
    VERIFY(m_queue.lock().is_locked());
    if (m_start_of_chain_index.has_value()) {
        // Add the currently stored chain back to the queue's free pool
        m_queue.reclaim_buffer_chain(m_start_of_chain_index.value(), m_end_of_chain_index.value(), m_chain_length);
        // Reset internal chain state
        m_start_of_chain_index = m_end_of_chain_index = {};
        m_chain_has_writable_pages = false;
        m_chain_length = 0;
    }
}

}
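
// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of the upstream file): how a
// driver might supply a device-readable buffer and later drain the used ring.
// The helper names `example_supply_buffer` and `example_drain_used_ring` are
// hypothetical; the doorbell write itself is transport-specific (it uses the
// notify_offset passed to Queue::try_create), so it is left to the caller.
// This assumes the Kernel::VirtIO namespace and a publicly constructible
// QueueChain(Queue&), as used inside this file. Kept in a comment so the
// translation unit compiles unchanged.
//
//     static bool example_supply_buffer(Queue& queue, PhysicalAddress paddr, size_t size)
//     {
//         SpinlockLocker lock(queue.lock());
//         QueueChain chain(queue);
//         // Stage the buffer in a free descriptor slot; fails if the queue is exhausted.
//         if (!chain.add_buffer_to_chain(paddr, size, BufferType::DeviceReadable))
//             return false;
//         // Publish the chain head on the driver (available) ring.
//         chain.submit_to_queue();
//         // The caller should ring the transport doorbell only if this returns true.
//         return queue.should_notify();
//     }
//
//     // Called from the device's interrupt handler once the used ring advances.
//     static void example_drain_used_ring(Queue& queue)
//     {
//         SpinlockLocker lock(queue.lock());
//         size_t used = 0;
//         for (auto chain = queue.pop_used_buffer_chain(used); !chain.is_empty(); chain = queue.pop_used_buffer_chain(used)) {
//             // `used` is the byte count the device reports having written;
//             // process the data, then hand the descriptors back to the free list.
//             chain.release_buffer_slots_to_queue();
//         }
//     }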