// Serenity Operating System
/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
6
7#include <Kernel/Bus/PCI/API.h>
8#include <Kernel/Bus/PCI/IDs.h>
9#include <Kernel/Bus/VirtIO/Console.h>
10#include <Kernel/Bus/VirtIO/Device.h>
11#include <Kernel/Bus/VirtIO/RNG.h>
12#include <Kernel/CommandLine.h>
13#include <Kernel/Sections.h>
14
15namespace Kernel::VirtIO {
16
17UNMAP_AFTER_INIT void detect()
18{
19 if (kernel_command_line().disable_virtio())
20 return;
21 MUST(PCI::enumerate([&](PCI::DeviceIdentifier const& device_identifier) {
22 if (device_identifier.hardware_id().is_null())
23 return;
24 // TODO: We should also be checking that the device_id is in between 0x1000 - 0x107F inclusive
25 if (device_identifier.hardware_id().vendor_id != PCI::VendorID::VirtIO)
26 return;
27 switch (device_identifier.hardware_id().device_id) {
28 case PCI::DeviceID::VirtIOConsole: {
29 auto& console = Console::must_create(device_identifier).leak_ref();
30 console.initialize();
31 break;
32 }
33 case PCI::DeviceID::VirtIOEntropy: {
34 auto& rng = RNG::must_create(device_identifier).leak_ref();
35 rng.initialize();
36 break;
37 }
38 case PCI::DeviceID::VirtIOGPU: {
39 // This should have been initialized by the graphics subsystem
40 break;
41 }
42 default:
43 dbgln_if(VIRTIO_DEBUG, "VirtIO: Unknown VirtIO device with ID: {}", device_identifier.hardware_id().device_id);
44 break;
45 }
46 }));
47}
48
49static StringView determine_device_class(PCI::DeviceIdentifier const& device_identifier)
50{
51 if (device_identifier.revision_id().value() == 0) {
52 // Note: If the device is a legacy (or transitional) device, therefore,
53 // probe the subsystem ID in the PCI header and figure out the
54 auto subsystem_device_id = device_identifier.subsystem_id().value();
55 switch (subsystem_device_id) {
56 case 1:
57 return "VirtIONetAdapter"sv;
58 case 2:
59 return "VirtIOBlockDevice"sv;
60 case 3:
61 return "VirtIOConsole"sv;
62 case 4:
63 return "VirtIORNG"sv;
64 default:
65 dbgln("VirtIO: Unknown subsystem_device_id {}", subsystem_device_id);
66 VERIFY_NOT_REACHED();
67 }
68 }
69
70 auto id = device_identifier.hardware_id();
71 VERIFY(id.vendor_id == PCI::VendorID::VirtIO);
72 switch (id.device_id) {
73 case PCI::DeviceID::VirtIONetAdapter:
74 return "VirtIONetAdapter"sv;
75 case PCI::DeviceID::VirtIOBlockDevice:
76 return "VirtIOBlockDevice"sv;
77 case PCI::DeviceID::VirtIOConsole:
78 return "VirtIOConsole"sv;
79 case PCI::DeviceID::VirtIOEntropy:
80 return "VirtIORNG"sv;
81 case PCI::DeviceID::VirtIOGPU:
82 return "VirtIOGPU"sv;
83 default:
84 dbgln("VirtIO: Unknown device_id {}", id.vendor_id);
85 VERIFY_NOT_REACHED();
86 }
87}
88
// Bring the device from "found on the PCI bus" to "driver attached": parse the
// vendor-specific PCI capabilities describing VirtIO configuration structures,
// map the referenced BARs, enable interrupts, and perform the first steps of
// the VirtIO status handshake (reset -> ACKNOWLEDGE -> DRIVER).
UNMAP_AFTER_INIT void Device::initialize()
{
    enable_bus_mastering(device_identifier());

    auto capabilities = device_identifier().capabilities();
    for (auto& capability : capabilities) {
        if (capability.id().value() == PCI::Capabilities::ID::VendorSpecific) {
            // We have a virtio_pci_cap
            Configuration config {};
            auto raw_config_type = capability.read8(0x3);
            // NOTE(review): an out-of-range config type aborts all capability
            // parsing (return), while later malformed fields only stop the
            // loop (break) or skip the entry (continue) — presumably
            // intentional, but worth confirming.
            if (raw_config_type < static_cast<u8>(ConfigurationType::Common) || raw_config_type > static_cast<u8>(ConfigurationType::PCI)) {
                dbgln("{}: Unknown capability configuration type: {}", m_class_name, raw_config_type);
                return;
            }
            config.cfg_type = static_cast<ConfigurationType>(raw_config_type);
            auto cap_length = capability.read8(0x2);
            if (cap_length < 0x10) {
                dbgln("{}: Unexpected capability size: {}", m_class_name, cap_length);
                break;
            }
            config.bar = capability.read8(0x4);
            if (config.bar > 0x5) { // only BAR indices 0..5 are accepted
                dbgln("{}: Unexpected capability bar value: {}", m_class_name, config.bar);
                break;
            }
            config.offset = capability.read32(0x8);
            config.length = capability.read32(0xc);
            // NOTE: Configuration length of zero is an invalid configuration that should be ignored.
            if (config.length == 0)
                continue;
            dbgln_if(VIRTIO_DEBUG, "{}: Found configuration {}, bar: {}, offset: {}, length: {}", m_class_name, (u32)config.cfg_type, config.bar, config.offset, config.length);
            if (config.cfg_type == ConfigurationType::Common)
                m_use_mmio = true; // a common config structure marks this as a modern (non-legacy) device
            else if (config.cfg_type == ConfigurationType::Notify)
                m_notify_multiplier = capability.read32(0x10);

            m_configs.append(config);
        }
    }

    if (m_use_mmio) {
        // Modern device: map every BAR referenced by a configuration structure.
        for (auto& cfg : m_configs) {
            auto mapping_io_window = IOWindow::create_for_pci_device_bar(device_identifier(), static_cast<PCI::HeaderType0BaseRegister>(cfg.bar)).release_value_but_fixme_should_propagate_errors();
            m_register_bases[cfg.bar] = move(mapping_io_window);
        }
        m_common_cfg = get_config(ConfigurationType::Common, 0);
        m_notify_cfg = get_config(ConfigurationType::Notify, 0);
        m_isr_cfg = get_config(ConfigurationType::ISR, 0);
    } else {
        // Legacy device: all registers live behind BAR0.
        auto mapping_io_window = IOWindow::create_for_pci_device_bar(device_identifier(), PCI::HeaderType0BaseRegister::BAR0).release_value_but_fixme_should_propagate_errors();
        m_register_bases[0] = move(mapping_io_window);
    }

    // Note: We enable interrupts at least after the m_register_bases[0] ptr is
    // assigned with an IOWindow, to ensure that in case of getting an interrupt
    // we can access registers from that IO window range.
    PCI::enable_interrupt_line(device_identifier());
    enable_irq();

    // Status handshake: reset, then ACKNOWLEDGE ("we saw the device"),
    // then DRIVER ("we know how to drive it").
    reset_device();
    set_status_bit(DEVICE_STATUS_ACKNOWLEDGE);

    set_status_bit(DEVICE_STATUS_DRIVER);
}
153
// Construct the VirtIO device wrapper for an enumerated PCI device and
// register its IRQ handler on the device's PCI interrupt line.
// NOTE(review): the const_cast is needed because the PCI::Device base takes a
// mutable reference — confirm the base class never actually mutates it.
UNMAP_AFTER_INIT VirtIO::Device::Device(PCI::DeviceIdentifier const& device_identifier)
    : PCI::Device(const_cast<PCI::DeviceIdentifier&>(device_identifier))
    , IRQHandler(device_identifier.interrupt_line().value())
    , m_class_name(VirtIO::determine_device_class(device_identifier))
{
    dbgln("{}: Found @ {}", m_class_name, device_identifier.address());
}
161
162void Device::notify_queue(u16 queue_index)
163{
164 dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", m_class_name, queue_index);
165 if (!m_notify_cfg)
166 base_io_window().write16(REG_QUEUE_NOTIFY, queue_index);
167 else
168 config_write16(*m_notify_cfg, get_queue(queue_index).notify_offset() * m_notify_multiplier, queue_index);
169}
170
171auto Device::mapping_for_bar(u8 bar) -> IOWindow&
172{
173 VERIFY(m_use_mmio);
174 VERIFY(m_register_bases[bar]);
175 return *m_register_bases[bar];
176}
177
178u8 Device::config_read8(Configuration const& config, u32 offset)
179{
180 return mapping_for_bar(config.bar).read8(config.offset + offset);
181}
182
183u16 Device::config_read16(Configuration const& config, u32 offset)
184{
185 return mapping_for_bar(config.bar).read16(config.offset + offset);
186}
187
188u32 Device::config_read32(Configuration const& config, u32 offset)
189{
190 return mapping_for_bar(config.bar).read32(config.offset + offset);
191}
192
193void Device::config_write8(Configuration const& config, u32 offset, u8 value)
194{
195 mapping_for_bar(config.bar).write8(config.offset + offset, value);
196}
197
198void Device::config_write16(Configuration const& config, u32 offset, u16 value)
199{
200 mapping_for_bar(config.bar).write16(config.offset + offset, value);
201}
202
203void Device::config_write32(Configuration const& config, u32 offset, u32 value)
204{
205 mapping_for_bar(config.bar).write32(config.offset + offset, value);
206}
207
208void Device::config_write64(Configuration const& config, u32 offset, u64 value)
209{
210 mapping_for_bar(config.bar).write32(config.offset + offset, (u32)(value & 0xFFFFFFFF));
211 mapping_for_bar(config.bar).write32(config.offset + offset + 4, (u32)(value >> 32));
212}
213
214u8 Device::read_status_bits()
215{
216 if (!m_common_cfg)
217 return base_io_window().read8(REG_DEVICE_STATUS);
218 return config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS);
219}
220
221void Device::mask_status_bits(u8 status_mask)
222{
223 m_status &= status_mask;
224 if (!m_common_cfg)
225 base_io_window().write8(REG_DEVICE_STATUS, m_status);
226 else
227 config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
228}
229
230void Device::set_status_bit(u8 status_bit)
231{
232 m_status |= status_bit;
233 if (!m_common_cfg)
234 base_io_window().write8(REG_DEVICE_STATUS, m_status);
235 else
236 config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, m_status);
237}
238
239u64 Device::get_device_features()
240{
241 if (!m_common_cfg)
242 return base_io_window().read32(REG_DEVICE_FEATURES);
243 config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 0);
244 auto lower_bits = config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE);
245 config_write32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE_SELECT, 1);
246 u64 upper_bits = (u64)config_read32(*m_common_cfg, COMMON_CFG_DEVICE_FEATURE) << 32;
247 return upper_bits | lower_bits;
248}
249
250IOWindow& Device::base_io_window()
251{
252 VERIFY(m_register_bases[0]);
253 return *m_register_bases[0];
254}
255
256bool Device::accept_device_features(u64 device_features, u64 accepted_features)
257{
258 VERIFY(!m_did_accept_features);
259 m_did_accept_features = true;
260
261 if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
262 accepted_features |= VIRTIO_F_VERSION_1; // let the device know were not a legacy driver
263 }
264
265 if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
266 dbgln_if(VIRTIO_DEBUG, "{}: packed queues not yet supported", m_class_name);
267 accepted_features &= ~(VIRTIO_F_RING_PACKED);
268 }
269
270 // TODO: implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
271 if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
272 // accepted_features |= VIRTIO_F_INDIRECT_DESC;
273 }
274
275 if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
276 accepted_features |= VIRTIO_F_IN_ORDER;
277 }
278
279 dbgln_if(VIRTIO_DEBUG, "{}: Device features: {}", m_class_name, device_features);
280 dbgln_if(VIRTIO_DEBUG, "{}: Accepted features: {}", m_class_name, accepted_features);
281
282 if (!m_common_cfg) {
283 base_io_window().write32(REG_GUEST_FEATURES, accepted_features);
284 } else {
285 config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 0);
286 config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features);
287 config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE_SELECT, 1);
288 config_write32(*m_common_cfg, COMMON_CFG_DRIVER_FEATURE, accepted_features >> 32);
289 }
290 set_status_bit(DEVICE_STATUS_FEATURES_OK);
291 m_status = read_status_bits();
292 if (!(m_status & DEVICE_STATUS_FEATURES_OK)) {
293 set_status_bit(DEVICE_STATUS_FAILED);
294 dbgln("{}: Features not accepted by host!", m_class_name);
295 return false;
296 }
297
298 m_accepted_features = accepted_features;
299 dbgln_if(VIRTIO_DEBUG, "{}: Features accepted by host", m_class_name);
300 return true;
301}
302
303void Device::reset_device()
304{
305 dbgln_if(VIRTIO_DEBUG, "{}: Reset device", m_class_name);
306 if (!m_common_cfg) {
307 mask_status_bits(0);
308 while (read_status_bits() != 0) {
309 // TODO: delay a bit?
310 }
311 return;
312 }
313 config_write8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS, 0);
314 while (config_read8(*m_common_cfg, COMMON_CFG_DEVICE_STATUS) != 0) {
315 // TODO: delay a bit?
316 }
317}
318
// Allocate virtqueue `queue_index` and register it with the device through the
// common configuration structure. Returns false on hard failure (no common
// config, or queue allocation failed); returns true without allocating when
// the device reports the queue as unavailable (size 0).
bool Device::setup_queue(u16 queue_index)
{
    if (!m_common_cfg)
        return false;

    // Select the queue first; subsequent COMMON_CFG_QUEUE_* accesses refer to
    // the selected queue.
    config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
    u16 queue_size = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_SIZE);
    if (queue_size == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] is unavailable!", m_class_name, queue_index);
        return true;
    }

    u16 queue_notify_offset = config_read16(*m_common_cfg, COMMON_CFG_QUEUE_NOTIFY_OFF);

    auto queue_or_error = Queue::try_create(queue_size, queue_notify_offset);
    if (queue_or_error.is_error())
        return false;
    auto queue = queue_or_error.release_value();

    // Hand the addresses of the three virtqueue areas to the device.
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DESC, queue->descriptor_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DRIVER, queue->driver_area().get());
    config_write64(*m_common_cfg, COMMON_CFG_QUEUE_DEVICE, queue->device_area().get());

    dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] configured with size: {}", m_class_name, queue_index, queue_size);

    m_queues.append(move(queue));
    return true;
}
347
348bool Device::activate_queue(u16 queue_index)
349{
350 if (!m_common_cfg)
351 return false;
352
353 config_write16(*m_common_cfg, COMMON_CFG_QUEUE_SELECT, queue_index);
354 config_write16(*m_common_cfg, COMMON_CFG_QUEUE_ENABLE, true);
355
356 dbgln_if(VIRTIO_DEBUG, "{}: Queue[{}] activated", m_class_name, queue_index);
357 return true;
358}
359
360bool Device::setup_queues(u16 requested_queue_count)
361{
362 VERIFY(!m_did_setup_queues);
363 m_did_setup_queues = true;
364
365 if (m_common_cfg) {
366 auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
367 if (requested_queue_count == 0) {
368 m_queue_count = maximum_queue_count;
369 } else if (requested_queue_count > maximum_queue_count) {
370 dbgln("{}: {} queues requested but only {} available!", m_class_name, m_queue_count, maximum_queue_count);
371 return false;
372 } else {
373 m_queue_count = requested_queue_count;
374 }
375 } else {
376 m_queue_count = requested_queue_count;
377 dbgln("{}: device's available queue count could not be determined!", m_class_name);
378 }
379
380 dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", m_class_name, m_queue_count);
381 for (u16 i = 0; i < m_queue_count; i++) {
382 if (!setup_queue(i))
383 return false;
384 }
385 for (u16 i = 0; i < m_queue_count; i++) { // Queues can only be activated *after* all others queues were also configured
386 if (!activate_queue(i))
387 return false;
388 }
389 return true;
390}
391
392void Device::finish_init()
393{
394 VERIFY(m_did_accept_features); // ensure features were negotiated
395 VERIFY(m_did_setup_queues); // ensure queues were set-up
396 VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
397
398 set_status_bit(DEVICE_STATUS_DRIVER_OK);
399 dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
400}
401
402u8 Device::isr_status()
403{
404 if (!m_isr_cfg)
405 return base_io_window().read8(REG_ISR_STATUS);
406 return config_read8(*m_isr_cfg, 0);
407}
408
// Dispatch a VirtIO interrupt to config-change and/or queue handling.
// Returns false only when the interrupt carries no recognized cause.
// NOTE(review): reading the ISR status register is presumably what
// acknowledges the interrupt on the device side — confirm against the spec.
bool Device::handle_irq(RegisterState const&)
{
    u8 isr_type = isr_status();
    if ((isr_type & (QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT)) == 0) {
        dbgln_if(VIRTIO_DEBUG, "{}: Handling interrupt with unknown type: {}", class_name(), isr_type);
        return false;
    }
    if (isr_type & DEVICE_CONFIG_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Device config interrupt!", class_name());
        // A failed config-change handler marks the device FAILED but still
        // lets the interrupt count as handled.
        if (!handle_device_config_change()) {
            set_status_bit(DEVICE_STATUS_FAILED);
            dbgln("{}: Failed to handle device config change!", class_name());
        }
    }
    if (isr_type & QUEUE_INTERRUPT) {
        dbgln_if(VIRTIO_DEBUG, "{}: VirtIO Queue interrupt!", class_name());
        // Only the first queue with new data is serviced here; any others are
        // presumably picked up by subsequent interrupts.
        for (size_t i = 0; i < m_queues.size(); i++) {
            if (get_queue(i).new_data_available()) {
                handle_queue_update(i);
                return true;
            }
        }
        dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", class_name());
    }
    return true;
}
435
436void Device::supply_chain_and_notify(u16 queue_index, QueueChain& chain)
437{
438 auto& queue = get_queue(queue_index);
439 VERIFY(&chain.queue() == &queue);
440 VERIFY(queue.lock().is_locked());
441 chain.submit_to_queue();
442 if (queue.should_notify())
443 notify_queue(queue_index);
444}
445
446}