/*
 * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BinaryBufferWriter.h>
#include <Kernel/Arch/Delay.h>
#include <Kernel/Bus/PCI/API.h>
#include <Kernel/Bus/PCI/IDs.h>
#include <Kernel/Devices/DeviceManagement.h>
#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Graphics/VirtIOGPU/Console.h>
#include <Kernel/Graphics/VirtIOGPU/DisplayConnector.h>
#include <Kernel/Graphics/VirtIOGPU/GPU3DDevice.h>
#include <Kernel/Graphics/VirtIOGPU/GraphicsAdapter.h>

namespace Kernel {

#define DEVICE_EVENTS_READ 0x0
#define DEVICE_EVENTS_CLEAR 0x4
#define DEVICE_NUM_SCANOUTS 0x8

ErrorOr<bool> VirtIOGraphicsAdapter::probe(PCI::DeviceIdentifier const& device_identifier)
{
    return device_identifier.hardware_id().vendor_id == PCI::VendorID::VirtIO;
}

ErrorOr<NonnullLockRefPtr<GenericGraphicsAdapter>> VirtIOGraphicsAdapter::create(PCI::DeviceIdentifier const& device_identifier)
{
    // Set up the memory transfer region
    auto scratch_space_region = TRY(MM.allocate_contiguous_kernel_region(
        32 * PAGE_SIZE,
        "VirtGPU Scratch Space"sv,
        Memory::Region::Access::ReadWrite));

    auto active_context_ids = TRY(Bitmap::create(VREND_MAX_CTX, false));
    auto adapter = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) VirtIOGraphicsAdapter(device_identifier, move(active_context_ids), move(scratch_space_region))));
    adapter->initialize();
    TRY(adapter->initialize_adapter());
    return adapter;
}

ErrorOr<void> VirtIOGraphicsAdapter::initialize_adapter()
{
    VERIFY(m_num_scanouts <= VIRTIO_GPU_MAX_SCANOUTS);
    TRY(initialize_3d_device());
    for (size_t index = 0; index < m_num_scanouts; index++) {
        auto display_connector = VirtIODisplayConnector::must_create(*this, index);
        m_scanouts[index].display_connector = display_connector;
        TRY(query_and_set_edid(index, *display_connector));
        display_connector->set_safe_mode_setting_after_initialization({});
        display_connector->initialize_console({});
    }
    return {};
}

ErrorOr<void> VirtIOGraphicsAdapter::mode_set_resolution(Badge<VirtIODisplayConnector>, VirtIODisplayConnector& connector, size_t width, size_t height)
{
    SpinlockLocker locker(m_operation_lock);
    VERIFY(connector.scanout_id() < VIRTIO_GPU_MAX_SCANOUTS);
    auto rounded_buffer_size = TRY(calculate_framebuffer_size(width, height));
    TRY(attach_physical_range_to_framebuffer(connector, true, 0, rounded_buffer_size));
    return {};
}

void VirtIOGraphicsAdapter::set_dirty_displayed_rect(Badge<VirtIODisplayConnector>, VirtIODisplayConnector& connector, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, bool main_buffer)
{
    VERIFY(m_operation_lock.is_locked());
    VERIFY(connector.scanout_id() < VIRTIO_GPU_MAX_SCANOUTS);
    Scanout::PhysicalBuffer& buffer = main_buffer ? m_scanouts[connector.scanout_id().value()].main_buffer : m_scanouts[connector.scanout_id().value()].back_buffer;
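    // Track a single pending dirty rect per buffer: adopt the new rect if none is pending,
    // otherwise grow the pending rect to the bounding box of both.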
    if (buffer.dirty_rect.width == 0 || buffer.dirty_rect.height == 0) {
        buffer.dirty_rect = dirty_rect;
    } else {
        auto current_dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
        auto current_dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
        buffer.dirty_rect.x = min(buffer.dirty_rect.x, dirty_rect.x);
        buffer.dirty_rect.y = min(buffer.dirty_rect.y, dirty_rect.y);
        buffer.dirty_rect.width = max(current_dirty_right, dirty_rect.x + dirty_rect.width) - buffer.dirty_rect.x;
        buffer.dirty_rect.height = max(current_dirty_bottom, dirty_rect.y + dirty_rect.height) - buffer.dirty_rect.y;
    }
}

ErrorOr<void> VirtIOGraphicsAdapter::flush_displayed_image(Badge<VirtIODisplayConnector>, VirtIODisplayConnector& connector, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect, bool main_buffer)
{
    VERIFY(m_operation_lock.is_locked());
    VERIFY(connector.scanout_id() < VIRTIO_GPU_MAX_SCANOUTS);
    Scanout::PhysicalBuffer& buffer = main_buffer ? m_scanouts[connector.scanout_id().value()].main_buffer : m_scanouts[connector.scanout_id().value()].back_buffer;
    TRY(flush_displayed_image(buffer.resource_id, dirty_rect));
    buffer.dirty_rect = {};
    return {};
}

ErrorOr<void> VirtIOGraphicsAdapter::transfer_framebuffer_data_to_host(Badge<VirtIODisplayConnector>, VirtIODisplayConnector& connector, Graphics::VirtIOGPU::Protocol::Rect const& rect, bool main_buffer)
{
    VERIFY(m_operation_lock.is_locked());
    VERIFY(connector.scanout_id() < VIRTIO_GPU_MAX_SCANOUTS);
    Scanout::PhysicalBuffer& buffer = main_buffer ? m_scanouts[connector.scanout_id().value()].main_buffer : m_scanouts[connector.scanout_id().value()].back_buffer;
    TRY(transfer_framebuffer_data_to_host(connector.scanout_id(), buffer.resource_id, rect));
    return {};
}

ErrorOr<void> VirtIOGraphicsAdapter::attach_physical_range_to_framebuffer(VirtIODisplayConnector& connector, bool main_buffer, size_t framebuffer_offset, size_t framebuffer_size)
{
    VERIFY(m_operation_lock.is_locked());
    Scanout::PhysicalBuffer& buffer = main_buffer ? m_scanouts[connector.scanout_id().value()].main_buffer : m_scanouts[connector.scanout_id().value()].back_buffer;
    buffer.framebuffer_offset = framebuffer_offset;

    // 1. Create BUFFER using VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
    if (buffer.resource_id.value() != 0) {
        // FIXME: Do we need to remove the resource regardless of this condition?
        // Do we need to remove it if any of the code below fails for some reason?
        TRY(delete_resource(buffer.resource_id));
    }

    auto display_info = connector.display_information({});
    buffer.resource_id = TRY(create_2d_resource(display_info.rect));

    // 2. Attach backing storage using VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
    TRY(ensure_backing_storage(buffer.resource_id, connector.framebuffer_region(), buffer.framebuffer_offset, framebuffer_size));
    // 3. Use VIRTIO_GPU_CMD_SET_SCANOUT to link the framebuffer to a display scanout.
    TRY(set_scanout_resource(connector.scanout_id(), buffer.resource_id, display_info.rect));
    // 4. Render our test pattern
    connector.draw_ntsc_test_pattern({});
    // 5. Use VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D to update the host resource from guest memory.
    TRY(transfer_framebuffer_data_to_host(connector.scanout_id(), buffer.resource_id, display_info.rect));
    // 6. Use VIRTIO_GPU_CMD_RESOURCE_FLUSH to flush the updated resource to the display.
    TRY(flush_displayed_image(buffer.resource_id, display_info.rect));

    // Make sure we constrain the existing dirty rect (if any)
    if (buffer.dirty_rect.width != 0 || buffer.dirty_rect.height != 0) {
        auto dirty_right = buffer.dirty_rect.x + buffer.dirty_rect.width;
        auto dirty_bottom = buffer.dirty_rect.y + buffer.dirty_rect.height;
        buffer.dirty_rect.width = min(dirty_right, display_info.rect.x + display_info.rect.width) - buffer.dirty_rect.x;
        buffer.dirty_rect.height = min(dirty_bottom, display_info.rect.y + display_info.rect.height) - buffer.dirty_rect.y;
    }
    return {};
}

VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(PCI::DeviceIdentifier const& device_identifier, Bitmap&& active_context_ids, NonnullOwnPtr<Memory::Region> scratch_space_region)
    : VirtIO::Device(device_identifier)
    , m_scratch_space(move(scratch_space_region))
{
    m_active_context_ids.with([&](Bitmap& my_active_context_ids) {
        my_active_context_ids = move(active_context_ids);
        // Note: Context ID 0 is invalid, so mark it as in use.
        my_active_context_ids.set(0, true);
    });
}

void VirtIOGraphicsAdapter::initialize()
{
    VirtIO::Device::initialize();
    if (auto* config = get_config(VirtIO::ConfigurationType::Device)) {
        m_device_configuration = config;
        bool success = negotiate_features([&](u64 supported_features) {
            u64 negotiated = 0;
            if (is_feature_set(supported_features, VIRTIO_GPU_F_VIRGL)) {
                dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: VirGL is available, enabling");
                negotiated |= VIRTIO_GPU_F_VIRGL;
                m_has_virgl_support = true;
            }
            if (is_feature_set(supported_features, VIRTIO_GPU_F_EDID))
                negotiated |= VIRTIO_GPU_F_EDID;
            return negotiated;
        });
        if (success) {
            read_config_atomic([&]() {
                m_num_scanouts = config_read32(*config, DEVICE_NUM_SCANOUTS);
            });
            dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: num_scanouts: {}", m_num_scanouts);
            success = setup_queues(2); // CONTROLQ + CURSORQ
        }
        VERIFY(success);
        finish_init();
    } else {
        VERIFY_NOT_REACHED();
    }
}

bool VirtIOGraphicsAdapter::handle_device_config_change()
{
    auto events = get_pending_events();
    if (events & VIRTIO_GPU_EVENT_DISPLAY) {
        // The host window was resized; in SerenityOS we completely ignore this event.
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Ignoring virtio gpu display resize event");
        clear_pending_events(VIRTIO_GPU_EVENT_DISPLAY);
    }
    if (events & ~VIRTIO_GPU_EVENT_DISPLAY) {
        dbgln("VirtIO::GraphicsAdapter: Got unknown device config change event: {:#x}", events);
        return false;
    }
    return true;
}

void VirtIOGraphicsAdapter::handle_queue_update(u16)
{
}

u32 VirtIOGraphicsAdapter::get_pending_events()
{
    return config_read32(*m_device_configuration, DEVICE_EVENTS_READ);
}

void VirtIOGraphicsAdapter::clear_pending_events(u32 event_bitmask)
{
    config_write32(*m_device_configuration, DEVICE_EVENTS_CLEAR, event_bitmask);
}

static void populate_virtio_gpu_request_header(Graphics::VirtIOGPU::Protocol::ControlHeader& header, Graphics::VirtIOGPU::Protocol::CommandType ctrl_type, u32 flags)
{
    header.type = to_underlying(ctrl_type);
    header.flags = flags;
    header.fence_id = 0;
    header.context_id = 0;
    header.padding = 0;
}
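// Query the EDID blob for a scanout via VIRTIO_GPU_CMD_GET_EDID and hand the first 128 bytes to the display connector.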
ErrorOr<void> VirtIOGraphicsAdapter::query_and_set_edid(u32 scanout_id, VirtIODisplayConnector& display_connector)
{
    SpinlockLocker locker(m_operation_lock);
    if (!is_feature_accepted(VIRTIO_GPU_F_EDID))
        return Error::from_errno(ENOTSUP);

    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::GetEDID>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::GetEDIDResponse>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_GET_EDID, 0);

    request.scanout_id = scanout_id;
    request.padding = 0;

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.header.type != to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_EDID)) {
        dmesgln("VirtIO::GraphicsAdapter: Failed to get EDID");
        return Error::from_errno(ENOTSUP);
    }

    if (response.size == 0) {
        dmesgln("VirtIO::GraphicsAdapter: Failed to get EDID, empty buffer");
        return Error::from_errno(EIO);
    }

    Array<u8, 128> raw_edid;
    memcpy(raw_edid.data(), response.edid, min(sizeof(raw_edid), response.size));
    display_connector.set_edid_bytes({}, raw_edid);
    return {};
}

ErrorOr<Graphics::VirtIOGPU::ResourceID> VirtIOGraphicsAdapter::create_2d_resource(Graphics::VirtIOGPU::Protocol::Rect rect)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceCreate2D>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, 0);

    auto resource_id = allocate_resource_id();
    request.resource_id = resource_id.value();
    request.width = rect.width;
    request.height = rect.height;
    request.format = to_underlying(Graphics::VirtIOGPU::Protocol::TextureFormat::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated 2d resource with id {}", resource_id.value());
        return resource_id;
    }
    return EIO;
}

ErrorOr<Graphics::VirtIOGPU::ResourceID> VirtIOGraphicsAdapter::create_3d_resource(Graphics::VirtIOGPU::Protocol::Resource3DSpecification const& resource_3d_specification)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceCreate3D>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, 0);

    // FIXME: What would be an appropriate resource freeing mechanism to use in case anything
    // after this fails?
    auto resource_id = allocate_resource_id();
    request.resource_id = resource_id.value();
    // TODO: Abstract this out a bit more
    u32* start_of_copied_fields = &request.target;

    // Validate that the sub copy from the resource_3d_specification to the offset of the request fits.
    static_assert((sizeof(request) - offsetof(Graphics::VirtIOGPU::Protocol::ResourceCreate3D, target) == sizeof(resource_3d_specification)));
    memcpy(start_of_copied_fields, &resource_3d_specification, sizeof(resource_3d_specification));

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated 3d resource with id {}", resource_id.value());
        return resource_id;
    }
    return EIO;
}
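// Describe the guest framebuffer to the host as a list of page-sized memory entries
// (VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING).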
ErrorOr<void> VirtIOGraphicsAdapter::ensure_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id, Memory::Region const& region, size_t buffer_offset, size_t buffer_length)
{
    VERIFY(m_operation_lock.is_locked());

    VERIFY(buffer_offset % PAGE_SIZE == 0);
    VERIFY(buffer_length % PAGE_SIZE == 0);
    auto first_page_index = buffer_offset / PAGE_SIZE;
    size_t num_mem_regions = buffer_length / PAGE_SIZE;

    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceAttachBacking>();
    const size_t header_block_size = sizeof(request) + num_mem_regions * sizeof(Graphics::VirtIOGPU::Protocol::MemoryEntry);

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, 0);
    request.resource_id = resource_id.value();
    request.num_entries = num_mem_regions;
    for (size_t i = 0; i < num_mem_regions; ++i) {
        auto& memory_entry = writer.append_structure<Graphics::VirtIOGPU::Protocol::MemoryEntry>();
        memory_entry.address = region.physical_page(first_page_index + i)->paddr().get();
        memory_entry.length = PAGE_SIZE;
    }

    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), header_block_size, sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Allocated backing storage");
        return {};
    }
    return EIO;
}

ErrorOr<void> VirtIOGraphicsAdapter::detach_backing_storage(Graphics::VirtIOGPU::ResourceID resource_id)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceDetachBacking>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, 0);
    request.resource_id = resource_id.value();

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Detached backing storage");
        return {};
    }
    return EIO;
}
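// Bind a resource to a display scanout with VIRTIO_GPU_CMD_SET_SCANOUT so the host presents it.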
ErrorOr<void> VirtIOGraphicsAdapter::set_scanout_resource(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect rect)
{
    VERIFY(m_operation_lock.is_locked());
    // We need to scope the request/response here so that we can query display information later on
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::SetScanOut>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_SET_SCANOUT, 0);
    request.resource_id = resource_id.value();
    request.scanout_id = scanout.value();
    request.rect = rect;

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
        dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Set backing scanout");
        return {};
    }
    return EIO;
}

ErrorOr<void> VirtIOGraphicsAdapter::transfer_framebuffer_data_to_host(Graphics::VirtIOGPU::ScanoutID scanout, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::TransferToHost2D>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, 0);
    request.offset = (dirty_rect.x + (dirty_rect.y * m_scanouts[scanout.value()].display_connector->display_information({}).rect.width)) * sizeof(u32);
    request.resource_id = resource_id.value();
    request.rect = dirty_rect;

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA))
        return {};
    return EIO;
}

ErrorOr<void> VirtIOGraphicsAdapter::flush_displayed_image(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceFlush>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_FLUSH, 0);
    request.resource_id = resource_id.value();
    request.rect = dirty_rect;

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA))
        return {};
    return EIO;
}
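// Submit a request/response buffer pair on the control queue, then busy-poll in 1-microsecond steps
// until the device responds or the timeout expires.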
ErrorOr<void> VirtIOGraphicsAdapter::synchronous_virtio_gpu_command(size_t microseconds_timeout, PhysicalAddress buffer_start, size_t request_size, size_t response_size)
{
    VERIFY(m_operation_lock.is_locked());
    VERIFY(microseconds_timeout > 10);
    VERIFY(microseconds_timeout < 100000);
    auto& queue = get_queue(CONTROLQ);
    queue.disable_interrupts();
    SpinlockLocker lock(queue.lock());
    VirtIO::QueueChain chain { queue };
    chain.add_buffer_to_chain(buffer_start, request_size, VirtIO::BufferType::DeviceReadable);
    chain.add_buffer_to_chain(buffer_start.offset(request_size), response_size, VirtIO::BufferType::DeviceWritable);
    supply_chain_and_notify(CONTROLQ, chain);
    full_memory_barrier();
    size_t current_time = 0;
    ScopeGuard clear_used_buffers([&] {
        queue.discard_used_buffers();
    });
    while (current_time < microseconds_timeout) {
        if (queue.new_data_available())
            return {};
        microseconds_delay(1);
        current_time++;
    }
    return Error::from_errno(EBUSY);
}

ErrorOr<void> VirtIOGraphicsAdapter::flush_dirty_rectangle(Graphics::VirtIOGPU::ScanoutID scanout_id, Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::Protocol::Rect const& dirty_rect)
{
    VERIFY(m_operation_lock.is_locked());
    TRY(transfer_framebuffer_data_to_host(scanout_id, resource_id, dirty_rect));
    TRY(flush_displayed_image(resource_id, dirty_rect));
    return {};
}

Graphics::VirtIOGPU::ResourceID VirtIOGraphicsAdapter::allocate_resource_id()
{
    return m_resource_id_counter++;
}

ErrorOr<void> VirtIOGraphicsAdapter::delete_resource(Graphics::VirtIOGPU::ResourceID resource_id)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ResourceUnref>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_RESOURCE_UNREF, 0);
    request.resource_id = resource_id.value();

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA))
        return {};
    return EIO;
}

ErrorOr<void> VirtIOGraphicsAdapter::initialize_3d_device()
{
    if (m_has_virgl_support) {
        SpinlockLocker locker(m_operation_lock);
        m_3d_device = TRY(VirtIOGPU3DDevice::try_create(*this));
    }
    return {};
}

ErrorOr<Graphics::VirtIOGPU::ContextID> VirtIOGraphicsAdapter::create_context()
{
    VERIFY(m_operation_lock.is_locked());
    return m_active_context_ids.with([&](Bitmap& active_context_ids) -> ErrorOr<Graphics::VirtIOGPU::ContextID> {
        auto maybe_available_id = active_context_ids.find_first_unset();
        if (!maybe_available_id.has_value()) {
            dmesgln("VirtIO::GraphicsAdapter: No available context IDs.");
            return Error::from_errno(ENXIO);
        }
        auto new_context_id = static_cast<u32>(maybe_available_id.value());

        auto writer = create_scratchspace_writer();
        auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ContextCreate>();
        auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

        constexpr char const* region_name = "Serenity VirGL3D Context";
        populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_CTX_CREATE, 0);
        request.header.context_id = new_context_id;
        request.name_length = strlen(region_name);
        memset(request.debug_name.data(), 0, 64);
        VERIFY(request.name_length <= 64);
        memcpy(request.debug_name.data(), region_name, request.name_length);

        TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

        if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA)) {
            active_context_ids.set(maybe_available_id.value(), true);
            return static_cast<Graphics::VirtIOGPU::ContextID>(new_context_id);
        }
        return Error::from_errno(EIO);
    });
}
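// Submit a VirGL command buffer with VIRTIO_GPU_CMD_SUBMIT_3D: the caller serializes commands into the
// scratch space, and the buffer is NOP-padded below so the response header lands at an aligned offset.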
ErrorOr<void> VirtIOGraphicsAdapter::submit_command_buffer(Graphics::VirtIOGPU::ContextID context_id, Function<size_t(Bytes)> buffer_writer)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::CommandSubmit>();

    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_SUBMIT_3D, 0);
    request.header.context_id = context_id.value();

    auto max_command_buffer_length = m_scratch_space->size() - sizeof(request) - sizeof(Graphics::VirtIOGPU::Protocol::ControlHeader);
    // Truncate to nearest multiple of alignment, to ensure padding loop doesn't exhaust allocated space
    max_command_buffer_length -= max_command_buffer_length % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader);
    Bytes command_buffer_buffer(m_scratch_space->vaddr().offset(sizeof(request)).as_ptr(), max_command_buffer_length);
    request.size = buffer_writer(command_buffer_buffer);
    writer.skip_bytes(request.size);
    // The alignment of a ControlHeader may be a few words larger than the length of a command buffer, so
    // we pad with no-ops until we reach the correct alignment
    while (writer.current_offset() % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader) != 0) {
        VERIFY((writer.current_offset() % alignof(Graphics::VirtIOGPU::Protocol::ControlHeader)) % sizeof(u32) == 0);
        writer.append_structure<u32>() = to_underlying(Graphics::VirtIOGPU::VirGLCommand::NOP);
        request.size += 4;
    }
    dbgln_if(VIRTIO_DEBUG, "VirtIO::GraphicsAdapter: Sending command buffer of length {}", request.size);
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request) + request.size, sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA))
        return {};
    return EIO;
}

ErrorOr<void> VirtIOGraphicsAdapter::attach_resource_to_context(Graphics::VirtIOGPU::ResourceID resource_id, Graphics::VirtIOGPU::ContextID context_id)
{
    VERIFY(m_operation_lock.is_locked());
    auto writer = create_scratchspace_writer();
    auto& request = writer.append_structure<Graphics::VirtIOGPU::Protocol::ContextAttachResource>();
    auto& response = writer.append_structure<Graphics::VirtIOGPU::Protocol::ControlHeader>();
    populate_virtio_gpu_request_header(request.header, Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, 0);
    request.header.context_id = context_id.value();
    request.resource_id = resource_id.value();

    TRY(synchronous_virtio_gpu_command(100, start_of_scratch_space(), sizeof(request), sizeof(response)));

    if (response.type == to_underlying(Graphics::VirtIOGPU::Protocol::CommandType::VIRTIO_GPU_RESP_OK_NODATA))
        return {};
    return EIO;
}

}