Mii rendering and parsing library

That's pretty alright

Changed files
+1002 -275
.gitignore (+1)

```diff
···
 *.charinfo
 *.png
 *.hexproj
+*.rdc
 .DS_Store
 .idea
```
Cargo.lock (+4)

```diff
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6b46b9ca4690308844c644e7c634d68792467260e051c8543e0c7871662b3ba7"
 dependencies = [
+ "bytemuck",
  "mint",
 ]

···
 name = "lightweight_viewer"
 version = "0.1.0"
 dependencies = [
+ "bytemuck",
+ "glam 0.30.3",
+ "image 0.25.6",
  "pollster",
  "vfl",
  "wgpu 25.0.0",
```
lightweight_viewer/Cargo.toml (+4 -1)

```diff
···
 pollster = "0.4"
 wgpu = "25.0.0"
 winit = { version = "0.30.8", features = ["android-native-activity"] }
-vfl = { path = "../vfl" }
+vfl = { path = "../vfl", features = ["res", "draw"] }
+glam = { version = "0.30.3", features = ["bytemuck"] }
+bytemuck = "1.23.0"
+image = "0.25.6"
```
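The `res` and `draw` features gate vfl's resource-archive parsing and its wgpu-facing render types, which the viewer now uses directly. For orientation, a minimal sketch of the call sequence they unlock, modeled on the `make_a_mii()` helper that this change removes from `main.rs` (the charinfo path here is a placeholder):

```rust
// Sketch only: mirrors the removed make_a_mii() helper from main.rs.
use std::{fs::File, io::BufReader};
use vfl::{
    charinfo::nx::{BinRead, NxCharInfo},
    draw::wgpu_render::RenderContext, // needs the "draw" feature
    res::{shape::nx::SHAPE_MID_DAT, tex::nx::TEXTURE_MID_SRGB_DAT}, // needs "res"
};

fn build_render_context() -> RenderContext {
    // Resource archives whose paths vfl exposes as constants.
    let mut shape_file = BufReader::new(File::open(SHAPE_MID_DAT).unwrap());
    let mut tex_file = BufReader::new(File::open(TEXTURE_MID_SRGB_DAT).unwrap());

    // Parse a .charinfo dump (placeholder path) and build the 2D face-texture shapes.
    let mut char = File::open("Jasmine.charinfo").unwrap();
    let char = NxCharInfo::read(&mut char).unwrap();

    RenderContext::new(&char, (&mut shape_file, &mut tex_file)).unwrap()
}
```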
lightweight_viewer/src/char.rs (+560, new file)

```rust
use std::{fs::File, io::BufReader, sync::Arc};

use glam::{Vec3, Vec4};
use vfl::{
    charinfo::nx::{BinRead, NxCharInfo},
    color::nx::linear::FACELINE_COLOR,
    draw::wgpu_render::{
        RenderContext, RenderShape as Rendered2dShape, SHADER, TextureTransformUniform, Vertex,
        cast_slice, render_context_wgpu,
    },
    res::{
        shape::nx::{
            GenericResourceShape, ResourceShape, SHAPE_MID_DAT, Shape, ShapeData, ShapeElement,
        },
        tex::nx::TEXTURE_MID_SRGB_DAT,
    },
};
use wgpu::{
    Backends, CommandEncoder, TexelCopyTextureInfo, Texture, TextureFormat, TextureView,
    include_wgsl, util::DeviceExt,
};
use winit::{
    application::ApplicationHandler,
    dpi::PhysicalSize,
    event::WindowEvent,
    event_loop::{ActiveEventLoop, ControlFlow, EventLoop},
    window::{Window, WindowId},
};

use crate::{State, texture};

#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CharShapeUniform {
    diffuse_color: [f32; 4],
}

trait RenderThatContext {
    fn render(self, st: &mut State, texture_view: &TextureView, encoder: &mut CommandEncoder);
    fn render_2d_shape(
        shape: Rendered2dShape,
        st: &mut State,
        texture_view: &TextureView,
        encoder: &mut CommandEncoder,
    );
    fn render_3d_shape(
        shape: Rendered3dShape,
        st: &mut State,
        texture_view: &TextureView,
        encoder: &mut CommandEncoder,
    );
}

impl RenderThatContext for RenderContext {
    fn render(self, st: &mut State, texture_view: &TextureView, encoder: &mut CommandEncoder) {
        for shape in self.shape {
            RenderContext::render_2d_shape(shape, st, texture_view, encoder)
        }
    }

    fn render_2d_shape(
        shape: Rendered2dShape,
        st: &mut State,
        texture_view: &TextureView,
        encoder: &mut CommandEncoder,
    ) {
        let vertex_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("Vertex Buffer"),
                contents: cast_slice(&shape.vertices),
                usage: wgpu::BufferUsages::VERTEX,
            });

        let index_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("Index Buffer"),
                contents: cast_slice(&shape.indices),
                usage: wgpu::BufferUsages::INDEX,
            });

        let shape_texture_rgba = shape.tex.to_rgba8();
        let shape_texture_dimensions = shape_texture_rgba.dimensions();
        let shape_texture_size = wgpu::Extent3d {
            width: shape_texture_dimensions.0,
            height: shape_texture_dimensions.1,
            // All textures are stored as 3D, we represent our 2D texture
            // by setting depth to 1.
            depth_or_array_layers: 1,
        };
        let shape_diffuse_texture = st.device.create_texture(&wgpu::TextureDescriptor {
            size: shape_texture_size,
            mip_level_count: 1, // We'll talk about this a little later
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            // Most images are stored using sRGB, so we need to reflect that here.
            format: st.surface_format.add_srgb_suffix(),
            // TEXTURE_BINDING tells wgpu that we want to use this texture in shaders
            // COPY_DST means that we want to copy data to this texture
            usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
            label: Some("diffuse_texture"),
            // This is the same as with the SurfaceConfig. It
            // specifies what texture formats can be used to
            // create TextureViews for this texture. The base
            // texture format (Rgba8UnormSrgb in this case) is
            // always supported. Note that using a different
            // texture format is not supported on the WebGL2
            // backend.
            view_formats: &[st.surface_format],
        });

        st.queue.write_texture(
            TexelCopyTextureInfo {
                texture: &shape_diffuse_texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            &shape_texture_rgba,
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(4 * shape_texture_dimensions.0),
                rows_per_image: Some(shape_texture_dimensions.1),
            },
            shape_texture_size,
        );

        let shape_diffuse_texture_view =
            shape_diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
        let shape_diffuse_sampler = st.device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });

        let texture_bind_group_layout =
            st.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    entries: &[
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                view_dimension: wgpu::TextureViewDimension::D2,
                                sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            },
                            count: None,
                        },
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStages::FRAGMENT,
                            // This should match the filterable field of the
                            // corresponding Texture entry above.
                            ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                            count: None,
                        },
                    ],
                    label: Some("texture_bind_group_layout"),
                });

        let shape_diffuse_bind_group = st.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &texture_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&shape_diffuse_texture_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&shape_diffuse_sampler),
                },
            ],
            label: Some("diffuse_bind_group"),
        });

        let mvp_matrix = shape.mvp_matrix.into();
        let mvp_uniform = TextureTransformUniform {
            mvp_matrix,
            channel_replacements_r: shape.channel_replacements[0],
            channel_replacements_g: shape.channel_replacements[1],
            channel_replacements_b: shape.channel_replacements[2],
            texture_type: (Into::<u8>::into(shape.texture_type)).into(),
            pad: Default::default(),
        };

        let mvp_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("MvpMatrix Buffer"),
                contents: cast_slice(&[mvp_uniform]),
                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            });

        let mvp_bind_group_layout =
            st.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    entries: &[wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    }],
                    label: Some("mvp_bind_group_layout"),
                });

        let mvp_bind_group = st.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &mvp_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: mvp_buffer.as_entire_binding(),
            }],
            label: Some("mvp_bind_group"),
        });

        let shader = wgpu::ShaderSource::Wgsl(SHADER.into());
        let shader_module = st
            .device
            .create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("Shader"),
                source: shader,
            });
        let render_pipeline_layout =
            st.device
                .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                    label: Some("Render Pipeline Layout"),
                    bind_group_layouts: &[&texture_bind_group_layout, &mvp_bind_group_layout],
                    push_constant_ranges: &[],
                });
        let render_pipeline = st
            .device
            .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("Render Pipeline"),
                layout: Some(&render_pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some("vs_main"),
                    buffers: &[Vertex::desc()],
                    compilation_options: Default::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some("fs_main"),
                    targets: &[Some(wgpu::ColorTargetState {
                        format: st.surface_format.add_srgb_suffix(),
                        blend: Some(wgpu::BlendState::ALPHA_BLENDING),
                        write_mask: wgpu::ColorWrites::ALL,
                    })],
                    compilation_options: Default::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology: wgpu::PrimitiveTopology::TriangleList,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: Some(wgpu::Face::Back),
                    // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
                    polygon_mode: wgpu::PolygonMode::Fill,
                    // Requires Features::DEPTH_CLIP_CONTROL
                    unclipped_depth: false,
                    // Requires Features::CONSERVATIVE_RASTERIZATION
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: 1,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                // If the pipeline will be used with a multiview render pass, this
                // indicates how many array layers the attachments will have.
                multiview: None,
                cache: None,
            });

        {
            let render_pass_desc = wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: texture_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                occlusion_query_set: None,
                timestamp_writes: None,
            };
            let mut render_pass = encoder.begin_render_pass(&render_pass_desc);

            render_pass.set_pipeline(&render_pipeline);
            render_pass.set_bind_group(0, &shape_diffuse_bind_group, &[]);
            render_pass.set_bind_group(1, &mvp_bind_group, &[]);
            render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
            render_pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);

            render_pass.draw_indexed(0..shape.indices.len() as u32, 0, 0..1);
        }
    }

    fn render_3d_shape(
        shape: Rendered3dShape,
        st: &mut State,
        texture_view: &TextureView,
        encoder: &mut CommandEncoder,
    ) {
        let vertex_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("Vertex Buffer"),
                contents: cast_slice(&shape.vertices),
                usage: wgpu::BufferUsages::VERTEX,
            });

        let index_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("Index Buffer"),
                contents: cast_slice(&shape.indices),
                usage: wgpu::BufferUsages::INDEX,
            });

        let char_shape_uniform = CharShapeUniform {
            diffuse_color: shape.color.into(),
        };
        let char_shape_buffer = st
            .device
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label: Some("Cs Buffer"),
                contents: cast_slice(&[char_shape_uniform]),
                usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            });
        let char_shape_bind_group_layout =
            st.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    entries: &[wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    }],
                    label: Some("cs_group_layout"),
                });

        let char_shape_bind_group = st.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &char_shape_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: char_shape_buffer.as_entire_binding(),
            }],
            label: Some("cs_bind_group"),
        });

        let shader_module = st.device.create_shader_module(include_wgsl!("shader.wgsl"));
        let render_pipeline_layout =
            st.device
                .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                    label: Some("Render Pipeline Layout"),
                    bind_group_layouts: &[
                        &st.camera_bind_group_layout,
                        &char_shape_bind_group_layout,
                    ],
                    push_constant_ranges: &[],
                });
        let render_pipeline = st
            .device
            .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("Render Pipeline"),
                layout: Some(&render_pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some("vs_main"),
                    buffers: &[Vertex::desc()],
                    compilation_options: Default::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some("fs_main"),
                    targets: &[Some(wgpu::ColorTargetState {
                        format: st.surface_format.add_srgb_suffix(),
                        blend: None,
                        write_mask: wgpu::ColorWrites::ALL,
                    })],
                    compilation_options: Default::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology: wgpu::PrimitiveTopology::TriangleList,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: Some(wgpu::Face::Back),
                    // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
                    polygon_mode: wgpu::PolygonMode::Fill,
                    // Requires Features::DEPTH_CLIP_CONTROL
                    unclipped_depth: false,
                    // Requires Features::CONSERVATIVE_RASTERIZATION
                    conservative: false,
                },
                depth_stencil: Some(wgpu::DepthStencilState {
                    format: texture::Texture::DEPTH_FORMAT,
                    depth_write_enabled: true,
                    depth_compare: wgpu::CompareFunction::Less, // 1.
                    stencil: wgpu::StencilState::default(),     // 2.
                    bias: wgpu::DepthBiasState::default(),
                }),
                multisample: wgpu::MultisampleState {
                    count: 1,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                // If the pipeline will be used with a multiview render pass, this
                // indicates how many array layers the attachments will have.
                multiview: None,
                cache: None,
            });

        {
            let render_pass_desc = wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: texture_view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                    view: &st.depth_texture.view,
                    depth_ops: Some(wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    }),
                    stencil_ops: None,
                }),
                occlusion_query_set: None,
                timestamp_writes: None,
            };
            let mut render_pass = encoder.begin_render_pass(&render_pass_desc);

            render_pass.set_pipeline(&render_pipeline);
            render_pass.set_bind_group(0, &st.camera_bind_group, &[]);
            render_pass.set_bind_group(1, &char_shape_bind_group, &[]);
            // render_pass.set_bind_group(1, &mvp_bind_group, &[]);
            render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
            render_pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);

            render_pass.draw_indexed(0..shape.indices.len() as u32, 0, 0..1);
        }
    }
}

struct Rendered3dShape {
    vertices: Vec<Vertex>,
    indices: Vec<u32>,
    color: Vec4,
    texture: Option<Texture>,
}

// I'm in a fucking horror of my own design
fn shape_data_to_render_3d_shape(d: ShapeData, shape: Shape, color: usize) -> Rendered3dShape {
    let mut vertices: Vec<Vertex> = vec![];
    let tex_coords = d
        .uvs
        .unwrap_or(vec![[f32::NAN, f32::NAN]; d.positions.len()]); // Go on, return NULL. See if I care.
    let normals = d.normals.unwrap();

    for i in 0..d.positions.len() {
        vertices.push(Vertex {
            position: d.positions[i],
            tex_coords: tex_coords[i],
            normal: normals[i],
        })
    }

    let indices = d.indices.iter().map(|x| u32::from(*x)).collect();

    Rendered3dShape {
        vertices,
        indices,
        color: match shape {
            Shape::HairNormal => vfl::color::nx::linear::COMMON_COLOR[color].into(),
            Shape::FaceLine | Shape::ForeheadNormal => FACELINE_COLOR[color].into(),
            _ => [1.0, 0.0, 1.0, 1.0].into(),
        },
        texture: None,
    }
}

pub fn draw_mask(st: &mut State, texture_view: &TextureView, encoder: &mut CommandEncoder) {
    let render_context =
        RenderContext::new(&st.char_info.clone(), (&mut st.shape(), &mut st.texture())).unwrap();

    render_context.render(st, texture_view, encoder);
}

pub fn draw_char(st: &mut State, texture_view: &TextureView, encoder: &mut CommandEncoder) {
    let res_shape: ResourceShape = ResourceShape::read(&mut st.shape()).unwrap();
    let GenericResourceShape::Element(mut shape_faceline) = res_shape
        .fetch_shape(
            vfl::res::shape::nx::Shape::FaceLine,
            usize::from(st.char_info.faceline_type),
        )
        .unwrap()
    else {
        panic!()
    };
    let GenericResourceShape::Element(mut shape_hair) = res_shape
        .fetch_shape(
            vfl::res::shape::nx::Shape::HairNormal,
            usize::from(st.char_info.hair_type),
        )
        .unwrap()
    else {
        panic!()
    };

    let mask_texture = crate::texture::Texture::create_texture(
        &st.device,
        &PhysicalSize::<u32>::new(512, 512),
        "mask",
    );

    draw_mask(st, &mask_texture.view, encoder);

    // TODO: add mask to mask model and whatever blablablablablablaballablabla
    RenderContext::render_3d_shape(
        shape_data_to_render_3d_shape(
            shape_faceline.shape_data(&mut st.shape()).unwrap(),
            Shape::FaceLine,
            usize::from(st.char_info.faceline_color),
        ),
        st,
        texture_view,
        encoder,
    );
    RenderContext::render_3d_shape(
        shape_data_to_render_3d_shape(
            shape_hair.shape_data(&mut st.shape()).unwrap(),
            Shape::HairNormal,
            usize::from(st.char_info.hair_color),
        ),
        st,
        texture_view,
        encoder,
    );
}
```
lightweight_viewer/src/main.rs (+348 -268)

```diff
-use std::{fs::File, io::BufReader, sync::Arc};
+use std::{f32::consts::FRAC_PI_2, fs::File, io::BufReader, sync::Arc};
 
+use camera::{Camera, CameraUniform};
+use char::{draw_char, draw_mask};
+use glam::Vec3;
 use vfl::{
     charinfo::nx::{BinRead, NxCharInfo},
     draw::wgpu_render::{
-        RenderContext, SHADER, TextureTransformUniform, Vertex, cast_slice, render_context_wgpu,
+        RenderContext, RenderShape as Rendered2dShape, SHADER, TextureTransformUniform, Vertex,
+        cast_slice, render_context_wgpu,
     },
-    res::{shape::nx::SHAPE_MID_DAT, tex::nx::TEXTURE_MID_SRGB_DAT},
+    res::{
+        shape::nx::{GenericResourceShape, ResourceShape, SHAPE_MID_DAT, ShapeData, ShapeElement},
+        tex::nx::TEXTURE_MID_SRGB_DAT,
+    },
 };
-use wgpu::{Backends, TexelCopyTextureInfo, TextureFormat, util::DeviceExt};
+use wgpu::{
+    Backends, CommandEncoder, TexelCopyTextureInfo, Texture, TextureFormat, TextureView,
+    include_wgsl, util::DeviceExt,
+};
 use winit::{
     application::ApplicationHandler,
     event::WindowEvent,
···
     window::{Window, WindowId},
 };
 
-fn make_a_mii() -> RenderContext {
-    let mut tex_file = BufReader::new(File::open(TEXTURE_MID_SRGB_DAT).unwrap());
-    let buf_reader = BufReader::new(File::open(SHAPE_MID_DAT).unwrap());
-    let mut tex_shape = buf_reader;
-
-    let mut char = File::open(concat!(env!("CARGO_MANIFEST_DIR"), "/../Jasmine.charinfo")).unwrap();
-    let char = NxCharInfo::read(&mut char).unwrap();
-
-    RenderContext::new(&char, (&mut tex_shape, &mut tex_file)).unwrap()
-}
+pub mod char;
 
 struct State {
     window: Arc<Window>,
···
     size: winit::dpi::PhysicalSize<u32>,
     surface: wgpu::Surface<'static>,
     surface_format: wgpu::TextureFormat,
+    res_shape: String,
+    res_texture: String,
+    char_info: NxCharInfo,
+    camera: Camera,
+    camera_buffer: wgpu::Buffer,
+    camera_bind_group: wgpu::BindGroup,
+    camera_uniform: CameraUniform,
+    camera_bind_group_layout: wgpu::BindGroupLayout,
+    depth_texture: texture::Texture,
 }
 
 impl State {
+    fn shape(&mut self) -> BufReader<File> {
+        BufReader::new(File::open(&self.res_shape).unwrap())
+    }
+    fn texture(&mut self) -> BufReader<File> {
+        BufReader::new(File::open(&self.res_texture).unwrap())
+    }
     async fn new(window: Arc<Window>) -> State {
         let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
             backends: Backends::PRIMARY | Backends::SECONDARY,
···
         let cap = surface.get_capabilities(&adapter);
         let surface_format = cap.formats[0];
 
+        let depth_texture = texture::Texture::create_depth_texture(&device, &size, "depth_texture");
+
+        let res_texture = TEXTURE_MID_SRGB_DAT.to_string();
+
+        let res_shape = SHAPE_MID_DAT.to_string();
+
+        let mut char_info =
+            File::open(concat!(env!("CARGO_MANIFEST_DIR"), "/../Jasmine.charinfo")).unwrap();
+        let char_info = NxCharInfo::read(&mut char_info).unwrap();
+
+        let camera = Camera {
+            eye: (0.0, 50.0, 100.0).into(),
+            target: (0.0, 1.0, 0.0).into(),
+            up: Vec3::Y,
+            aspect: size.width as f32 / size.height as f32,
+            fov_y_radians: FRAC_PI_2,
+            znear: 0.1,
+            zfar: 10000.0,
+        };
+
+        let mut camera_uniform = CameraUniform::new();
+        camera_uniform.update_view_proj(&camera);
+
+        let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+            label: Some("Camera Buffer"),
+            contents: bytemuck::cast_slice(&[camera_uniform]),
+            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
+        });
+
+        let camera_bind_group_layout =
+            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+                entries: &[wgpu::BindGroupLayoutEntry {
+                    binding: 0,
+                    visibility: wgpu::ShaderStages::VERTEX,
+                    ty: wgpu::BindingType::Buffer {
+                        ty: wgpu::BufferBindingType::Uniform,
+                        has_dynamic_offset: false,
+                        min_binding_size: None,
+                    },
+                    count: None,
+                }],
+                label: Some("camera_bind_group_layout"),
+            });
+
+        let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            layout: &camera_bind_group_layout,
+            entries: &[wgpu::BindGroupEntry {
+                binding: 0,
+                resource: camera_buffer.as_entire_binding(),
+            }],
+            label: Some("camera_bind_group"),
+        });
+
         let state = State {
             window,
             device,
···
             size,
             surface,
             surface_format,
+            res_shape,
+            res_texture,
+            char_info,
+            camera,
+            camera_buffer,
+            camera_bind_group,
+            camera_uniform,
+            camera_bind_group_layout,
+            depth_texture,
         };
 
         // Configure surface for the first time
···
 
         // reconfigure the surface
         self.configure_surface();
+
+        self.depth_texture =
+            texture::Texture::create_depth_texture(&self.device, &self.size, "depth_texture");
     }
 
     fn render(&mut self) {
···
                 format: Some(self.surface_format.add_srgb_suffix()),
                 ..Default::default()
             });
-
-        let mut encoder = self.device.create_command_encoder(&Default::default());
 
         // Renders a GREEN screen
         let mut encoder = self.device.create_command_encoder(&Default::default());
···
                     store: wgpu::StoreOp::Store,
                 },
             })],
-            depth_stencil_attachment: None,
+            // Clear the depth buffer, too.
+            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
+                view: &self.depth_texture.view,
+                depth_ops: Some(wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(1.0),
+                    store: wgpu::StoreOp::Store,
+                }),
+                stencil_ops: None,
+            }),
             timestamp_writes: None,
             occlusion_query_set: None,
         });
···
         // End the renderpass.
         drop(renderpass);
 
-        let render_context = make_a_mii();
-
         // If you wanted to call any drawing commands, they would go here.
-
-        for shape in render_context.shape {
-            let vertex_buffer = self
-                .device
-                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                    label: Some("Vertex Buffer"),
-                    contents: cast_slice(&shape.vertices),
-                    usage: wgpu::BufferUsages::VERTEX,
-                });
-
-            let index_buffer = self
-                .device
-                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                    label: Some("Index Buffer"),
-                    contents: cast_slice(&shape.indices),
-                    usage: wgpu::BufferUsages::INDEX,
-                });
-
-            let shape_texture_rgba = shape.tex.to_rgba8();
-            let shape_texture_dimensions = shape_texture_rgba.dimensions();
-            let shape_texture_size = wgpu::Extent3d {
-                width: shape_texture_dimensions.0,
-                height: shape_texture_dimensions.1,
-                // All textures are stored as 3D, we represent our 2D texture
-                // by setting depth to 1.
-                depth_or_array_layers: 1,
-            };
-            let shape_diffuse_texture = self.device.create_texture(&wgpu::TextureDescriptor {
-                size: shape_texture_size,
-                mip_level_count: 1, // We'll talk about this a little later
-                sample_count: 1,
-                dimension: wgpu::TextureDimension::D2,
-                // Most images are stored using sRGB, so we need to reflect that here.
-                format: wgpu::TextureFormat::Rgba8UnormSrgb,
-                // TEXTURE_BINDING tells wgpu that we want to use this texture in shaders
-                // COPY_DST means that we want to copy data to this texture
-                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
-                label: Some("diffuse_texture"),
-                // This is the same as with the SurfaceConfig. It
-                // specifies what texture formats can be used to
-                // create TextureViews for this texture. The base
-                // texture format (Rgba8UnormSrgb in this case) is
-                // always supported. Note that using a different
-                // texture format is not supported on the WebGL2
-                // backend.
-                view_formats: &[],
-            });
-
-            self.queue.write_texture(
-                TexelCopyTextureInfo {
-                    texture: &shape_diffuse_texture,
-                    mip_level: 0,
-                    origin: wgpu::Origin3d::ZERO,
-                    aspect: wgpu::TextureAspect::All,
-                },
-                &shape_texture_rgba,
-                wgpu::TexelCopyBufferLayout {
-                    offset: 0,
-                    bytes_per_row: Some(4 * shape_texture_dimensions.0),
-                    rows_per_image: Some(shape_texture_dimensions.1),
-                },
-                shape_texture_size,
-            );
-
-            let shape_diffuse_texture_view =
-                shape_diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
-            let shape_diffuse_sampler = self.device.create_sampler(&wgpu::SamplerDescriptor {
-                address_mode_u: wgpu::AddressMode::ClampToEdge,
-                address_mode_v: wgpu::AddressMode::ClampToEdge,
-                address_mode_w: wgpu::AddressMode::ClampToEdge,
-                mag_filter: wgpu::FilterMode::Linear,
-                min_filter: wgpu::FilterMode::Nearest,
-                mipmap_filter: wgpu::FilterMode::Nearest,
-                ..Default::default()
-            });
-
-            let texture_bind_group_layout =
-                self.device
-                    .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                        entries: &[
-                            wgpu::BindGroupLayoutEntry {
-                                binding: 0,
-                                visibility: wgpu::ShaderStages::FRAGMENT,
-                                ty: wgpu::BindingType::Texture {
-                                    multisampled: false,
-                                    view_dimension: wgpu::TextureViewDimension::D2,
-                                    sample_type: wgpu::TextureSampleType::Float {
-                                        filterable: true,
-                                    },
-                                },
-                                count: None,
-                            },
-                            wgpu::BindGroupLayoutEntry {
-                                binding: 1,
-                                visibility: wgpu::ShaderStages::FRAGMENT,
-                                // This should match the filterable field of the
-                                // corresponding Texture entry above.
-                                ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
-                                count: None,
-                            },
-                        ],
-                        label: Some("texture_bind_group_layout"),
-                    });
-
-            let shape_diffuse_bind_group =
-                self.device.create_bind_group(&wgpu::BindGroupDescriptor {
-                    layout: &texture_bind_group_layout,
-                    entries: &[
-                        wgpu::BindGroupEntry {
-                            binding: 0,
-                            resource: wgpu::BindingResource::TextureView(
-                                &shape_diffuse_texture_view,
-                            ),
-                        },
-                        wgpu::BindGroupEntry {
-                            binding: 1,
-                            resource: wgpu::BindingResource::Sampler(&shape_diffuse_sampler),
-                        },
-                    ],
-                    label: Some("diffuse_bind_group"),
-                });
+        // draw_mask(self, &texture_view, &mut encoder);
+        draw_char(self, &texture_view, &mut encoder);
 
-            let mvp_matrix = shape.mvp_matrix.into();
-            let mvp_uniform = TextureTransformUniform {
-                mvp_matrix,
-                channel_replacements_r: shape.channel_replacements[0],
-                channel_replacements_g: shape.channel_replacements[1],
-                channel_replacements_b: shape.channel_replacements[2],
-                texture_type: (Into::<u8>::into(shape.texture_type)).into(),
-                pad: Default::default(),
-            };
+        // Submit the command in the queue to execute
+        self.queue.submit([encoder.finish()]);
+        self.window.pre_present_notify();
+        surface_texture.present();
+    }
 
-            let mvp_buffer = self
-                .device
-                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                    label: Some("MvpMatrix Buffer"),
-                    contents: cast_slice(&[mvp_uniform]),
-                    usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
-                });
-
-            let mvp_bind_group_layout =
-                self.device
-                    .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
-                        entries: &[wgpu::BindGroupLayoutEntry {
-                            binding: 0,
-                            visibility: wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT,
-                            ty: wgpu::BindingType::Buffer {
-                                ty: wgpu::BufferBindingType::Uniform,
-                                has_dynamic_offset: false,
-                                min_binding_size: None,
-                            },
-                            count: None,
-                        }],
-                        label: Some("mvp_bind_group_layout"),
-                    });
-
-            let mvp_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
-                layout: &mvp_bind_group_layout,
-                entries: &[wgpu::BindGroupEntry {
-                    binding: 0,
-                    resource: mvp_buffer.as_entire_binding(),
-                }],
-                label: Some("mvp_bind_group"),
-            });
-
-            let shader = wgpu::ShaderSource::Wgsl(SHADER.into());
-            let shader_module = self
-                .device
-                .create_shader_module(wgpu::ShaderModuleDescriptor {
-                    label: Some("Shader"),
-                    source: shader,
-                });
-            let render_pipeline_layout =
-                self.device
-                    .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-                        label: Some("Render Pipeline Layout"),
-                        bind_group_layouts: &[&texture_bind_group_layout, &mvp_bind_group_layout],
-                        push_constant_ranges: &[],
-                    });
-            let render_pipeline =
-                self.device
-                    .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
-                        label: Some("Render Pipeline"),
-                        layout: Some(&render_pipeline_layout),
-                        vertex: wgpu::VertexState {
-                            module: &shader_module,
-                            entry_point: Some("vs_main"),
-                            buffers: &[Vertex::desc()],
-                            compilation_options: Default::default(),
-                        },
-                        fragment: Some(wgpu::FragmentState {
-                            module: &shader_module,
-                            entry_point: Some("fs_main"),
-                            targets: &[Some(wgpu::ColorTargetState {
-                                format: TextureFormat::Bgra8UnormSrgb,
-                                blend: Some(wgpu::BlendState::ALPHA_BLENDING),
-                                write_mask: wgpu::ColorWrites::ALL,
-                            })],
-                            compilation_options: Default::default(),
-                        }),
-                        primitive: wgpu::PrimitiveState {
-                            topology: wgpu::PrimitiveTopology::TriangleList,
-                            strip_index_format: None,
-                            front_face: wgpu::FrontFace::Ccw,
-                            cull_mode: Some(wgpu::Face::Back),
-                            // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
-                            polygon_mode: wgpu::PolygonMode::Fill,
-                            // Requires Features::DEPTH_CLIP_CONTROL
-                            unclipped_depth: false,
-                            // Requires Features::CONSERVATIVE_RASTERIZATION
-                            conservative: false,
-                        },
-                        depth_stencil: None,
-                        multisample: wgpu::MultisampleState {
-                            count: 1,
-                            mask: !0,
-                            alpha_to_coverage_enabled: false,
-                        },
-                        // If the pipeline will be used with a multiview render pass, this
-                        // indicates how many array layers the attachments will have.
-                        multiview: None,
-                        cache: None,
-                    });
+    fn update(&mut self) {
+        let forward = self.camera.target - self.camera.eye;
+        let forward_norm = forward.normalize();
 
-            {
-                let render_pass_desc = wgpu::RenderPassDescriptor {
-                    label: Some("Render Pass"),
-                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
-                        view: &texture_view,
-                        resolve_target: None,
-                        ops: wgpu::Operations {
-                            load: wgpu::LoadOp::Load,
-                            store: wgpu::StoreOp::Store,
-                        },
-                    })],
-                    depth_stencil_attachment: None,
-                    occlusion_query_set: None,
-                    timestamp_writes: None,
-                };
-                let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
+        let right = forward_norm.cross(self.camera.up);
 
-                render_pass.set_pipeline(&render_pipeline);
-                render_pass.set_bind_group(0, &shape_diffuse_bind_group, &[]);
-                render_pass.set_bind_group(1, &mvp_bind_group, &[]);
-                render_pass.set_vertex_buffer(0, vertex_buffer.slice(..));
-                render_pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint32);
+        // Redo radius calc in case the up/ down is pressed.
+        let forward = self.camera.target - self.camera.eye;
+        let forward_mag = forward.length();
 
-                render_pass.draw_indexed(0..shape.indices.len() as u32, 0, 0..1);
-            }
-        }
+        const CAMERA_ROTATE_SPEED: f32 = 1.0;
+        self.camera.eye =
+            self.camera.target - (forward + right * CAMERA_ROTATE_SPEED).normalize() * forward_mag;
 
-        // Submit the command in the queue to execute
-        self.queue.submit([encoder.finish()]);
-        self.window.pre_present_notify();
-        surface_texture.present();
+        self.camera_uniform.update_view_proj(&self.camera);
+        self.queue.write_buffer(
+            &self.camera_buffer,
+            0,
+            bytemuck::cast_slice(&[self.camera_uniform]),
+        );
     }
 }
 
···
                 event_loop.exit();
             }
             WindowEvent::RedrawRequested => {
+                state.update();
                 state.render();
                 // Emits a new redraw requested event.
                 state.get_window().request_redraw();
···
     let mut app = App::default();
     event_loop.run_app(&mut app).unwrap();
 }
+
+mod camera {
+    use glam::{Mat4, Vec3};
+
+    pub struct Camera {
+        pub eye: Vec3,
+        pub target: Vec3,
+        pub up: Vec3,
+        pub aspect: f32,
+        pub fov_y_radians: f32,
+        pub znear: f32,
+        pub zfar: f32,
+    }
+
+    impl Camera {
+        pub fn build_view_projection_matrix(&self) -> Mat4 {
+            let view = Mat4::look_at_rh(self.eye, self.target, self.up);
+            let proj = Mat4::perspective_rh(self.fov_y_radians, self.aspect, self.znear, self.zfar);
+            proj * view
+        }
+    }
+
+    #[repr(C)]
+    #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+    pub struct CameraUniform {
+        view_proj: [[f32; 4]; 4],
+    }
+
+    impl CameraUniform {
+        pub fn new() -> Self {
+            Self {
+                view_proj: Mat4::IDENTITY.to_cols_array_2d(),
+            }
+        }
+
+        pub fn update_view_proj(&mut self, camera: &Camera) {
+            self.view_proj = camera.build_view_projection_matrix().to_cols_array_2d();
+        }
+    }
+}
+
+mod texture {
+    use std::error::Error;
+
+    use image::GenericImageView;
+    use wgpu::TextureFormat;
+    use winit::dpi::PhysicalSize;
+
+    pub struct Texture {
+        #[allow(unused)]
+        pub texture: wgpu::Texture,
+        pub view: wgpu::TextureView,
+        pub sampler: wgpu::Sampler,
+    }
+
+    impl Texture {
+        pub fn from_bytes(
+            device: &wgpu::Device,
+            queue: &wgpu::Queue,
+            bytes: &[u8],
+            label: &str,
+        ) -> Result<Self, Box<dyn Error>> {
+            let img = image::load_from_memory(bytes)?;
+            Self::from_image(device, queue, &img, Some(label))
+        }
+
+        pub fn from_image(
+            device: &wgpu::Device,
+            queue: &wgpu::Queue,
+            img: &image::DynamicImage,
+            label: Option<&str>,
+        ) -> Result<Self, Box<dyn Error>> {
+            let rgba = img.to_rgba8();
+            let dimensions = img.dimensions();
+
+            let size = wgpu::Extent3d {
+                width: dimensions.0,
+                height: dimensions.1,
+                depth_or_array_layers: 1,
+            };
+            let texture = device.create_texture(&wgpu::TextureDescriptor {
+                label,
+                size,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: wgpu::TextureFormat::Rgba8UnormSrgb,
+                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
+                view_formats: &[],
+            });
+
+            queue.write_texture(
+                wgpu::TexelCopyTextureInfo {
+                    aspect: wgpu::TextureAspect::All,
+                    texture: &texture,
+                    mip_level: 0,
+                    origin: wgpu::Origin3d::ZERO,
+                },
+                &rgba,
+                wgpu::TexelCopyBufferLayout {
+                    offset: 0,
+                    bytes_per_row: Some(4 * dimensions.0),
+                    rows_per_image: Some(dimensions.1),
+                },
+                size,
+            );
+
+            let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+            let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Nearest,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                ..Default::default()
+            });
+
+            Ok(Self {
+                texture,
+                view,
+                sampler,
+            })
+        }
+
+        pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; // 1.
+
+        pub fn create_depth_texture(
+            device: &wgpu::Device,
+            size: &PhysicalSize<u32>,
+            label: &str,
+        ) -> Self {
+            let size = wgpu::Extent3d {
+                // 2.
+                width: size.width.max(1),
+                height: size.height.max(1),
+                depth_or_array_layers: 1,
+            };
+            let desc = wgpu::TextureDescriptor {
+                label: Some(label),
+                size,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: Self::DEPTH_FORMAT,
+                usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
+                    | wgpu::TextureUsages::TEXTURE_BINDING,
+                view_formats: &[],
+            };
+            let texture = device.create_texture(&desc);
+
+            let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+            let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+                // 4.
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Linear,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
+                lod_min_clamp: 0.0,
+                lod_max_clamp: 100.0,
+                ..Default::default()
+            });
+
+            Self {
+                texture,
+                view,
+                sampler,
+            }
+        }
+
+        pub fn create_texture(
+            device: &wgpu::Device,
+            size: &PhysicalSize<u32>,
+            label: &str,
+        ) -> Self {
+            let size = wgpu::Extent3d {
+                // 2.
+                width: size.width.max(1),
+                height: size.height.max(1),
+                depth_or_array_layers: 1,
+            };
+            let desc = wgpu::TextureDescriptor {
+                label: Some(label),
+                size,
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                format: TextureFormat::Bgra8UnormSrgb,
+                usage: wgpu::TextureUsages::RENDER_ATTACHMENT // 3.
+                    | wgpu::TextureUsages::TEXTURE_BINDING,
+                view_formats: &[],
+            };
+            let texture = device.create_texture(&desc);
+
+            let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+            let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+                // 4.
+                address_mode_u: wgpu::AddressMode::ClampToEdge,
+                address_mode_v: wgpu::AddressMode::ClampToEdge,
+                address_mode_w: wgpu::AddressMode::ClampToEdge,
+                mag_filter: wgpu::FilterMode::Linear,
+                min_filter: wgpu::FilterMode::Linear,
+                mipmap_filter: wgpu::FilterMode::Nearest,
+                compare: Some(wgpu::CompareFunction::LessEqual), // 5.
+                lod_min_clamp: 0.0,
+                lod_max_clamp: 100.0,
+                ..Default::default()
+            });
+
+            Self {
+                texture,
+                view,
+                sampler,
+            }
+        }
+    }
+}
```
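The new `State::update()` spins the camera one step around the head each frame: it nudges the forward vector sideways along `right`, renormalizes, and rescales by the previous distance, so the orbit radius never drifts. A small standalone sketch of that step (the `orbit_step` helper is hypothetical, written only to illustrate the math):

```rust
use glam::Vec3;

// Same math as State::update(): push the eye sideways, then rescale so the
// distance to the target stays fixed. `orbit_step` is illustrative only.
fn orbit_step(eye: Vec3, target: Vec3, up: Vec3, speed: f32) -> Vec3 {
    let forward = target - eye;
    let right = forward.normalize().cross(up);
    let radius = forward.length();
    target - (forward + right * speed).normalize() * radius
}

fn main() {
    let mut eye = Vec3::new(0.0, 50.0, 100.0); // matches the camera set up in State::new()
    let target = Vec3::new(0.0, 1.0, 0.0);
    for _ in 0..3 {
        eye = orbit_step(eye, target, Vec3::Y, 1.0);
        // The radius stays at ~111.4 every frame; only the direction changes.
        println!("eye = {eye:?}, radius = {}", (target - eye).length());
    }
}
```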
lightweight_viewer/src/shader.wgsl (+65, new file)

```wgsl
struct CameraUniform {
    view_proj: mat4x4<f32>,
};
@group(0) @binding(0)
var<uniform> camera: CameraUniform;

struct CharShapeUniform {
    color: vec4<f32>
}
@group(1) @binding(0)
var<uniform> char_shape: CharShapeUniform;


struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) tex_coords: vec2<f32>,
    @location(2) normal: vec3<f32>,
}

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) tex_coords: vec2<f32>,
    @location(1) world_normal: vec3<f32>,
    @location(2) world_position: vec3<f32>,
}

@vertex
fn vs_main(
    model: VertexInput,
    // instance: InstanceInput,
) -> VertexOutput {
    var out: VertexOutput;
    out.tex_coords = model.tex_coords;
    out.world_normal = model.normal;
    var world_position: vec4<f32> = vec4<f32>(model.position, 1.0);
    out.world_position = world_position.xyz;
    out.clip_position = camera.view_proj * world_position;
    return out;
}

// Fragment shader

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    return char_shape.color;
    // let object_color: vec4<f32> = textureSample(t_diffuse, s_diffuse, in.tex_coords);

    // // We don't need (or want) much ambient light, so 0.1 is fine
    // let ambient_strength = 0.1;
    // let ambient_color = light.color * ambient_strength;

    // let light_dir = normalize(light.position - in.world_position);
    // let view_dir = normalize(camera.view_pos.xyz - in.world_position);
    // let half_dir = normalize(view_dir + light_dir);

    // let diffuse_strength = max(dot(in.world_normal, light_dir), 0.0);
    // let diffuse_color = light.color * diffuse_strength;

    // let specular_strength = pow(max(dot(in.world_normal, half_dir), 0.0), 32.0);
    // let specular_color = specular_strength * light.color;

    // let result = (ambient_color + diffuse_color + specular_color) * object_color.xyz;

    // return vec4<f32>(result, object_color.a);
}
```
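This shader expects group 0 to carry the camera uniform and group 1 the per-shape color, with vertex locations 0/1/2 matching `Vertex` in `vfl::draw::wgpu_render`. The Rust-side structs already exist in this change (`CameraUniform` in `main.rs`, `CharShapeUniform` in `char.rs`); the sketch below only spells out the byte-layout contract they satisfy: `mat4x4<f32>` is 64 bytes and `vec4<f32>` is 16, so neither needs manual padding.

```rust
// Host-side mirrors of the WGSL uniforms above (sketch; the real definitions
// live in main.rs and char.rs in this change).
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    view_proj: [[f32; 4]; 4], // 64 bytes == mat4x4<f32>
}

#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
struct CharShapeUniform {
    diffuse_color: [f32; 4], // 16 bytes == vec4<f32>
}

// Compile-time checks that the layouts line up with what the shader reads.
const _: () = assert!(std::mem::size_of::<CameraUniform>() == 64);
const _: () = assert!(std::mem::size_of::<CharShapeUniform>() == 16);
```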
vfl/src/charinfo/nx.rs (+2 -2)

```diff
 pub use binrw::{BinRead, NullWideString, binrw};
 
-#[derive(Debug)]
+#[derive(Debug, Copy, Clone)]
 #[binrw]
 #[brw(little)]
 pub struct UuidVer4 {
     idc: [u8; 16],
 }
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 #[binrw]
 #[brw(little, assert(nickname.len() <= 22))]
 pub struct NxCharInfo {
```
vfl/src/draw/shader.wgsl (+1)

```diff
···
 struct VertexOutput {
     @builtin(position) clip_position: vec4<f32>,
     @location(0) tex_coords: vec2<f32>,
+    @location(1) thirdthing: vec4<f32>,
 }
 
 @vertex
```
vfl/src/draw/wgpu_render.rs (+17 -4)

```diff
···
 #[repr(C)]
 #[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
 pub struct Vertex {
-    position: [f32; 3],
-    tex_coords: [f32; 2],
+    pub position: [f32; 3],
+    pub tex_coords: [f32; 2],
+    pub normal: [f32; 3],
 }
 
 impl Vertex {
-    const ATTRIBS: [wgpu::VertexAttribute; 2] =
-        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2];
+    const ATTRIBS: [wgpu::VertexAttribute; 3] =
+        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3];
 
     pub fn desc() -> wgpu::VertexBufferLayout<'static> {
         use std::mem;
···
 pub struct RenderContext {
     pub size: UVec2,
     pub shape: Vec<RenderShape>,
+}
+impl RenderContext {
+    pub fn from_shapes(shape: Vec<RenderShape>) -> RenderContext {
+        RenderContext {
+            size: uvec2(FACE_OUTPUT_SIZE.into(), FACE_OUTPUT_SIZE.into()),
+            shape,
+        }
+    }
 }
 
 impl RenderContext {
···
             Vertex {
                 position: v2(1.0 + base_x, -0.5),
                 tex_coords: [s0, 0.0],
+                normal: [0.0, 0.0, 0.0],
             },
             Vertex {
                 position: v2(1.0 + base_x, 0.5),
                 tex_coords: [s0, 1.0],
+                normal: [0.0, 0.0, 0.0],
             },
             Vertex {
                 position: v2(base_x, 0.5),
                 tex_coords: [s1, 1.0],
+                normal: [0.0, 0.0, 0.0],
             },
             Vertex {
                 position: v2(base_x, -0.5),
                 tex_coords: [s1, 0.0],
+                normal: [0.0, 0.0, 0.0],
             },
         ],
         vec![0, 1, 2, 0, 2, 3],
```
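With `normal` added, `Vertex` is now 8 floats, so the stride grows from 20 to 32 bytes and the new attribute lands at offset 20. Anything that hand-rolls a vertex layout instead of calling `Vertex::desc()` has to agree with this; a long-hand sketch of what the updated `vertex_attr_array!` works out to (for reference only, `Vertex::desc()` remains the source of truth):

```rust
use std::mem;
use wgpu::{BufferAddress, VertexAttribute, VertexBufferLayout, VertexFormat, VertexStepMode};

// position:   [f32; 3] -> offset  0, @location(0), Float32x3
// tex_coords: [f32; 2] -> offset 12, @location(1), Float32x2
// normal:     [f32; 3] -> offset 20, @location(2), Float32x3
fn vertex_layout() -> VertexBufferLayout<'static> {
    const ATTRS: [VertexAttribute; 3] = [
        VertexAttribute { offset: 0, shader_location: 0, format: VertexFormat::Float32x3 },
        VertexAttribute { offset: 12, shader_location: 1, format: VertexFormat::Float32x2 },
        VertexAttribute { offset: 20, shader_location: 2, format: VertexFormat::Float32x3 },
    ];
    VertexBufferLayout {
        array_stride: (8 * mem::size_of::<f32>()) as BufferAddress, // 32 bytes per vertex
        step_mode: VertexStepMode::Vertex,
        attributes: &ATTRS,
    }
}
```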