using System.Diagnostics;
using System.Collections.Generic;
using Unity.IO.LowLevel.Unsafe;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;

namespace UnityEngine.Rendering
{
    public partial class ProbeReferenceVolume
    {
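        // Wraps a batch of AsyncReadManager read commands issued against a single file.
        // The ReadHandle is kept so the request can be polled, waited on or canceled later.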
        internal class DiskStreamingRequest
        {
            ReadHandle m_ReadHandle;
            ReadCommandArray m_ReadCommandArray = new ReadCommandArray();
            NativeArray<ReadCommand> m_ReadCommandBuffer;
            int m_BytesWritten;

            public DiskStreamingRequest(int maxRequestCount)
            {
                m_ReadCommandBuffer = new NativeArray<ReadCommand>(maxRequestCount, Allocator.Persistent);
            }

            public unsafe void AddReadCommand(int offset, int size, byte* dest)
            {
                Debug.Assert(m_ReadCommandArray.CommandCount < m_ReadCommandBuffer.Length);

                m_ReadCommandBuffer[m_ReadCommandArray.CommandCount++] = new ReadCommand()
                {
                    Buffer = dest,
                    Offset = offset,
                    Size = size
                };

                m_BytesWritten += size;
            }

            public unsafe int RunCommands(FileHandle file)
            {
                m_ReadCommandArray.ReadCommands = (ReadCommand*)m_ReadCommandBuffer.GetUnsafePtr();
                m_ReadHandle = AsyncReadManager.Read(file, m_ReadCommandArray);

                return m_BytesWritten;
            }

            public void Clear()
            {
                if (m_ReadHandle.IsValid())
                    m_ReadHandle.JobHandle.Complete();
                m_ReadHandle = default;
                m_ReadCommandArray.CommandCount = 0;
                m_BytesWritten = 0;
            }

            public void Cancel()
            {
                if (m_ReadHandle.IsValid())
                    m_ReadHandle.Cancel();
            }

            public void Wait()
            {
                if (m_ReadHandle.IsValid())
                    m_ReadHandle.JobHandle.Complete();
            }

            public void Dispose()
            {
                m_ReadCommandBuffer.Dispose();
            }

            public ReadStatus GetStatus()
            {
                return m_ReadHandle.IsValid() ? m_ReadHandle.Status : ReadStatus.Complete;
            }
        }

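        // Describes where each type of probe data lives inside a streaming scratch buffer.
        // The struct is mirrored to HLSL as a constant buffer (via the [GenerateHLSL] attribute below)
        // so the data upload shader can locate the sections written by the CPU streaming code.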
        [GenerateHLSL(needAccessors = false, generateCBuffer = true)]
        internal struct CellStreamingScratchBufferLayout
        {
            public int _SharedDestChunksOffset;
            public int _L0L1rxOffset;
            public int _L1GryOffset;
            public int _L1BrzOffset;
            public int _ValidityOffset;
            public int _ProbeOcclusionOffset;
            public int _SkyOcclusionOffset;
            public int _SkyShadingDirectionOffset;
            public int _L2_0Offset;
            public int _L2_1Offset;
            public int _L2_2Offset;
            public int _L2_3Offset;

            public int _L0Size;
            public int _L0ProbeSize; // In bytes
            public int _L1Size;
            public int _L1ProbeSize; // In bytes
            public int _ValiditySize;
            public int _ValidityProbeSize; // In bytes
            public int _ProbeOcclusionSize;
            public int _ProbeOcclusionProbeSize; // In bytes
            public int _SkyOcclusionSize;
            public int _SkyOcclusionProbeSize; // In bytes
            public int _SkyShadingDirectionSize;
            public int _SkyShadingDirectionProbeSize; // In bytes
            public int _L2Size;
            public int _L2ProbeSize; // In bytes

            public int _ProbeCountInChunkLine;
            public int _ProbeCountInChunkSlice;
        }

        internal class CellStreamingScratchBuffer
        {
            public CellStreamingScratchBuffer(int chunkCount, int chunkSize, bool allocateGraphicsBuffers)
            {
                this.chunkCount = chunkCount;

                // With a stride of 4 (one uint)
                // Number of elements for chunk data: chunkCount * chunkSize / 4
                // Number of elements for dest chunk data (Vector4Int): chunkCount * 4
                var bufferSize = chunkCount * chunkSize / 4 + chunkCount * 4;

                // Account for additional padding needed.
                bufferSize += 2 * chunkCount * sizeof(uint);
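                // For example (illustrative numbers only): with chunkCount = 4 and chunkSize = 4096 bytes,
                // bufferSize = 4 * 4096 / 4 + 4 * 4 + 2 * 4 * 4 = 4144 elements, so the staging buffer
                // below ends up at 4144 * sizeof(uint) = 16576 bytes.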

                if (allocateGraphicsBuffers)
                {
                    for (int i = 0; i < 2; i++)
                        m_GraphicsBuffers[i] = new GraphicsBuffer(GraphicsBuffer.Target.Raw, GraphicsBuffer.UsageFlags.LockBufferForWrite, bufferSize, sizeof(uint));
                }

                m_CurrentBuffer = 0;

                stagingBuffer = new NativeArray<byte>(bufferSize * sizeof(uint), Allocator.Persistent);
            }

            public void Swap()
            {
                m_CurrentBuffer = (m_CurrentBuffer + 1) % 2;
            }

            public void Dispose()
            {
                for (int i = 0; i < 2; ++i)
                    m_GraphicsBuffers[i]?.Dispose();
                stagingBuffer.Dispose();
            }

            // The GraphicsBuffer is double buffered because the data upload shader might still be running
            // when we start a new streaming request.
            // We could have double buffered at the CellStreamingScratchBuffer level itself, but it would consume more memory (native + graphics buffer x2).
            public GraphicsBuffer buffer => m_GraphicsBuffers[m_CurrentBuffer];
            public NativeArray<byte> stagingBuffer; // Contains data streamed from disk. To be copied into the graphics buffer.
            public int chunkCount { get; }

            int m_CurrentBuffer;
            GraphicsBuffer[] m_GraphicsBuffers = new GraphicsBuffer[2];
        }

        [DebuggerDisplay("Index = {cell.desc.index} State = {state}")]
        internal class CellStreamingRequest
        {
            public enum State
            {
                Pending,
                Active,
                Canceled,
                Invalid,
                Complete,
            }

            public Cell cell { get; set; }
            public State state { get; set; }
            public CellStreamingScratchBuffer scratchBuffer { get; set; }
            public CellStreamingScratchBufferLayout scratchBufferLayout { get; set; }

            public ProbeVolumeBakingSet.PerScenarioDataInfo scenarioData { get; set; }
            public int poolIndex { get; set; }
            public bool streamSharedData { get; set; }

            public delegate void OnStreamingCompleteDelegate(CellStreamingRequest request, CommandBuffer cmd);
            public OnStreamingCompleteDelegate onStreamingComplete = null;

            public DiskStreamingRequest cellDataStreamingRequest = new DiskStreamingRequest(1);
            public DiskStreamingRequest cellOptionalDataStreamingRequest = new DiskStreamingRequest(1);
            public DiskStreamingRequest cellSharedDataStreamingRequest = new DiskStreamingRequest(1);
            public DiskStreamingRequest cellProbeOcclusionDataStreamingRequest = new DiskStreamingRequest(1);
            public DiskStreamingRequest brickStreamingRequest = new DiskStreamingRequest(1);
            public DiskStreamingRequest supportStreamingRequest = new DiskStreamingRequest(5);

            public int bytesWritten;

            public bool IsStreaming()
            {
                return state == State.Pending || state == State.Active;
            }

            public void Cancel()
            {
                if (state == State.Active)
                {
                    brickStreamingRequest.Cancel();
                    supportStreamingRequest.Cancel();
                    cellDataStreamingRequest.Cancel();
                    cellOptionalDataStreamingRequest.Cancel();
                    cellSharedDataStreamingRequest.Cancel();
                    cellProbeOcclusionDataStreamingRequest.Cancel();
                }

                state = State.Canceled;
            }

            public void WaitAll()
            {
                if (state == State.Active)
                {
                    brickStreamingRequest.Wait();
                    supportStreamingRequest.Wait();
                    cellDataStreamingRequest.Wait();
                    cellOptionalDataStreamingRequest.Wait();
                    cellSharedDataStreamingRequest.Wait();
                    cellProbeOcclusionDataStreamingRequest.Wait();
                }
            }

            public bool UpdateRequestState(DiskStreamingRequest request, ref bool isComplete)
            {
                var status = request.GetStatus();
                if (status == ReadStatus.Failed)
                    return false;

                isComplete &= status == ReadStatus.Complete;
                return true;
            }

            public void UpdateState()
            {
                if (state == State.Active)
                {
                    bool isComplete = true;
                    bool success = UpdateRequestState(brickStreamingRequest, ref isComplete);
                    success &= UpdateRequestState(supportStreamingRequest, ref isComplete);
                    success &= UpdateRequestState(cellDataStreamingRequest, ref isComplete);
                    success &= UpdateRequestState(cellOptionalDataStreamingRequest, ref isComplete);
                    success &= UpdateRequestState(cellSharedDataStreamingRequest, ref isComplete);
                    success &= UpdateRequestState(cellProbeOcclusionDataStreamingRequest, ref isComplete);

                    if (!success)
                    {
                        Cancel(); // At least one of the requests failed. Cancel the others.
                        state = State.Invalid;
                    }
                    else if (isComplete)
                    {
                        state = State.Complete;
                    }
                }
            }

            public void Clear()
            {
                cell = null;
                Reset();
            }

            public void Reset()
            {
                state = State.Pending;
                scratchBuffer = null;
                brickStreamingRequest.Clear();
                supportStreamingRequest.Clear();
                cellDataStreamingRequest.Clear();
                cellOptionalDataStreamingRequest.Clear();
                cellSharedDataStreamingRequest.Clear();
                cellProbeOcclusionDataStreamingRequest.Clear();
                bytesWritten = 0;
            }

            public void Dispose()
            {
                brickStreamingRequest.Dispose();
                supportStreamingRequest.Dispose();
                cellDataStreamingRequest.Dispose();
                cellOptionalDataStreamingRequest.Dispose();
                cellSharedDataStreamingRequest.Dispose();
                cellProbeOcclusionDataStreamingRequest.Dispose();
            }
        }

#if UNITY_EDITOR
        // By default, in the editor we load a lot of cells in one go so users don't have to move
        // the scene view camera around to see results. This value can still be changed via the API.
        bool m_LoadMaxCellsPerFrame = true;
#else
        bool m_LoadMaxCellsPerFrame = false;
#endif

        /// <summary>
        /// Enable streaming as many cells per frame as possible.
        /// </summary>
        /// <param name="value">True to enable streaming as many cells per frame as possible.</param>
        public void EnableMaxCellStreaming(bool value)
        {
            m_LoadMaxCellsPerFrame = value;
        }

        const int kMaxCellLoadedPerFrame = 10;
        int m_NumberOfCellsLoadedPerFrame = 1;

        /// <summary>
        /// Set the number of cells that are loaded per frame when needed. This number is capped at 10.
        /// </summary>
        /// <param name="numberOfCells">Number of cells to be loaded per frame.</param>
        public void SetNumberOfCellsLoadedPerFrame(int numberOfCells)
        {
            m_NumberOfCellsLoadedPerFrame = Mathf.Min(kMaxCellLoadedPerFrame, Mathf.Max(1, numberOfCells));
        }

        /// <summary>Set to true to stream as many cells as possible every frame.</summary>
        public bool loadMaxCellsPerFrame
        {
            get => m_LoadMaxCellsPerFrame;
            set => m_LoadMaxCellsPerFrame = value;
        }

        int numberOfCellsLoadedPerFrame => m_LoadMaxCellsPerFrame ? cells.Count : m_NumberOfCellsLoadedPerFrame;

        int m_NumberOfCellsBlendedPerFrame = 10000;
        /// <summary>Maximum number of cells that are blended per frame.</summary>
        public int numberOfCellsBlendedPerFrame
        {
            get => m_NumberOfCellsBlendedPerFrame;
            set => m_NumberOfCellsBlendedPerFrame = Mathf.Max(1, value);
        }

        float m_TurnoverRate = 0.1f;
        /// <summary>Percentage of cells loaded in the blending pool that can be replaced by out of date cells.</summary>
        public float turnoverRate
        {
            get => m_TurnoverRate;
            set => m_TurnoverRate = Mathf.Clamp01(value);
        }

        DynamicArray<Cell> m_LoadedCells = new(); // List of currently loaded cells.
        DynamicArray<Cell> m_ToBeLoadedCells = new(); // List of currently unloaded cells.
        DynamicArray<Cell> m_WorseLoadedCells = new(); // Reduced list (N cells are processed per frame) of worse loaded cells.
        DynamicArray<Cell> m_BestToBeLoadedCells = new(); // Reduced list (N cells are processed per frame) of best unloaded cells.
        DynamicArray<Cell> m_TempCellToLoadList = new(); // Temp list of cells loaded during this frame.
        DynamicArray<Cell> m_TempCellToUnloadList = new(); // Temp list of cells unloaded during this frame.

        DynamicArray<Cell> m_LoadedBlendingCells = new();
        DynamicArray<Cell> m_ToBeLoadedBlendingCells = new();
        DynamicArray<Cell> m_TempBlendingCellToLoadList = new();
        DynamicArray<Cell> m_TempBlendingCellToUnloadList = new();

        Vector3 m_FrozenCameraPosition;
        Vector3 m_FrozenCameraDirection;

        const float kIndexFragmentationThreshold = 0.2f;
        bool m_IndexDefragmentationInProgress;
        ProbeBrickIndex m_DefragIndex;
        ProbeGlobalIndirection m_DefragCellIndices;
        DynamicArray<Cell> m_IndexDefragCells = new DynamicArray<Cell>();
        DynamicArray<Cell> m_TempIndexDefragCells = new DynamicArray<Cell>();

        internal float minStreamingScore;
        internal float maxStreamingScore;

        // Requests waiting to be run. Needed to preserve order of requests.
        Queue<CellStreamingRequest> m_StreamingQueue = new Queue<CellStreamingRequest>();
        // List of active requests. Needed to query the result every frame.
        List<CellStreamingRequest> m_ActiveStreamingRequests = new List<CellStreamingRequest>();
        ObjectPool<CellStreamingRequest> m_StreamingRequestsPool = new ObjectPool<CellStreamingRequest>(null, (val) => val.Clear());
        bool m_DiskStreamingUseCompute = false;
        ProbeVolumeScratchBufferPool m_ScratchBufferPool;

        CellStreamingRequest.OnStreamingCompleteDelegate m_OnStreamingComplete;
        CellStreamingRequest.OnStreamingCompleteDelegate m_OnBlendingStreamingComplete;

        void InitStreaming()
        {
            m_OnStreamingComplete = OnStreamingComplete;
            m_OnBlendingStreamingComplete = OnBlendingStreamingComplete;
        }

        void CleanupStreaming()
        {
            // Releases all active and pending canceled requests.
            ProcessNewRequests();
            UpdateActiveRequests(null);

            Debug.Assert(m_StreamingQueue.Count == 0);
            Debug.Assert(m_ActiveStreamingRequests.Count == 0);
            Debug.Assert(m_StreamingRequestsPool.countAll == m_StreamingRequestsPool.countInactive); // Everything should have been released.

            for (int i = 0; i < m_StreamingRequestsPool.countAll; ++i)
            {
                var request = m_StreamingRequestsPool.Get();
                request.Dispose();
            }

            if (m_ScratchBufferPool != null)
            {
                m_ScratchBufferPool.Cleanup();
                m_ScratchBufferPool = null;
            }

            m_StreamingRequestsPool = new ObjectPool<CellStreamingRequest>((val) => val.Clear(), null);
            m_ActiveStreamingRequests.Clear();
            m_StreamingQueue.Clear();

            m_OnStreamingComplete = null;
            m_OnBlendingStreamingComplete = null;
        }

        internal void ScenarioBlendingChanged(bool scenarioChanged)
        {
            if (scenarioChanged)
            {
                UnloadAllBlendingCells();
                for (int i = 0; i < m_ToBeLoadedBlendingCells.size; ++i)
                    m_ToBeLoadedBlendingCells[i].blendingInfo.ForceReupload();
            }
        }

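        // Streaming scores are "lower is better": distance to the camera, scaled by a facing factor in [1, 3]
        // (1 when the cell is straight ahead, 3 when it is directly behind). For instance, a cell 50 units away
        // straight ahead scores 50, while the same cell directly behind the camera scores 150 and is deprioritized.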
        static void ComputeCellStreamingScore(Cell cell, Vector3 cameraPosition, Vector3 cameraDirection)
        {
            var cellPosition = cell.desc.position;
            var cameraToCell = (cellPosition - cameraPosition).normalized;
            cell.streamingInfo.streamingScore = Vector3.Distance(cameraPosition, cell.desc.position);
            // This should give more weight to cells in front of the camera.
            cell.streamingInfo.streamingScore *= (2.0f - Vector3.Dot(cameraDirection, cameraToCell));
        }

        void ComputeStreamingScore(Vector3 cameraPosition, Vector3 cameraDirection, DynamicArray<Cell> cells)
        {
            for (int i = 0; i < cells.size; ++i)
            {
                ComputeCellStreamingScore(cells[i], cameraPosition, cameraDirection);
            }
        }

        void ComputeBestToBeLoadedCells(Vector3 cameraPosition, Vector3 cameraDirection)
        {
            m_BestToBeLoadedCells.Clear();
            m_BestToBeLoadedCells.Reserve(m_ToBeLoadedCells.size); // Pre-reserve to avoid Insert allocating every time.

            foreach (var cell in m_ToBeLoadedCells)
            {
                ComputeCellStreamingScore(cell, cameraPosition, cameraDirection);

                // We need to compute min/max streaming scores here since we don't have the full sorted list anymore (which is used in ComputeMinMaxStreamingScore).
                minStreamingScore = Mathf.Min(minStreamingScore, cell.streamingInfo.streamingScore);
                maxStreamingScore = Mathf.Max(maxStreamingScore, cell.streamingInfo.streamingScore);

                int currentBestCellsSize = System.Math.Min(m_BestToBeLoadedCells.size, numberOfCellsLoadedPerFrame);
                int index;
                for (index = 0; index < currentBestCellsSize; ++index)
                {
                    if (cell.streamingInfo.streamingScore < m_BestToBeLoadedCells[index].streamingInfo.streamingScore)
                        break;
                }

                if (index < numberOfCellsLoadedPerFrame)
                    m_BestToBeLoadedCells.Insert(index, cell);

                // Avoids too many copies when inserting new elements.
                if (m_BestToBeLoadedCells.size > numberOfCellsLoadedPerFrame)
                    m_BestToBeLoadedCells.Resize(numberOfCellsLoadedPerFrame);
            }
        }

        void ComputeStreamingScoreAndWorseLoadedCells(Vector3 cameraPosition, Vector3 cameraDirection)
        {
            m_WorseLoadedCells.Clear();
            m_WorseLoadedCells.Reserve(m_LoadedCells.size); // Pre-reserve to avoid Insert allocating every time.

            int requiredSHChunks = 0;
            int requiredIndexChunks = 0;
            foreach (var cell in m_BestToBeLoadedCells)
            {
                requiredSHChunks += cell.desc.shChunkCount;
                requiredIndexChunks += cell.desc.indexChunkCount;
            }

            foreach (var cell in m_LoadedCells)
            {
                ComputeCellStreamingScore(cell, cameraPosition, cameraDirection);

                // We need to compute min/max streaming scores here since we don't have the full sorted list anymore (which is used in ComputeMinMaxStreamingScore).
                minStreamingScore = Mathf.Min(minStreamingScore, cell.streamingInfo.streamingScore);
                maxStreamingScore = Mathf.Max(maxStreamingScore, cell.streamingInfo.streamingScore);

                int currentWorseSize = m_WorseLoadedCells.size;
                int index;
                for (index = 0; index < currentWorseSize; ++index)
                {
                    if (cell.streamingInfo.streamingScore > m_WorseLoadedCells[index].streamingInfo.streamingScore)
                        break;
                }

                m_WorseLoadedCells.Insert(index, cell);

                // Compute the chunk counts of the current worse cells.
                int currentSHChunks = 0;
                int currentIndexChunks = 0;
                int newSize = 0;
                for (int i = 0; i < m_WorseLoadedCells.size; ++i)
                {
                    var worseCell = m_WorseLoadedCells[i];
                    currentSHChunks += worseCell.desc.shChunkCount;
                    currentIndexChunks += worseCell.desc.indexChunkCount;

                    if (currentSHChunks >= requiredSHChunks && currentIndexChunks >= requiredIndexChunks)
                    {
                        newSize = i + 1;
                        break;
                    }
                }

                // Now we resize to keep just enough of the worse cells to make room for the cells we need to load.
                // This keeps insertions cheap.
                if (newSize != 0)
                    m_WorseLoadedCells.Resize(newSize);
            }
        }

        void ComputeBlendingScore(DynamicArray<Cell> cells, float worstScore)
        {
            float factor = scenarioBlendingFactor;
            for (int i = 0; i < cells.size; ++i)
            {
                var cell = cells[i];
                var blendingInfo = cell.blendingInfo;
                if (factor != blendingInfo.blendingFactor)
                {
                    blendingInfo.blendingScore = cell.streamingInfo.streamingScore;
                    if (blendingInfo.ShouldPrioritize())
                        blendingInfo.blendingScore -= worstScore;
                }
            }
        }

        bool TryLoadCell(Cell cell, ref int shBudget, ref int indexBudget, DynamicArray<Cell> loadedCells)
        {
            // Are we within budget?
            if (cell.poolInfo.shChunkCount <= shBudget && cell.indexInfo.indexChunkCount <= indexBudget)
            {
                // This can still fail because of fragmentation.
                if (LoadCell(cell, ignoreErrorLog: true))
                {
                    loadedCells.Add(cell);

                    shBudget -= cell.poolInfo.shChunkCount;
                    indexBudget -= cell.indexInfo.indexChunkCount;
                    return true;
                }
            }
            return false;
        }

        void UnloadBlendingCell(Cell cell, DynamicArray<Cell> unloadedCells)
        {
            UnloadBlendingCell(cell);

            unloadedCells.Add(cell);
        }

        bool TryLoadBlendingCell(Cell cell, DynamicArray<Cell> loadedCells)
        {
            if (!cell.UpdateCellScenarioData(lightingScenario, m_CurrentBakingSet.otherScenario))
                return false;

            if (!AddBlendingBricks(cell))
                return false;

            loadedCells.Add(cell);

            return true;
        }

        void ComputeMinMaxStreamingScore()
        {
            minStreamingScore = float.MaxValue;
            maxStreamingScore = float.MinValue;

            if (m_ToBeLoadedCells.size != 0)
            {
                minStreamingScore = Mathf.Min(minStreamingScore, m_ToBeLoadedCells[0].streamingInfo.streamingScore);
                maxStreamingScore = Mathf.Max(maxStreamingScore, m_ToBeLoadedCells[m_ToBeLoadedCells.size - 1].streamingInfo.streamingScore);
            }

            if (m_LoadedCells.size != 0)
            {
                minStreamingScore = Mathf.Min(minStreamingScore, m_LoadedCells[0].streamingInfo.streamingScore);
                maxStreamingScore = Mathf.Max(maxStreamingScore, m_LoadedCells[m_LoadedCells.size - 1].streamingInfo.streamingScore);
            }
        }

        /// <summary>
        /// Updates the cell streaming for a <see cref="Camera"/>
        /// </summary>
        /// <param name="cmd">The <see cref="CommandBuffer"/></param>
        /// <param name="camera">The <see cref="Camera"/></param>
        public void UpdateCellStreaming(CommandBuffer cmd, Camera camera)
        {
            UpdateCellStreaming(cmd, camera, null);
        }

        /// <summary>
        /// Updates the cell streaming for a <see cref="Camera"/>
        /// </summary>
        /// <param name="cmd">The <see cref="CommandBuffer"/></param>
        /// <param name="camera">The <see cref="Camera"/></param>
        /// <param name="options">Options coming from the volume stack.</param>
        public void UpdateCellStreaming(CommandBuffer cmd, Camera camera, ProbeVolumesOptions options)
        {
            if (!isInitialized || m_CurrentBakingSet == null) return;

            using (new ProfilingScope(ProfilingSampler.Get(CoreProfileId.APVCellStreamingUpdate)))
            {
                var cameraPosition = camera.transform.position;
                if (!probeVolumeDebug.freezeStreaming)
                {
                    m_FrozenCameraPosition = cameraPosition;
                    m_FrozenCameraDirection = camera.transform.forward;
                }

                // Cell position in cell space is the top left corner. So we need to shift the camera position by half a cell to make things comparable.
                var offset = ProbeOffset() + (options != null ? options.worldOffset.value : Vector3.zero);
                var cameraPositionCellSpace = (m_FrozenCameraPosition - offset) / MaxBrickSize() - Vector3.one * 0.5f;

                DynamicArray<Cell> bestUnloadedCells;
                DynamicArray<Cell> worseLoadedCells;

                // When in this mode, we just sort through all loaded/ToBeLoaded cells in order to figure out the worst/best cells to process.
                // This is slow, so it is only recommended in the editor.
                if (m_LoadMaxCellsPerFrame)
                {
                    ComputeStreamingScore(cameraPositionCellSpace, m_FrozenCameraDirection, m_ToBeLoadedCells);
                    m_ToBeLoadedCells.QuickSort();
                    bestUnloadedCells = m_ToBeLoadedCells;
                }
                // Otherwise, when we only process a handful of cells per frame, we'll linearly go through the lists to determine two things:
                // - The list of best cells to load.
                // - The list of worst loaded cells that are candidates for unloading. This list can be bigger than the previous one since cells have different sizes, so we may need to evict more to make room.
                // This allows us to not sort through all the cells every frame, which is very slow. Instead we just output very small lists that we then process.
                else
                {
                    minStreamingScore = float.MaxValue;
                    maxStreamingScore = float.MinValue;

                    ComputeBestToBeLoadedCells(cameraPositionCellSpace, m_FrozenCameraDirection);
                    bestUnloadedCells = m_BestToBeLoadedCells;
                }

                // This is only a rough budget estimate at first.
                // It doesn't account for fragmentation.
                int indexChunkBudget = m_Index.GetRemainingChunkCount();
                int shChunkBudget = m_Pool.GetRemainingChunkCount();
                int cellCountToLoad = Mathf.Min(numberOfCellsLoadedPerFrame, bestUnloadedCells.size);

                bool didRecomputeScoresForLoadedCells = false;
                if (m_SupportGPUStreaming)
                {
                    if (m_IndexDefragmentationInProgress)
                    {
                        UpdateIndexDefragmentation();
                    }
                    else
                    {
                        bool needComputeFragmentation = false;

                        while (m_TempCellToLoadList.size < cellCountToLoad)
                        {
                            // Enough memory, we can safely load the cell.
                            var cellInfo = bestUnloadedCells[m_TempCellToLoadList.size];
                            if (!TryLoadCell(cellInfo, ref shChunkBudget, ref indexChunkBudget, m_TempCellToLoadList))
                                break;
                        }

                        // Budget reached. We need to figure out if we can safely unload other cells to make room.
                        // If defrag was triggered by TryLoadCell we should not try to load further cells either.
                        if (m_TempCellToLoadList.size != cellCountToLoad && !m_IndexDefragmentationInProgress)
                        {
                            // We need to unload cells, so we have to compute the worst loaded cells now (not earlier as it would be useless).
                            if (m_LoadMaxCellsPerFrame)
                            {
                                ComputeStreamingScore(cameraPositionCellSpace, m_FrozenCameraDirection, m_LoadedCells);
                                m_LoadedCells.QuickSort();
                                worseLoadedCells = m_LoadedCells;
                            }
                            else
                            {
                                ComputeStreamingScoreAndWorseLoadedCells(cameraPositionCellSpace, m_FrozenCameraDirection);
                                worseLoadedCells = m_WorseLoadedCells;
                            }
                            didRecomputeScoresForLoadedCells = true;

                            int pendingUnloadCount = 0;
                            while (m_TempCellToLoadList.size < cellCountToLoad)
                            {
                                // No more cells to unload.
                                if (worseLoadedCells.size - pendingUnloadCount == 0)
                                    break;

                                // Lists are sorted in reverse order depending on the mode.
                                // TODO: make the full list be sorted the same way as the partial list.
                                int worseCellIndex = m_LoadMaxCellsPerFrame ? worseLoadedCells.size - pendingUnloadCount - 1 : pendingUnloadCount;
                                var worseLoadedCell = worseLoadedCells[worseCellIndex];
                                var bestUnloadedCell = bestUnloadedCells[m_TempCellToLoadList.size];

                                // We are in a "stable" state, all the closest cells are loaded within the budget.
                                if (worseLoadedCell.streamingInfo.streamingScore <= bestUnloadedCell.streamingInfo.streamingScore)
                                    break;

                                // The worst loaded cell is further than the best unloaded cell, we can unload it.
                                while (pendingUnloadCount < worseLoadedCells.size && worseLoadedCell.streamingInfo.streamingScore > bestUnloadedCell.streamingInfo.streamingScore && (shChunkBudget < bestUnloadedCell.desc.shChunkCount || indexChunkBudget < bestUnloadedCell.desc.indexChunkCount))
                                {
                                    if (probeVolumeDebug.verboseStreamingLog)
                                        LogStreaming($"Unloading cell {worseLoadedCell.desc.index}");

                                    pendingUnloadCount++;
                                    UnloadCell(worseLoadedCell);
                                    shChunkBudget += worseLoadedCell.desc.shChunkCount;
                                    indexChunkBudget += worseLoadedCell.desc.indexChunkCount;

                                    m_TempCellToUnloadList.Add(worseLoadedCell);

                                    worseCellIndex = m_LoadMaxCellsPerFrame ? worseLoadedCells.size - pendingUnloadCount - 1 : pendingUnloadCount;
                                    if (pendingUnloadCount < worseLoadedCells.size)
                                        worseLoadedCell = worseLoadedCells[worseCellIndex];
                                }

                                // We unloaded enough space (not taking fragmentation into account).
                                if (shChunkBudget >= bestUnloadedCell.desc.shChunkCount && indexChunkBudget >= bestUnloadedCell.desc.indexChunkCount)
                                {
                                    if (!TryLoadCell(bestUnloadedCell, ref shChunkBudget, ref indexChunkBudget, m_TempCellToLoadList))
                                    {
                                        needComputeFragmentation = true;
                                        break; // Alloc failed because of fragmentation, stop trying to load cells.
                                    }
                                }
                            }
                        }

                        if (needComputeFragmentation)
                            m_Index.ComputeFragmentationRate();

                        if (m_Index.fragmentationRate >= kIndexFragmentationThreshold)
                            StartIndexDefragmentation();
                    }
                }
                else
                {
                    for (int i = 0; i < cellCountToLoad; ++i)
                    {
                        var cellInfo = m_ToBeLoadedCells[m_TempCellToLoadList.size]; // m_TempCellToLoadList.size gets incremented in TryLoadCell.
                        if (!TryLoadCell(cellInfo, ref shChunkBudget, ref indexChunkBudget, m_TempCellToLoadList))
                        {
                            if (i > 0) // Only warn once
                            {
                                Debug.LogWarning("Max Memory Budget for Adaptive Probe Volumes has been reached, but there is still more data to load. Consider either increasing the Memory Budget, enabling GPU Streaming, or reducing the probe count.");
                            }
                            break;
                        }
                    }
                }

                // If we intend to blend scenarios, compute the streaming scores for the already loaded cells.
                // These will be used to determine which of the loaded cells to perform blending on first.
                // We only need to do this if we didn't already do it above.
                if (!didRecomputeScoresForLoadedCells && supportScenarioBlending)
                {
                    ComputeStreamingScore(cameraPositionCellSpace, m_FrozenCameraDirection, m_LoadedCells);
                }

                if (m_LoadMaxCellsPerFrame)
                    ComputeMinMaxStreamingScore();

                // Update internal loaded/toBeLoaded lists.

                // Move the successfully loaded cells to the "loaded cells" list.
                foreach (var cell in m_TempCellToLoadList)
                    m_ToBeLoadedCells.Remove(cell);
                m_LoadedCells.AddRange(m_TempCellToLoadList);
                // Move the unloaded cells to the list of cells to be loaded.
                if (m_TempCellToUnloadList.size > 0)
                {
                    foreach (var cell in m_TempCellToUnloadList)
                        m_LoadedCells.Remove(cell);

                    ComputeCellGlobalInfo();
                }
                m_ToBeLoadedCells.AddRange(m_TempCellToUnloadList);
                // Clear temp lists.
                m_TempCellToLoadList.Clear();
                m_TempCellToUnloadList.Clear();

                UpdateDiskStreaming(cmd);
            }

            // Handle cell streaming for blending.
            if (supportScenarioBlending)
            {
                using (new ProfilingScope(cmd, ProfilingSampler.Get(CoreProfileId.APVScenarioBlendingUpdate)))
                    UpdateBlendingCellStreaming(cmd);
            }
        }

        int FindWorstBlendingCellToBeLoaded()
        {
            int idx = -1;
            float worstBlending = -1;
            float factor = scenarioBlendingFactor;
            for (int i = m_TempBlendingCellToLoadList.size; i < m_ToBeLoadedBlendingCells.size; ++i)
            {
                float score = Mathf.Abs(m_ToBeLoadedBlendingCells[i].blendingInfo.blendingFactor - factor);
                if (score > worstBlending)
                {
                    idx = i;
                    if (m_ToBeLoadedBlendingCells[i].blendingInfo.ShouldReupload()) // We won't find worse than this.
                        break;
                    worstBlending = score;
                }
            }
            return idx;
        }

        static int BlendingComparer(Cell a, Cell b)
        {
            if (a.blendingInfo.blendingScore < b.blendingInfo.blendingScore)
                return -1;
            else if (a.blendingInfo.blendingScore > b.blendingInfo.blendingScore)
                return 1;
            else
                return 0;
        }

        static DynamicArray<Cell>.SortComparer s_BlendingComparer = BlendingComparer;

        void UpdateBlendingCellStreaming(CommandBuffer cmd)
        {
            // Compute the worst score to offset the score of cells to prioritize.
            float worstLoaded = m_LoadedCells.size != 0 ? m_LoadedCells[m_LoadedCells.size - 1].streamingInfo.streamingScore : 0.0f;
            float worstToBeLoaded = m_ToBeLoadedCells.size != 0 ? m_ToBeLoadedCells[m_ToBeLoadedCells.size - 1].streamingInfo.streamingScore : 0.0f;
            float worstScore = Mathf.Max(worstLoaded, worstToBeLoaded);

            ComputeBlendingScore(m_ToBeLoadedBlendingCells, worstScore);
            ComputeBlendingScore(m_LoadedBlendingCells, worstScore);

            m_ToBeLoadedBlendingCells.QuickSort(s_BlendingComparer);
            m_LoadedBlendingCells.QuickSort(s_BlendingComparer);

            int cellCountToLoad = Mathf.Min(numberOfCellsLoadedPerFrame, m_ToBeLoadedBlendingCells.size);
            while (m_TempBlendingCellToLoadList.size < cellCountToLoad)
            {
                var blendingCell = m_ToBeLoadedBlendingCells[m_TempBlendingCellToLoadList.size];
                if (!TryLoadBlendingCell(blendingCell, m_TempBlendingCellToLoadList))
                    break;
            }

            // Budget reached.
            if (m_TempBlendingCellToLoadList.size != cellCountToLoad)
            {
                // Turnover allows a percentage of the pool to be replaced by cells with a lower streaming score
                // once the system is in a stable state. This ensures all cells get updated regularly.
                int turnoverOffset = -1;
                int idx = (int)(m_LoadedBlendingCells.size * (1.0f - turnoverRate));
                var worstNoTurnover = idx < m_LoadedBlendingCells.size ? m_LoadedBlendingCells[idx] : null;

                while (m_TempBlendingCellToLoadList.size < cellCountToLoad)
                {
                    if (m_LoadedBlendingCells.size - m_TempBlendingCellToUnloadList.size == 0) // We unloaded everything.
                        break;

                    var worstCellLoaded = m_LoadedBlendingCells[m_LoadedBlendingCells.size - m_TempBlendingCellToUnloadList.size - 1];
                    var bestCellToBeLoaded = m_ToBeLoadedBlendingCells[m_TempBlendingCellToLoadList.size];

                    // The best cell to be loaded has a WORSE score than the worst cell already loaded.
                    // This means all cells waiting to be loaded are worse than the ones we already have - we are in a "stable" state.
                    if (bestCellToBeLoaded.blendingInfo.blendingScore >= (worstNoTurnover ?? worstCellLoaded).blendingInfo.blendingScore)
                    {
                        if (worstNoTurnover == null) // Disable turnover.
                            break;

                        // Find the worst cell and assume contiguous cells have roughly the same blending factor
                        // (contiguous cells are spatially close by, so it's good anyway to update them together).
                        if (turnoverOffset == -1)
                            turnoverOffset = FindWorstBlendingCellToBeLoaded();

                        bestCellToBeLoaded = m_ToBeLoadedBlendingCells[turnoverOffset];
                        if (bestCellToBeLoaded.blendingInfo.IsUpToDate()) // Every single cell is blended :)
                            break;
                    }

                    // If we encounter a cell that is still being streamed in (and thus hasn't had a chance to be blended yet), bail out:
                    // we don't want to keep unloading cells before they get blended, or we will never get any work done.
                    // This branch is only ever true when disk streaming is being used.
                    if (worstCellLoaded.streamingInfo.IsBlendingStreaming())
                        break;

                    UnloadBlendingCell(worstCellLoaded, m_TempBlendingCellToUnloadList);

                    if (probeVolumeDebug.verboseStreamingLog)
                        LogStreaming($"Unloading blending cell {worstCellLoaded.desc.index}");

                    bool loadOk = TryLoadBlendingCell(bestCellToBeLoaded, m_TempBlendingCellToLoadList);

                    // Handle turnover. Loading can still fail because cells don't all have the same chunk count.
                    if (loadOk && turnoverOffset != -1)
                    {
                        // Swap to ensure loaded cells are at the start of m_ToBeLoadedBlendingCells.
                        m_ToBeLoadedBlendingCells[turnoverOffset] = m_ToBeLoadedBlendingCells[m_TempBlendingCellToLoadList.size - 1];
                        m_ToBeLoadedBlendingCells[m_TempBlendingCellToLoadList.size - 1] = bestCellToBeLoaded;
                        if (++turnoverOffset >= m_ToBeLoadedBlendingCells.size)
                            turnoverOffset = m_TempBlendingCellToLoadList.size;
                    }
                }

                m_LoadedBlendingCells.RemoveRange(m_LoadedBlendingCells.size - m_TempBlendingCellToUnloadList.size, m_TempBlendingCellToUnloadList.size);
            }

            m_ToBeLoadedBlendingCells.RemoveRange(0, m_TempBlendingCellToLoadList.size);
            m_LoadedBlendingCells.AddRange(m_TempBlendingCellToLoadList);
            m_TempBlendingCellToLoadList.Clear();
            m_ToBeLoadedBlendingCells.AddRange(m_TempBlendingCellToUnloadList);
            m_TempBlendingCellToUnloadList.Clear();

            // Kick off blending.
            if (m_LoadedBlendingCells.size != 0)
            {
                float factor = scenarioBlendingFactor;

                int loadedBlendingCellIndex = 0;
                int blendedCellCount = 0;
                while (blendedCellCount < numberOfCellsBlendedPerFrame && loadedBlendingCellIndex < m_LoadedBlendingCells.size)
                {
                    var blendingCell = m_LoadedBlendingCells[loadedBlendingCellIndex++];
                    if (!blendingCell.streamingInfo.IsBlendingStreaming() && !blendingCell.blendingInfo.IsUpToDate())
                    {
                        if (probeVolumeDebug.verboseStreamingLog)
                            LogStreaming($"Blending cell {blendingCell.desc.index} ({factor})");

                        blendingCell.blendingInfo.blendingFactor = factor;
                        blendingCell.blendingInfo.MarkUpToDate();
                        m_BlendingPool.BlendChunks(blendingCell, m_Pool);
                        blendedCellCount++;
                    }
                }

                m_BlendingPool.PerformBlending(cmd, factor, m_Pool);
            }
        }

        static int DefragComparer(Cell a, Cell b)
        {
            if (a.indexInfo.updateInfo.GetNumberOfChunks() > b.indexInfo.updateInfo.GetNumberOfChunks())
                return 1;
            else if (a.indexInfo.updateInfo.GetNumberOfChunks() < b.indexInfo.updateInfo.GetNumberOfChunks())
                return -1;
            else return 0;
        }

        static DynamicArray<Cell>.SortComparer s_DefragComparer = DefragComparer;

        void StartIndexDefragmentation()
        {
            // We can end up here during baking (dilation) when trying to load all cells even without supporting GPU streaming.
            if (!m_SupportGPUStreaming)
                return;

            m_IndexDefragmentationInProgress = true;

            // Prepare the list of cells.
            // We want to relocate cells with more indices first.
            m_IndexDefragCells.Clear();
            m_IndexDefragCells.AddRange(m_LoadedCells);
            m_IndexDefragCells.QuickSort(s_DefragComparer);

            m_DefragIndex.Clear();
        }

        void UpdateIndexDefragmentation()
        {
            using (new ProfilingScope(ProfilingSampler.Get(CoreProfileId.APVIndexDefragUpdate)))
            {
                m_TempIndexDefragCells.Clear();

                int numberOfCellsToProcess = Mathf.Min(m_IndexDefragCells.size, numberOfCellsLoadedPerFrame);
                int i = 0;
                int processedCells = 0;
                while (i < m_IndexDefragCells.size && processedCells < numberOfCellsToProcess)
                {
                    var cell = m_IndexDefragCells[m_IndexDefragCells.size - i - 1];

                    m_DefragIndex.FindSlotsForEntries(ref cell.indexInfo.updateInfo.entriesInfo);
                    m_DefragIndex.ReserveChunks(cell.indexInfo.updateInfo.entriesInfo, false);

                    // Index of cells being streamed is not up to date yet so we can't defrag this cell.
                    if (!(cell.streamingInfo.IsStreaming() || cell.streamingInfo.IsBlendingStreaming()))
                    {
                        // Update index and indirection
                        m_DefragIndex.AddBricks(cell.indexInfo, cell.data.bricks, cell.poolInfo.chunkList, ProbeBrickPool.GetChunkSizeInBrickCount(), m_Pool.GetPoolWidth(), m_Pool.GetPoolHeight());
                        m_DefragCellIndices.UpdateCell(cell.indexInfo);
                        processedCells++;
                    }
                    else
                    {
                        m_TempIndexDefragCells.Add(cell);
                    }

                    i++;
                }

                // Remove processed cells from the list.
                // For faster removal, just resize by removing all processed cells and add back those that were streaming.
                m_IndexDefragCells.Resize(m_IndexDefragCells.size - i);
                m_IndexDefragCells.AddRange(m_TempIndexDefragCells);

                if (m_IndexDefragCells.size == 0)
                {
                    // Swap index buffers
                    var oldDefragIndex = m_DefragIndex;
                    m_DefragIndex = m_Index;
                    m_Index = oldDefragIndex;

                    var oldDefragCellIndices = m_DefragCellIndices;
                    m_DefragCellIndices = m_CellIndices;
                    m_CellIndices = oldDefragCellIndices;

                    // Resume streaming
                    m_IndexDefragmentationInProgress = false;
                }
            }
        }

        void OnStreamingComplete(CellStreamingRequest request, CommandBuffer cmd)
        {
            request.cell.streamingInfo.request = null;
            UpdatePoolAndIndex(request.cell, request.scratchBuffer, request.scratchBufferLayout, request.poolIndex, cmd);
        }

        void OnBlendingStreamingComplete(CellStreamingRequest request, CommandBuffer cmd)
        {
            UpdatePool(cmd, request.cell.blendingInfo.chunkList, request.scratchBuffer, request.scratchBufferLayout, request.poolIndex);

            if (request.poolIndex == 0)
                request.cell.streamingInfo.blendingRequest0 = null;
            else
                request.cell.streamingInfo.blendingRequest1 = null;

            // Streaming of both scenarios is complete, we can update the index and start blending.
            if (request.cell.streamingInfo.blendingRequest0 == null && request.cell.streamingInfo.blendingRequest1 == null && !request.cell.indexInfo.indexUpdated)
                UpdateCellIndex(request.cell);
        }

        void PushDiskStreamingRequest(Cell cell, string scenario, int poolIndex, CellStreamingRequest.OnStreamingCompleteDelegate onStreamingComplete)
        {
            var streamingRequest = m_StreamingRequestsPool.Get();
            streamingRequest.cell = cell;
            streamingRequest.state = CellStreamingRequest.State.Pending;
            streamingRequest.scenarioData = m_CurrentBakingSet.scenarios[scenario];
            streamingRequest.poolIndex = poolIndex;
            streamingRequest.onStreamingComplete = onStreamingComplete;

            // Only stream shared data for a regular streaming request (index -1: no blending)
            // or the first scenario of the two blending scenarios (index 0).
            if (poolIndex == -1 || poolIndex == 0)
                streamingRequest.streamSharedData = true;

            if (probeVolumeDebug.verboseStreamingLog)
            {
                if (poolIndex == -1)
                    LogStreaming($"Push streaming request for cell {cell.desc.index}.");
                else
                    LogStreaming($"Push streaming request for blending cell {cell.desc.index}.");
            }

            switch (poolIndex)
            {
                case -1:
                    cell.streamingInfo.request = streamingRequest;
                    break;
                case 0:
                    cell.streamingInfo.blendingRequest0 = streamingRequest;
                    break;
                case 1:
                    cell.streamingInfo.blendingRequest1 = streamingRequest;
                    break;
            }

            // Enqueue request.
            m_StreamingQueue.Enqueue(streamingRequest);
        }

        void CancelStreamingRequest(Cell cell)
        {
            m_Index.RemoveBricks(cell.indexInfo);
            m_Pool.Deallocate(cell.poolInfo.chunkList);

            if (cell.streamingInfo.request != null)
                cell.streamingInfo.request.Cancel();
        }

        void CancelBlendingStreamingRequest(Cell cell)
        {
            if (cell.streamingInfo.blendingRequest0 != null)
                cell.streamingInfo.blendingRequest0.Cancel();
            if (cell.streamingInfo.blendingRequest1 != null)
                cell.streamingInfo.blendingRequest1.Cancel();
        }

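        // Kicks off all the disk reads for one request. The staging buffer is filled in this order:
        // destination chunk coordinates for SH data, destination chunk coordinates for shared data,
        // then the L0/L1 data, optionally the shared data, L2 data and probe occlusion data.
        // Bricks and support data are read into their own NativeArrays rather than the staging buffer.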
        unsafe bool ProcessDiskStreamingRequest(CellStreamingRequest request)
        {
            var cellIndex = request.cell.desc.index;
            var cell = cells[cellIndex];
            var cellDesc = cell.desc;
            var cellData = cell.data;

            if (!m_ScratchBufferPool.AllocateScratchBuffer(cellDesc.shChunkCount, out var cellStreamingScratchBuffer, out var layout, m_DiskStreamingUseCompute))
                return false;

            if (!m_CurrentBakingSet.HasValidSharedData())
            {
                Debug.LogError($"One or more data file missing for baking set {m_CurrentBakingSet.name}. Cannot load shared data.");
                return false;
            }

            if (!request.scenarioData.HasValidData(m_SHBands))
            {
                Debug.LogError($"One or more data file missing for baking set {m_CurrentBakingSet.name} scenario {lightingScenario}. Cannot load scenario data.");
                return false;
            }

            if (probeVolumeDebug.verboseStreamingLog)
            {
                if (request.poolIndex == -1)
                    LogStreaming($"Running disk streaming request for cell {cellDesc.index} ({cellDesc.shChunkCount} chunks)");
                else
                    LogStreaming($"Running disk streaming request for cell {cellDesc.index} ({cellDesc.shChunkCount} chunks) for scenario {request.poolIndex}");
            }

            // Note: We allocate new NativeArrays here.
            // This will not generate GCAlloc since NativeArrays are value types but it will allocate on the native side.
            // This is probably ok as the frequency should be pretty low but we need to keep an eye on this.

            // GPU Data
            request.scratchBuffer = cellStreamingScratchBuffer;
            request.scratchBufferLayout = layout;
            request.bytesWritten = 0;

            var mappedBuffer = request.scratchBuffer.stagingBuffer;

            var mappedBufferBaseAddr = (byte*)mappedBuffer.GetUnsafePtr();
            var mappedBufferAddr = mappedBufferBaseAddr;

            // Write destination chunk coordinates for SH data
            var destChunkAddr = (uint*)mappedBufferAddr;
            // Pool -1 is regular pool and 0/1 are blending pools.
            var destChunks = request.poolIndex == -1 ? request.cell.poolInfo.chunkList : request.cell.blendingInfo.chunkList;
            var destChunkCount = destChunks.Count;
            for (int i = 0; i < destChunkCount; ++i)
            {
                var destChunk = destChunks[i];
                destChunkAddr[i * 4] = (uint)destChunk.x;
                destChunkAddr[i * 4 + 1] = (uint)destChunk.y;
                destChunkAddr[i * 4 + 2] = (uint)destChunk.z;
                destChunkAddr[i * 4 + 3] = 0;
            }
            mappedBufferAddr += (destChunkCount * sizeof(uint) * 4);

            // Write destination chunk coordinates for Shared data (always in main pool)
            destChunkAddr = (uint*)mappedBufferAddr;
            destChunks = request.cell.poolInfo.chunkList;
            Debug.Assert(destChunks.Count == destChunkCount);
            for (int i = 0; i < destChunkCount; ++i)
            {
                var destChunk = destChunks[i];
                destChunkAddr[i * 4] = (uint)destChunk.x;
                destChunkAddr[i * 4 + 1] = (uint)destChunk.y;
                destChunkAddr[i * 4 + 2] = (uint)destChunk.z;
                destChunkAddr[i * 4 + 3] = 0;
            }
            mappedBufferAddr += (destChunkCount * sizeof(uint) * 4);

            var shL0L1DataAsset = request.scenarioData.cellDataAsset;
            var cellStreamingDesc = shL0L1DataAsset.streamableCellDescs[cellIndex];
            var chunkCount = cellDesc.shChunkCount;
            var L0L1Size = m_CurrentBakingSet.L0ChunkSize * chunkCount;
            var L1Size = m_CurrentBakingSet.L1ChunkSize * chunkCount;

            var L0L1ReadSize = L0L1Size + 2 * L1Size;
            request.cellDataStreamingRequest.AddReadCommand(cellStreamingDesc.offset, L0L1ReadSize, mappedBufferAddr);
            mappedBufferAddr += L0L1ReadSize;
            request.bytesWritten += request.cellDataStreamingRequest.RunCommands(shL0L1DataAsset.OpenFile());

            if (request.streamSharedData)
            {
                var sharedDataAsset = m_CurrentBakingSet.cellSharedDataAsset;
                cellStreamingDesc = sharedDataAsset.streamableCellDescs[cellIndex];
                var sharedChunkSize = m_CurrentBakingSet.sharedDataChunkSize;

                request.cellSharedDataStreamingRequest.AddReadCommand(cellStreamingDesc.offset, sharedChunkSize * chunkCount, mappedBufferAddr);
                mappedBufferAddr += (sharedChunkSize * chunkCount);
                request.bytesWritten += request.cellSharedDataStreamingRequest.RunCommands(sharedDataAsset.OpenFile());
            }

            if (m_SHBands == ProbeVolumeSHBands.SphericalHarmonicsL2)
            {
                var optionalDataAsset = request.scenarioData.cellOptionalDataAsset;
                cellStreamingDesc = optionalDataAsset.streamableCellDescs[cellIndex];
                var L2ReadSize = m_CurrentBakingSet.L2TextureChunkSize * chunkCount * 4; // 4 textures
                request.cellOptionalDataStreamingRequest.AddReadCommand(cellStreamingDesc.offset, L2ReadSize, mappedBufferAddr);
                mappedBufferAddr += L2ReadSize;
                request.bytesWritten += request.cellOptionalDataStreamingRequest.RunCommands(optionalDataAsset.OpenFile());
            }

            if (m_CurrentBakingSet.bakedProbeOcclusion)
            {
                var probeOcclusionDataAsset = request.scenarioData.cellProbeOcclusionDataAsset;
                cellStreamingDesc = probeOcclusionDataAsset.streamableCellDescs[cellIndex];
                var probeOcclusionReadSize = m_CurrentBakingSet.ProbeOcclusionChunkSize * chunkCount;
                request.cellProbeOcclusionDataStreamingRequest.AddReadCommand(cellStreamingDesc.offset, probeOcclusionReadSize, mappedBufferAddr);
                mappedBufferAddr += probeOcclusionReadSize;
                request.bytesWritten += request.cellProbeOcclusionDataStreamingRequest.RunCommands(probeOcclusionDataAsset.OpenFile());
            }

            // Bricks Data
            cellData.bricks = new NativeArray<ProbeBrickIndex.Brick>(cellDesc.bricksCount, Allocator.Persistent, NativeArrayOptions.UninitializedMemory);

            var brickDataAsset = m_CurrentBakingSet.cellBricksDataAsset;
            cellStreamingDesc = brickDataAsset.streamableCellDescs[cellIndex];
            request.brickStreamingRequest.AddReadCommand(cellStreamingDesc.offset, brickDataAsset.elementSize * cellStreamingDesc.elementCount, (byte*)cellData.bricks.GetUnsafePtr());
            request.brickStreamingRequest.RunCommands(brickDataAsset.OpenFile());

            // Support Data
            if (m_CurrentBakingSet.HasSupportData())
            {
                var supportDataAsset = m_CurrentBakingSet.cellSupportDataAsset;
                cellStreamingDesc = supportDataAsset.streamableCellDescs[cellIndex];

                var supportOffset = cellStreamingDesc.offset;
                var positionSize = cellStreamingDesc.elementCount * m_CurrentBakingSet.supportPositionChunkSize;
                var touchupSize = cellStreamingDesc.elementCount * m_CurrentBakingSet.supportTouchupChunkSize;
                var offsetsSize = cellStreamingDesc.elementCount * m_CurrentBakingSet.supportOffsetsChunkSize;
                var layerSize = cellStreamingDesc.elementCount * m_CurrentBakingSet.supportLayerMaskChunkSize;
                var validitySize = cellStreamingDesc.elementCount * m_CurrentBakingSet.supportValidityChunkSize;

                cellData.probePositions = (new NativeArray<byte>(positionSize, Allocator.Persistent, NativeArrayOptions.UninitializedMemory)).Reinterpret<Vector3>(1);
                cellData.validity = (new NativeArray<byte>(validitySize, Allocator.Persistent, NativeArrayOptions.UninitializedMemory)).Reinterpret<float>(1);
                cellData.layer = (new NativeArray<byte>(layerSize, Allocator.Persistent, NativeArrayOptions.UninitializedMemory)).Reinterpret<byte>(1);
                cellData.touchupVolumeInteraction = (new NativeArray<byte>(touchupSize, Allocator.Persistent, NativeArrayOptions.UninitializedMemory)).Reinterpret<float>(1);
                cellData.offsetVectors = (new NativeArray<byte>(offsetsSize, Allocator.Persistent, NativeArrayOptions.UninitializedMemory)).Reinterpret<Vector3>(1);

                request.supportStreamingRequest.AddReadCommand(supportOffset, positionSize, (byte*)cellData.probePositions.GetUnsafePtr()); supportOffset += positionSize;
                request.supportStreamingRequest.AddReadCommand(supportOffset, validitySize, (byte*)cellData.validity.GetUnsafePtr()); supportOffset += validitySize;
                request.supportStreamingRequest.AddReadCommand(supportOffset, touchupSize, (byte*)cellData.touchupVolumeInteraction.GetUnsafePtr()); supportOffset += touchupSize;
                request.supportStreamingRequest.AddReadCommand(supportOffset, layerSize, (byte*)cellData.layer.GetUnsafePtr()); supportOffset += layerSize;
                request.supportStreamingRequest.AddReadCommand(supportOffset, offsetsSize, (byte*)cellData.offsetVectors.GetUnsafePtr());
                request.supportStreamingRequest.RunCommands(supportDataAsset.OpenFile());
            }

            request.state = CellStreamingRequest.State.Active;
            m_ActiveStreamingRequests.Add(request);

            return true;
        }

        void AllocateScratchBufferPoolIfNeeded()
        {
            if (m_SupportDiskStreaming)
            {
                int shChunkSize = m_CurrentBakingSet.GetChunkGPUMemory(m_SHBands);
                int maxSHChunkCount = m_CurrentBakingSet.maxSHChunkCount;

                Debug.Assert(shChunkSize % 4 == 0);

                // Recreate if chunk size or max count is different.
                if (m_ScratchBufferPool == null || m_ScratchBufferPool.chunkSize != shChunkSize || m_ScratchBufferPool.maxChunkCount != maxSHChunkCount)
                {
                    if (probeVolumeDebug.verboseStreamingLog)
                        LogStreaming($"Allocating new Scratch Buffer Pool. Chunk size: {shChunkSize}, max SH Chunks: {maxSHChunkCount}");

                    if (m_ScratchBufferPool != null)
                        m_ScratchBufferPool.Cleanup();

                    m_ScratchBufferPool = new ProbeVolumeScratchBufferPool(m_CurrentBakingSet, m_SHBands);
                }
            }
        }

        void UpdateActiveRequests(CommandBuffer cmd)
        {
            if (m_ActiveStreamingRequests.Count > 0)
            {
                for (int i = m_ActiveStreamingRequests.Count - 1; i >= 0; --i)
                {
                    var request = m_ActiveStreamingRequests[i];
                    // Can't String.Format in an assert message without generating garbage :/
                    //Debug.Assert(request.state != CellStreamingRequest.State.Pending, $"Wrong status for request {request.cell.desc.index}: {request.state}");
                    Debug.Assert(request.state != CellStreamingRequest.State.Pending, "Wrong status for request");

                    bool releaseRequest = false;

                    if (request.state == CellStreamingRequest.State.Canceled)
                    {
                        if (probeVolumeDebug.verboseStreamingLog)
                            LogStreaming($"Discarding active request for cell {request.cell.desc.index}");

                        m_ScratchBufferPool.ReleaseScratchBuffer(request.scratchBuffer);
                        releaseRequest = true;
                    }
                    else
                    {
                        request.UpdateState();

                        if (request.state == CellStreamingRequest.State.Complete)
                        {
                            Debug.Assert(cmd != null); // We should not get here during cleanup.

                            if (probeVolumeDebug.verboseStreamingLog)
                            {
                                if (request.poolIndex == -1)
                                    LogStreaming($"Completed disk streaming request for cell {request.cell.desc.index}");
                                else
                                    LogStreaming($"Completed disk streaming request for blending cell {request.cell.desc.index} for scenario {request.poolIndex}");
                            }

                            // Because of limitations of the low level device implementation of Lock/Unlock on Graphics Buffers
                            // (the fact that locking over multiple frames isn't really supported),
                            // we need to go through a temporary buffer and copy into the GraphicsBuffer when streaming is done.
                            // This can be a first step to later on, use compressed data on disk to lighten the I/O load and decompress
                            // directly in the graphics buffer.
                            if (request.scratchBuffer.buffer != null)
                            {
                                var mappedBuffer = request.scratchBuffer.buffer.LockBufferForWrite<byte>(0, request.scratchBuffer.stagingBuffer.Length);
                                mappedBuffer.CopyFrom(request.scratchBuffer.stagingBuffer);
                                request.scratchBuffer.buffer.UnlockBufferAfterWrite<byte>(request.scratchBuffer.stagingBuffer.Length);
                            }
                            request.onStreamingComplete(request, cmd);

                            // We can release here because the GraphicsBuffer inside the scratchBuffer is double buffered.
                            // So a new request on the next frame won't overlap.
                            m_ScratchBufferPool.ReleaseScratchBuffer(request.scratchBuffer);
                            releaseRequest = true;
                        }
                        else if (request.state == CellStreamingRequest.State.Invalid)
                        {
                            if (probeVolumeDebug.verboseStreamingLog)
                                LogStreaming($"Resetting invalid request for cell {request.cell.desc.index}");

                            // If invalid, try to run it again.
                            m_ScratchBufferPool.ReleaseScratchBuffer(request.scratchBuffer);
                            request.Reset();
                            m_ActiveStreamingRequests.RemoveAt(i);
                            m_StreamingQueue.Enqueue(request);
                        }
                    }

                    if (releaseRequest)
                    {
                        m_ActiveStreamingRequests.RemoveAt(i);
                        m_StreamingRequestsPool.Release(request);
                    }
                }
            }
        }

        unsafe void ProcessNewRequests()
        {
            while (m_StreamingQueue.TryPeek(out var request))
            {
                if (request.state == CellStreamingRequest.State.Canceled)
                {
                    if (probeVolumeDebug.verboseStreamingLog)
                    {
                        if (request.poolIndex == -1)
                            LogStreaming($"Discarding request for cell {request.cell.desc.index}");
                        else
                            LogStreaming($"Discarding request for blending cell {request.cell.desc.index} for scenario {request.poolIndex}");
                    }

                    Debug.Assert(request.scratchBuffer == null);
                    m_StreamingRequestsPool.Release(request);
                    m_StreamingQueue.Dequeue(); // Discard request.
                }
                else
                {
                    Debug.Assert(request.state == CellStreamingRequest.State.Pending);
                    Debug.Assert(request.cell.data != null); // Need data for bricks and support data.

                    if (ProcessDiskStreamingRequest(request))
                    {
                        m_StreamingQueue.Dequeue();
                    }
                    else
                    {
                        // No available scratch buffer for this request.
                        // Since we want to conserve order in the queue, we don't process any more requests this frame.
                        break;
                    }
                }
            }
        }

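        // Per-frame driver for disk streaming: allocates the scratch buffer pool if needed, turns pending
        // requests into AsyncReadManager reads (ProcessNewRequests), then polls active requests and uploads
        // completed data to the GPU (UpdateActiveRequests). Data files are closed once nothing is in flight.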
        void UpdateDiskStreaming(CommandBuffer cmd)
        {
            if (!diskStreamingEnabled)
                return;

            using (new ProfilingScope(ProfilingSampler.Get(CoreProfileId.APVDiskStreamingUpdate)))
            {
                AllocateScratchBufferPoolIfNeeded();
                ProcessNewRequests();
                UpdateActiveRequests(cmd);

                // Close file handles if not needed anymore.
                // Checking cellBricksDataAsset here just to know if any of the files is open. If one is open, all of them should be.
                if (m_ActiveStreamingRequests.Count == 0 && m_StreamingQueue.Count == 0 && m_CurrentBakingSet.cellBricksDataAsset != null && m_CurrentBakingSet.cellBricksDataAsset.IsOpen())
                {
                    if (probeVolumeDebug.verboseStreamingLog)
                        LogStreaming("Closing files open for APV disk streaming.");

                    m_CurrentBakingSet.cellBricksDataAsset.CloseFile();
                    m_CurrentBakingSet.cellSupportDataAsset.CloseFile();
                    m_CurrentBakingSet.cellSharedDataAsset.CloseFile();

                    if (m_CurrentBakingSet.scenarios.TryGetValue(lightingScenario, out var scenarioData))
                    {
                        scenarioData.cellDataAsset.CloseFile();
                        scenarioData.cellOptionalDataAsset.CloseFile();
                        scenarioData.cellProbeOcclusionDataAsset.CloseFile();
                    }

                    if (!string.IsNullOrEmpty(otherScenario) && m_CurrentBakingSet.scenarios.TryGetValue(otherScenario, out var otherScenarioData))
                    {
                        otherScenarioData.cellDataAsset.CloseFile();
                        otherScenarioData.cellOptionalDataAsset.CloseFile();
                        otherScenarioData.cellProbeOcclusionDataAsset.CloseFile();
                    }
                }
            }

            // Debug flag to force unload/reload of cells to be able to debug streaming shader code.
            if (probeVolumeDebug.debugStreaming)
            {
                if (m_ToBeLoadedCells.size == 0 && m_ActiveStreamingRequests.Count == 0)
                    UnloadAllCells();
            }
        }

        [Conditional("UNITY_EDITOR")]
        [Conditional("DEVELOPMENT_BUILD")]
        void LogStreaming(string log)
        {
            Debug.Log(log);
        }
    }
}