using NUnit.Framework;
using UnityEngine;
using Unity.Collections.LowLevel.Unsafe;
using Unity.PerformanceTesting;
using Unity.PerformanceTesting.Benchmark;
using System.Runtime.CompilerServices;
using System.Threading;

namespace Unity.Collections.PerformanceTests
{
    static class ParallelHashSetUtil
    {
        static public void AllocInt(ref NativeParallelHashSet<int> container, int capacity, bool addValues)
        {
            if (capacity >= 0)
            {
                Random.InitState(0);
                container = new NativeParallelHashSet<int>(capacity, Allocator.Persistent);
                if (addValues)
                {
                    for (int i = 0; i < capacity; i++)
                        container.Add(i);
                }
            }
            else
                container.Dispose();
        }
        static public void AllocInt(ref NativeParallelHashSet<int> containerA, ref NativeParallelHashSet<int> containerB, int capacity, bool addValues)
        {
            AllocInt(ref containerA, capacity, false);
            AllocInt(ref containerB, capacity, false);
            if (!addValues)
                return;
            for (int i = 0; i < capacity; i++)
            {
                containerA.Add(Random.Range(0, capacity * 2));
                containerB.Add(Random.Range(0, capacity * 2));
            }
        }
        static public void AllocInt(ref UnsafeParallelHashSet<int> container, int capacity, bool addValues)
        {
            if (capacity >= 0)
            {
                Random.InitState(0);
                container = new UnsafeParallelHashSet<int>(capacity, Allocator.Persistent);
                if (addValues)
                {
                    for (int i = 0; i < capacity; i++)
                        container.Add(i);
                }
            }
            else
                container.Dispose();
        }
        static public void AllocInt(ref UnsafeParallelHashSet<int> containerA, ref UnsafeParallelHashSet<int> containerB, int capacity, bool addValues)
        {
            AllocInt(ref containerA, capacity, false);
            AllocInt(ref containerB, capacity, false);
            if (!addValues)
                return;
            for (int i = 0; i < capacity; i++)
            {
                containerA.Add(Random.Range(0, capacity * 2));
                containerB.Add(Random.Range(0, capacity * 2));
            }
        }
        static public object AllocBclContainer(int capacity, bool addValues)
        {
            if (capacity < 0)
                return null;

            Random.InitState(0);

            var bclContainer = new FakeConcurrentHashSet<int>();

            if (addValues)
            {
                for (int i = 0; i < capacity; i++)
                    bclContainer.Add(i);
            }
            return bclContainer;
        }
        static public object AllocBclContainerTuple(int capacity, bool addValues)
        {
            var tuple = new System.Tuple<FakeConcurrentHashSet<int>, FakeConcurrentHashSet<int>>(
                (FakeConcurrentHashSet<int>)AllocBclContainer(capacity, false),
                (FakeConcurrentHashSet<int>)AllocBclContainer(capacity, false));
            if (addValues)
            {
                for (int i = 0; i < capacity; i++)
                {
                    tuple.Item1.Add(Random.Range(0, capacity * 2));
                    tuple.Item2.Add(Random.Range(0, capacity * 2));
                }
            }
            return tuple;
        }
        static public void CreateRandomKeys(int capacity, ref UnsafeList<int> keys)
        {
            if (!keys.IsCreated)
            {
                keys = new UnsafeList<int>(capacity, Allocator.Persistent);
                Random.InitState(0);
                for (int i = 0; i < capacity; i++)
                {
                    int randKey = Random.Range(0, capacity);
                    keys.Add(randKey);
                }
            }
            else
                keys.Dispose();
        }
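        // Splits the range [0, count) into `workers` contiguous, near-equal slices and returns the
        // slice belonging to `worker`; the slices tile the full range exactly. Illustrative example
        // (not part of the original source): count = 100_000 and workers = 4 gives worker 1 the
        // range [25000, 50000).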
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        static public void SplitForWorkers(int count, int worker, int workers, out int startInclusive, out int endExclusive)
        {
            startInclusive = count * worker / workers;
            endExclusive = count * (worker + 1) / workers;
        }
    }

    // A generic HashSet with a lock is generally recommended as the most performant way to obtain a thread-safe HashSet in C#.
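    // Illustrative usage (not part of the benchmarks below): the wrapper is used like a plain
    // HashSet<T>, but every call acquires the internal ReaderWriterLockSlim, e.g.
    //
    //     var set = new FakeConcurrentHashSet<int>();
    //     set.Add(42);                     // write-locked
    //     bool found = set.Contains(42);   // read-locked; safe to call from multiple threads
    //     set.Dispose();                   // disposes the internal lock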
    internal class FakeConcurrentHashSet<T> : System.IDisposable
    {
        private readonly ReaderWriterLockSlim m_Lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
        private readonly System.Collections.Generic.HashSet<T> m_HashSet = new System.Collections.Generic.HashSet<T>();

        ~FakeConcurrentHashSet() => Dispose(false);

        public bool Add(T item)
        {
            m_Lock.EnterWriteLock();
            try
            {
                return m_HashSet.Add(item);
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public void Clear()
        {
            m_Lock.EnterWriteLock();
            try
            {
                m_HashSet.Clear();
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public bool Contains(T item)
        {
            m_Lock.EnterReadLock();
            try
            {
                return m_HashSet.Contains(item);
            }
            finally
            {
                if (m_Lock.IsReadLockHeld)
                    m_Lock.ExitReadLock();
            }
        }

        public bool Remove(T item)
        {
            m_Lock.EnterWriteLock();
            try
            {
                return m_HashSet.Remove(item);
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public int Count
        {
            get
            {
                m_Lock.EnterReadLock();
                try
                {
                    return m_HashSet.Count;
                }
                finally
                {
                    if (m_Lock.IsReadLockHeld)
                        m_Lock.ExitReadLock();
                }
            }
        }

        public void CopyTo(T[] array)
        {
            m_Lock.EnterReadLock();
            try
            {
                m_HashSet.CopyTo(array);
            }
            finally
            {
                if (m_Lock.IsReadLockHeld)
                    m_Lock.ExitReadLock();
            }
        }

        public System.Collections.Generic.HashSet<T>.Enumerator GetEnumerator()
        {
            m_Lock.EnterReadLock();
            try
            {
                return m_HashSet.GetEnumerator();
            }
            finally
            {
                if (m_Lock.IsReadLockHeld)
                    m_Lock.ExitReadLock();
            }
        }

        // UnionWith/IntersectWith/ExceptWith mutate this set, so they take the write lock
        // (matching Add/Clear/Remove above) rather than the read lock.
        public void UnionWith(FakeConcurrentHashSet<T> other)
        {
            m_Lock.EnterWriteLock();
            try
            {
                m_HashSet.UnionWith(other.m_HashSet);
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public void IntersectWith(FakeConcurrentHashSet<T> other)
        {
            m_Lock.EnterWriteLock();
            try
            {
                m_HashSet.IntersectWith(other.m_HashSet);
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public void ExceptWith(FakeConcurrentHashSet<T> other)
        {
            m_Lock.EnterWriteLock();
            try
            {
                m_HashSet.ExceptWith(other.m_HashSet);
            }
            finally
            {
                if (m_Lock.IsWriteLockHeld)
                    m_Lock.ExitWriteLock();
            }
        }

        public void Dispose()
        {
            Dispose(true);
            System.GC.SuppressFinalize(this);
        }

        protected virtual void Dispose(bool disposing)
        {
            if (disposing && m_Lock != null)
                m_Lock.Dispose();
        }
    }

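    // Each struct below is one benchmarked operation. As used in this file, the
    // IBenchmarkContainerParallel pattern is: the Alloc*Container methods build (or, with a negative
    // capacity, dispose) the container under test, the Measure* methods are the timed work for one
    // worker, and SetParams receives the capacity plus any extra arguments passed to the runner
    // (typically the worker count).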
    struct ParallelHashSetIsEmpty100k : IBenchmarkContainerParallel
    {
        const int kIterations = 100_000;
        int workers;
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        void IBenchmarkContainerParallel.SetParams(int capacity, params int[] args) => workers = args[0];
        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, true);

        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            var reader = nativeContainer.AsReadOnly();
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = reader.IsEmpty;
        }
        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = unsafeContainer.IsEmpty;
        }
        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = bclContainer.Count == 0;
        }
    }

    struct ParallelHashSetCount100k : IBenchmarkContainerParallel
    {
        const int kIterations = 100_000;
        int workers;
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        void IBenchmarkContainerParallel.SetParams(int capacity, params int[] args) => workers = args[0];
        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, true);

        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            var reader = nativeContainer.AsReadOnly();
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = reader.Count();
        }
        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = unsafeContainer.Count();
        }
        [MethodImpl(MethodImplOptions.NoOptimization)]
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.SplitForWorkers(kIterations, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                _ = bclContainer.Count;
        }
    }

    struct ParallelHashSetToNativeArray : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, true);

        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            var asArray = nativeContainer.ToNativeArray(Allocator.Temp);
            asArray.Dispose();
        }
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            var asArray = unsafeContainer.ToNativeArray(Allocator.Temp);
            asArray.Dispose();
        }
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            int[] asArray = new int[bclContainer.Count];
            bclContainer.CopyTo(asArray);
        }
    }

    struct ParallelHashSetInsert : IBenchmarkContainerParallel
    {
        int capacity;
        int workers;
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        void IBenchmarkContainerParallel.SetParams(int capacity, params int[] args)
        {
            this.capacity = capacity;
            workers = args[0];
        }

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, false);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, false);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, false);

        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            var writer = nativeContainer.AsParallelWriter();
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                writer.Add(i, threadIndex);
        }
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            var writer = unsafeContainer.AsParallelWriter();
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                writer.Add(i, threadIndex);
        }
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            for (int i = start; i < end; i++)
                bclContainer.Add(i);
        }
    }

    struct ParallelHashSetAddGrow : IBenchmarkContainerParallel
    {
        int capacity;
        int toAdd;
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        void IBenchmarkContainerParallel.SetParams(int capacity, params int[] args)
        {
            this.capacity = capacity;
            toAdd = args[0] - capacity;
        }

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, true);

        public void MeasureNativeContainer(int _, int __)
        {
            // Intentionally setting capacity small and growing by adding more items
            for (int i = capacity; i < capacity + toAdd; i++)
                nativeContainer.Add(i);
        }
        public void MeasureUnsafeContainer(int _, int __)
        {
            // Intentionally setting capacity small and growing by adding more items
            for (int i = capacity; i < capacity + toAdd; i++)
                unsafeContainer.Add(i);
        }
        public void MeasureBclContainer(object container, int _)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            // Intentionally setting capacity small and growing by adding more items
            for (int i = capacity; i < capacity + toAdd; i++)
                bclContainer.Add(i);
        }
    }

    struct ParallelHashSetContains : IBenchmarkContainerParallel
    {
        int capacity;
        int workers;
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;
        UnsafeList<int> keys;

        void IBenchmarkContainerParallel.SetParams(int capacity, params int[] args)
        {
            this.capacity = capacity;
            workers = args[0];
        }

        public void AllocNativeContainer(int capacity)
        {
            ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, false);
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                nativeContainer.Add(keys[i]);
        }
        public void AllocUnsafeContainer(int capacity)
        {
            ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, false);
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                unsafeContainer.Add(keys[i]);
        }
        public object AllocBclContainer(int capacity)
        {
            object container = ParallelHashSetUtil.AllocBclContainer(capacity, false);
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                bclContainer.Add(keys[i]);
            return container;
        }

        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            var reader = nativeContainer.AsReadOnly();
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            bool data = false;
            for (int i = start; i < end; i++)
                Volatile.Write(ref data, reader.Contains(keys[i]));
        }
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            bool data = false;
            for (int i = start; i < end; i++)
                Volatile.Write(ref data, unsafeContainer.Contains(keys[i]));
        }
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.SplitForWorkers(capacity, worker, workers, out int start, out int end);
            bool data = false;
            for (int i = start; i < end; i++)
                Volatile.Write(ref data, bclContainer.Contains(keys[i]));
        }
    }

    struct ParallelHashSetRemove : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;
        UnsafeList<int> keys;

        public void AllocNativeContainer(int capacity)
        {
            ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, false);
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                nativeContainer.Add(keys[i]);
        }
        public void AllocUnsafeContainer(int capacity)
        {
            ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, false);
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                unsafeContainer.Add(keys[i]);
        }
        public object AllocBclContainer(int capacity)
        {
            object container = ParallelHashSetUtil.AllocBclContainer(capacity, false);
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            ParallelHashSetUtil.CreateRandomKeys(capacity, ref keys);
            for (int i = 0; i < capacity; i++)
                bclContainer.Add(keys[i]);
            return container;
        }

        public void MeasureNativeContainer(int worker, int threadIndex)
        {
            int insertions = keys.Length;
            for (int i = 0; i < insertions; i++)
                nativeContainer.Remove(keys[i]);
        }
        public void MeasureUnsafeContainer(int worker, int threadIndex)
        {
            int insertions = keys.Length;
            for (int i = 0; i < insertions; i++)
                unsafeContainer.Remove(keys[i]);
        }
        public void MeasureBclContainer(object container, int worker)
        {
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            int insertions = keys.Length;
            for (int i = 0; i < insertions; i++)
                bclContainer.Remove(keys[i]);
        }
    }

    struct ParallelHashSetForEach : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        UnsafeParallelHashSet<int> unsafeContainer;

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainer(capacity, true);

        public void MeasureNativeContainer(int _, int __)
        {
            int keep = 0;
            foreach (var value in nativeContainer)
                Volatile.Write(ref keep, value);
        }
        public void MeasureUnsafeContainer(int _, int __)
        {
            int keep = 0;
            foreach (var value in unsafeContainer)
                Volatile.Write(ref keep, value);
        }
        public void MeasureBclContainer(object container, int _)
        {
            int keep = 0;
            var bclContainer = (FakeConcurrentHashSet<int>)container;
            foreach (var value in bclContainer)
                Volatile.Write(ref keep, value);
        }
    }

    struct ParallelHashSetUnionWith : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        NativeParallelHashSet<int> nativeContainerOther;
        UnsafeParallelHashSet<int> unsafeContainer;
        UnsafeParallelHashSet<int> unsafeContainerOther;
        public int total;

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, ref nativeContainerOther, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, ref unsafeContainerOther, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainerTuple(capacity, true);

        public void MeasureNativeContainer(int _, int __) => nativeContainer.UnionWith(nativeContainerOther);
        public void MeasureUnsafeContainer(int _, int __) => unsafeContainer.UnionWith(unsafeContainerOther);
        public void MeasureBclContainer(object container, int _)
        {
            var dotnetContainer = (System.Tuple<FakeConcurrentHashSet<int>, FakeConcurrentHashSet<int>>)container;
            dotnetContainer.Item1.UnionWith(dotnetContainer.Item2);
        }
    }

    struct ParallelHashSetIntersectWith : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        NativeParallelHashSet<int> nativeContainerOther;
        UnsafeParallelHashSet<int> unsafeContainer;
        UnsafeParallelHashSet<int> unsafeContainerOther;
        public int total;

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, ref nativeContainerOther, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, ref unsafeContainerOther, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainerTuple(capacity, true);

        public void MeasureNativeContainer(int _, int __) => nativeContainer.IntersectWith(nativeContainerOther);
        public void MeasureUnsafeContainer(int _, int __) => unsafeContainer.IntersectWith(unsafeContainerOther);
        public void MeasureBclContainer(object container, int _)
        {
            var dotnetContainer = (System.Tuple<FakeConcurrentHashSet<int>, FakeConcurrentHashSet<int>>)container;
            dotnetContainer.Item1.IntersectWith(dotnetContainer.Item2);
        }
    }

    struct ParallelHashSetExceptWith : IBenchmarkContainerParallel
    {
        NativeParallelHashSet<int> nativeContainer;
        NativeParallelHashSet<int> nativeContainerOther;
        UnsafeParallelHashSet<int> unsafeContainer;
        UnsafeParallelHashSet<int> unsafeContainerOther;
        public int total;

        public void AllocNativeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref nativeContainer, ref nativeContainerOther, capacity, true);
        public void AllocUnsafeContainer(int capacity) => ParallelHashSetUtil.AllocInt(ref unsafeContainer, ref unsafeContainerOther, capacity, true);
        public object AllocBclContainer(int capacity) => ParallelHashSetUtil.AllocBclContainerTuple(capacity, true);

        public void MeasureNativeContainer(int _, int __) => nativeContainer.ExceptWith(nativeContainerOther);
        public void MeasureUnsafeContainer(int _, int __) => unsafeContainer.ExceptWith(unsafeContainerOther);
        public void MeasureBclContainer(object container, int _)
        {
            var dotnetContainer = (System.Tuple<FakeConcurrentHashSet<int>, FakeConcurrentHashSet<int>>)container;
            dotnetContainer.Item1.ExceptWith(dotnetContainer.Item2);
        }
    }


    [Benchmark(typeof(BenchmarkContainerType))]
    [BenchmarkNameOverride(BenchmarkContainerConfig.BCL, "HashSet w/lock")]
    class ParallelHashSet
    {
#if UNITY_EDITOR
        [UnityEditor.MenuItem(BenchmarkContainerConfig.kMenuItemIndividual + nameof(ParallelHashSet))]
        static void RunIndividual()
            => BenchmarkContainerConfig.RunBenchmark(typeof(ParallelHashSet));
#endif

        [Test, Performance]
        [Category("Performance")]
        public unsafe void IsEmpty_x_100k(
            [Values(1, 2, 4)] int workers,
            [Values(0, 100)] int capacity,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetIsEmpty100k>.Run(workers, capacity, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void Count_x_100k(
            [Values(1, 2, 4)] int workers,
            [Values(0, 100)] int capacity,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetCount100k>.Run(workers, capacity, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void ToNativeArray(
            [Values(1)] int workers,
            [Values(10000, 100000, 1000000)] int capacity,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetToNativeArray>.Run(workers, capacity, type);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void Insert(
            [Values(1, 2, 4)] int workers,
#if UNITY_STANDALONE || UNITY_EDITOR
            [Values(10000, 100000, 1000000)] int insertions,
#else
            [Values(10000, 100000)] int insertions, // Observe potential lower memory requirement on non-desktop platforms
#endif
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetInsert>.Run(workers, insertions, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        [BenchmarkTestFootnote("Incrementally grows from `capacity` until reaching size of `growTo`")]
        public unsafe void AddGrow(
            [Values(1)] int workers, // Can't grow capacity in parallel
            [Values(4, 65536)] int capacity,
            [Values(1024 * 1024)] int growTo,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetAddGrow>.Run(workers, capacity, type, growTo);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void Contains(
            [Values(1, 2, 4)] int workers,
#if UNITY_STANDALONE || UNITY_EDITOR
            [Values(10000, 100000, 1000000)] int insertions,
#else
            [Values(10000, 100000)] int insertions, // Observe potential lower memory requirement on non-desktop platforms
#endif
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetContains>.Run(workers, insertions, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void Remove(
            [Values(1)] int workers, // No API for ParallelWriter.TryRemove currently
            [Values(10000, 100000, 1000000)] int insertions,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetRemove>.Run(workers, insertions, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void Foreach(
            [Values(1)] int workers, // This work can't be split
            [Values(10000, 100000, 1000000)] int insertions,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetForEach>.Run(workers, insertions, type, workers);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void UnionWith(
            [Values(1)] int workers, // This work is already split and unrelated to the parallelism of the container
            [Values(10000, 100000, 1000000)] int insertions,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetUnionWith>.Run(workers, insertions, type);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void IntersectWith(
            [Values(1)] int workers, // This work is already split and unrelated to the parallelism of the container
            [Values(10000, 100000, 1000000)] int insertions,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetIntersectWith>.Run(workers, insertions, type);
        }

        [Test, Performance]
        [Category("Performance")]
        public unsafe void ExceptWith(
            [Values(1)] int workers, // This work is already split and unrelated to the parallelism of the container
            [Values(10000, 100000, 1000000)] int insertions,
            [Values] BenchmarkContainerType type)
        {
            BenchmarkContainerRunnerParallel<ParallelHashSetExceptWith>.Run(workers, insertions, type);
        }
    }
}