+129
-36
Cargo.lock
+129
-36
Cargo.lock
···
2453
2453
"tempfile",
2454
2454
"thiserror 2.0.17",
2455
2455
"tokio",
2456
-
"toml",
2456
+
"toml 0.8.23",
2457
2457
"walkdir",
2458
2458
]
2459
2459
···
2624
2624
2625
2625
[[package]]
2626
2626
name = "js-sys"
2627
-
version = "0.3.81"
2627
+
version = "0.3.82"
2628
2628
source = "registry+https://github.com/rust-lang/crates.io-index"
2629
-
checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305"
2629
+
checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65"
2630
2630
dependencies = [
2631
2631
"once_cell",
2632
2632
"wasm-bindgen",
···
2915
2915
[[package]]
2916
2916
name = "mini-moka"
2917
2917
version = "0.11.0"
2918
-
source = "git+https://github.com/moka-rs/mini-moka?rev=da864e849f5d034f32e02197fee9bb5d5af36d3d#da864e849f5d034f32e02197fee9bb5d5af36d3d"
2919
2918
dependencies = [
2919
+
"anyhow",
2920
2920
"crossbeam-channel",
2921
2921
"crossbeam-utils",
2922
2922
"dashmap",
2923
+
"getrandom 0.2.16",
2924
+
"once_cell",
2923
2925
"smallvec",
2924
2926
"tagptr",
2925
2927
"triomphe",
2928
+
"trybuild",
2929
+
"wasm-bindgen-test",
2926
2930
"web-time",
2931
+
]
2932
+
2933
+
[[package]]
2934
+
name = "minicov"
2935
+
version = "0.3.7"
2936
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2937
+
checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b"
2938
+
dependencies = [
2939
+
"cc",
2940
+
"walkdir",
2927
2941
]
2928
2942
2929
2943
[[package]]
···
4390
4404
]
4391
4405
4392
4406
[[package]]
4407
+
name = "serde_spanned"
4408
+
version = "1.0.3"
4409
+
source = "registry+https://github.com/rust-lang/crates.io-index"
4410
+
checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392"
4411
+
dependencies = [
4412
+
"serde_core",
4413
+
]
4414
+
4415
+
[[package]]
4393
4416
name = "serde_urlencoded"
4394
4417
version = "0.7.1"
4395
4418
source = "registry+https://github.com/rust-lang/crates.io-index"
···
4741
4764
"cfg-expr",
4742
4765
"heck 0.5.0",
4743
4766
"pkg-config",
4744
-
"toml",
4767
+
"toml 0.8.23",
4745
4768
"version-compare",
4746
4769
]
4747
4770
···
4756
4779
version = "0.12.16"
4757
4780
source = "registry+https://github.com/rust-lang/crates.io-index"
4758
4781
checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
4782
+
4783
+
[[package]]
4784
+
name = "target-triple"
4785
+
version = "1.0.0"
4786
+
source = "registry+https://github.com/rust-lang/crates.io-index"
4787
+
checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b"
4759
4788
4760
4789
[[package]]
4761
4790
name = "tempfile"
···
5057
5086
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
5058
5087
dependencies = [
5059
5088
"serde",
5060
-
"serde_spanned",
5061
-
"toml_datetime",
5089
+
"serde_spanned 0.6.9",
5090
+
"toml_datetime 0.6.11",
5062
5091
"toml_edit",
5063
5092
]
5064
5093
5065
5094
[[package]]
5095
+
name = "toml"
5096
+
version = "0.9.8"
5097
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5098
+
checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8"
5099
+
dependencies = [
5100
+
"indexmap 2.12.0",
5101
+
"serde_core",
5102
+
"serde_spanned 1.0.3",
5103
+
"toml_datetime 0.7.3",
5104
+
"toml_parser",
5105
+
"toml_writer",
5106
+
"winnow 0.7.13",
5107
+
]
5108
+
5109
+
[[package]]
5066
5110
name = "toml_datetime"
5067
5111
version = "0.6.11"
5068
5112
source = "registry+https://github.com/rust-lang/crates.io-index"
···
5072
5116
]
5073
5117
5074
5118
[[package]]
5119
+
name = "toml_datetime"
5120
+
version = "0.7.3"
5121
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5122
+
checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533"
5123
+
dependencies = [
5124
+
"serde_core",
5125
+
]
5126
+
5127
+
[[package]]
5075
5128
name = "toml_edit"
5076
5129
version = "0.22.27"
5077
5130
source = "registry+https://github.com/rust-lang/crates.io-index"
···
5079
5132
dependencies = [
5080
5133
"indexmap 2.12.0",
5081
5134
"serde",
5082
-
"serde_spanned",
5083
-
"toml_datetime",
5135
+
"serde_spanned 0.6.9",
5136
+
"toml_datetime 0.6.11",
5084
5137
"toml_write",
5085
5138
"winnow 0.7.13",
5086
5139
]
5087
5140
5088
5141
[[package]]
5142
+
name = "toml_parser"
5143
+
version = "1.0.4"
5144
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5145
+
checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e"
5146
+
dependencies = [
5147
+
"winnow 0.7.13",
5148
+
]
5149
+
5150
+
[[package]]
5089
5151
name = "toml_write"
5090
5152
version = "0.1.2"
5091
5153
source = "registry+https://github.com/rust-lang/crates.io-index"
5092
5154
checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
5093
5155
5094
5156
[[package]]
5157
+
name = "toml_writer"
5158
+
version = "1.0.4"
5159
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5160
+
checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2"
5161
+
5162
+
[[package]]
5095
5163
name = "tower"
5096
5164
version = "0.5.2"
5097
5165
source = "registry+https://github.com/rust-lang/crates.io-index"
···
5223
5291
version = "0.2.5"
5224
5292
source = "registry+https://github.com/rust-lang/crates.io-index"
5225
5293
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
5294
+
5295
+
[[package]]
5296
+
name = "trybuild"
5297
+
version = "1.0.114"
5298
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5299
+
checksum = "3e17e807bff86d2a06b52bca4276746584a78375055b6e45843925ce2802b335"
5300
+
dependencies = [
5301
+
"glob",
5302
+
"serde",
5303
+
"serde_derive",
5304
+
"serde_json",
5305
+
"target-triple",
5306
+
"termcolor",
5307
+
"toml 0.9.8",
5308
+
]
5226
5309
5227
5310
[[package]]
5228
5311
name = "tungstenite"
···
5476
5559
5477
5560
[[package]]
5478
5561
name = "wasm-bindgen"
5479
-
version = "0.2.104"
5562
+
version = "0.2.105"
5480
5563
source = "registry+https://github.com/rust-lang/crates.io-index"
5481
-
checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d"
5564
+
checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60"
5482
5565
dependencies = [
5483
5566
"cfg-if",
5484
5567
"once_cell",
···
5488
5571
]
5489
5572
5490
5573
[[package]]
5491
-
name = "wasm-bindgen-backend"
5492
-
version = "0.2.104"
5493
-
source = "registry+https://github.com/rust-lang/crates.io-index"
5494
-
checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19"
5495
-
dependencies = [
5496
-
"bumpalo",
5497
-
"log",
5498
-
"proc-macro2",
5499
-
"quote",
5500
-
"syn 2.0.108",
5501
-
"wasm-bindgen-shared",
5502
-
]
5503
-
5504
-
[[package]]
5505
5574
name = "wasm-bindgen-futures"
5506
-
version = "0.4.54"
5575
+
version = "0.4.55"
5507
5576
source = "registry+https://github.com/rust-lang/crates.io-index"
5508
-
checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c"
5577
+
checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0"
5509
5578
dependencies = [
5510
5579
"cfg-if",
5511
5580
"js-sys",
···
5516
5585
5517
5586
[[package]]
5518
5587
name = "wasm-bindgen-macro"
5519
-
version = "0.2.104"
5588
+
version = "0.2.105"
5520
5589
source = "registry+https://github.com/rust-lang/crates.io-index"
5521
-
checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119"
5590
+
checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2"
5522
5591
dependencies = [
5523
5592
"quote",
5524
5593
"wasm-bindgen-macro-support",
···
5526
5595
5527
5596
[[package]]
5528
5597
name = "wasm-bindgen-macro-support"
5529
-
version = "0.2.104"
5598
+
version = "0.2.105"
5530
5599
source = "registry+https://github.com/rust-lang/crates.io-index"
5531
-
checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7"
5600
+
checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc"
5532
5601
dependencies = [
5602
+
"bumpalo",
5533
5603
"proc-macro2",
5534
5604
"quote",
5535
5605
"syn 2.0.108",
5536
-
"wasm-bindgen-backend",
5537
5606
"wasm-bindgen-shared",
5538
5607
]
5539
5608
5540
5609
[[package]]
5541
5610
name = "wasm-bindgen-shared"
5542
-
version = "0.2.104"
5611
+
version = "0.2.105"
5543
5612
source = "registry+https://github.com/rust-lang/crates.io-index"
5544
-
checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1"
5613
+
checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76"
5545
5614
dependencies = [
5546
5615
"unicode-ident",
5547
5616
]
5548
5617
5549
5618
[[package]]
5619
+
name = "wasm-bindgen-test"
5620
+
version = "0.3.55"
5621
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5622
+
checksum = "bfc379bfb624eb59050b509c13e77b4eb53150c350db69628141abce842f2373"
5623
+
dependencies = [
5624
+
"js-sys",
5625
+
"minicov",
5626
+
"wasm-bindgen",
5627
+
"wasm-bindgen-futures",
5628
+
"wasm-bindgen-test-macro",
5629
+
]
5630
+
5631
+
[[package]]
5632
+
name = "wasm-bindgen-test-macro"
5633
+
version = "0.3.55"
5634
+
source = "registry+https://github.com/rust-lang/crates.io-index"
5635
+
checksum = "085b2df989e1e6f9620c1311df6c996e83fe16f57792b272ce1e024ac16a90f1"
5636
+
dependencies = [
5637
+
"proc-macro2",
5638
+
"quote",
5639
+
"syn 2.0.108",
5640
+
]
5641
+
5642
+
[[package]]
5550
5643
name = "wasm-streams"
5551
5644
version = "0.4.2"
5552
5645
source = "registry+https://github.com/rust-lang/crates.io-index"
···
5561
5654
5562
5655
[[package]]
5563
5656
name = "web-sys"
5564
-
version = "0.3.81"
5657
+
version = "0.3.82"
5565
5658
source = "registry+https://github.com/rust-lang/crates.io-index"
5566
-
checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120"
5659
+
checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1"
5567
5660
dependencies = [
5568
5661
"js-sys",
5569
5662
"wasm-bindgen",
+2
-2
crates/jacquard-identity/Cargo.toml
+2
-2
crates/jacquard-identity/Cargo.toml
···
37
37
urlencoding.workspace = true
38
38
tracing = { workspace = true, optional = true }
39
39
n0-future = { workspace = true, optional = true }
40
-
mini-moka = { version = "0.11.0", git = "https://github.com/moka-rs/mini-moka", rev = "da864e849f5d034f32e02197fee9bb5d5af36d3d", optional = true }
40
+
mini-moka = { version = "0.11.0", path = "../mini-moka-vendored", optional = true }
41
41
# mini-moka = { version = "0.10", optional = true }
42
42
43
43
[target.'cfg(not(target_family = "wasm"))'.dependencies]
···
46
46
47
47
48
48
[target.'cfg(target_arch = "wasm32")'.dependencies]
49
-
mini-moka = { version = "0.11.0", git = "https://github.com/moka-rs/mini-moka", rev = "da864e849f5d034f32e02197fee9bb5d5af36d3d", features = ["js"], optional = true }
49
+
mini-moka = { version = "0.11.0",path = "../mini-moka-vendored", features = ["js"], optional = true }
50
50
51
51
52
52
[[example]]
+18
-18
crates/jacquard-lexicon/src/codegen.rs
+18
-18
crates/jacquard-lexicon/src/codegen.rs
···
513
513
assert!(formatted.contains("Account"));
514
514
}
515
515
516
-
#[test]
517
-
fn test_generate_token_type() {
518
-
let corpus =
519
-
LexiconCorpus::load_from_dir("tests/fixtures/test_lexicons").expect("load corpus");
520
-
let codegen = CodeGenerator::new(&corpus, "jacquard_api");
516
+
// #[test]
517
+
// fn test_generate_token_type() {
518
+
// let corpus =
519
+
// LexiconCorpus::load_from_dir("tests/fixtures/test_lexicons").expect("load corpus");
520
+
// let codegen = CodeGenerator::new(&corpus, "jacquard_api");
521
521
522
-
let doc = corpus.get("app.bsky.embed.images").expect("get images");
523
-
let def = doc.defs.get("viewImage").expect("get viewImage def");
522
+
// let doc = corpus.get("app.bsky.embed.images").expect("get images");
523
+
// let def = doc.defs.get("viewImage").expect("get viewImage def");
524
524
525
-
let tokens = codegen
526
-
.generate_def("app.bsky.embed.images", "viewImage", def)
527
-
.expect("generate");
525
+
// let tokens = codegen
526
+
// .generate_def("app.bsky.embed.images", "viewImage", def)
527
+
// .expect("generate");
528
528
529
-
let file: syn::File = syn::parse2(tokens).expect("parse tokens");
530
-
let formatted = prettyplease::unparse(&file);
531
-
println!("\n{}\n", formatted);
529
+
// let file: syn::File = syn::parse2(tokens).expect("parse tokens");
530
+
// let formatted = prettyplease::unparse(&file);
531
+
// println!("\n{}\n", formatted);
532
532
533
-
// Token types are unit structs
534
-
assert!(formatted.contains("struct ViewImage"));
535
-
// Should have Display implementation
536
-
assert!(formatted.contains("impl std::fmt::Display"));
537
-
}
533
+
// // Token types are unit structs
534
+
// assert!(formatted.contains("struct ViewImage"));
535
+
// // Should have Display implementation
536
+
// assert!(formatted.contains("impl std::fmt::Display"));
537
+
// }
538
538
539
539
#[test]
540
540
fn test_generate_array_types() {
+37
crates/mini-moka-vendored/.github/workflows/Audit.yml
+37
crates/mini-moka-vendored/.github/workflows/Audit.yml
···
1
+
name: Cargo Audit
2
+
3
+
on:
4
+
push:
5
+
paths:
6
+
- '**/Cargo.toml'
7
+
- '**/Cargo.lock'
8
+
schedule:
9
+
# https://crontab.guru/
10
+
- cron: '5 20 * * 5'
11
+
12
+
jobs:
13
+
audit:
14
+
runs-on: ubuntu-latest
15
+
steps:
16
+
- name: Checkout Mini Moka
17
+
uses: actions/checkout@v4
18
+
19
+
- name: Install Rust toolchain (Nightly)
20
+
uses: dtolnay/rust-toolchain@master
21
+
with:
22
+
toolchain: nightly
23
+
24
+
- run: cargo clean
25
+
26
+
- name: Check for known security vulnerabilities (Latest versions)
27
+
uses: actions-rs/audit-check@v1
28
+
with:
29
+
token: ${{ secrets.GITHUB_TOKEN }}
30
+
31
+
- name: Downgrade dependencies to minimal versions
32
+
run: cargo update -Z minimal-versions
33
+
34
+
- name: Check for known security vulnerabilities (Minimal versions)
35
+
uses: actions-rs/audit-check@v1
36
+
with:
37
+
token: ${{ secrets.GITHUB_TOKEN }}
+52
crates/mini-moka-vendored/.github/workflows/CI.yml
+52
crates/mini-moka-vendored/.github/workflows/CI.yml
···
1
+
name: CI
2
+
3
+
on:
4
+
push:
5
+
paths-ignore:
6
+
- '.devcontainer/**'
7
+
- '.gitpod.yml'
8
+
- '.vscode/**'
9
+
pull_request:
10
+
paths-ignore:
11
+
- '.devcontainer/**'
12
+
- '.gitpod.yml'
13
+
- '.vscode/**'
14
+
schedule:
15
+
# Run against the last commit on the default branch on Friday at 8pm (UTC?)
16
+
- cron: '0 20 * * 5'
17
+
18
+
jobs:
19
+
test:
20
+
runs-on: ubuntu-latest
21
+
strategy:
22
+
fail-fast: false
23
+
matrix:
24
+
rust:
25
+
- stable
26
+
- beta
27
+
- 1.76.0 # MSRV
28
+
- nightly # For checking minimum version dependencies.
29
+
30
+
steps:
31
+
- name: Checkout Mini Moka
32
+
uses: actions/checkout@v4
33
+
34
+
- name: Install Rust toolchain
35
+
uses: dtolnay/rust-toolchain@master
36
+
with:
37
+
toolchain: ${{ matrix.rust }}
38
+
39
+
- run: cargo clean
40
+
41
+
- name: Downgrade dependencies to minimal versions (Nightly only)
42
+
if: ${{ matrix.rust == 'nightly' }}
43
+
run: cargo update -Z minimal-versions
44
+
45
+
- name: Show cargo tree
46
+
run: cargo tree
47
+
48
+
- name: Run tests (debug, sync feature)
49
+
run: cargo test --features sync
50
+
51
+
- name: Run tests (release, sync feature)
52
+
run: cargo test --release --features sync
+31
crates/mini-moka-vendored/.github/workflows/Kani.yml
+31
crates/mini-moka-vendored/.github/workflows/Kani.yml
···
1
+
name: Kani CI
2
+
3
+
on:
4
+
pull_request:
5
+
paths-ignore:
6
+
- '.vscode/**'
7
+
- CHANGELOG.md
8
+
- README.md
9
+
push:
10
+
paths-ignore:
11
+
- '.vscode/**'
12
+
- CHANGELOG.md
13
+
- README.md
14
+
15
+
jobs:
16
+
run-kani:
17
+
runs-on: ubuntu-latest
18
+
steps:
19
+
- name: Checkout Moka
20
+
uses: actions/checkout@v4
21
+
22
+
- name: Show CPU into
23
+
run: |
24
+
nproc
25
+
lscpu
26
+
free -m
27
+
- name: Run Kani
28
+
uses: model-checking/kani-github-action@v1.0
29
+
# Workaround for https://github.com/moka-rs/mini-moka/issues/36
30
+
with:
31
+
kani-version: '0.54.0'
+42
crates/mini-moka-vendored/.github/workflows/Lints.yml
+42
crates/mini-moka-vendored/.github/workflows/Lints.yml
···
1
+
name: Clippy lints and Rustfmt
2
+
3
+
on:
4
+
push:
5
+
paths-ignore:
6
+
- '.devcontainer/**'
7
+
- '.gitpod.yml'
8
+
- '.vscode/**'
9
+
schedule:
10
+
# Run against the last commit on the default branch on Friday at 7pm (UTC?)
11
+
- cron: '0 19 * * 5'
12
+
13
+
jobs:
14
+
test:
15
+
runs-on: ubuntu-latest
16
+
strategy:
17
+
matrix:
18
+
rust:
19
+
- toolchain: stable
20
+
- toolchain: beta
21
+
rustflags: '--cfg beta_clippy'
22
+
23
+
steps:
24
+
- name: Checkout Mini Moka
25
+
uses: actions/checkout@v4
26
+
27
+
- name: Install Rust toolchain
28
+
uses: dtolnay/rust-toolchain@master
29
+
with:
30
+
toolchain: ${{ matrix.rust.toolchain }}
31
+
components: rustfmt, clippy
32
+
33
+
- run: cargo clean
34
+
35
+
- name: Run Clippy
36
+
run: cargo clippy --lib --tests --all-features --all-targets -- -D warnings
37
+
env:
38
+
RUSTFLAGS: ${{ matrix.rust.rustflags }}
39
+
40
+
- name: Run Rustfmt
41
+
if: ${{ matrix.rust.toolchain == 'stable' }}
42
+
run: cargo fmt --all -- --check
+61
crates/mini-moka-vendored/.github/workflows/LinuxCrossCompileTest.yml
+61
crates/mini-moka-vendored/.github/workflows/LinuxCrossCompileTest.yml
···
1
+
name: Linux cross compile tests
2
+
3
+
on:
4
+
push:
5
+
paths-ignore:
6
+
- ".devcontainer/**"
7
+
- ".gitpod.yml"
8
+
- ".vscode/**"
9
+
- "tests/**"
10
+
pull_request:
11
+
paths-ignore:
12
+
- ".devcontainer/**"
13
+
- ".gitpod.yml"
14
+
- ".vscode/**"
15
+
- "tests/**"
16
+
schedule:
17
+
# Run against the last commit on the default branch on Friday at 9pm (UTC?)
18
+
- cron: "0 21 * * 5"
19
+
20
+
jobs:
21
+
linux-cross:
22
+
runs-on: ubuntu-latest
23
+
strategy:
24
+
fail-fast: false
25
+
matrix:
26
+
platform:
27
+
- target: aarch64-unknown-linux-musl
28
+
rust-version: stable
29
+
- target: i686-unknown-linux-musl
30
+
rust-version: stable
31
+
- target: armv7-unknown-linux-musleabihf
32
+
rust-version: stable
33
+
- target: armv5te-unknown-linux-musleabi
34
+
rust-version: stable
35
+
36
+
steps:
37
+
- name: Checkout Mini Moka
38
+
uses: actions/checkout@v4
39
+
40
+
- name: Install Rust toolchain
41
+
uses: dtolnay/rust-toolchain@master
42
+
with:
43
+
toolchain: ${{ matrix.platform.rust-version }}
44
+
targets: ${{ matrix.platform.target }}
45
+
46
+
- name: Install cross
47
+
uses: taiki-e/install-action@v2
48
+
with:
49
+
tool: cross
50
+
51
+
- name: Remove integration tests and force enable rustc_version crate
52
+
run: |
53
+
rm -rf tests
54
+
sed -i '/actix-rt\|async-std\|reqwest/d' Cargo.toml
55
+
56
+
- run: cargo clean
57
+
58
+
- name: Run tests (sync feature)
59
+
run: |
60
+
cross ${{ matrix.platform.carge-version }} test --release -F sync \
61
+
--target ${{ matrix.platform.target }}
+37
crates/mini-moka-vendored/.github/workflows/Miri.yml
+37
crates/mini-moka-vendored/.github/workflows/Miri.yml
···
1
+
name: Miri tests
2
+
3
+
on:
4
+
push:
5
+
paths-ignore:
6
+
- '.devcontainer/**'
7
+
- '.gitpod.yml'
8
+
- '.vscode/**'
9
+
- 'tests/**'
10
+
pull_request:
11
+
paths-ignore:
12
+
- '.devcontainer/**'
13
+
- '.gitpod.yml'
14
+
- '.vscode/**'
15
+
- 'tests/**'
16
+
schedule:
17
+
# Run against the last commit on the default branch on Friday at 9pm (UTC?)
18
+
- cron: '0 21 * * 5'
19
+
20
+
jobs:
21
+
test:
22
+
runs-on: ubuntu-latest
23
+
24
+
steps:
25
+
- name: Checkout Mini Moka
26
+
uses: actions/checkout@v4
27
+
28
+
- name: Install Rust nightly toolchain with Miri
29
+
uses: dtolnay/rust-toolchain@master
30
+
with:
31
+
toolchain: nightly
32
+
components: miri
33
+
34
+
- run: cargo clean
35
+
36
+
- name: Run Miri test (deque)
37
+
run: cargo miri test deque
+35
crates/mini-moka-vendored/.github/workflows/Trybuild.yml
+35
crates/mini-moka-vendored/.github/workflows/Trybuild.yml
···
1
+
name: Trybuild
2
+
3
+
on:
4
+
push:
5
+
paths-ignore:
6
+
- '.devcontainer/**'
7
+
- '.gitpod.yml'
8
+
- '.vscode/**'
9
+
schedule:
10
+
# Run against the last commit on the default branch on Friday at 9pm (UTC?)
11
+
- cron: '0 21 * * 5'
12
+
13
+
jobs:
14
+
test:
15
+
runs-on: ubuntu-latest
16
+
strategy:
17
+
matrix:
18
+
rust:
19
+
- stable
20
+
- beta
21
+
22
+
steps:
23
+
- name: Checkout Mini Moka
24
+
uses: actions/checkout@v4
25
+
26
+
- name: Install Rust toolchain
27
+
uses: dtolnay/rust-toolchain@master
28
+
with:
29
+
toolchain: ${{ matrix.rust }}
30
+
31
+
- name: Run compile error tests (sync feature, trybuild)
32
+
if: ${{ matrix.rust == 'stable' }}
33
+
run: cargo test ui_trybuild --release --features sync
34
+
env:
35
+
RUSTFLAGS: '--cfg trybuild'
+8
crates/mini-moka-vendored/.gitignore
+8
crates/mini-moka-vendored/.gitignore
+71
crates/mini-moka-vendored/.vscode/settings.json
+71
crates/mini-moka-vendored/.vscode/settings.json
···
1
+
{
2
+
"rust-analyzer.cargo.features": [],
3
+
"rust-analyzer.server.extraEnv": {
4
+
"CARGO_TARGET_DIR": "target/ra"
5
+
},
6
+
"editor.rulers": [85],
7
+
"cSpell.words": [
8
+
"aarch",
9
+
"actix",
10
+
"ahash",
11
+
"armv",
12
+
"benmanes",
13
+
"circleci",
14
+
"CLFU",
15
+
"clippy",
16
+
"compat",
17
+
"cpus",
18
+
"dashmap",
19
+
"deqs",
20
+
"Deque",
21
+
"Deques",
22
+
"devcontainer",
23
+
"docsrs",
24
+
"doctest",
25
+
"doctests",
26
+
"Einziger",
27
+
"else's",
28
+
"Eytan",
29
+
"getrandom",
30
+
"hashbrown",
31
+
"Hasher",
32
+
"Kawano",
33
+
"mapref",
34
+
"Moka",
35
+
"mpsc",
36
+
"MSRV",
37
+
"nanos",
38
+
"nocapture",
39
+
"Ohad",
40
+
"peekable",
41
+
"preds",
42
+
"repr",
43
+
"reqwest",
44
+
"runtimes",
45
+
"rustc",
46
+
"rustdoc",
47
+
"RUSTFLAGS",
48
+
"rustfmt",
49
+
"semver",
50
+
"SIGABRT",
51
+
"SIGILL",
52
+
"smallvec",
53
+
"structs",
54
+
"tagptr",
55
+
"Tatsuya",
56
+
"thiserror",
57
+
"toolchain",
58
+
"triomphe",
59
+
"trybuild",
60
+
"Uninit",
61
+
"unsync",
62
+
"Upsert",
63
+
"usize"
64
+
],
65
+
"files.watcherExclude": {
66
+
"**/target": true
67
+
},
68
+
"cSpell.enableFiletypes": [
69
+
"toml"
70
+
]
71
+
}
+70
crates/mini-moka-vendored/CHANGELOG.md
+70
crates/mini-moka-vendored/CHANGELOG.md
···
1
+
# Mini Moka Cache — Change Log
2
+
3
+
## Version 0.10.3
4
+
5
+
### Fixed
6
+
7
+
- Fixed occasional panic in internal `FrequencySketch` in debug build.
8
+
([#21][gh-issue-0021])
9
+
10
+
11
+
## Version 0.10.2
12
+
13
+
### Fixed
14
+
15
+
- Fixed a memory corruption bug caused by the timing of concurrent `insert`,
16
+
`get` and removal of the same cached entry. ([#15][gh-pull-0015]).
17
+
18
+
19
+
## Version 0.10.1
20
+
21
+
Bumped the minimum supported Rust version (MSRV) to 1.61 (May 19, 2022).
22
+
([#5][gh-pull-0005])
23
+
24
+
### Fixed
25
+
26
+
- Fixed the caches mutating a deque node through a `NonNull` pointer derived from a
27
+
shared reference. ([#6][gh-pull-0006]).
28
+
29
+
30
+
## Version 0.10.0
31
+
32
+
In this version, we removed some dependencies from Mini Moka to make it more
33
+
lightweight.
34
+
35
+
### Removed
36
+
37
+
- Remove the background threads from the `sync::Cache` ([#1][gh-pull-0001]):
38
+
- Also remove the following dependencies:
39
+
- `scheduled-thread-pool`
40
+
- `num_cpus`
41
+
- `once_cell` (Moved to the dev-dependencies)
42
+
- Remove the following dependencies and crate features ([#2][gh-pull-0002]):
43
+
- Removed dependencies:
44
+
- `quanta`
45
+
- `parking_lot`
46
+
- `rustc_version` (from the build-dependencies)
47
+
- Removed crate features:
48
+
- `quanta` (was enabled by default)
49
+
- `atomic64` (was enabled by default)
50
+
51
+
## Version 0.9.6
52
+
53
+
### Added
54
+
55
+
- Move the relevant source code from the GitHub moka-rs/moka repository (at
56
+
[v0.9.6][moka-v0.9.6] tag) to this moka-rs/mini-moka repository.
57
+
- Rename `moka::dash` module to `mini_moka::sync`.
58
+
- Rename `moka::unsync` module to `mini_moka::unsync`.
59
+
- Rename a crate feature `dash` to `sync` and make it a default.
60
+
61
+
<!-- Links -->
62
+
[moka-v0.9.6]: https://github.com/moka-rs/moka/tree/v0.9.6
63
+
64
+
[gh-issue-0021]: https://github.com/moka-rs/mini-moka/issues/21/
65
+
66
+
[gh-pull-0015]: https://github.com/moka-rs/mini-moka/pull/15/
67
+
[gh-pull-0006]: https://github.com/moka-rs/mini-moka/pull/6/
68
+
[gh-pull-0005]: https://github.com/moka-rs/mini-moka/pull/5/
69
+
[gh-pull-0002]: https://github.com/moka-rs/mini-moka/pull/2/
70
+
[gh-pull-0001]: https://github.com/moka-rs/mini-moka/pull/1/
+62
crates/mini-moka-vendored/Cargo.toml
+62
crates/mini-moka-vendored/Cargo.toml
···
1
+
[package]
2
+
name = "mini-moka"
3
+
version = "0.11.0"
4
+
edition = "2018"
5
+
rust-version = "1.76"
6
+
7
+
description = "A lighter edition of Moka, a fast and concurrent cache library"
8
+
license = "MIT OR Apache-2.0"
9
+
# homepage = "https://"
10
+
documentation = "https://docs.rs/mini-moka/"
11
+
repository = "https://github.com/moka-rs/mini-moka"
12
+
keywords = ["cache", "concurrent"]
13
+
categories = ["caching", "concurrency"]
14
+
readme = "README.md"
15
+
exclude = [".circleci", ".devcontainer", ".github", ".gitpod.yml", ".vscode"]
16
+
17
+
[features]
18
+
default = ["sync"]
19
+
js = ["dep:web-time"]
20
+
21
+
sync = ["dashmap"]
22
+
23
+
[dependencies]
24
+
crossbeam-channel = "0.5.5"
25
+
crossbeam-utils = "0.8"
26
+
smallvec = "1.8"
27
+
tagptr = "0.2"
28
+
web-time = { version = "1.1.0", optional = true }
29
+
30
+
# Opt-out serde and stable_deref_trait features
31
+
# https://github.com/Manishearth/triomphe/pull/5
32
+
triomphe = { version = "0.1.13", default-features = false }
33
+
34
+
# Optional dependencies (enabled by default)
35
+
dashmap = { version = "6.1", optional = true }
36
+
37
+
[dev-dependencies]
38
+
anyhow = "1.0.19"
39
+
getrandom = "0.2"
40
+
once_cell = "1.7"
41
+
wasm-bindgen-test = "0.3.50"
42
+
43
+
[target.wasm32-unknown-unknown.dev-dependencies]
44
+
getrandom = { version="0.2", features = ["js"] }
45
+
46
+
[target.'cfg(trybuild)'.dev-dependencies]
47
+
trybuild = "1.0"
48
+
49
+
# https://docs.rs/about/metadata
50
+
[package.metadata.docs.rs]
51
+
# Build the doc with some features enabled.
52
+
features = []
53
+
rustdoc-args = ["--cfg", "docsrs"]
54
+
55
+
[lints.rust]
56
+
unexpected_cfgs = { level = "warn", check-cfg = [
57
+
"cfg(kani)",
58
+
"cfg(skeptic)",
59
+
"cfg(circleci)",
60
+
"cfg(trybuild)",
61
+
"cfg(beta_clippy)",
62
+
] }
+201
crates/mini-moka-vendored/LICENSE-APACHE
+201
crates/mini-moka-vendored/LICENSE-APACHE
···
1
+
Apache License
2
+
Version 2.0, January 2004
3
+
http://www.apache.org/licenses/
4
+
5
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+
1. Definitions.
8
+
9
+
"License" shall mean the terms and conditions for use, reproduction,
10
+
and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+
"Licensor" shall mean the copyright owner or entity authorized by
13
+
the copyright owner that is granting the License.
14
+
15
+
"Legal Entity" shall mean the union of the acting entity and all
16
+
other entities that control, are controlled by, or are under common
17
+
control with that entity. For the purposes of this definition,
18
+
"control" means (i) the power, direct or indirect, to cause the
19
+
direction or management of such entity, whether by contract or
20
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+
outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+
"You" (or "Your") shall mean an individual or Legal Entity
24
+
exercising permissions granted by this License.
25
+
26
+
"Source" form shall mean the preferred form for making modifications,
27
+
including but not limited to software source code, documentation
28
+
source, and configuration files.
29
+
30
+
"Object" form shall mean any form resulting from mechanical
31
+
transformation or translation of a Source form, including but
32
+
not limited to compiled object code, generated documentation,
33
+
and conversions to other media types.
34
+
35
+
"Work" shall mean the work of authorship, whether in Source or
36
+
Object form, made available under the License, as indicated by a
37
+
copyright notice that is included in or attached to the work
38
+
(an example is provided in the Appendix below).
39
+
40
+
"Derivative Works" shall mean any work, whether in Source or Object
41
+
form, that is based on (or derived from) the Work and for which the
42
+
editorial revisions, annotations, elaborations, or other modifications
43
+
represent, as a whole, an original work of authorship. For the purposes
44
+
of this License, Derivative Works shall not include works that remain
45
+
separable from, or merely link (or bind by name) to the interfaces of,
46
+
the Work and Derivative Works thereof.
47
+
48
+
"Contribution" shall mean any work of authorship, including
49
+
the original version of the Work and any modifications or additions
50
+
to that Work or Derivative Works thereof, that is intentionally
51
+
submitted to Licensor for inclusion in the Work by the copyright owner
52
+
or by an individual or Legal Entity authorized to submit on behalf of
53
+
the copyright owner. For the purposes of this definition, "submitted"
54
+
means any form of electronic, verbal, or written communication sent
55
+
to the Licensor or its representatives, including but not limited to
56
+
communication on electronic mailing lists, source code control systems,
57
+
and issue tracking systems that are managed by, or on behalf of, the
58
+
Licensor for the purpose of discussing and improving the Work, but
59
+
excluding communication that is conspicuously marked or otherwise
60
+
designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+
"Contributor" shall mean Licensor and any individual or Legal Entity
63
+
on behalf of whom a Contribution has been received by Licensor and
64
+
subsequently incorporated within the Work.
65
+
66
+
2. Grant of Copyright License. Subject to the terms and conditions of
67
+
this License, each Contributor hereby grants to You a perpetual,
68
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+
copyright license to reproduce, prepare Derivative Works of,
70
+
publicly display, publicly perform, sublicense, and distribute the
71
+
Work and such Derivative Works in Source or Object form.
72
+
73
+
3. Grant of Patent License. Subject to the terms and conditions of
74
+
this License, each Contributor hereby grants to You a perpetual,
75
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+
(except as stated in this section) patent license to make, have made,
77
+
use, offer to sell, sell, import, and otherwise transfer the Work,
78
+
where such license applies only to those patent claims licensable
79
+
by such Contributor that are necessarily infringed by their
80
+
Contribution(s) alone or by combination of their Contribution(s)
81
+
with the Work to which such Contribution(s) was submitted. If You
82
+
institute patent litigation against any entity (including a
83
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+
or a Contribution incorporated within the Work constitutes direct
85
+
or contributory patent infringement, then any patent licenses
86
+
granted to You under this License for that Work shall terminate
87
+
as of the date such litigation is filed.
88
+
89
+
4. Redistribution. You may reproduce and distribute copies of the
90
+
Work or Derivative Works thereof in any medium, with or without
91
+
modifications, and in Source or Object form, provided that You
92
+
meet the following conditions:
93
+
94
+
(a) You must give any other recipients of the Work or
95
+
Derivative Works a copy of this License; and
96
+
97
+
(b) You must cause any modified files to carry prominent notices
98
+
stating that You changed the files; and
99
+
100
+
(c) You must retain, in the Source form of any Derivative Works
101
+
that You distribute, all copyright, patent, trademark, and
102
+
attribution notices from the Source form of the Work,
103
+
excluding those notices that do not pertain to any part of
104
+
the Derivative Works; and
105
+
106
+
(d) If the Work includes a "NOTICE" text file as part of its
107
+
distribution, then any Derivative Works that You distribute must
108
+
include a readable copy of the attribution notices contained
109
+
within such NOTICE file, excluding those notices that do not
110
+
pertain to any part of the Derivative Works, in at least one
111
+
of the following places: within a NOTICE text file distributed
112
+
as part of the Derivative Works; within the Source form or
113
+
documentation, if provided along with the Derivative Works; or,
114
+
within a display generated by the Derivative Works, if and
115
+
wherever such third-party notices normally appear. The contents
116
+
of the NOTICE file are for informational purposes only and
117
+
do not modify the License. You may add Your own attribution
118
+
notices within Derivative Works that You distribute, alongside
119
+
or as an addendum to the NOTICE text from the Work, provided
120
+
that such additional attribution notices cannot be construed
121
+
as modifying the License.
122
+
123
+
You may add Your own copyright statement to Your modifications and
124
+
may provide additional or different license terms and conditions
125
+
for use, reproduction, or distribution of Your modifications, or
126
+
for any such Derivative Works as a whole, provided Your use,
127
+
reproduction, and distribution of the Work otherwise complies with
128
+
the conditions stated in this License.
129
+
130
+
5. Submission of Contributions. Unless You explicitly state otherwise,
131
+
any Contribution intentionally submitted for inclusion in the Work
132
+
by You to the Licensor shall be under the terms and conditions of
133
+
this License, without any additional terms or conditions.
134
+
Notwithstanding the above, nothing herein shall supersede or modify
135
+
the terms of any separate license agreement you may have executed
136
+
with Licensor regarding such Contributions.
137
+
138
+
6. Trademarks. This License does not grant permission to use the trade
139
+
names, trademarks, service marks, or product names of the Licensor,
140
+
except as required for reasonable and customary use in describing the
141
+
origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+
7. Disclaimer of Warranty. Unless required by applicable law or
144
+
agreed to in writing, Licensor provides the Work (and each
145
+
Contributor provides its Contributions) on an "AS IS" BASIS,
146
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+
implied, including, without limitation, any warranties or conditions
148
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+
PARTICULAR PURPOSE. You are solely responsible for determining the
150
+
appropriateness of using or redistributing the Work and assume any
151
+
risks associated with Your exercise of permissions under this License.
152
+
153
+
8. Limitation of Liability. In no event and under no legal theory,
154
+
whether in tort (including negligence), contract, or otherwise,
155
+
unless required by applicable law (such as deliberate and grossly
156
+
negligent acts) or agreed to in writing, shall any Contributor be
157
+
liable to You for damages, including any direct, indirect, special,
158
+
incidental, or consequential damages of any character arising as a
159
+
result of this License or out of the use or inability to use the
160
+
Work (including but not limited to damages for loss of goodwill,
161
+
work stoppage, computer failure or malfunction, or any and all
162
+
other commercial damages or losses), even if such Contributor
163
+
has been advised of the possibility of such damages.
164
+
165
+
9. Accepting Warranty or Additional Liability. While redistributing
166
+
the Work or Derivative Works thereof, You may choose to offer,
167
+
and charge a fee for, acceptance of support, warranty, indemnity,
168
+
or other liability obligations and/or rights consistent with this
169
+
License. However, in accepting such obligations, You may act only
170
+
on Your own behalf and on Your sole responsibility, not on behalf
171
+
of any other Contributor, and only if You agree to indemnify,
172
+
defend, and hold each Contributor harmless for any liability
173
+
incurred by, or claims asserted against, such Contributor by reason
174
+
of your accepting any such warranty or additional liability.
175
+
176
+
END OF TERMS AND CONDITIONS
177
+
178
+
APPENDIX: How to apply the Apache License to your work.
179
+
180
+
To apply the Apache License to your work, attach the following
181
+
boilerplate notice, with the fields enclosed by brackets "[]"
182
+
replaced with your own identifying information. (Don't include
183
+
the brackets!) The text should be enclosed in the appropriate
184
+
comment syntax for the file format. We also recommend that a
185
+
file or class name and description of purpose be included on the
186
+
same "printed page" as the copyright notice for easier
187
+
identification within third-party archives.
188
+
189
+
Copyright 2020 - 2024 Tatsuya Kawano
190
+
191
+
Licensed under the Apache License, Version 2.0 (the "License");
192
+
you may not use this file except in compliance with the License.
193
+
You may obtain a copy of the License at
194
+
195
+
http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+
Unless required by applicable law or agreed to in writing, software
198
+
distributed under the License is distributed on an "AS IS" BASIS,
199
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+
See the License for the specific language governing permissions and
201
+
limitations under the License.
+21
crates/mini-moka-vendored/LICENSE-MIT
+21
crates/mini-moka-vendored/LICENSE-MIT
···
1
+
MIT License
2
+
3
+
Copyright (c) 2020 - 2024 Tatsuya Kawano
4
+
5
+
Permission is hereby granted, free of charge, to any person obtaining a copy
6
+
of this software and associated documentation files (the "Software"), to deal
7
+
in the Software without restriction, including without limitation the rights
8
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+
copies of the Software, and to permit persons to whom the Software is
10
+
furnished to do so, subject to the following conditions:
11
+
12
+
The above copyright notice and this permission notice shall be included in all
13
+
copies or substantial portions of the Software.
14
+
15
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+
SOFTWARE.
+321
crates/mini-moka-vendored/README.md
+321
crates/mini-moka-vendored/README.md
···
1
+
# Vendored in until upstream PR for wasm compat is merged or I reimplement.
2
+
3
+
# Mini Moka
4
+
5
+
[![GitHub Actions][gh-actions-badge]][gh-actions]
6
+
[![crates.io release][release-badge]][crate]
7
+
[![docs][docs-badge]][docs]
8
+
[![dependency status][deps-rs-badge]][deps-rs]
9
+
<!-- [![coverage status][coveralls-badge]][coveralls] -->
10
+
[![license][license-badge]](#license)
11
+
<!-- [](https://app.fossa.com/projects/git%2Bgithub.com%2Fmoka-rs%2Fmini-moka?ref=badge_shield) -->
12
+
13
+
Mini Moka is a fast, concurrent cache library for Rust. Mini Moka is a light edition
14
+
of [Moka][moka-git].
15
+
16
+
Mini Moka provides cache implementations on top of hash maps. They support full
17
+
concurrency of retrievals and a high expected concurrency for updates. Mini Moka also
18
+
provides a non-thread-safe cache implementation for single thread applications.
19
+
20
+
All caches perform a best-effort bounding of a hash map using an entry replacement
21
+
algorithm to determine which entries to evict when the capacity is exceeded.
22
+
23
+
[gh-actions-badge]: https://github.com/moka-rs/mini-moka/workflows/CI/badge.svg
24
+
[release-badge]: https://img.shields.io/crates/v/mini-moka.svg
25
+
[docs-badge]: https://docs.rs/mini-moka/badge.svg
26
+
[deps-rs-badge]: https://deps.rs/repo/github/moka-rs/mini-moka/status.svg
27
+
<!-- [coveralls-badge]: https://coveralls.io/repos/github/mini-moka-rs/moka/badge.svg?branch=main -->
28
+
[license-badge]: https://img.shields.io/crates/l/mini-moka.svg
29
+
<!-- [fossa-badge]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fmoka-rs%2Fmini-moka.svg?type=shield -->
30
+
31
+
[gh-actions]: https://github.com/moka-rs/mini-moka/actions?query=workflow%3ACI
32
+
[crate]: https://crates.io/crates/mini-moka
33
+
[docs]: https://docs.rs/mini-moka
34
+
[deps-rs]: https://deps.rs/repo/github/moka-rs/mini-moka
35
+
<!-- [coveralls]: https://coveralls.io/github/moka-rs/mini-moka?branch=main -->
36
+
<!-- [fossa]: https://app.fossa.com/projects/git%2Bgithub.com%2Fmoka-rs%2Fmini-moka?ref=badge_shield -->
37
+
38
+
[moka-git]: https://github.com/moka-rs/moka
39
+
[caffeine-git]: https://github.com/ben-manes/caffeine
40
+
41
+
42
+
## Features
43
+
44
+
- Thread-safe, highly concurrent in-memory cache implementation.
45
+
- A cache can be bounded by one of the followings:
46
+
- The maximum number of entries.
47
+
- The total weighted size of entries. (Size aware eviction)
48
+
- Maintains near optimal hit ratio by using an entry replacement algorithms inspired
49
+
by Caffeine:
50
+
- Admission to a cache is controlled by the Least Frequently Used (LFU) policy.
51
+
- Eviction from a cache is controlled by the Least Recently Used (LRU) policy.
52
+
- [More details and some benchmark results are available here][tiny-lfu].
53
+
- Supports expiration policies:
54
+
- Time to live
55
+
- Time to idle
56
+
57
+
<!--
58
+
Mini Moka provides a rich and flexible feature set while maintaining high hit ratio
59
+
and a high level of concurrency for concurrent access. However, it may not be as fast
60
+
as other caches, especially those that focus on much smaller feature sets.
61
+
62
+
If you do not need features like: time to live, and size aware eviction, you may want
63
+
to take a look at the [Quick Cache][quick-cache] crate.
64
+
-->
65
+
66
+
[tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies
67
+
<!-- [quick-cache]: https://crates.io/crates/quick_cache -->
68
+
69
+
70
+
## Change Log
71
+
72
+
- [CHANGELOG.md](https://github.com/moka-rs/mini-moka/blob/main/CHANGELOG.md)
73
+
74
+
75
+
## Table of Contents
76
+
77
+
- [Features](#features)
78
+
- [Change Log](#change-log)
79
+
- [Usage](#usage)
80
+
- [Example: Synchronous Cache](#example-synchronous-cache)
81
+
- [Avoiding to clone the value at `get`](#avoiding-to-clone-the-value-at-get)
82
+
- Examples (Part 2)
83
+
- [Size Aware Eviction](#example-size-aware-eviction)
84
+
- [Expiration Policies](#example-expiration-policies)
85
+
- [Minimum Supported Rust Versions](#minimum-supported-rust-versions)
86
+
- [Developing Mini Moka](#developing-mini-moka)
87
+
- [Credits](#credits)
88
+
- [License](#license)
89
+
90
+
91
+
## Usage
92
+
93
+
Add this to your `Cargo.toml`:
94
+
95
+
```toml
96
+
[dependencies]
97
+
mini_moka = "0.10"
98
+
```
99
+
100
+
101
+
## Example: Synchronous Cache
102
+
103
+
The thread-safe, synchronous caches are defined in the `sync` module.
104
+
105
+
Cache entries are manually added using `insert` method, and are stored in the cache
106
+
until either evicted or manually invalidated.
107
+
108
+
Here's an example of reading and updating a cache by using multiple threads:
109
+
110
+
```rust
111
+
// Use the synchronous cache.
112
+
use mini_moka::sync::Cache;
113
+
114
+
use std::thread;
115
+
116
+
fn value(n: usize) -> String {
117
+
format!("value {}", n)
118
+
}
119
+
120
+
fn main() {
121
+
const NUM_THREADS: usize = 16;
122
+
const NUM_KEYS_PER_THREAD: usize = 64;
123
+
124
+
// Create a cache that can store up to 10,000 entries.
125
+
let cache = Cache::new(10_000);
126
+
127
+
// Spawn threads and read and update the cache simultaneously.
128
+
let threads: Vec<_> = (0..NUM_THREADS)
129
+
.map(|i| {
130
+
// To share the same cache across the threads, clone it.
131
+
// This is a cheap operation.
132
+
let my_cache = cache.clone();
133
+
let start = i * NUM_KEYS_PER_THREAD;
134
+
let end = (i + 1) * NUM_KEYS_PER_THREAD;
135
+
136
+
thread::spawn(move || {
137
+
// Insert 64 entries. (NUM_KEYS_PER_THREAD = 64)
138
+
for key in start..end {
139
+
my_cache.insert(key, value(key));
140
+
// get() returns Option<String>, a clone of the stored value.
141
+
assert_eq!(my_cache.get(&key), Some(value(key)));
142
+
}
143
+
144
+
// Invalidate every 4 element of the inserted entries.
145
+
for key in (start..end).step_by(4) {
146
+
my_cache.invalidate(&key);
147
+
}
148
+
})
149
+
})
150
+
.collect();
151
+
152
+
// Wait for all threads to complete.
153
+
threads.into_iter().for_each(|t| t.join().expect("Failed"));
154
+
155
+
// Verify the result.
156
+
for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) {
157
+
if key % 4 == 0 {
158
+
assert_eq!(cache.get(&key), None);
159
+
} else {
160
+
assert_eq!(cache.get(&key), Some(value(key)));
161
+
}
162
+
}
163
+
}
164
+
```
165
+
166
+
167
+
## Avoiding to clone the value at `get`
168
+
169
+
For the concurrent cache (`sync` cache), the return type of `get` method is
170
+
`Option<V>` instead of `Option<&V>`, where `V` is the value type. Every time `get` is
171
+
called for an existing key, it creates a clone of the stored value `V` and returns
172
+
it. This is because the `Cache` allows concurrent updates from threads so a value
173
+
stored in the cache can be dropped or replaced at any time by any other thread. `get`
174
+
cannot return a reference `&V` as it is impossible to guarantee the value outlives
175
+
the reference.
176
+
177
+
If you want to store values that will be expensive to clone, wrap them by
178
+
`std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a thread-safe
179
+
reference-counted pointer and its `clone()` method is cheap.
180
+
181
+
[rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
182
+
183
+
```rust,ignore
184
+
use std::sync::Arc;
185
+
186
+
let key = ...
187
+
let large_value = vec![0u8; 2 * 1024 * 1024]; // 2 MiB
188
+
189
+
// When insert, wrap the large_value by Arc.
190
+
cache.insert(key.clone(), Arc::new(large_value));
191
+
192
+
// get() will call Arc::clone() on the stored value, which is cheap.
193
+
cache.get(&key);
194
+
```
195
+
196
+
197
+
## Example: Size Aware Eviction
198
+
199
+
If different cache entries have different "weights" — e.g. each entry has
200
+
different memory footprints — you can specify a `weigher` closure at the cache
201
+
creation time. The closure should return a weighted size (relative size) of an entry
202
+
in `u32`, and the cache will evict entries when the total weighted size exceeds its
203
+
`max_capacity`.
204
+
205
+
```rust
206
+
use std::convert::TryInto;
207
+
use mini_moka::sync::Cache;
208
+
209
+
fn main() {
210
+
let cache = Cache::builder()
211
+
// A weigher closure takes &K and &V and returns a u32 representing the
212
+
// relative size of the entry. Here, we use the byte length of the value
213
+
// String as the size.
214
+
.weigher(|_key, value: &String| -> u32 {
215
+
value.len().try_into().unwrap_or(u32::MAX)
216
+
})
217
+
// This cache will hold up to 32MiB of values.
218
+
.max_capacity(32 * 1024 * 1024)
219
+
.build();
220
+
cache.insert(0, "zero".to_string());
221
+
}
222
+
```
223
+
224
+
Note that weighted sizes are not used when making eviction selections.
225
+
226
+
227
+
## Example: Expiration Policies
228
+
229
+
Mini Moka supports the following expiration policies:
230
+
231
+
- **Time to live**: A cached entry will be expired after the specified duration past
232
+
from `insert`.
233
+
- **Time to idle**: A cached entry will be expired after the specified duration past
234
+
from `get` or `insert`.
235
+
236
+
To set them, use the `CacheBuilder`.
237
+
238
+
```rust
239
+
use mini_moka::sync::Cache;
240
+
use std::time::Duration;
241
+
242
+
fn main() {
243
+
let cache = Cache::builder()
244
+
// Time to live (TTL): 30 minutes
245
+
.time_to_live(Duration::from_secs(30 * 60))
246
+
// Time to idle (TTI): 5 minutes
247
+
.time_to_idle(Duration::from_secs( 5 * 60))
248
+
// Create the cache.
249
+
.build();
250
+
251
+
// This entry will expire after 5 minutes (TTI) if there is no get().
252
+
cache.insert(0, "zero");
253
+
254
+
// This get() will extend the entry life for another 5 minutes.
255
+
cache.get(&0);
256
+
257
+
// Even though we keep calling get(), the entry will expire
258
+
// after 30 minutes (TTL) from the insert().
259
+
}
260
+
```
261
+
262
+
### A note on expiration policies
263
+
264
+
The cache builders will panic if configured with either `time_to_live` or `time to
265
+
idle` longer than 1000 years. This is done to protect against overflow when computing
266
+
key expiration.
267
+
268
+
269
+
## Minimum Supported Rust Versions
270
+
271
+
Mini Moka's minimum supported Rust versions (MSRV) are the followings:
272
+
273
+
| Feature | MSRV |
274
+
|:-----------------|:--------------------------:|
275
+
| default features | Rust 1.76.0 (Feb 8, 2024) |
276
+
277
+
It will keep a rolling MSRV policy of at least 6 months. If only the default features
278
+
are enabled, MSRV will be updated conservatively. When using other features, MSRV
279
+
might be updated more frequently, up to the latest stable. In both cases, increasing
280
+
MSRV is _not_ considered a semver-breaking change.
281
+
282
+
283
+
## Developing Mini Moka
284
+
285
+
**Running All Tests**
286
+
287
+
To run all tests including doc tests on the README, use the following command:
288
+
289
+
```console
290
+
$ RUSTFLAGS='--cfg trybuild' cargo test --all-features
291
+
```
292
+
293
+
294
+
**Generating the Doc**
295
+
296
+
```console
297
+
$ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs"' \
298
+
doc --no-deps
299
+
```
300
+
301
+
302
+
## Credits
303
+
304
+
### Caffeine
305
+
306
+
Mini Moka's architecture is heavily inspired by the [Caffeine][caffeine-git] library
307
+
for Java. Thanks go to Ben Manes and all contributors of Caffeine.
308
+
309
+
310
+
## License
311
+
312
+
Mini Moka is distributed under either of
313
+
314
+
- The MIT license
315
+
- The Apache License (Version 2.0)
316
+
317
+
at your option.
318
+
319
+
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
320
+
321
+
<!-- [](https://app.fossa.com/projects/git%2Bgithub.com%2Fmoka-rs%2Fmini-moka?ref=badge_large) -->
+49
crates/mini-moka-vendored/src/common.rs
+49
crates/mini-moka-vendored/src/common.rs
···
1
+
use std::convert::TryInto;
2
+
3
+
#[cfg(feature = "sync")]
4
+
pub(crate) mod concurrent;
5
+
6
+
pub(crate) mod builder_utils;
7
+
pub(crate) mod deque;
8
+
pub(crate) mod frequency_sketch;
9
+
pub(crate) mod time;
10
+
11
+
// Note: `CacheRegion` cannot have more than four enum variants. This is because
12
+
// `crate::{sync,unsync}::DeqNodes` uses a `tagptr::TagNonNull<DeqNode<T>, 2>`
13
+
// pointer, where the 2-bit tag is `CacheRegion`.
14
+
#[derive(Clone, Copy, Debug, Eq)]
15
+
pub(crate) enum CacheRegion {
16
+
Window = 0,
17
+
MainProbation = 1,
18
+
MainProtected = 2,
19
+
Other = 3,
20
+
}
21
+
22
+
impl From<usize> for CacheRegion {
23
+
fn from(n: usize) -> Self {
24
+
match n {
25
+
0 => Self::Window,
26
+
1 => Self::MainProbation,
27
+
2 => Self::MainProtected,
28
+
3 => Self::Other,
29
+
_ => panic!("No such CacheRegion variant for {}", n),
30
+
}
31
+
}
32
+
}
33
+
34
+
impl PartialEq<Self> for CacheRegion {
35
+
fn eq(&self, other: &Self) -> bool {
36
+
core::mem::discriminant(self) == core::mem::discriminant(other)
37
+
}
38
+
}
39
+
40
+
impl PartialEq<usize> for CacheRegion {
41
+
fn eq(&self, other: &usize) -> bool {
42
+
*self as usize == *other
43
+
}
44
+
}
45
+
46
+
// Ensures the value fits in a range of `128u32..=u32::MAX`.
47
+
pub(crate) fn sketch_capacity(max_capacity: u64) -> u32 {
48
+
max_capacity.try_into().unwrap_or(u32::MAX).max(128)
49
+
}
+16
crates/mini-moka-vendored/src/common/builder_utils.rs
+16
crates/mini-moka-vendored/src/common/builder_utils.rs
···
1
+
use std::time::Duration;
2
+
3
+
const YEAR_SECONDS: u64 = 365 * 24 * 3600;
4
+
5
+
pub(crate) fn ensure_expirations_or_panic(
6
+
time_to_live: Option<Duration>,
7
+
time_to_idle: Option<Duration>,
8
+
) {
9
+
let max_duration = Duration::from_secs(1_000 * YEAR_SECONDS);
10
+
if let Some(d) = time_to_live {
11
+
assert!(d <= max_duration, "time_to_live is longer than 1000 years");
12
+
}
13
+
if let Some(d) = time_to_idle {
14
+
assert!(d <= max_duration, "time_to_idle is longer than 1000 years");
15
+
}
16
+
}
+255
crates/mini-moka-vendored/src/common/concurrent.rs
+255
crates/mini-moka-vendored/src/common/concurrent.rs
···
1
+
use crate::common::{deque::DeqNode, time::Instant};
2
+
3
+
use std::{ptr::NonNull, sync::Arc};
4
+
use tagptr::TagNonNull;
5
+
use triomphe::Arc as TrioArc;
6
+
7
+
pub(crate) mod constants;
8
+
pub(crate) mod deques;
9
+
pub(crate) mod entry_info;
10
+
pub(crate) mod housekeeper;
11
+
12
+
pub(crate) mod atomic_time;
13
+
14
+
use self::entry_info::EntryInfo;
15
+
16
+
pub(crate) type Weigher<K, V> = Arc<dyn Fn(&K, &V) -> u32 + Send + Sync + 'static>;
17
+
18
+
pub(crate) trait AccessTime {
19
+
fn last_accessed(&self) -> Option<Instant>;
20
+
fn set_last_accessed(&self, timestamp: Instant);
21
+
fn last_modified(&self) -> Option<Instant>;
22
+
fn set_last_modified(&self, timestamp: Instant);
23
+
}
24
+
25
+
pub(crate) struct KeyHash<K> {
26
+
pub(crate) key: Arc<K>,
27
+
pub(crate) hash: u64,
28
+
}
29
+
30
+
impl<K> KeyHash<K> {
31
+
pub(crate) fn new(key: Arc<K>, hash: u64) -> Self {
32
+
Self { key, hash }
33
+
}
34
+
}
35
+
36
+
impl<K> Clone for KeyHash<K> {
37
+
fn clone(&self) -> Self {
38
+
Self {
39
+
key: Arc::clone(&self.key),
40
+
hash: self.hash,
41
+
}
42
+
}
43
+
}
44
+
45
+
pub(crate) struct KeyDate<K> {
46
+
key: Arc<K>,
47
+
entry_info: TrioArc<EntryInfo<K>>,
48
+
}
49
+
50
+
impl<K> KeyDate<K> {
51
+
pub(crate) fn new(key: Arc<K>, entry_info: &TrioArc<EntryInfo<K>>) -> Self {
52
+
Self {
53
+
key,
54
+
entry_info: TrioArc::clone(entry_info),
55
+
}
56
+
}
57
+
58
+
pub(crate) fn key(&self) -> &Arc<K> {
59
+
&self.key
60
+
}
61
+
}
62
+
63
+
pub(crate) struct KeyHashDate<K> {
64
+
key: Arc<K>,
65
+
hash: u64,
66
+
entry_info: TrioArc<EntryInfo<K>>,
67
+
}
68
+
69
+
impl<K> KeyHashDate<K> {
70
+
pub(crate) fn new(kh: KeyHash<K>, entry_info: &TrioArc<EntryInfo<K>>) -> Self {
71
+
Self {
72
+
key: kh.key,
73
+
hash: kh.hash,
74
+
entry_info: TrioArc::clone(entry_info),
75
+
}
76
+
}
77
+
78
+
pub(crate) fn key(&self) -> &Arc<K> {
79
+
&self.key
80
+
}
81
+
82
+
pub(crate) fn hash(&self) -> u64 {
83
+
self.hash
84
+
}
85
+
86
+
pub(crate) fn entry_info(&self) -> &EntryInfo<K> {
87
+
&self.entry_info
88
+
}
89
+
}
90
+
91
+
pub(crate) struct KvEntry<K, V> {
92
+
pub(crate) key: Arc<K>,
93
+
pub(crate) entry: TrioArc<ValueEntry<K, V>>,
94
+
}
95
+
96
+
impl<K, V> KvEntry<K, V> {
97
+
pub(crate) fn new(key: Arc<K>, entry: TrioArc<ValueEntry<K, V>>) -> Self {
98
+
Self { key, entry }
99
+
}
100
+
}
101
+
102
+
impl<K> AccessTime for DeqNode<KeyDate<K>> {
103
+
#[inline]
104
+
fn last_accessed(&self) -> Option<Instant> {
105
+
None
106
+
}
107
+
108
+
#[inline]
109
+
fn set_last_accessed(&self, _timestamp: Instant) {
110
+
unreachable!();
111
+
}
112
+
113
+
#[inline]
114
+
fn last_modified(&self) -> Option<Instant> {
115
+
self.element.entry_info.last_modified()
116
+
}
117
+
118
+
#[inline]
119
+
fn set_last_modified(&self, timestamp: Instant) {
120
+
self.element.entry_info.set_last_modified(timestamp);
121
+
}
122
+
}
123
+
124
+
impl<K> AccessTime for DeqNode<KeyHashDate<K>> {
125
+
#[inline]
126
+
fn last_accessed(&self) -> Option<Instant> {
127
+
self.element.entry_info.last_accessed()
128
+
}
129
+
130
+
#[inline]
131
+
fn set_last_accessed(&self, timestamp: Instant) {
132
+
self.element.entry_info.set_last_accessed(timestamp);
133
+
}
134
+
135
+
#[inline]
136
+
fn last_modified(&self) -> Option<Instant> {
137
+
None
138
+
}
139
+
140
+
#[inline]
141
+
fn set_last_modified(&self, _timestamp: Instant) {
142
+
unreachable!();
143
+
}
144
+
}
145
+
146
+
// DeqNode for an access order queue.
147
+
pub(crate) type KeyDeqNodeAo<K> = TagNonNull<DeqNode<KeyHashDate<K>>, 2>;
148
+
149
+
// DeqNode for the write order queue.
150
+
pub(crate) type KeyDeqNodeWo<K> = NonNull<DeqNode<KeyDate<K>>>;
151
+
152
+
pub(crate) struct ValueEntry<K, V> {
153
+
pub(crate) value: V,
154
+
info: TrioArc<EntryInfo<K>>,
155
+
}
156
+
157
+
impl<K, V> ValueEntry<K, V> {
158
+
pub(crate) fn new(value: V, entry_info: TrioArc<EntryInfo<K>>) -> Self {
159
+
Self {
160
+
value,
161
+
info: entry_info,
162
+
}
163
+
}
164
+
165
+
pub(crate) fn entry_info(&self) -> &TrioArc<EntryInfo<K>> {
166
+
&self.info
167
+
}
168
+
169
+
pub(crate) fn is_admitted(&self) -> bool {
170
+
self.info.is_admitted()
171
+
}
172
+
173
+
pub(crate) fn set_admitted(&self, value: bool) {
174
+
self.info.set_admitted(value);
175
+
}
176
+
177
+
pub(crate) fn is_dirty(&self) -> bool {
178
+
self.info.is_dirty()
179
+
}
180
+
181
+
pub(crate) fn set_dirty(&self, value: bool) {
182
+
self.info.set_dirty(value);
183
+
}
184
+
185
+
#[inline]
186
+
pub(crate) fn policy_weight(&self) -> u32 {
187
+
self.info.policy_weight()
188
+
}
189
+
190
+
pub(crate) fn access_order_q_node(&self) -> Option<KeyDeqNodeAo<K>> {
191
+
self.info.access_order_q_node()
192
+
}
193
+
194
+
pub(crate) fn set_access_order_q_node(&self, node: Option<KeyDeqNodeAo<K>>) {
195
+
self.info.set_access_order_q_node(node);
196
+
}
197
+
198
+
pub(crate) fn take_access_order_q_node(&self) -> Option<KeyDeqNodeAo<K>> {
199
+
self.info.take_access_order_q_node()
200
+
}
201
+
202
+
pub(crate) fn write_order_q_node(&self) -> Option<KeyDeqNodeWo<K>> {
203
+
self.info.write_order_q_node()
204
+
}
205
+
206
+
pub(crate) fn set_write_order_q_node(&self, node: Option<KeyDeqNodeWo<K>>) {
207
+
self.info.set_write_order_q_node(node)
208
+
}
209
+
210
+
pub(crate) fn take_write_order_q_node(&self) -> Option<KeyDeqNodeWo<K>> {
211
+
self.info.take_write_order_q_node()
212
+
}
213
+
214
+
pub(crate) fn unset_q_nodes(&self) {
215
+
self.info.unset_q_nodes();
216
+
}
217
+
}
218
+
219
+
impl<K, V> AccessTime for TrioArc<ValueEntry<K, V>> {
220
+
#[inline]
221
+
fn last_accessed(&self) -> Option<Instant> {
222
+
self.info.last_accessed()
223
+
}
224
+
225
+
#[inline]
226
+
fn set_last_accessed(&self, timestamp: Instant) {
227
+
self.info.set_last_accessed(timestamp);
228
+
}
229
+
230
+
#[inline]
231
+
fn last_modified(&self) -> Option<Instant> {
232
+
self.info.last_modified()
233
+
}
234
+
235
+
#[inline]
236
+
fn set_last_modified(&self, timestamp: Instant) {
237
+
self.info.set_last_modified(timestamp);
238
+
}
239
+
}
240
+
241
+
pub(crate) enum ReadOp<K, V> {
242
+
// u64 is the hash of the key.
243
+
Hit(u64, TrioArc<ValueEntry<K, V>>, Instant),
244
+
Miss(u64),
245
+
}
246
+
247
+
pub(crate) enum WriteOp<K, V> {
248
+
Upsert {
249
+
key_hash: KeyHash<K>,
250
+
value_entry: TrioArc<ValueEntry<K, V>>,
251
+
old_weight: u32,
252
+
new_weight: u32,
253
+
},
254
+
Remove(KvEntry<K, V>),
255
+
}
+35
crates/mini-moka-vendored/src/common/concurrent/atomic_time.rs
+35
crates/mini-moka-vendored/src/common/concurrent/atomic_time.rs
···
1
+
use super::Instant;
2
+
3
+
use std::sync::RwLock;
4
+
5
+
pub(crate) struct AtomicInstant {
6
+
instant: RwLock<Option<Instant>>,
7
+
}
8
+
9
+
impl Default for AtomicInstant {
10
+
fn default() -> Self {
11
+
Self {
12
+
instant: RwLock::new(None),
13
+
}
14
+
}
15
+
}
16
+
17
+
impl AtomicInstant {
18
+
pub(crate) fn new(timestamp: Instant) -> Self {
19
+
let ai = Self::default();
20
+
ai.set_instant(timestamp);
21
+
ai
22
+
}
23
+
24
+
pub(crate) fn is_set(&self) -> bool {
25
+
self.instant.read().expect("lock poisoned").is_some()
26
+
}
27
+
28
+
pub(crate) fn instant(&self) -> Option<Instant> {
29
+
*self.instant.read().expect("lock poisoned")
30
+
}
31
+
32
+
pub(crate) fn set_instant(&self, instant: Instant) {
33
+
*self.instant.write().expect("lock poisoned") = Some(instant);
34
+
}
35
+
}
+10
crates/mini-moka-vendored/src/common/concurrent/constants.rs
+10
crates/mini-moka-vendored/src/common/concurrent/constants.rs
···
1
+
pub(crate) const MAX_SYNC_REPEATS: usize = 4;
2
+
pub(crate) const PERIODICAL_SYNC_INTERVAL_MILLIS: u64 = 500;
3
+
4
+
pub(crate) const READ_LOG_FLUSH_POINT: usize = 64;
5
+
pub(crate) const READ_LOG_SIZE: usize = READ_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS + 2);
6
+
7
+
pub(crate) const WRITE_LOG_FLUSH_POINT: usize = 64;
8
+
pub(crate) const WRITE_LOG_SIZE: usize = WRITE_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS + 2);
9
+
10
+
pub(crate) const WRITE_RETRY_INTERVAL_MICROS: u64 = 50;
+181
crates/mini-moka-vendored/src/common/concurrent/deques.rs
+181
crates/mini-moka-vendored/src/common/concurrent/deques.rs
···
1
+
use super::{KeyDate, KeyHashDate, ValueEntry};
2
+
use crate::common::{
3
+
deque::{DeqNode, Deque},
4
+
CacheRegion,
5
+
};
6
+
7
+
use std::ptr::NonNull;
8
+
use tagptr::TagNonNull;
9
+
use triomphe::Arc as TrioArc;
10
+
pub(crate) struct Deques<K> {
11
+
pub(crate) window: Deque<KeyHashDate<K>>, // Not used yet.
12
+
pub(crate) probation: Deque<KeyHashDate<K>>,
13
+
pub(crate) protected: Deque<KeyHashDate<K>>, // Not used yet.
14
+
pub(crate) write_order: Deque<KeyDate<K>>,
15
+
}
16
+
17
+
impl<K> Default for Deques<K> {
18
+
fn default() -> Self {
19
+
Self {
20
+
window: Deque::new(CacheRegion::Window),
21
+
probation: Deque::new(CacheRegion::MainProbation),
22
+
protected: Deque::new(CacheRegion::MainProtected),
23
+
write_order: Deque::new(CacheRegion::Other),
24
+
}
25
+
}
26
+
}
27
+
28
+
impl<K> Deques<K> {
    /// Creates a node for `khd`, pushes it to the back of the access-order
    /// deque selected by `region`, and stores the region-tagged node pointer
    /// back into `entry`.
    pub(crate) fn push_back_ao<V>(
        &mut self,
        region: CacheRegion,
        khd: KeyHashDate<K>,
        entry: &TrioArc<ValueEntry<K, V>>,
    ) {
        let node = Box::new(DeqNode::new(khd));
        let node = match region {
            CacheRegion::Window => self.window.push_back(node),
            CacheRegion::MainProbation => self.probation.push_back(node),
            CacheRegion::MainProtected => self.protected.push_back(node),
            // `Other` is only used for the write-order deque; access-order
            // nodes always live in one of the three regions above.
            _ => unreachable!(),
        };
        // Encode the region into the pointer's low bits so later calls can
        // tell which deque owns the node without extra bookkeeping.
        let tagged_node = TagNonNull::compose(node, region as usize);
        entry.set_access_order_q_node(Some(tagged_node));
    }

    /// Creates a node for `kd`, pushes it to the back of the write-order
    /// deque, and stores the (untagged) node pointer back into `entry`.
    pub(crate) fn push_back_wo<V>(&mut self, kd: KeyDate<K>, entry: &TrioArc<ValueEntry<K, V>>) {
        let node = Box::new(DeqNode::new(kd));
        let node = self.write_order.push_back(node);
        entry.set_write_order_q_node(Some(node));
    }

    /// Moves `entry`'s access-order node to the back (MRU end) of whichever
    /// region deque the node's tag says it belongs to. No-op if the entry has
    /// no access-order node.
    pub(crate) fn move_to_back_ao<V>(&mut self, entry: &TrioArc<ValueEntry<K, V>>) {
        if let Some(tagged_node) = entry.access_order_q_node() {
            let (node, tag) = tagged_node.decompose();
            // SAFETY: the node pointer stored in the entry points at a node
            // owned by one of our deques.
            let p = unsafe { node.as_ref() };
            // `contains` guards against a node that was already unlinked
            // (e.g. by a concurrent eviction replay).
            match tag.into() {
                CacheRegion::Window if self.window.contains(p) => {
                    unsafe { self.window.move_to_back(node) };
                }
                CacheRegion::MainProbation if self.probation.contains(p) => {
                    unsafe { self.probation.move_to_back(node) };
                }
                CacheRegion::MainProtected if self.protected.contains(p) => {
                    unsafe { self.protected.move_to_back(node) };
                }
                _ => unreachable!(),
            }
        }
    }

    /// Like `move_to_back_ao`, but for a caller that already holds the one
    /// deque the node should be in. Panics if the node's region tag does not
    /// match `deq` (the `deq_name` is only used in the panic message).
    pub(crate) fn move_to_back_ao_in_deque<V>(
        deq_name: &str,
        deq: &mut Deque<KeyHashDate<K>>,
        entry: &TrioArc<ValueEntry<K, V>>,
    ) {
        if let Some(tagged_node) = entry.access_order_q_node() {
            let (node, tag) = tagged_node.decompose();
            // SAFETY: see `move_to_back_ao`.
            let p = unsafe { node.as_ref() };
            if deq.region() == tag {
                if deq.contains(p) {
                    unsafe { deq.move_to_back(node) };
                }
            } else {
                panic!(
                    "move_to_back_ao_in_deque - node is not a member of {} deque. {:?}",
                    deq_name, p,
                )
            }
        }
    }

    /// Moves `entry`'s write-order node to the back of the write-order deque.
    /// No-op if the entry has no write-order node or it was already unlinked.
    pub(crate) fn move_to_back_wo<V>(&mut self, entry: &TrioArc<ValueEntry<K, V>>) {
        if let Some(node) = entry.write_order_q_node() {
            // SAFETY: the stored pointer targets a node owned by `write_order`.
            let p = unsafe { node.as_ref() };
            if self.write_order.contains(p) {
                unsafe { self.write_order.move_to_back(node) };
            }
        }
    }

    /// Associated-function variant of `move_to_back_wo` for callers that hold
    /// the write-order deque directly instead of the whole `Deques`.
    pub(crate) fn move_to_back_wo_in_deque<V>(
        deq: &mut Deque<KeyDate<K>>,
        entry: &TrioArc<ValueEntry<K, V>>,
    ) {
        if let Some(node) = entry.write_order_q_node() {
            // SAFETY: see `move_to_back_wo`.
            let p = unsafe { node.as_ref() };
            if deq.contains(p) {
                unsafe { deq.move_to_back(node) };
            }
        }
    }

    /// Removes `entry`'s access-order node (if any) from its region deque and
    /// drops it. The pointer is `take`n from the entry so it cannot dangle.
    pub(crate) fn unlink_ao<V>(&mut self, entry: &TrioArc<ValueEntry<K, V>>) {
        if let Some(node) = entry.take_access_order_q_node() {
            self.unlink_node_ao(node);
        }
    }

    /// Like `unlink_ao`, but for a caller holding the single expected deque.
    /// Panics (inside the helper) if the node's tag does not match `deq`.
    pub(crate) fn unlink_ao_from_deque<V>(
        deq_name: &str,
        deq: &mut Deque<KeyHashDate<K>>,
        entry: &TrioArc<ValueEntry<K, V>>,
    ) {
        if let Some(node) = entry.take_access_order_q_node() {
            unsafe { Self::unlink_node_ao_from_deque(deq_name, deq, node) };
        }
    }

    /// Removes `entry`'s write-order node (if any) from `deq` and drops it.
    pub(crate) fn unlink_wo<V>(deq: &mut Deque<KeyDate<K>>, entry: &TrioArc<ValueEntry<K, V>>) {
        if let Some(node) = entry.take_write_order_q_node() {
            Self::unlink_node_wo(deq, node);
        }
    }

    /// Dispatches an access-order node to the deque named by its tag and
    /// unlinks + drops it there.
    pub(crate) fn unlink_node_ao(&mut self, tagged_node: TagNonNull<DeqNode<KeyHashDate<K>>, 2>) {
        unsafe {
            match tagged_node.decompose_tag().into() {
                CacheRegion::Window => {
                    Self::unlink_node_ao_from_deque("window", &mut self.window, tagged_node)
                }
                CacheRegion::MainProbation => {
                    Self::unlink_node_ao_from_deque("probation", &mut self.probation, tagged_node)
                }
                CacheRegion::MainProtected => {
                    Self::unlink_node_ao_from_deque("protected", &mut self.protected, tagged_node)
                }
                _ => unreachable!(),
            }
        }
    }

    /// Unlinks and drops `tagged_node` from `deq`, verifying first that the
    /// node's region tag matches the deque (panics otherwise).
    ///
    /// # Safety
    /// `tagged_node` must point at a live node owned by `deq`.
    unsafe fn unlink_node_ao_from_deque(
        deq_name: &str,
        deq: &mut Deque<KeyHashDate<K>>,
        tagged_node: TagNonNull<DeqNode<KeyHashDate<K>>, 2>,
    ) {
        let (node, tag) = tagged_node.decompose();
        let p = node.as_ref();
        if deq.region() == tag {
            // Skip nodes that were already unlinked; see the linked issue.
            if deq.contains(p) {
                // https://github.com/moka-rs/moka/issues/64
                deq.unlink_and_drop(node);
            }
        } else {
            panic!(
                "unlink_node - node is not a member of {} deque. {:?}",
                deq_name, p
            )
        }
    }

    /// Unlinks and drops `node` from the write-order deque `deq`, skipping
    /// nodes that are no longer linked.
    pub(crate) fn unlink_node_wo(deq: &mut Deque<KeyDate<K>>, node: NonNull<DeqNode<KeyDate<K>>>) {
        unsafe {
            let p = node.as_ref();
            if deq.contains(p) {
                // https://github.com/moka-rs/moka/issues/64
                deq.unlink_and_drop(node);
            }
        }
    }
}
+150
crates/mini-moka-vendored/src/common/concurrent/entry_info.rs
+150
crates/mini-moka-vendored/src/common/concurrent/entry_info.rs
···
1
+
use std::sync::{
2
+
atomic::{AtomicBool, AtomicU32, Ordering},
3
+
Mutex,
4
+
};
5
+
6
+
use super::{AccessTime, KeyDeqNodeAo, KeyDeqNodeWo};
7
+
use crate::common::{concurrent::atomic_time::AtomicInstant, time::Instant};
8
+
9
+
/// Per-entry slots holding raw pointers to this entry's nodes in the
/// access-order and write-order deques; either may be absent when the entry
/// is not (yet, or no longer) linked into the corresponding deque.
pub(crate) struct DeqNodes<K> {
    access_order_q_node: Option<KeyDeqNodeAo<K>>,
    write_order_q_node: Option<KeyDeqNodeWo<K>>,
}

// We need this `unsafe impl` as DeqNodes have NonNull pointers.
// NOTE(review): soundness relies on these pointers only being dereferenced
// while the cache's own synchronization is held (the `Mutex` wrapping this
// struct in `EntryInfo`) — confirm that invariant for all call sites.
unsafe impl<K> Send for DeqNodes<K> {}
16
+
17
+
/// Per-entry policy metadata: admission/dirty flags, access and modification
/// timestamps, the entry's policy weight, and its deque node pointers.
pub(crate) struct EntryInfo<K> {
    /// `is_admitted` indicates that the entry has been admitted to the
    /// cache. When `false`, it means the entry is _temporary_ admitted to
    /// the cache or evicted from the cache (so it should not have LRU nodes).
    is_admitted: AtomicBool,
    /// `is_dirty` indicates that the entry has been inserted (or updated)
    /// in the hash table, but the history of the insertion has not yet
    /// been applied to the LRU deques and LFU estimator.
    is_dirty: AtomicBool,
    /// Timestamp of the most recent read access.
    last_accessed: AtomicInstant,
    /// Timestamp of the most recent insert/update.
    last_modified: AtomicInstant,
    /// Entry weight as seen by the eviction policy.
    policy_weight: AtomicU32,
    /// Pointers to this entry's nodes in the access-/write-order deques,
    /// guarded by a mutex because they are raw pointers, not atomics.
    nodes: Mutex<DeqNodes<K>>,
}
31
+
32
+
impl<K> EntryInfo<K> {
33
+
#[inline]
34
+
pub(crate) fn new(timestamp: Instant, policy_weight: u32) -> Self {
35
+
Self {
36
+
is_admitted: Default::default(),
37
+
is_dirty: AtomicBool::new(true),
38
+
last_accessed: AtomicInstant::new(timestamp),
39
+
last_modified: AtomicInstant::new(timestamp),
40
+
policy_weight: AtomicU32::new(policy_weight),
41
+
nodes: Mutex::new(DeqNodes {
42
+
access_order_q_node: None,
43
+
write_order_q_node: None,
44
+
}),
45
+
}
46
+
}
47
+
48
+
#[inline]
49
+
pub(crate) fn is_admitted(&self) -> bool {
50
+
self.is_admitted.load(Ordering::Acquire)
51
+
}
52
+
53
+
#[inline]
54
+
pub(crate) fn set_admitted(&self, value: bool) {
55
+
self.is_admitted.store(value, Ordering::Release);
56
+
}
57
+
58
+
#[inline]
59
+
pub(crate) fn is_dirty(&self) -> bool {
60
+
self.is_dirty.load(Ordering::Acquire)
61
+
}
62
+
63
+
#[inline]
64
+
pub(crate) fn set_dirty(&self, value: bool) {
65
+
self.is_dirty.store(value, Ordering::Release);
66
+
}
67
+
68
+
#[inline]
69
+
pub(crate) fn policy_weight(&self) -> u32 {
70
+
self.policy_weight.load(Ordering::Acquire)
71
+
}
72
+
73
+
#[inline]
74
+
pub(crate) fn set_policy_weight(&self, size: u32) {
75
+
self.policy_weight.store(size, Ordering::Release);
76
+
}
77
+
78
+
#[inline]
79
+
pub(crate) fn access_order_q_node(&self) -> Option<KeyDeqNodeAo<K>> {
80
+
self.nodes
81
+
.lock()
82
+
.expect("lock poisoned")
83
+
.access_order_q_node
84
+
}
85
+
86
+
#[inline]
87
+
pub(crate) fn set_access_order_q_node(&self, node: Option<KeyDeqNodeAo<K>>) {
88
+
self.nodes
89
+
.lock()
90
+
.expect("lock poisoned")
91
+
.access_order_q_node = node;
92
+
}
93
+
94
+
#[inline]
95
+
pub(crate) fn take_access_order_q_node(&self) -> Option<KeyDeqNodeAo<K>> {
96
+
self.nodes
97
+
.lock()
98
+
.expect("lock poisoned")
99
+
.access_order_q_node
100
+
.take()
101
+
}
102
+
103
+
#[inline]
104
+
pub(crate) fn write_order_q_node(&self) -> Option<KeyDeqNodeWo<K>> {
105
+
self.nodes.lock().expect("lock poisoned").write_order_q_node
106
+
}
107
+
108
+
#[inline]
109
+
pub(crate) fn set_write_order_q_node(&self, node: Option<KeyDeqNodeWo<K>>) {
110
+
self.nodes.lock().expect("lock poisoned").write_order_q_node = node;
111
+
}
112
+
113
+
#[inline]
114
+
pub(crate) fn take_write_order_q_node(&self) -> Option<KeyDeqNodeWo<K>> {
115
+
self.nodes
116
+
.lock()
117
+
.expect("lock poisoned")
118
+
.write_order_q_node
119
+
.take()
120
+
}
121
+
122
+
#[inline]
123
+
pub(crate) fn unset_q_nodes(&self) {
124
+
let mut nodes = self.nodes.lock().expect("lock poisoned");
125
+
nodes.access_order_q_node = None;
126
+
nodes.write_order_q_node = None;
127
+
}
128
+
}
129
+
130
+
impl<K> AccessTime for EntryInfo<K> {
131
+
#[inline]
132
+
fn last_accessed(&self) -> Option<Instant> {
133
+
self.last_accessed.instant()
134
+
}
135
+
136
+
#[inline]
137
+
fn set_last_accessed(&self, timestamp: Instant) {
138
+
self.last_accessed.set_instant(timestamp);
139
+
}
140
+
141
+
#[inline]
142
+
fn last_modified(&self) -> Option<Instant> {
143
+
self.last_modified.instant()
144
+
}
145
+
146
+
#[inline]
147
+
fn set_last_modified(&self, timestamp: Instant) {
148
+
self.last_modified.set_instant(timestamp);
149
+
}
150
+
}
+77
crates/mini-moka-vendored/src/common/concurrent/housekeeper.rs
+77
crates/mini-moka-vendored/src/common/concurrent/housekeeper.rs
···
1
+
use super::{
2
+
atomic_time::AtomicInstant,
3
+
constants::{
4
+
MAX_SYNC_REPEATS, PERIODICAL_SYNC_INTERVAL_MILLIS, READ_LOG_FLUSH_POINT,
5
+
WRITE_LOG_FLUSH_POINT,
6
+
},
7
+
};
8
+
9
+
use crate::common::time::{CheckedTimeOps, Instant};
10
+
11
+
use std::{
12
+
sync::atomic::{AtomicBool, Ordering},
13
+
time::Duration,
14
+
};
15
+
16
+
/// Interface the housekeeper uses to drive a cache's pending maintenance.
pub(crate) trait InnerSync {
    /// Runs one maintenance pass, repeating at most `max_sync_repeats` times.
    /// (Presumably applies buffered read/write events — see implementors.)
    fn sync(&self, max_sync_repeats: usize);
    /// Returns the current time from the cache's clock source.
    fn now(&self) -> Instant;
}
20
+
21
+
/// Coordinates maintenance (`sync`) runs: ensures at most one thread syncs at
/// a time and tracks the next periodic sync deadline.
pub(crate) struct Housekeeper {
    /// Flag flipped by `try_sync` so only one thread runs the sync body.
    is_sync_running: AtomicBool,
    /// Periodic deadline consulted by `should_apply`; refreshed at the start
    /// of every successful `try_sync`.
    sync_after: AtomicInstant,
}
25
+
26
+
impl Default for Housekeeper {
27
+
fn default() -> Self {
28
+
Self {
29
+
is_sync_running: Default::default(),
30
+
sync_after: AtomicInstant::new(Self::sync_after(Instant::now())),
31
+
}
32
+
}
33
+
}
34
+
35
+
impl Housekeeper {
    /// Returns `true` if the buffered read events should be applied now.
    pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool {
        self.should_apply(ch_len, READ_LOG_FLUSH_POINT, now)
    }

    /// Returns `true` if the buffered write events should be applied now.
    pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool {
        self.should_apply(ch_len, WRITE_LOG_FLUSH_POINT, now)
    }

    /// Common test: apply when the channel backlog (`ch_len`) reached its
    /// flush point, or based on the periodic `sync_after` deadline.
    ///
    /// `sync_after` is always initialized in `Default` and refreshed in
    /// `try_sync`, so the `unwrap` cannot fail.
    ///
    /// NOTE(review): the second operand reads `sync_after >= now`, which is
    /// true *until* the deadline passes and false afterwards. If the intent is
    /// "apply once the periodic deadline has been reached", the comparison
    /// would be `now >= sync_after`. This matches the vendored upstream code,
    /// so it is preserved here — verify against upstream mini-moka before
    /// changing it.
    #[inline]
    pub(crate) fn should_apply(&self, ch_len: usize, ch_flush_point: usize, now: Instant) -> bool {
        ch_len >= ch_flush_point || self.sync_after.instant().unwrap() >= now
    }

    /// Runs `cache.sync(MAX_SYNC_REPEATS)` unless another thread is already
    /// syncing. Returns `true` if this call performed the sync, `false` if it
    /// was skipped because one was in progress.
    pub(crate) fn try_sync<T: InnerSync>(&self, cache: &T) -> bool {
        // Try to flip the value of sync_scheduled from false to true.
        // Only the winner of this CAS executes the sync body.
        match self.is_sync_running.compare_exchange(
            false,
            true,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            Ok(_) => {
                let now = cache.now();
                // Schedule the next periodic deadline before doing the work.
                self.sync_after.set_instant(Self::sync_after(now));

                cache.sync(MAX_SYNC_REPEATS);

                self.is_sync_running.store(false, Ordering::Release);
                true
            }
            Err(_) => false,
        }
    }

    /// Computes the next periodic sync deadline: `now` plus the configured
    /// sync interval.
    fn sync_after(now: Instant) -> Instant {
        let dur = Duration::from_millis(PERIODICAL_SYNC_INTERVAL_MILLIS);
        let ts = now.checked_add(dur);
        // Assuming that `now` is current wall clock time, this should never fail at
        // least next millions of years.
        ts.expect("Timestamp overflow")
    }
}
+773
crates/mini-moka-vendored/src/common/deque.rs
+773
crates/mini-moka-vendored/src/common/deque.rs
···
1
+
// License and Copyright Notice:
2
+
//
3
+
// Some of the code and doc comments in this module were copied from
4
+
// `std::collections::LinkedList` in the Rust standard library.
5
+
// https://github.com/rust-lang/rust/blob/master/src/liballoc/collections/linked_list.rs
6
+
//
7
+
// The original code/comments from LinkedList are dual-licensed under
8
+
// the Apache License, Version 2.0 <https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE>
9
+
// or the MIT license <https://github.com/rust-lang/rust/blob/master/LICENSE-MIT>
10
+
//
11
+
// Copyrights of the original code/comments are retained by their contributors.
12
+
// For full authorship information, see the version control history of
13
+
// https://github.com/rust-lang/rust/ or https://thanks.rust-lang.org
14
+
15
+
use std::{marker::PhantomData, ptr::NonNull};
16
+
17
+
use super::CacheRegion;
18
+
19
+
// `crate::{sync,unsync}::DeqNodes` uses a `tagptr::TagNonNull<DeqNode<T>, 2>`
// pointer. To reserve the space for the 2-bit tag, use 4 bytes as the *minimum*
// alignment.
// https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers
#[repr(align(4))]
#[derive(PartialEq, Eq)]
/// A doubly-linked-list node owned (via raw pointers) by a `Deque<T>`.
pub(crate) struct DeqNode<T> {
    /// Next node toward the tail; `None` when this node is the tail or
    /// unlinked.
    next: Option<NonNull<DeqNode<T>>>,
    /// Previous node toward the head; `None` when this node is the head or
    /// unlinked.
    prev: Option<NonNull<DeqNode<T>>>,
    pub(crate) element: T,
}
30
+
31
+
impl<T> std::fmt::Debug for DeqNode<T> {
    /// Debug-prints only the link pointers; `element` is intentionally
    /// omitted so `T` does not need to implement `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DeqNode")
            .field("next", &self.next)
            .field("prev", &self.prev)
            .finish()
    }
}
39
+
40
+
impl<T> DeqNode<T> {
41
+
pub(crate) fn new(element: T) -> Self {
42
+
Self {
43
+
next: None,
44
+
prev: None,
45
+
element,
46
+
}
47
+
}
48
+
49
+
pub(crate) fn next_node_ptr(this: NonNull<Self>) -> Option<NonNull<DeqNode<T>>> {
50
+
unsafe { this.as_ref() }.next
51
+
}
52
+
}
53
+
54
+
/// Cursor is used to remember the current iterating position.
enum DeqCursor<T> {
    /// Iteration will resume at this node.
    Node(NonNull<DeqNode<T>>),
    /// Iteration ran past the tail; the next `next()` call yields `None`
    /// and `advance_cursor` then resets the cursor to `None`.
    Done,
}
59
+
60
+
/// An intrusive doubly-linked deque built on raw `NonNull` node pointers,
/// with a resumable iteration cursor.
pub(crate) struct Deque<T> {
    /// Which cache region this deque represents; also used to validate that
    /// a region-tagged node pointer actually belongs to this deque.
    region: CacheRegion,
    len: usize,
    head: Option<NonNull<DeqNode<T>>>,
    tail: Option<NonNull<DeqNode<T>>>,
    /// Current position of the `Iterator` impl on `&mut Deque<T>`; kept valid
    /// by `pop_front`/`move_to_back`/`unlink`, which advance it when they
    /// touch the node it points at.
    cursor: Option<DeqCursor<T>>,
    /// Declares logical ownership of the boxed nodes (drop check/variance).
    marker: PhantomData<Box<DeqNode<T>>>,
}
68
+
69
+
impl<T> Drop for Deque<T> {
    /// Drops all remaining nodes front-to-back. The guard keeps popping even
    /// if an element's destructor panics, so nodes are not leaked; a second
    /// panic during that cleanup aborts.
    fn drop(&mut self) {
        struct DropGuard<'a, T>(&'a mut Deque<T>);

        impl<T> Drop for DropGuard<'_, T> {
            fn drop(&mut self) {
                // Continue the same loop we do below. This only runs when a destructor has
                // panicked. If another one panics this will abort.
                while self.0.pop_front().is_some() {}
            }
        }

        while let Some(node) = self.pop_front() {
            // Arm the guard for the duration of this node's drop, then disarm
            // it (forget) once the drop completed without panicking.
            let guard = DropGuard(self);
            drop(node);
            std::mem::forget(guard);
        }
    }
}
88
+
89
+
// Inner crate public function/methods
impl<T> Deque<T> {
    /// Creates an empty deque for the given cache region.
    pub(crate) fn new(region: CacheRegion) -> Self {
        Self {
            region,
            len: 0,
            head: None,
            tail: None,
            cursor: None,
            marker: PhantomData,
        }
    }

    /// The cache region this deque was created for.
    pub(crate) fn region(&self) -> CacheRegion {
        self.region
    }

    #[cfg(test)]
    pub(crate) fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if `node` appears to be linked into this deque.
    ///
    /// Note: `prev.is_some()` only proves the node is linked into *some*
    /// deque; `is_head` ties the prev-less case to this one. Callers that may
    /// hold nodes from other regions check the region tag before calling.
    pub(crate) fn contains(&self, node: &DeqNode<T>) -> bool {
        node.prev.is_some() || self.is_head(node)
    }

    /// Borrows the front (LRU end) node without removing it.
    pub(crate) fn peek_front(&self) -> Option<&DeqNode<T>> {
        self.head.as_ref().map(|node| unsafe { node.as_ref() })
    }

    /// Returns a raw pointer to the front node without removing it.
    pub(crate) fn peek_front_ptr(&self) -> Option<NonNull<DeqNode<T>>> {
        self.head.as_ref().cloned()
    }

    /// Removes and returns the node at the front of the list.
    pub(crate) fn pop_front(&mut self) -> Option<Box<DeqNode<T>>> {
        // This method takes care not to create mutable references to whole nodes,
        // to maintain validity of aliasing pointers into `element`.
        self.head.map(|node| unsafe {
            // Keep the iteration cursor valid if it points at the popped node.
            if self.is_at_cursor(node.as_ref()) {
                self.advance_cursor();
            }

            // Reclaim ownership of the node from its raw pointer.
            let mut node = Box::from_raw(node.as_ptr());
            self.head = node.next;

            match self.head {
                None => self.tail = None,
                // Not creating new mutable (unique!) references overlapping `element`.
                Some(head) => (*head.as_ptr()).prev = None,
            }

            self.len -= 1;

            node.prev = None;
            node.next = None;
            node
        })
    }

    #[cfg(test)]
    pub(crate) fn peek_back(&self) -> Option<&DeqNode<T>> {
        self.tail.as_ref().map(|node| unsafe { node.as_ref() })
    }

    /// Adds the given node to the back of the list.
    pub(crate) fn push_back(&mut self, mut node: Box<DeqNode<T>>) -> NonNull<DeqNode<T>> {
        // This method takes care not to create mutable references to whole nodes,
        // to maintain validity of aliasing pointers into `element`.
        unsafe {
            node.next = None;
            node.prev = self.tail;
            // Leak the box; the deque now owns the node via the raw pointer.
            let node = NonNull::new(Box::into_raw(node)).expect("Got a null ptr");

            match self.tail {
                None => self.head = Some(node),
                // Not creating new mutable (unique!) references overlapping `element`.
                Some(tail) => (*tail.as_ptr()).next = Some(node),
            }

            self.tail = Some(node);
            self.len += 1;
            node
        }
    }

    /// Relinks `node` to the tail (MRU) position.
    ///
    /// # Safety
    /// `node` must point at a live node owned by this deque.
    pub(crate) unsafe fn move_to_back(&mut self, mut node: NonNull<DeqNode<T>>) {
        if self.is_tail(node.as_ref()) {
            // Already at the tail. Nothing to do.
            return;
        }

        // Keep the iteration cursor valid if it points at the moved node.
        if self.is_at_cursor(node.as_ref()) {
            self.advance_cursor();
        }

        let node = node.as_mut(); // this one is ours now, we can create an &mut.

        // Not creating new mutable (unique!) references overlapping `element`.
        match node.prev {
            Some(prev) if node.next.is_some() => (*prev.as_ptr()).next = node.next,
            Some(..) => (),
            // This node is the head node.
            None => self.head = node.next,
        };

        // This node is not the tail node.
        if let Some(next) = node.next.take() {
            (*next.as_ptr()).prev = node.prev;

            let mut node = NonNull::from(node);
            match self.tail {
                // Not creating new mutable (unique!) references overlapping `element`.
                Some(tail) => {
                    node.as_mut().prev = Some(tail);
                    (*tail.as_ptr()).next = Some(node)
                }
                // A node that is not the tail implies the list is non-empty,
                // so `tail` must be `Some` here.
                None => unreachable!(),
            }
            self.tail = Some(node);
        }
    }

    /// Rotates the head node to the tail (no-op on an empty deque).
    pub(crate) fn move_front_to_back(&mut self) {
        if let Some(node) = self.head {
            unsafe { self.move_to_back(node) };
        }
    }

    /// Unlinks the specified node from the current list.
    ///
    /// This method takes care not to create mutable references to `element`, to
    /// maintain validity of aliasing pointers.
    ///
    /// IMPORTANT: This method does not drop the node. If the node is no longer
    /// needed, use `unlink_and_drop` instead, or drop it at the caller side.
    /// Otherwise, the node will leak.
    ///
    /// # Safety
    /// `node` must point at a live node currently linked into this deque;
    /// otherwise the neighbor fix-ups corrupt the list and the `len`
    /// decrement can underflow (panicking in debug builds).
    pub(crate) unsafe fn unlink(&mut self, mut node: NonNull<DeqNode<T>>) {
        // Keep the iteration cursor valid if it points at the removed node.
        if self.is_at_cursor(node.as_ref()) {
            self.advance_cursor();
        }

        let node = node.as_mut(); // this one is ours now, we can create an &mut.

        // Not creating new mutable (unique!) references overlapping `element`.
        match node.prev {
            Some(prev) => (*prev.as_ptr()).next = node.next,
            // this node is the head node
            None => self.head = node.next,
        };

        match node.next {
            Some(next) => (*next.as_ptr()).prev = node.prev,
            // this node is the tail node
            None => self.tail = node.prev,
        };

        node.prev = None;
        node.next = None;

        self.len -= 1;
    }

    /// Unlinks the specified node from the current list, and then drop the node.
    ///
    /// This method takes care not to create mutable references to `element`, to
    /// maintain validity of aliasing pointers.
    ///
    /// # Safety
    /// Same contract as `unlink`; additionally, no other pointer to this node
    /// may be used afterwards, since the node is dropped here.
    pub(crate) unsafe fn unlink_and_drop(&mut self, node: NonNull<DeqNode<T>>) {
        self.unlink(node);
        std::mem::drop(Box::from_raw(node.as_ptr()));
    }

    /// Restarts iteration from the head on the next `next()` call.
    #[cfg(test)]
    pub(crate) fn reset_cursor(&mut self) {
        self.cursor = None;
    }
}
268
+
269
+
impl<'a, T> Iterator for &'a mut Deque<T> {
    type Item = &'a T;

    /// Yields elements front-to-back, remembering the position in
    /// `self.cursor` so the deque can be mutated between calls
    /// (`pop_front`/`move_to_back`/`unlink` all fix the cursor up). After the
    /// cursor reaches `Done` and one final `None` is returned, the cursor
    /// resets and iteration restarts from the head.
    fn next(&mut self) -> Option<Self::Item> {
        // Fresh (or restarted) iteration: begin at the head, if any.
        if self.cursor.is_none() {
            if let Some(head) = self.head {
                self.cursor = Some(DeqCursor::Node(head));
            }
        }
        let elem = if let Some(DeqCursor::Node(node)) = self.cursor {
            // SAFETY: nodes reachable from the cursor are owned by this deque
            // and stay alive while it is mutably borrowed.
            unsafe { Some(&(*node.as_ptr()).element) }
        } else {
            None
        };
        self.advance_cursor();
        elem
    }
}
287
+
288
+
// Private function/methods
289
+
impl<T> Deque<T> {
290
+
fn is_head(&self, node: &DeqNode<T>) -> bool {
291
+
if let Some(head) = self.head {
292
+
std::ptr::eq(unsafe { head.as_ref() }, node)
293
+
} else {
294
+
false
295
+
}
296
+
}
297
+
298
+
fn is_tail(&self, node: &DeqNode<T>) -> bool {
299
+
if let Some(tail) = self.tail {
300
+
std::ptr::eq(unsafe { tail.as_ref() }, node)
301
+
} else {
302
+
false
303
+
}
304
+
}
305
+
306
+
fn is_at_cursor(&self, node: &DeqNode<T>) -> bool {
307
+
if let Some(DeqCursor::Node(cur_node)) = self.cursor {
308
+
std::ptr::eq(unsafe { cur_node.as_ref() }, node)
309
+
} else {
310
+
false
311
+
}
312
+
}
313
+
314
+
fn advance_cursor(&mut self) {
315
+
match self.cursor.take() {
316
+
None => (),
317
+
Some(DeqCursor::Node(node)) => unsafe {
318
+
if let Some(next) = (*node.as_ptr()).next {
319
+
self.cursor = Some(DeqCursor::Node(next));
320
+
} else {
321
+
self.cursor = Some(DeqCursor::Done);
322
+
}
323
+
},
324
+
Some(DeqCursor::Done) => {
325
+
self.cursor = None;
326
+
}
327
+
}
328
+
}
329
+
}
330
+
331
+
#[cfg(test)]
332
+
mod tests {
333
+
use super::{CacheRegion::MainProbation, DeqNode, Deque};
334
+
335
+
#[test]
336
+
#[allow(clippy::cognitive_complexity)]
337
+
fn basics() {
338
+
let mut deque: Deque<String> = Deque::new(MainProbation);
339
+
assert_eq!(deque.len(), 0);
340
+
assert!(deque.peek_front().is_none());
341
+
assert!(deque.peek_back().is_none());
342
+
343
+
// push_back(node1)
344
+
let node1 = DeqNode::new("a".to_string());
345
+
assert!(!deque.contains(&node1));
346
+
let node1 = Box::new(node1);
347
+
let node1_ptr = deque.push_back(node1);
348
+
assert_eq!(deque.len(), 1);
349
+
350
+
// peek_front() -> node1
351
+
let head_a = deque.peek_front().unwrap();
352
+
assert!(deque.contains(head_a));
353
+
assert!(deque.is_head(head_a));
354
+
assert!(deque.is_tail(head_a));
355
+
assert_eq!(head_a.element, "a".to_string());
356
+
357
+
// move_to_back(node1)
358
+
unsafe { deque.move_to_back(node1_ptr) };
359
+
assert_eq!(deque.len(), 1);
360
+
361
+
// peek_front() -> node1
362
+
let head_b = deque.peek_front().unwrap();
363
+
assert!(deque.contains(head_b));
364
+
assert!(deque.is_head(head_b));
365
+
assert!(deque.is_tail(head_b));
366
+
assert!(std::ptr::eq(head_b, node1_ptr.as_ptr()));
367
+
assert!(head_b.prev.is_none());
368
+
assert!(head_b.next.is_none());
369
+
370
+
// peek_back() -> node1
371
+
let tail_a = deque.peek_back().unwrap();
372
+
assert!(deque.contains(tail_a));
373
+
assert!(deque.is_head(tail_a));
374
+
assert!(deque.is_tail(tail_a));
375
+
assert!(std::ptr::eq(tail_a, node1_ptr.as_ptr()));
376
+
assert!(tail_a.prev.is_none());
377
+
assert!(tail_a.next.is_none());
378
+
379
+
// push_back(node2)
380
+
let node2 = DeqNode::new("b".to_string());
381
+
assert!(!deque.contains(&node2));
382
+
let node2_ptr = deque.push_back(Box::new(node2));
383
+
assert_eq!(deque.len(), 2);
384
+
385
+
// peek_front() -> node1
386
+
let head_c = deque.peek_front().unwrap();
387
+
assert!(deque.contains(head_c));
388
+
assert!(deque.is_head(head_c));
389
+
assert!(!deque.is_tail(head_c));
390
+
assert!(std::ptr::eq(head_c, node1_ptr.as_ptr()));
391
+
assert!(head_c.prev.is_none());
392
+
assert!(std::ptr::eq(
393
+
head_c.next.unwrap().as_ptr(),
394
+
node2_ptr.as_ptr()
395
+
));
396
+
397
+
// move_to_back(node2)
398
+
unsafe { deque.move_to_back(node2_ptr) };
399
+
assert_eq!(deque.len(), 2);
400
+
401
+
// peek_front() -> node1
402
+
let head_d = deque.peek_front().unwrap();
403
+
assert!(deque.contains(head_d));
404
+
assert!(deque.is_head(head_d));
405
+
assert!(!deque.is_tail(head_d));
406
+
assert!(std::ptr::eq(head_d, node1_ptr.as_ptr()));
407
+
assert!(head_d.prev.is_none());
408
+
assert!(std::ptr::eq(
409
+
head_d.next.unwrap().as_ptr(),
410
+
node2_ptr.as_ptr()
411
+
));
412
+
413
+
// peek_back() -> node2
414
+
let tail_b = deque.peek_back().unwrap();
415
+
assert!(deque.contains(tail_b));
416
+
assert!(!deque.is_head(tail_b));
417
+
assert!(deque.is_tail(tail_b));
418
+
assert!(std::ptr::eq(tail_b, node2_ptr.as_ptr()));
419
+
assert!(std::ptr::eq(
420
+
tail_b.prev.unwrap().as_ptr(),
421
+
node1_ptr.as_ptr()
422
+
));
423
+
assert_eq!(tail_b.element, "b".to_string());
424
+
assert!(tail_b.next.is_none());
425
+
426
+
// move_to_back(node1)
427
+
unsafe { deque.move_to_back(node1_ptr) };
428
+
assert_eq!(deque.len(), 2);
429
+
430
+
// peek_front() -> node2
431
+
let head_e = deque.peek_front().unwrap();
432
+
assert!(deque.contains(head_e));
433
+
assert!(deque.is_head(head_e));
434
+
assert!(!deque.is_tail(head_e));
435
+
assert!(std::ptr::eq(head_e, node2_ptr.as_ptr()));
436
+
assert!(head_e.prev.is_none());
437
+
assert!(std::ptr::eq(
438
+
head_e.next.unwrap().as_ptr(),
439
+
node1_ptr.as_ptr()
440
+
));
441
+
442
+
// peek_back() -> node1
443
+
let tail_c = deque.peek_back().unwrap();
444
+
assert!(deque.contains(tail_c));
445
+
assert!(!deque.is_head(tail_c));
446
+
assert!(deque.is_tail(tail_c));
447
+
assert!(std::ptr::eq(tail_c, node1_ptr.as_ptr()));
448
+
assert!(std::ptr::eq(
449
+
tail_c.prev.unwrap().as_ptr(),
450
+
node2_ptr.as_ptr()
451
+
));
452
+
assert!(tail_c.next.is_none());
453
+
454
+
// push_back(node3)
455
+
let node3 = DeqNode::new("c".to_string());
456
+
assert!(!deque.contains(&node3));
457
+
let node3_ptr = deque.push_back(Box::new(node3));
458
+
assert_eq!(deque.len(), 3);
459
+
460
+
// peek_front() -> node2
461
+
let head_f = deque.peek_front().unwrap();
462
+
assert!(deque.contains(head_f));
463
+
assert!(deque.is_head(head_f));
464
+
assert!(!deque.is_tail(head_f));
465
+
assert!(std::ptr::eq(head_f, node2_ptr.as_ptr()));
466
+
assert!(head_f.prev.is_none());
467
+
assert!(std::ptr::eq(
468
+
head_f.next.unwrap().as_ptr(),
469
+
node1_ptr.as_ptr()
470
+
));
471
+
472
+
// peek_back() -> node3
473
+
let tail_d = deque.peek_back().unwrap();
474
+
assert!(std::ptr::eq(tail_d, node3_ptr.as_ptr()));
475
+
assert_eq!(tail_d.element, "c".to_string());
476
+
assert!(deque.contains(tail_d));
477
+
assert!(!deque.is_head(tail_d));
478
+
assert!(deque.is_tail(tail_d));
479
+
assert!(std::ptr::eq(tail_d, node3_ptr.as_ptr()));
480
+
assert!(std::ptr::eq(
481
+
tail_d.prev.unwrap().as_ptr(),
482
+
node1_ptr.as_ptr()
483
+
));
484
+
assert!(tail_d.next.is_none());
485
+
486
+
// move_to_back(node1)
487
+
unsafe { deque.move_to_back(node1_ptr) };
488
+
assert_eq!(deque.len(), 3);
489
+
490
+
// peek_front() -> node2
491
+
let head_g = deque.peek_front().unwrap();
492
+
assert!(deque.contains(head_g));
493
+
assert!(deque.is_head(head_g));
494
+
assert!(!deque.is_tail(head_g));
495
+
assert!(std::ptr::eq(head_g, node2_ptr.as_ptr()));
496
+
assert!(head_g.prev.is_none());
497
+
assert!(std::ptr::eq(
498
+
head_g.next.unwrap().as_ptr(),
499
+
node3_ptr.as_ptr()
500
+
));
501
+
502
+
// peek_back() -> node1
503
+
let tail_e = deque.peek_back().unwrap();
504
+
assert!(deque.contains(tail_e));
505
+
assert!(!deque.is_head(tail_e));
506
+
assert!(deque.is_tail(tail_e));
507
+
assert!(std::ptr::eq(tail_e, node1_ptr.as_ptr()));
508
+
assert!(std::ptr::eq(
509
+
tail_e.prev.unwrap().as_ptr(),
510
+
node3_ptr.as_ptr()
511
+
));
512
+
assert!(tail_e.next.is_none());
513
+
514
+
// unlink(node3)
515
+
unsafe { deque.unlink(node3_ptr) };
516
+
assert_eq!(deque.len(), 2);
517
+
let node3_ref = unsafe { node3_ptr.as_ref() };
518
+
assert!(!deque.contains(node3_ref));
519
+
assert!(node3_ref.next.is_none());
520
+
assert!(node3_ref.next.is_none());
521
+
std::mem::drop(unsafe { Box::from_raw(node3_ptr.as_ptr()) });
522
+
523
+
// peek_front() -> node2
524
+
let head_h = deque.peek_front().unwrap();
525
+
assert!(deque.contains(head_h));
526
+
assert!(deque.is_head(head_h));
527
+
assert!(!deque.is_tail(head_h));
528
+
assert!(std::ptr::eq(head_h, node2_ptr.as_ptr()));
529
+
assert!(head_h.prev.is_none());
530
+
assert!(std::ptr::eq(
531
+
head_h.next.unwrap().as_ptr(),
532
+
node1_ptr.as_ptr()
533
+
));
534
+
535
+
// peek_back() -> node1
536
+
let tail_f = deque.peek_back().unwrap();
537
+
assert!(deque.contains(tail_f));
538
+
assert!(!deque.is_head(tail_f));
539
+
assert!(deque.is_tail(tail_f));
540
+
assert!(std::ptr::eq(tail_f, node1_ptr.as_ptr()));
541
+
assert!(std::ptr::eq(
542
+
tail_f.prev.unwrap().as_ptr(),
543
+
node2_ptr.as_ptr()
544
+
));
545
+
assert!(tail_f.next.is_none());
546
+
547
+
// unlink(node2)
548
+
unsafe { deque.unlink(node2_ptr) };
549
+
assert_eq!(deque.len(), 1);
550
+
let node2_ref = unsafe { node2_ptr.as_ref() };
551
+
assert!(!deque.contains(node2_ref));
552
+
assert!(node2_ref.next.is_none());
553
+
assert!(node2_ref.next.is_none());
554
+
std::mem::drop(unsafe { Box::from_raw(node2_ptr.as_ptr()) });
555
+
556
+
// peek_front() -> node1
557
+
let head_g = deque.peek_front().unwrap();
558
+
assert!(deque.contains(head_g));
559
+
assert!(deque.is_head(head_g));
560
+
assert!(deque.is_tail(head_g));
561
+
assert!(std::ptr::eq(head_g, node1_ptr.as_ptr()));
562
+
assert!(head_g.prev.is_none());
563
+
assert!(head_g.next.is_none());
564
+
565
+
// peek_back() -> node1
566
+
let tail_g = deque.peek_back().unwrap();
567
+
assert!(deque.contains(tail_g));
568
+
assert!(deque.is_head(tail_g));
569
+
assert!(deque.is_tail(tail_g));
570
+
assert!(std::ptr::eq(tail_g, node1_ptr.as_ptr()));
571
+
assert!(tail_g.next.is_none());
572
+
assert!(tail_g.next.is_none());
573
+
574
+
// unlink(node1)
575
+
unsafe { deque.unlink(node1_ptr) };
576
+
assert_eq!(deque.len(), 0);
577
+
let node1_ref = unsafe { node1_ptr.as_ref() };
578
+
assert!(!deque.contains(node1_ref));
579
+
assert!(node1_ref.next.is_none());
580
+
assert!(node1_ref.next.is_none());
581
+
std::mem::drop(unsafe { Box::from_raw(node1_ptr.as_ptr()) });
582
+
583
+
// peek_front() -> node1
584
+
let head_h = deque.peek_front();
585
+
assert!(head_h.is_none());
586
+
587
+
// peek_back() -> node1
588
+
let tail_e = deque.peek_back();
589
+
assert!(tail_e.is_none());
590
+
}
591
+
592
+
#[test]
593
+
fn iter() {
594
+
let mut deque: Deque<String> = Deque::new(MainProbation);
595
+
assert!((&mut deque).next().is_none());
596
+
597
+
let node1 = DeqNode::new("a".into());
598
+
deque.push_back(Box::new(node1));
599
+
let node2 = DeqNode::new("b".into());
600
+
let node2_ptr = deque.push_back(Box::new(node2));
601
+
let node3 = DeqNode::new("c".into());
602
+
let node3_ptr = deque.push_back(Box::new(node3));
603
+
604
+
// -------------------------------------------------------
605
+
// First iteration.
606
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
607
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
608
+
assert_eq!((&mut deque).next(), Some(&"c".into()));
609
+
assert!((&mut deque).next().is_none());
610
+
611
+
// -------------------------------------------------------
612
+
// Ensure the iterator restarts.
613
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
614
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
615
+
assert_eq!((&mut deque).next(), Some(&"c".into()));
616
+
assert!((&mut deque).next().is_none());
617
+
618
+
// -------------------------------------------------------
619
+
// Ensure reset_cursor works.
620
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
621
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
622
+
deque.reset_cursor();
623
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
624
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
625
+
assert_eq!((&mut deque).next(), Some(&"c".into()));
626
+
assert!((&mut deque).next().is_none());
627
+
628
+
// -------------------------------------------------------
629
+
// Try to move_to_back during iteration.
630
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
631
+
// Next will be "b", but we move it to the back.
632
+
unsafe { deque.move_to_back(node2_ptr) };
633
+
// Now, next should be "c", and then "b".
634
+
assert_eq!((&mut deque).next(), Some(&"c".into()));
635
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
636
+
assert!((&mut deque).next().is_none());
637
+
638
+
// -------------------------------------------------------
639
+
// Try to unlink during iteration.
640
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
641
+
// Next will be "c", but we unlink it.
642
+
unsafe { deque.unlink_and_drop(node3_ptr) };
643
+
// Now, next should be "b".
644
+
assert_eq!((&mut deque).next(), Some(&"b".into()));
645
+
assert!((&mut deque).next().is_none());
646
+
647
+
// -------------------------------------------------------
648
+
// Try pop_front during iteration.
649
+
let node3 = DeqNode::new("c".into());
650
+
deque.push_back(Box::new(node3));
651
+
652
+
assert_eq!((&mut deque).next(), Some(&"a".into()));
653
+
// Next will be "b", but we call pop_front twice to remove "a" and "b".
654
+
deque.pop_front(); // "a"
655
+
deque.pop_front(); // "b"
656
+
// Now, next should be "c".
657
+
assert_eq!((&mut deque).next(), Some(&"c".into()));
658
+
assert!((&mut deque).next().is_none());
659
+
660
+
// -------------------------------------------------------
661
+
// Check iterating on an empty deque.
662
+
deque.pop_front(); // "c"
663
+
assert!((&mut deque).next().is_none());
664
+
assert!((&mut deque).next().is_none());
665
+
}
666
+
667
+
#[test]
668
+
fn next_node() {
669
+
let mut deque: Deque<String> = Deque::new(MainProbation);
670
+
671
+
let node1 = DeqNode::new("a".into());
672
+
deque.push_back(Box::new(node1));
673
+
let node2 = DeqNode::new("b".into());
674
+
let node2_ptr = deque.push_back(Box::new(node2));
675
+
let node3 = DeqNode::new("c".into());
676
+
let node3_ptr = deque.push_back(Box::new(node3));
677
+
678
+
// -------------------------------------------------------
679
+
// First iteration.
680
+
// peek_front() -> node1
681
+
let node1a = deque.peek_front_ptr().unwrap();
682
+
assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string());
683
+
let node2a = DeqNode::next_node_ptr(node1a).unwrap();
684
+
assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string());
685
+
let node3a = DeqNode::next_node_ptr(node2a).unwrap();
686
+
assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string());
687
+
assert!(DeqNode::next_node_ptr(node3a).is_none());
688
+
689
+
// -------------------------------------------------------
690
+
// Iterate after a move_to_back.
691
+
// Move "b" to the back. So now "a" -> "c" -> "b".
692
+
unsafe { deque.move_to_back(node2_ptr) };
693
+
let node1a = deque.peek_front_ptr().unwrap();
694
+
assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string());
695
+
let node3a = DeqNode::next_node_ptr(node1a).unwrap();
696
+
assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string());
697
+
let node2a = DeqNode::next_node_ptr(node3a).unwrap();
698
+
assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string());
699
+
assert!(DeqNode::next_node_ptr(node2a).is_none());
700
+
701
+
// -------------------------------------------------------
702
+
// Iterate after an unlink.
703
+
// Unlink the second node "c". Now "a" -> "c".
704
+
unsafe { deque.unlink_and_drop(node3_ptr) };
705
+
let node1a = deque.peek_front_ptr().unwrap();
706
+
assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string());
707
+
let node2a = DeqNode::next_node_ptr(node1a).unwrap();
708
+
assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string());
709
+
assert!(DeqNode::next_node_ptr(node2a).is_none());
710
+
}
711
+
712
+
#[test]
713
+
fn peek_and_move_to_back() {
714
+
let mut deque: Deque<String> = Deque::new(MainProbation);
715
+
716
+
let node1 = DeqNode::new("a".into());
717
+
deque.push_back(Box::new(node1));
718
+
let node2 = DeqNode::new("b".into());
719
+
let _ = deque.push_back(Box::new(node2));
720
+
let node3 = DeqNode::new("c".into());
721
+
let _ = deque.push_back(Box::new(node3));
722
+
// "a" -> "b" -> "c"
723
+
724
+
let node1a = deque.peek_front_ptr().unwrap();
725
+
assert_eq!(unsafe { node1a.as_ref() }.element, "a".to_string());
726
+
unsafe { deque.move_to_back(node1a) };
727
+
// "b" -> "c" -> "a"
728
+
729
+
let node2a = deque.peek_front_ptr().unwrap();
730
+
assert_eq!(unsafe { node2a.as_ref() }.element, "b".to_string());
731
+
732
+
let node3a = DeqNode::next_node_ptr(node2a).unwrap();
733
+
assert_eq!(unsafe { node3a.as_ref() }.element, "c".to_string());
734
+
unsafe { deque.move_to_back(node3a) };
735
+
// "b" -> "a" -> "c"
736
+
737
+
deque.move_front_to_back();
738
+
// "a" -> "c" -> "b"
739
+
740
+
let node1b = deque.peek_front().unwrap();
741
+
assert_eq!(node1b.element, "a".to_string());
742
+
}
743
+
744
+
#[test]
745
+
fn drop() {
746
+
use std::{cell::RefCell, rc::Rc};
747
+
748
+
struct X(u32, Rc<RefCell<Vec<u32>>>);
749
+
750
+
impl Drop for X {
751
+
fn drop(&mut self) {
752
+
self.1.borrow_mut().push(self.0)
753
+
}
754
+
}
755
+
756
+
let mut deque: Deque<X> = Deque::new(MainProbation);
757
+
let dropped = Rc::new(RefCell::new(Vec::default()));
758
+
759
+
let node1 = DeqNode::new(X(1, Rc::clone(&dropped)));
760
+
let node2 = DeqNode::new(X(2, Rc::clone(&dropped)));
761
+
let node3 = DeqNode::new(X(3, Rc::clone(&dropped)));
762
+
let node4 = DeqNode::new(X(4, Rc::clone(&dropped)));
763
+
deque.push_back(Box::new(node1));
764
+
deque.push_back(Box::new(node2));
765
+
deque.push_back(Box::new(node3));
766
+
deque.push_back(Box::new(node4));
767
+
assert_eq!(deque.len(), 4);
768
+
769
+
std::mem::drop(deque);
770
+
771
+
assert_eq!(*dropped.borrow(), &[1, 2, 3, 4]);
772
+
}
773
+
}
+392
crates/mini-moka-vendored/src/common/frequency_sketch.rs
+392
crates/mini-moka-vendored/src/common/frequency_sketch.rs
···
1
+
// License and Copyright Notice:
2
+
//
3
+
// Some of the code and doc comments in this module were ported or copied from
4
+
// a Java class `com.github.benmanes.caffeine.cache.FrequencySketch` of Caffeine.
5
+
// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/FrequencySketch.java
6
+
//
7
+
// The original code/comments from Caffeine are licensed under the Apache License,
8
+
// Version 2.0 <https://github.com/ben-manes/caffeine/blob/master/LICENSE>
9
+
//
10
+
// Copyrights of the original code/comments are retained by their contributors.
11
+
// For full authorship information, see the version control history of
12
+
// https://github.com/ben-manes/caffeine/
13
+
14
+
/// A probabilistic multi-set for estimating the popularity of an element within
15
+
/// a time window. The maximum frequency of an element is limited to 15 (4-bits)
16
+
/// and an aging process periodically halves the popularity of all elements.
17
+
#[derive(Default)]
18
+
pub(crate) struct FrequencySketch {
19
+
sample_size: u32,
20
+
table_mask: u32,
21
+
table: Box<[u64]>,
22
+
size: u32,
23
+
}
24
+
25
+
// A mixture of seeds from FNV-1a, CityHash, and Murmur3. (Taken from Caffeine)
26
+
static SEED: [u64; 4] = [
27
+
0xc3a5_c85c_97cb_3127,
28
+
0xb492_b66f_be98_f273,
29
+
0x9ae1_6a3b_2f90_404f,
30
+
0xcbf2_9ce4_8422_2325,
31
+
];
32
+
33
+
static RESET_MASK: u64 = 0x7777_7777_7777_7777;
34
+
35
+
static ONE_MASK: u64 = 0x1111_1111_1111_1111;
36
+
37
+
// -------------------------------------------------------------------------------
38
+
// Some of the code and doc comments in this module were ported or copied from
39
+
// a Java class `com.github.benmanes.caffeine.cache.FrequencySketch` of Caffeine.
40
+
// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/FrequencySketch.java
41
+
// -------------------------------------------------------------------------------
42
+
//
43
+
// FrequencySketch maintains a 4-bit CountMinSketch [1] with periodic aging to
44
+
// provide the popularity history for the TinyLfu admission policy [2].
45
+
// The time and space efficiency of the sketch allows it to cheaply estimate the
46
+
// frequency of an entry in a stream of cache access events.
47
+
//
48
+
// The counter matrix is represented as a single dimensional array holding 16
49
+
// counters per slot. A fixed depth of four balances the accuracy and cost,
50
+
// resulting in a width of four times the length of the array. To retain an
51
+
// accurate estimation the array's length equals the maximum number of entries
52
+
// in the cache, increased to the closest power-of-two to exploit more efficient
53
+
// bit masking. This configuration results in a confidence of 93.75% and error
54
+
// bound of e / width.
55
+
//
56
+
// The frequency of all entries is aged periodically using a sampling window
57
+
// based on the maximum number of entries in the cache. This is referred to as
58
+
// the reset operation by TinyLfu and keeps the sketch fresh by dividing all
59
+
// counters by two and subtracting based on the number of odd counters
60
+
// found. The O(n) cost of aging is amortized, ideal for hardware pre-fetching,
61
+
// and uses inexpensive bit manipulations per array location.
62
+
//
63
+
// [1] An Improved Data Stream Summary: The Count-Min Sketch and its Applications
64
+
// http://dimacs.rutgers.edu/~graham/pubs/papers/cm-full.pdf
65
+
// [2] TinyLFU: A Highly Efficient Cache Admission Policy
66
+
// https://dl.acm.org/citation.cfm?id=3149371
67
+
//
68
+
// -------------------------------------------------------------------------------
69
+
70
+
impl FrequencySketch {
71
+
/// Initializes and increases the capacity of this `FrequencySketch` instance,
72
+
/// if necessary, to ensure that it can accurately estimate the popularity of
73
+
/// elements given the maximum size of the cache. This operation forgets all
74
+
/// previous counts when resizing.
75
+
pub(crate) fn ensure_capacity(&mut self, cap: u32) {
76
+
// The max byte size of the table, Box<[u64; table_size]>
77
+
//
78
+
// | Pointer width | Max size |
79
+
// |:-----------------|---------:|
80
+
// | 16 bit | 8 KiB |
81
+
// | 32 bit | 128 MiB |
82
+
// | 64 bit or bigger | 8 GiB |
83
+
84
+
let maximum = if cfg!(target_pointer_width = "16") {
85
+
cap.min(1024)
86
+
} else if cfg!(target_pointer_width = "32") {
87
+
cap.min(2u32.pow(24)) // about 16 millions
88
+
} else {
89
+
// Same to Caffeine's limit:
90
+
// `Integer.MAX_VALUE >>> 1` with `ceilingPowerOfTwo()` applied.
91
+
cap.min(2u32.pow(30)) // about 1 billion
92
+
};
93
+
let table_size = if maximum == 0 {
94
+
1
95
+
} else {
96
+
maximum.next_power_of_two()
97
+
};
98
+
99
+
if self.table.len() as u32 >= table_size {
100
+
return;
101
+
}
102
+
103
+
self.table = vec![0; table_size as usize].into_boxed_slice();
104
+
self.table_mask = table_size - 1;
105
+
self.sample_size = if cap == 0 {
106
+
10
107
+
} else {
108
+
maximum.saturating_mul(10).min(i32::MAX as u32)
109
+
};
110
+
}
111
+
112
+
/// Takes the hash value of an element, and returns the estimated number of
113
+
/// occurrences of the element, up to the maximum (15).
114
+
pub(crate) fn frequency(&self, hash: u64) -> u8 {
115
+
if self.table.is_empty() {
116
+
return 0;
117
+
}
118
+
119
+
let start = ((hash & 3) << 2) as u8;
120
+
let mut frequency = u8::MAX;
121
+
for i in 0..4 {
122
+
let index = self.index_of(hash, i);
123
+
let shift = (start + i) << 2;
124
+
let count = ((self.table[index] >> shift) & 0xF) as u8;
125
+
frequency = frequency.min(count);
126
+
}
127
+
frequency
128
+
}
129
+
130
+
/// Take a hash value of an element and increments the popularity of the
131
+
/// element if it does not exceed the maximum (15). The popularity of all
132
+
/// elements will be periodically down sampled when the observed events
133
+
/// exceeds a threshold. This process provides a frequency aging to allow
134
+
/// expired long term entries to fade away.
135
+
pub(crate) fn increment(&mut self, hash: u64) {
136
+
if self.table.is_empty() {
137
+
return;
138
+
}
139
+
140
+
let start = ((hash & 3) << 2) as u8;
141
+
let mut added = false;
142
+
for i in 0..4 {
143
+
let index = self.index_of(hash, i);
144
+
added |= self.increment_at(index, start + i);
145
+
}
146
+
147
+
if added {
148
+
self.size += 1;
149
+
if self.size >= self.sample_size {
150
+
self.reset();
151
+
}
152
+
}
153
+
}
154
+
155
+
/// Takes a table index (each entry has 16 counters) and counter index, and
156
+
/// increments the counter by 1 if it is not already at the maximum value
157
+
/// (15). Returns `true` if incremented.
158
+
fn increment_at(&mut self, table_index: usize, counter_index: u8) -> bool {
159
+
let offset = (counter_index as usize) << 2;
160
+
let mask = 0xF_u64 << offset;
161
+
if self.table[table_index] & mask != mask {
162
+
self.table[table_index] += 1u64 << offset;
163
+
true
164
+
} else {
165
+
false
166
+
}
167
+
}
168
+
169
+
/// Reduces every counter by half of its original value.
170
+
fn reset(&mut self) {
171
+
let mut count = 0u32;
172
+
for entry in self.table.iter_mut() {
173
+
// Count number of odd numbers.
174
+
count += (*entry & ONE_MASK).count_ones();
175
+
*entry = (*entry >> 1) & RESET_MASK;
176
+
}
177
+
self.size = (self.size >> 1) - (count >> 2);
178
+
}
179
+
180
+
/// Returns the table index for the counter at the specified depth.
181
+
fn index_of(&self, hash: u64, depth: u8) -> usize {
182
+
let i = depth as usize;
183
+
let mut hash = hash.wrapping_add(SEED[i]).wrapping_mul(SEED[i]);
184
+
hash = hash.wrapping_add(hash >> 32);
185
+
(hash & (self.table_mask as u64)) as usize
186
+
}
187
+
}
188
+
189
+
// Methods only available for testing.
190
+
#[cfg(test)]
191
+
impl FrequencySketch {
192
+
pub(crate) fn table_len(&self) -> usize {
193
+
self.table.len()
194
+
}
195
+
}
196
+
197
+
// Some test cases were ported from Caffeine at:
198
+
// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/test/java/com/github/benmanes/caffeine/cache/FrequencySketchTest.java
199
+
//
200
+
// To see the debug prints, run test as `cargo test -- --nocapture`
201
+
#[cfg(test)]
202
+
mod tests {
203
+
use super::FrequencySketch;
204
+
use once_cell::sync::Lazy;
205
+
use std::hash::{BuildHasher, Hash};
206
+
207
+
static ITEM: Lazy<u32> = Lazy::new(|| {
208
+
let mut buf = [0; 4];
209
+
getrandom::getrandom(&mut buf).unwrap();
210
+
unsafe { std::mem::transmute::<[u8; 4], u32>(buf) }
211
+
});
212
+
213
+
// This test was ported from Caffeine.
214
+
#[test]
215
+
fn increment_once() {
216
+
let mut sketch = FrequencySketch::default();
217
+
sketch.ensure_capacity(512);
218
+
let hasher = hasher();
219
+
let item_hash = hasher(*ITEM);
220
+
sketch.increment(item_hash);
221
+
assert_eq!(sketch.frequency(item_hash), 1);
222
+
}
223
+
224
+
// This test was ported from Caffeine.
225
+
#[test]
226
+
fn increment_max() {
227
+
let mut sketch = FrequencySketch::default();
228
+
sketch.ensure_capacity(512);
229
+
let hasher = hasher();
230
+
let item_hash = hasher(*ITEM);
231
+
for _ in 0..20 {
232
+
sketch.increment(item_hash);
233
+
}
234
+
assert_eq!(sketch.frequency(item_hash), 15);
235
+
}
236
+
237
+
// This test was ported from Caffeine.
238
+
#[test]
239
+
fn increment_distinct() {
240
+
let mut sketch = FrequencySketch::default();
241
+
sketch.ensure_capacity(512);
242
+
let hasher = hasher();
243
+
sketch.increment(hasher(*ITEM));
244
+
sketch.increment(hasher(ITEM.wrapping_add(1)));
245
+
assert_eq!(sketch.frequency(hasher(*ITEM)), 1);
246
+
assert_eq!(sketch.frequency(hasher(ITEM.wrapping_add(1))), 1);
247
+
assert_eq!(sketch.frequency(hasher(ITEM.wrapping_add(2))), 0);
248
+
}
249
+
250
+
// This test was ported from Caffeine.
251
+
#[test]
252
+
fn index_of_around_zero() {
253
+
let mut sketch = FrequencySketch::default();
254
+
sketch.ensure_capacity(512);
255
+
let mut indexes = std::collections::HashSet::new();
256
+
let hashes = [u64::MAX, 0, 1];
257
+
for hash in hashes.iter() {
258
+
for depth in 0..4 {
259
+
indexes.insert(sketch.index_of(*hash, depth));
260
+
}
261
+
}
262
+
assert_eq!(indexes.len(), 4 * hashes.len())
263
+
}
264
+
265
+
// This test was ported from Caffeine.
266
+
#[test]
267
+
fn reset() {
268
+
let mut reset = false;
269
+
let mut sketch = FrequencySketch::default();
270
+
sketch.ensure_capacity(64);
271
+
let hasher = hasher();
272
+
273
+
for i in 1..(20 * sketch.table.len() as u32) {
274
+
sketch.increment(hasher(i));
275
+
if sketch.size != i {
276
+
reset = true;
277
+
break;
278
+
}
279
+
}
280
+
281
+
assert!(reset);
282
+
assert!(sketch.size <= sketch.sample_size / 2);
283
+
}
284
+
285
+
// This test was ported from Caffeine.
286
+
#[test]
287
+
fn heavy_hitters() {
288
+
let mut sketch = FrequencySketch::default();
289
+
sketch.ensure_capacity(65_536);
290
+
let hasher = hasher();
291
+
292
+
for i in 100..100_000 {
293
+
sketch.increment(hasher(i));
294
+
}
295
+
296
+
for i in (0..10).step_by(2) {
297
+
for _ in 0..i {
298
+
sketch.increment(hasher(i));
299
+
}
300
+
}
301
+
302
+
// A perfect popularity count yields an array [0, 0, 2, 0, 4, 0, 6, 0, 8, 0]
303
+
let popularity = (0..10)
304
+
.map(|i| sketch.frequency(hasher(i)))
305
+
.collect::<Vec<_>>();
306
+
307
+
for (i, freq) in popularity.iter().enumerate() {
308
+
match i {
309
+
2 => assert!(freq <= &popularity[4]),
310
+
4 => assert!(freq <= &popularity[6]),
311
+
6 => assert!(freq <= &popularity[8]),
312
+
8 => (),
313
+
_ => assert!(freq <= &popularity[2]),
314
+
}
315
+
}
316
+
}
317
+
318
+
fn hasher<K: Hash>() -> impl Fn(K) -> u64 {
319
+
let build_hasher = std::collections::hash_map::RandomState::default();
320
+
move |key| build_hasher.hash_one(&key)
321
+
}
322
+
}
323
+
324
+
// Verify that some properties hold such as no panic occurs on any possible inputs.
325
+
#[cfg(kani)]
326
+
mod kani {
327
+
use super::FrequencySketch;
328
+
329
+
const CAPACITIES: &[u32] = &[
330
+
0,
331
+
1,
332
+
1024,
333
+
1025,
334
+
2u32.pow(24),
335
+
2u32.pow(24) + 1,
336
+
2u32.pow(30),
337
+
2u32.pow(30) + 1,
338
+
u32::MAX,
339
+
];
340
+
341
+
#[kani::proof]
342
+
fn verify_ensure_capacity() {
343
+
// Check for arbitrary capacities.
344
+
let capacity = kani::any();
345
+
let mut sketch = FrequencySketch::default();
346
+
sketch.ensure_capacity(capacity);
347
+
}
348
+
349
+
#[kani::proof]
350
+
fn verify_frequency() {
351
+
// Check for some selected capacities.
352
+
for capacity in CAPACITIES {
353
+
let mut sketch = FrequencySketch::default();
354
+
sketch.ensure_capacity(*capacity);
355
+
356
+
// Check for arbitrary hashes.
357
+
let hash = kani::any();
358
+
let frequency = sketch.frequency(hash);
359
+
assert!(frequency <= 15);
360
+
}
361
+
}
362
+
363
+
#[kani::proof]
364
+
fn verify_increment() {
365
+
// Only check for small capacities. Because Kani Rust Verifier is a model
366
+
// checking tool, it will take much longer time (exponential) to check larger
367
+
// capacities here.
368
+
for capacity in &[0, 1, 128] {
369
+
let mut sketch = FrequencySketch::default();
370
+
sketch.ensure_capacity(*capacity);
371
+
372
+
// Check for arbitrary hashes.
373
+
let hash = kani::any();
374
+
sketch.increment(hash);
375
+
}
376
+
}
377
+
378
+
#[kani::proof]
379
+
fn verify_index_of() {
380
+
// Check for arbitrary capacities.
381
+
let capacity = kani::any();
382
+
let mut sketch = FrequencySketch::default();
383
+
sketch.ensure_capacity(capacity);
384
+
385
+
// Check for arbitrary hashes.
386
+
let hash = kani::any();
387
+
for i in 0..4 {
388
+
let index = sketch.index_of(hash, i);
389
+
assert!(index < sketch.table.len());
390
+
}
391
+
}
392
+
}
+32
crates/mini-moka-vendored/src/common/time.rs
+32
crates/mini-moka-vendored/src/common/time.rs
···
1
+
use std::time::Duration;
2
+
3
+
pub(crate) mod clock;
4
+
5
+
pub(crate) use clock::Clock;
6
+
7
+
/// a wrapper type over Instant to force checked additions and prevent
8
+
/// unintentional overflow. The type preserve the Copy semantics for the wrapped
9
+
#[derive(PartialEq, PartialOrd, Clone, Copy)]
10
+
pub(crate) struct Instant(clock::Instant);
11
+
12
+
pub(crate) trait CheckedTimeOps {
13
+
fn checked_add(&self, duration: Duration) -> Option<Self>
14
+
where
15
+
Self: Sized;
16
+
}
17
+
18
+
impl Instant {
19
+
pub(crate) fn new(instant: clock::Instant) -> Instant {
20
+
Instant(instant)
21
+
}
22
+
23
+
pub(crate) fn now() -> Instant {
24
+
Instant(clock::Instant::now())
25
+
}
26
+
}
27
+
28
+
impl CheckedTimeOps for Instant {
29
+
fn checked_add(&self, duration: Duration) -> Option<Instant> {
30
+
self.0.checked_add(duration).map(Instant)
31
+
}
32
+
}
+52
crates/mini-moka-vendored/src/common/time/clock.rs
+52
crates/mini-moka-vendored/src/common/time/clock.rs
···
1
+
use std::sync::{Arc, RwLock};
2
+
3
+
#[cfg(test)]
4
+
use std::time::Duration;
5
+
6
+
#[cfg(not(feature = "js"))]
7
+
pub(crate) type Instant = std::time::Instant;
8
+
9
+
#[cfg(feature = "js")]
10
+
pub(crate) type Instant = web_time::Instant;
11
+
12
+
pub(crate) struct Clock {
13
+
mock: Option<Arc<Mock>>,
14
+
}
15
+
16
+
impl Clock {
17
+
#[cfg(test)]
18
+
pub(crate) fn mock() -> (Clock, Arc<Mock>) {
19
+
let mock = Arc::new(Mock::default());
20
+
let clock = Clock {
21
+
mock: Some(Arc::clone(&mock)),
22
+
};
23
+
(clock, mock)
24
+
}
25
+
26
+
pub(crate) fn now(&self) -> Instant {
27
+
if let Some(mock) = &self.mock {
28
+
*mock.now.read().expect("lock poisoned")
29
+
} else {
30
+
Instant::now()
31
+
}
32
+
}
33
+
}
34
+
35
+
pub(crate) struct Mock {
36
+
now: RwLock<Instant>,
37
+
}
38
+
39
+
impl Default for Mock {
40
+
fn default() -> Self {
41
+
Self {
42
+
now: RwLock::new(Instant::now()),
43
+
}
44
+
}
45
+
}
46
+
47
+
#[cfg(test)]
48
+
impl Mock {
49
+
pub(crate) fn increment(&self, amount: Duration) {
50
+
*self.now.write().expect("lock poisoned") += amount;
51
+
}
52
+
}
+86
crates/mini-moka-vendored/src/lib.rs
+86
crates/mini-moka-vendored/src/lib.rs
···
1
+
#![warn(clippy::all)]
2
+
#![warn(rust_2018_idioms)]
3
+
#![deny(rustdoc::broken_intra_doc_links)]
4
+
#![cfg_attr(docsrs, feature(doc_cfg))]
5
+
6
+
//! Mini Moka is a fast, concurrent cache library for Rust. Mini Moka is a light
7
+
//! edition of [Moka][moka-git].
8
+
//!
9
+
//! Mini Moka provides an in-memory concurrent cache implementation on top of hash
10
+
//! map. It supports high expected concurrency of retrievals and updates.
11
+
//!
12
+
//! Mini Moka also provides an in-memory, non-thread-safe cache implementation for
13
+
//! single thread applications.
14
+
//!
15
+
//! All cache implementations perform a best-effort bounding of the map using an
16
+
//! entry replacement algorithm to determine which entries to evict when the capacity
17
+
//! is exceeded.
18
+
//!
19
+
//! [moka-git]: https://github.com/moka-rs/moka
20
+
//! [caffeine-git]: https://github.com/ben-manes/caffeine
21
+
//!
22
+
//! # Features
23
+
//!
24
+
//! - A thread-safe, highly concurrent in-memory cache implementation.
25
+
//! - A cache can be bounded by one of the followings:
26
+
//! - The maximum number of entries.
27
+
//! - The total weighted size of entries. (Size aware eviction)
28
+
//! - Maintains good hit rate by using entry replacement algorithms inspired by
29
+
//! [Caffeine][caffeine-git]:
30
+
//! - Admission to a cache is controlled by the Least Frequently Used (LFU) policy.
31
+
//! - Eviction from a cache is controlled by the Least Recently Used (LRU) policy.
32
+
//! - Supports expiration policies:
33
+
//! - Time to live
34
+
//! - Time to idle
35
+
//!
36
+
//! # Examples
37
+
//!
38
+
//! See the following document:
39
+
//!
40
+
//! - A thread-safe, synchronous cache:
41
+
//! - [`sync::Cache`][sync-cache-struct]
42
+
//! - A not thread-safe, blocking cache for single threaded applications:
43
+
//! - [`unsync::Cache`][unsync-cache-struct]
44
+
//!
45
+
//! [sync-cache-struct]: ./sync/struct.Cache.html
46
+
//! [unsync-cache-struct]: ./unsync/struct.Cache.html
47
+
//!
48
+
//! # Minimum Supported Rust Versions
49
+
//!
50
+
//! This crate's minimum supported Rust versions (MSRV) are the followings:
51
+
//!
52
+
//! | Feature | MSRV |
53
+
//! |:-----------------|:--------------------------:|
54
+
//! | default features | Rust 1.76.0 (Feb 8, 2024) |
55
+
//!
56
+
//! If only the default features are enabled, MSRV will be updated conservatively.
57
+
//! When using other features, MSRV might be updated more frequently, up to the
58
+
//! latest stable. In both cases, increasing MSRV is _not_ considered a
59
+
//! semver-breaking change.
60
+
61
+
pub(crate) mod common;
62
+
pub(crate) mod policy;
63
+
pub mod unsync;
64
+
65
+
#[cfg(feature = "sync")]
66
+
#[cfg_attr(docsrs, doc(cfg(feature = "sync")))]
67
+
pub mod sync;
68
+
69
+
pub use policy::Policy;
70
+
71
+
#[cfg(test)]
72
+
mod tests {
73
+
#[cfg(all(trybuild, feature = "sync"))]
74
+
#[test]
75
+
fn trybuild_sync() {
76
+
let t = trybuild::TestCases::new();
77
+
t.compile_fail("tests/compile_tests/sync/clone/*.rs");
78
+
}
79
+
}
80
+
81
+
#[cfg(all(doctest, feature = "sync"))]
82
+
mod doctests {
83
+
// https://doc.rust-lang.org/rustdoc/write-documentation/documentation-tests.html#include-items-only-when-collecting-doctests
84
+
#[doc = include_str!("../README.md")]
85
+
struct ReadMeDoctests;
86
+
}
+38
crates/mini-moka-vendored/src/policy.rs
+38
crates/mini-moka-vendored/src/policy.rs
···
1
+
use std::time::Duration;
2
+
3
+
#[derive(Clone, Debug)]
4
+
/// The policy of a cache.
5
+
pub struct Policy {
6
+
max_capacity: Option<u64>,
7
+
time_to_live: Option<Duration>,
8
+
time_to_idle: Option<Duration>,
9
+
}
10
+
11
+
impl Policy {
12
+
pub(crate) fn new(
13
+
max_capacity: Option<u64>,
14
+
time_to_live: Option<Duration>,
15
+
time_to_idle: Option<Duration>,
16
+
) -> Self {
17
+
Self {
18
+
max_capacity,
19
+
time_to_live,
20
+
time_to_idle,
21
+
}
22
+
}
23
+
24
+
/// Returns the `max_capacity` of the cache.
25
+
pub fn max_capacity(&self) -> Option<u64> {
26
+
self.max_capacity
27
+
}
28
+
29
+
/// Returns the `time_to_live` of the cache.
30
+
pub fn time_to_live(&self) -> Option<Duration> {
31
+
self.time_to_live
32
+
}
33
+
34
+
/// Returns the `time_to_idle` of the cache.
35
+
pub fn time_to_idle(&self) -> Option<Duration> {
36
+
self.time_to_idle
37
+
}
38
+
}
+21
crates/mini-moka-vendored/src/sync.rs
+21
crates/mini-moka-vendored/src/sync.rs
···
1
+
//! Provides a thread-safe, concurrent cache implementation built upon
2
+
//! [`dashmap::DashMap`][dashmap].
3
+
//!
4
+
//! [dashmap]: https://docs.rs/dashmap/*/dashmap/struct.DashMap.html
5
+
6
+
mod base_cache;
7
+
mod builder;
8
+
mod cache;
9
+
mod iter;
10
+
mod mapref;
11
+
12
+
pub use builder::CacheBuilder;
13
+
pub use cache::Cache;
14
+
pub use iter::Iter;
15
+
pub use mapref::EntryRef;
16
+
17
+
/// Provides extra methods that will be useful for testing.
18
+
pub trait ConcurrentCacheExt<K, V> {
19
+
/// Performs any pending maintenance operations needed by the cache.
20
+
fn sync(&self);
21
+
}
+1380
crates/mini-moka-vendored/src/sync/base_cache.rs
+1380
crates/mini-moka-vendored/src/sync/base_cache.rs
···
1
+
use super::{iter::DashMapIter, Iter};
2
+
use crate::{
3
+
common::{
4
+
self,
5
+
concurrent::{
6
+
atomic_time::AtomicInstant,
7
+
constants::{
8
+
READ_LOG_FLUSH_POINT, READ_LOG_SIZE, WRITE_LOG_FLUSH_POINT, WRITE_LOG_SIZE,
9
+
},
10
+
deques::Deques,
11
+
entry_info::EntryInfo,
12
+
housekeeper::{Housekeeper, InnerSync},
13
+
AccessTime, KeyDate, KeyHash, KeyHashDate, KvEntry, ReadOp, ValueEntry, Weigher,
14
+
WriteOp,
15
+
},
16
+
deque::{DeqNode, Deque},
17
+
frequency_sketch::FrequencySketch,
18
+
time::{CheckedTimeOps, Clock, Instant},
19
+
CacheRegion,
20
+
},
21
+
Policy,
22
+
};
23
+
24
+
use crossbeam_channel::{Receiver, Sender, TrySendError};
25
+
use crossbeam_utils::atomic::AtomicCell;
26
+
use dashmap::mapref::one::Ref as DashMapRef;
27
+
use smallvec::SmallVec;
28
+
use std::{
29
+
borrow::Borrow,
30
+
collections::hash_map::RandomState,
31
+
hash::{BuildHasher, Hash},
32
+
ptr::NonNull,
33
+
sync::{
34
+
atomic::{AtomicBool, Ordering},
35
+
Arc, Mutex, RwLock,
36
+
},
37
+
time::Duration,
38
+
};
39
+
use triomphe::Arc as TrioArc;
40
+
41
+
pub(crate) struct BaseCache<K, V, S = RandomState> {
42
+
pub(crate) inner: Arc<Inner<K, V, S>>,
43
+
read_op_ch: Sender<ReadOp<K, V>>,
44
+
pub(crate) write_op_ch: Sender<WriteOp<K, V>>,
45
+
pub(crate) housekeeper: Option<Arc<Housekeeper>>,
46
+
}
47
+
48
+
impl<K, V, S> Clone for BaseCache<K, V, S> {
49
+
/// Makes a clone of this shared cache.
50
+
///
51
+
/// This operation is cheap as it only creates thread-safe reference counted
52
+
/// pointers to the shared internal data structures.
53
+
fn clone(&self) -> Self {
54
+
Self {
55
+
inner: Arc::clone(&self.inner),
56
+
read_op_ch: self.read_op_ch.clone(),
57
+
write_op_ch: self.write_op_ch.clone(),
58
+
housekeeper: self.housekeeper.clone(),
59
+
}
60
+
}
61
+
}
62
+
63
+
impl<K, V, S> Drop for BaseCache<K, V, S> {
64
+
fn drop(&mut self) {
65
+
// The housekeeper needs to be dropped before the inner is dropped.
66
+
std::mem::drop(self.housekeeper.take());
67
+
}
68
+
}
69
+
70
+
impl<K, V, S> BaseCache<K, V, S> {
71
+
pub(crate) fn policy(&self) -> Policy {
72
+
self.inner.policy()
73
+
}
74
+
75
+
pub(crate) fn entry_count(&self) -> u64 {
76
+
self.inner.entry_count()
77
+
}
78
+
79
+
pub(crate) fn weighted_size(&self) -> u64 {
80
+
self.inner.weighted_size()
81
+
}
82
+
}
83
+
84
+
impl<K, V, S> BaseCache<K, V, S>
85
+
where
86
+
K: Hash + Eq + Send + Sync + 'static,
87
+
V: Clone + Send + Sync + 'static,
88
+
S: BuildHasher + Clone + Send + Sync + 'static,
89
+
{
90
+
pub(crate) fn new(
91
+
max_capacity: Option<u64>,
92
+
initial_capacity: Option<usize>,
93
+
build_hasher: S,
94
+
weigher: Option<Weigher<K, V>>,
95
+
time_to_live: Option<Duration>,
96
+
time_to_idle: Option<Duration>,
97
+
) -> Self {
98
+
let (r_snd, r_rcv) = crossbeam_channel::bounded(READ_LOG_SIZE);
99
+
let (w_snd, w_rcv) = crossbeam_channel::bounded(WRITE_LOG_SIZE);
100
+
101
+
let inner = Inner::new(
102
+
max_capacity,
103
+
initial_capacity,
104
+
build_hasher,
105
+
weigher,
106
+
r_rcv,
107
+
w_rcv,
108
+
time_to_live,
109
+
time_to_idle,
110
+
);
111
+
Self {
112
+
#[cfg_attr(beta_clippy, allow(clippy::arc_with_non_send_sync))]
113
+
inner: Arc::new(inner),
114
+
read_op_ch: r_snd,
115
+
write_op_ch: w_snd,
116
+
housekeeper: Some(Arc::new(Housekeeper::default())),
117
+
}
118
+
}
119
+
120
+
#[inline]
121
+
pub(crate) fn hash<Q>(&self, key: &Q) -> u64
122
+
where
123
+
Arc<K>: Borrow<Q>,
124
+
Q: Hash + Eq + ?Sized,
125
+
{
126
+
self.inner.hash(key)
127
+
}
128
+
129
+
pub(crate) fn contains_key<Q>(&self, key: &Q) -> bool
130
+
where
131
+
Arc<K>: Borrow<Q>,
132
+
Q: Hash + Eq + ?Sized,
133
+
{
134
+
match self.inner.get(key) {
135
+
None => false,
136
+
Some(entry) => {
137
+
let i = &self.inner;
138
+
let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after());
139
+
let now = i.current_time_from_expiration_clock();
140
+
let entry = &*entry;
141
+
142
+
!is_expired_entry_wo(ttl, va, entry, now)
143
+
&& !is_expired_entry_ao(tti, va, entry, now)
144
+
}
145
+
}
146
+
}
147
+
148
+
pub(crate) fn get_with_hash<Q>(&self, key: &Q, hash: u64) -> Option<V>
149
+
where
150
+
Arc<K>: Borrow<Q>,
151
+
Q: Hash + Eq + ?Sized,
152
+
{
153
+
let record = |op, now| {
154
+
self.record_read_op(op, now)
155
+
.expect("Failed to record a get op");
156
+
};
157
+
let now = self.inner.current_time_from_expiration_clock();
158
+
159
+
match self.inner.get(key) {
160
+
None => {
161
+
record(ReadOp::Miss(hash), now);
162
+
None
163
+
}
164
+
Some(entry) => {
165
+
let i = &self.inner;
166
+
let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after());
167
+
let arc_entry = &*entry;
168
+
169
+
if is_expired_entry_wo(ttl, va, arc_entry, now)
170
+
|| is_expired_entry_ao(tti, va, arc_entry, now)
171
+
{
172
+
// Drop the entry to avoid to deadlock with record_read_op.
173
+
std::mem::drop(entry);
174
+
// Expired or invalidated entry. Record this access as a cache miss
175
+
// rather than a hit.
176
+
record(ReadOp::Miss(hash), now);
177
+
None
178
+
} else {
179
+
// Valid entry.
180
+
let v = arc_entry.value.clone();
181
+
let e = TrioArc::clone(arc_entry);
182
+
// Drop the entry to avoid to deadlock with record_read_op.
183
+
std::mem::drop(entry);
184
+
record(ReadOp::Hit(hash, e, now), now);
185
+
Some(v)
186
+
}
187
+
}
188
+
}
189
+
}
190
+
191
+
#[inline]
192
+
pub(crate) fn remove_entry<Q>(&self, key: &Q) -> Option<KvEntry<K, V>>
193
+
where
194
+
Arc<K>: Borrow<Q>,
195
+
Q: Hash + Eq + ?Sized,
196
+
{
197
+
self.inner.remove_entry(key)
198
+
}
199
+
200
+
#[inline]
201
+
pub(crate) fn apply_reads_writes_if_needed(
202
+
inner: &impl InnerSync,
203
+
ch: &Sender<WriteOp<K, V>>,
204
+
now: Instant,
205
+
housekeeper: Option<&Arc<Housekeeper>>,
206
+
) {
207
+
let w_len = ch.len();
208
+
209
+
if let Some(hk) = housekeeper {
210
+
if hk.should_apply_writes(w_len, now) {
211
+
hk.try_sync(inner);
212
+
}
213
+
}
214
+
}
215
+
216
+
pub(crate) fn invalidate_all(&self) {
217
+
let now = self.inner.current_time_from_expiration_clock();
218
+
self.inner.set_valid_after(now);
219
+
}
220
+
}
221
+
222
+
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
223
+
// This seems a false positive. The lifetimes are used in the trait bounds.
224
+
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
225
+
#[allow(clippy::extra_unused_lifetimes)]
226
+
impl<'a, K, V, S> BaseCache<K, V, S>
227
+
where
228
+
K: 'a + Eq + Hash,
229
+
V: 'a,
230
+
S: BuildHasher + Clone,
231
+
{
232
+
pub(crate) fn iter(&self) -> Iter<'_, K, V, S> {
233
+
Iter::new(self, self.inner.iter())
234
+
}
235
+
}
236
+
237
+
impl<K, V, S> BaseCache<K, V, S> {
238
+
pub(crate) fn is_expired_entry(&self, entry: &TrioArc<ValueEntry<K, V>>) -> bool {
239
+
let i = &self.inner;
240
+
let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after());
241
+
let now = i.current_time_from_expiration_clock();
242
+
243
+
is_expired_entry_wo(ttl, va, entry, now) || is_expired_entry_ao(tti, va, entry, now)
244
+
}
245
+
}
246
+
247
+
//
248
+
// private methods
249
+
//
250
+
impl<K, V, S> BaseCache<K, V, S>
251
+
where
252
+
K: Hash + Eq + Send + Sync + 'static,
253
+
V: Clone + Send + Sync + 'static,
254
+
S: BuildHasher + Clone + Send + Sync + 'static,
255
+
{
256
+
#[inline]
257
+
fn record_read_op(
258
+
&self,
259
+
op: ReadOp<K, V>,
260
+
now: Instant,
261
+
) -> Result<(), TrySendError<ReadOp<K, V>>> {
262
+
self.apply_reads_if_needed(self.inner.as_ref(), now);
263
+
let ch = &self.read_op_ch;
264
+
match ch.try_send(op) {
265
+
// Discard the ReadOp when the channel is full.
266
+
Ok(()) | Err(TrySendError::Full(_)) => Ok(()),
267
+
Err(e @ TrySendError::Disconnected(_)) => Err(e),
268
+
}
269
+
}
270
+
271
+
#[inline]
272
+
pub(crate) fn do_insert_with_hash(
273
+
&self,
274
+
key: Arc<K>,
275
+
hash: u64,
276
+
value: V,
277
+
) -> (WriteOp<K, V>, Instant) {
278
+
let ts = self.inner.current_time_from_expiration_clock();
279
+
let weight = self.inner.weigh(&key, &value);
280
+
let mut insert_op = None;
281
+
let mut update_op = None;
282
+
283
+
self.inner
284
+
.cache
285
+
.entry(Arc::clone(&key))
286
+
// Update
287
+
.and_modify(|entry| {
288
+
// NOTES on `new_value_entry_from` method:
289
+
// 1. The internal EntryInfo will be shared between the old and new
290
+
// ValueEntries.
291
+
// 2. This method will set the dirty flag to prevent this new
292
+
// ValueEntry from being evicted by an expiration policy.
293
+
// 3. This method will update the policy_weight with the new weight.
294
+
let old_weight = entry.policy_weight();
295
+
*entry = self.new_value_entry_from(value.clone(), ts, weight, entry);
296
+
update_op = Some(WriteOp::Upsert {
297
+
key_hash: KeyHash::new(Arc::clone(&key), hash),
298
+
value_entry: TrioArc::clone(entry),
299
+
old_weight,
300
+
new_weight: weight,
301
+
});
302
+
})
303
+
// Insert
304
+
.or_insert_with(|| {
305
+
let entry = self.new_value_entry(value.clone(), ts, weight);
306
+
insert_op = Some(WriteOp::Upsert {
307
+
key_hash: KeyHash::new(Arc::clone(&key), hash),
308
+
value_entry: TrioArc::clone(&entry),
309
+
old_weight: 0,
310
+
new_weight: weight,
311
+
});
312
+
entry
313
+
});
314
+
315
+
match (insert_op, update_op) {
316
+
(Some(ins_op), None) => (ins_op, ts),
317
+
(None, Some(upd_op)) => (upd_op, ts),
318
+
_ => unreachable!(),
319
+
}
320
+
}
321
+
322
+
#[inline]
323
+
fn new_value_entry(
324
+
&self,
325
+
value: V,
326
+
timestamp: Instant,
327
+
policy_weight: u32,
328
+
) -> TrioArc<ValueEntry<K, V>> {
329
+
let info = TrioArc::new(EntryInfo::new(timestamp, policy_weight));
330
+
TrioArc::new(ValueEntry::new(value, info))
331
+
}
332
+
333
+
#[inline]
334
+
fn new_value_entry_from(
335
+
&self,
336
+
value: V,
337
+
timestamp: Instant,
338
+
policy_weight: u32,
339
+
other: &ValueEntry<K, V>,
340
+
) -> TrioArc<ValueEntry<K, V>> {
341
+
let info = TrioArc::clone(other.entry_info());
342
+
// To prevent this updated ValueEntry from being evicted by an expiration policy,
343
+
// set the dirty flag to true. It will be reset to false when the write is applied.
344
+
info.set_dirty(true);
345
+
info.set_last_accessed(timestamp);
346
+
info.set_last_modified(timestamp);
347
+
info.set_policy_weight(policy_weight);
348
+
TrioArc::new(ValueEntry::new(value, info))
349
+
}
350
+
351
+
#[inline]
352
+
fn apply_reads_if_needed(&self, inner: &impl InnerSync, now: Instant) {
353
+
let len = self.read_op_ch.len();
354
+
355
+
if let Some(hk) = &self.housekeeper {
356
+
if hk.should_apply_reads(len, now) {
357
+
if let Some(h) = &self.housekeeper {
358
+
h.try_sync(inner);
359
+
}
360
+
}
361
+
}
362
+
}
363
+
364
+
#[inline]
365
+
pub(crate) fn current_time_from_expiration_clock(&self) -> Instant {
366
+
self.inner.current_time_from_expiration_clock()
367
+
}
368
+
}
369
+
370
+
//
371
+
// for testing
372
+
//
373
+
#[cfg(test)]
374
+
impl<K, V, S> BaseCache<K, V, S>
375
+
where
376
+
K: Hash + Eq + Send + Sync + 'static,
377
+
V: Clone + Send + Sync + 'static,
378
+
S: BuildHasher + Clone + Send + Sync + 'static,
379
+
{
380
+
pub(crate) fn reconfigure_for_testing(&mut self) {
381
+
// Enable the frequency sketch.
382
+
self.inner.enable_frequency_sketch_for_testing();
383
+
}
384
+
385
+
pub(crate) fn set_expiration_clock(&self, clock: Option<Clock>) {
386
+
self.inner.set_expiration_clock(clock);
387
+
}
388
+
}
389
+
390
+
struct EvictionCounters {
391
+
entry_count: u64,
392
+
weighted_size: u64,
393
+
}
394
+
395
+
impl EvictionCounters {
396
+
#[inline]
397
+
fn new(entry_count: u64, weighted_size: u64) -> Self {
398
+
Self {
399
+
entry_count,
400
+
weighted_size,
401
+
}
402
+
}
403
+
404
+
#[inline]
405
+
fn saturating_add(&mut self, entry_count: u64, weight: u32) {
406
+
self.entry_count += entry_count;
407
+
let total = &mut self.weighted_size;
408
+
*total = total.saturating_add(weight as u64);
409
+
}
410
+
411
+
#[inline]
412
+
fn saturating_sub(&mut self, entry_count: u64, weight: u32) {
413
+
self.entry_count -= entry_count;
414
+
let total = &mut self.weighted_size;
415
+
*total = total.saturating_sub(weight as u64);
416
+
}
417
+
}
418
+
419
+
#[derive(Default)]
420
+
struct EntrySizeAndFrequency {
421
+
policy_weight: u64,
422
+
freq: u32,
423
+
}
424
+
425
+
impl EntrySizeAndFrequency {
426
+
fn new(policy_weight: u32) -> Self {
427
+
Self {
428
+
policy_weight: policy_weight as u64,
429
+
..Default::default()
430
+
}
431
+
}
432
+
433
+
fn add_policy_weight(&mut self, weight: u32) {
434
+
self.policy_weight += weight as u64;
435
+
}
436
+
437
+
fn add_frequency(&mut self, freq: &FrequencySketch, hash: u64) {
438
+
self.freq += freq.frequency(hash) as u32;
439
+
}
440
+
}
441
+
442
+
// Access-Order Queue Node
443
+
type AoqNode<K> = NonNull<DeqNode<KeyHashDate<K>>>;
444
+
445
+
enum AdmissionResult<K> {
446
+
Admitted {
447
+
victim_nodes: SmallVec<[AoqNode<K>; 8]>,
448
+
skipped_nodes: SmallVec<[AoqNode<K>; 4]>,
449
+
},
450
+
Rejected {
451
+
skipped_nodes: SmallVec<[AoqNode<K>; 4]>,
452
+
},
453
+
}
454
+
455
+
type CacheStore<K, V, S> = dashmap::DashMap<Arc<K>, TrioArc<ValueEntry<K, V>>, S>;
456
+
457
+
type CacheEntryRef<'a, K, V> = DashMapRef<'a, Arc<K>, TrioArc<ValueEntry<K, V>>>;
458
+
459
+
pub(crate) struct Inner<K, V, S> {
460
+
max_capacity: Option<u64>,
461
+
entry_count: AtomicCell<u64>,
462
+
weighted_size: AtomicCell<u64>,
463
+
cache: CacheStore<K, V, S>,
464
+
build_hasher: S,
465
+
deques: Mutex<Deques<K>>,
466
+
frequency_sketch: RwLock<FrequencySketch>,
467
+
frequency_sketch_enabled: AtomicBool,
468
+
read_op_ch: Receiver<ReadOp<K, V>>,
469
+
write_op_ch: Receiver<WriteOp<K, V>>,
470
+
time_to_live: Option<Duration>,
471
+
time_to_idle: Option<Duration>,
472
+
valid_after: AtomicInstant,
473
+
weigher: Option<Weigher<K, V>>,
474
+
has_expiration_clock: AtomicBool,
475
+
expiration_clock: RwLock<Option<Clock>>,
476
+
}
477
+
478
+
// functions/methods used by BaseCache
479
+
impl<K, V, S> Inner<K, V, S>
480
+
where
481
+
K: Hash + Eq + Send + Sync + 'static,
482
+
V: Send + Sync + 'static,
483
+
S: BuildHasher + Clone,
484
+
{
485
+
// Disable a Clippy warning for having more than seven arguments.
486
+
// https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments
487
+
#[allow(clippy::too_many_arguments)]
488
+
fn new(
489
+
max_capacity: Option<u64>,
490
+
initial_capacity: Option<usize>,
491
+
build_hasher: S,
492
+
weigher: Option<Weigher<K, V>>,
493
+
read_op_ch: Receiver<ReadOp<K, V>>,
494
+
write_op_ch: Receiver<WriteOp<K, V>>,
495
+
time_to_live: Option<Duration>,
496
+
time_to_idle: Option<Duration>,
497
+
) -> Self {
498
+
let initial_capacity = initial_capacity
499
+
.map(|cap| cap + WRITE_LOG_SIZE)
500
+
.unwrap_or_default();
501
+
let cache =
502
+
dashmap::DashMap::with_capacity_and_hasher(initial_capacity, build_hasher.clone());
503
+
504
+
Self {
505
+
max_capacity,
506
+
entry_count: Default::default(),
507
+
weighted_size: Default::default(),
508
+
cache,
509
+
build_hasher,
510
+
deques: Mutex::new(Default::default()),
511
+
frequency_sketch: RwLock::new(Default::default()),
512
+
frequency_sketch_enabled: Default::default(),
513
+
read_op_ch,
514
+
write_op_ch,
515
+
time_to_live,
516
+
time_to_idle,
517
+
valid_after: Default::default(),
518
+
weigher,
519
+
has_expiration_clock: AtomicBool::new(false),
520
+
expiration_clock: RwLock::new(None),
521
+
}
522
+
}
523
+
524
+
#[inline]
525
+
fn hash<Q>(&self, key: &Q) -> u64
526
+
where
527
+
Arc<K>: Borrow<Q>,
528
+
Q: Hash + Eq + ?Sized,
529
+
{
530
+
self.build_hasher.hash_one(key)
531
+
}
532
+
533
+
#[inline]
534
+
fn get<Q>(&self, key: &Q) -> Option<CacheEntryRef<'_, K, V>>
535
+
where
536
+
Arc<K>: Borrow<Q>,
537
+
Q: Hash + Eq + ?Sized,
538
+
{
539
+
self.cache.get(key)
540
+
}
541
+
542
+
#[inline]
543
+
fn remove_entry<Q>(&self, key: &Q) -> Option<KvEntry<K, V>>
544
+
where
545
+
Arc<K>: Borrow<Q>,
546
+
Q: Hash + Eq + ?Sized,
547
+
{
548
+
self.cache
549
+
.remove(key)
550
+
.map(|(key, entry)| KvEntry::new(key, entry))
551
+
}
552
+
}
553
+
554
+
// functions/methods used by BaseCache
555
+
impl<K, V, S> Inner<K, V, S> {
556
+
fn policy(&self) -> Policy {
557
+
Policy::new(self.max_capacity, self.time_to_live, self.time_to_idle)
558
+
}
559
+
560
+
#[inline]
561
+
fn time_to_live(&self) -> Option<Duration> {
562
+
self.time_to_live
563
+
}
564
+
565
+
#[inline]
566
+
fn time_to_idle(&self) -> Option<Duration> {
567
+
self.time_to_idle
568
+
}
569
+
570
+
#[inline]
571
+
fn entry_count(&self) -> u64 {
572
+
self.entry_count.load()
573
+
}
574
+
575
+
#[inline]
576
+
pub(crate) fn weighted_size(&self) -> u64 {
577
+
self.weighted_size.load()
578
+
}
579
+
580
+
#[inline]
581
+
fn has_expiry(&self) -> bool {
582
+
self.time_to_live.is_some() || self.time_to_idle.is_some()
583
+
}
584
+
585
+
#[inline]
586
+
fn is_write_order_queue_enabled(&self) -> bool {
587
+
self.time_to_live.is_some()
588
+
}
589
+
590
+
#[inline]
591
+
fn valid_after(&self) -> Option<Instant> {
592
+
self.valid_after.instant()
593
+
}
594
+
595
+
#[inline]
596
+
fn set_valid_after(&self, timestamp: Instant) {
597
+
self.valid_after.set_instant(timestamp);
598
+
}
599
+
600
+
#[inline]
601
+
fn has_valid_after(&self) -> bool {
602
+
self.valid_after.is_set()
603
+
}
604
+
605
+
#[inline]
606
+
fn weigh(&self, key: &K, value: &V) -> u32 {
607
+
self.weigher.as_ref().map(|w| w(key, value)).unwrap_or(1)
608
+
}
609
+
610
+
#[inline]
611
+
fn current_time_from_expiration_clock(&self) -> Instant {
612
+
if self.has_expiration_clock.load(Ordering::Relaxed) {
613
+
Instant::new(
614
+
self.expiration_clock
615
+
.read()
616
+
.expect("lock poisoned")
617
+
.as_ref()
618
+
.expect("Cannot get the expiration clock")
619
+
.now(),
620
+
)
621
+
} else {
622
+
Instant::now()
623
+
}
624
+
}
625
+
}
626
+
627
+
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
628
+
// This seems a false positive. The lifetimes are used in the trait bounds.
629
+
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
630
+
#[allow(clippy::extra_unused_lifetimes)]
631
+
impl<'a, K, V, S> Inner<K, V, S>
632
+
where
633
+
K: 'a + Eq + Hash,
634
+
V: 'a,
635
+
S: BuildHasher + Clone,
636
+
{
637
+
fn iter(&self) -> DashMapIter<'_, K, V, S> {
638
+
self.cache.iter()
639
+
}
640
+
}
641
+
642
+
mod batch_size {
643
+
pub(crate) const EVICTION_BATCH_SIZE: usize = 500;
644
+
}
645
+
646
+
// TODO: Divide this method into smaller methods so that unit tests can do more
647
+
// precise testing.
648
+
// - sync_reads
649
+
// - sync_writes
650
+
// - evict
651
+
// - invalidate_entries
652
+
impl<K, V, S> InnerSync for Inner<K, V, S>
653
+
where
654
+
K: Hash + Eq + Send + Sync + 'static,
655
+
V: Send + Sync + 'static,
656
+
S: BuildHasher + Clone + Send + Sync + 'static,
657
+
{
658
+
fn sync(&self, max_repeats: usize) {
659
+
let mut deqs = self.deques.lock().expect("lock poisoned");
660
+
let mut calls = 0;
661
+
let mut should_sync = true;
662
+
663
+
let current_ec = self.entry_count.load();
664
+
let current_ws = self.weighted_size.load();
665
+
let mut counters = EvictionCounters::new(current_ec, current_ws);
666
+
667
+
while should_sync && calls <= max_repeats {
668
+
let r_len = self.read_op_ch.len();
669
+
if r_len > 0 {
670
+
self.apply_reads(&mut deqs, r_len);
671
+
}
672
+
673
+
let w_len = self.write_op_ch.len();
674
+
if w_len > 0 {
675
+
self.apply_writes(&mut deqs, w_len, &mut counters);
676
+
}
677
+
678
+
if self.should_enable_frequency_sketch(&counters) {
679
+
self.enable_frequency_sketch(&counters);
680
+
}
681
+
682
+
calls += 1;
683
+
should_sync = self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
684
+
|| self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT;
685
+
}
686
+
687
+
if self.has_expiry() || self.has_valid_after() {
688
+
self.evict_expired(&mut deqs, batch_size::EVICTION_BATCH_SIZE, &mut counters);
689
+
}
690
+
691
+
// Evict if this cache has more entries than its capacity.
692
+
let weights_to_evict = self.weights_to_evict(&counters);
693
+
if weights_to_evict > 0 {
694
+
self.evict_lru_entries(
695
+
&mut deqs,
696
+
batch_size::EVICTION_BATCH_SIZE,
697
+
weights_to_evict,
698
+
&mut counters,
699
+
);
700
+
}
701
+
702
+
debug_assert_eq!(self.entry_count.load(), current_ec);
703
+
debug_assert_eq!(self.weighted_size.load(), current_ws);
704
+
self.entry_count.store(counters.entry_count);
705
+
self.weighted_size.store(counters.weighted_size);
706
+
}
707
+
708
+
fn now(&self) -> Instant {
709
+
self.current_time_from_expiration_clock()
710
+
}
711
+
}
712
+
713
+
//
714
+
// private methods
715
+
//
716
+
impl<K, V, S> Inner<K, V, S>
717
+
where
718
+
K: Hash + Eq + Send + Sync + 'static,
719
+
V: Send + Sync + 'static,
720
+
S: BuildHasher + Clone + Send + Sync + 'static,
721
+
{
722
+
fn has_enough_capacity(&self, candidate_weight: u32, counters: &EvictionCounters) -> bool {
723
+
self.max_capacity
724
+
.map(|limit| counters.weighted_size + candidate_weight as u64 <= limit)
725
+
.unwrap_or(true)
726
+
}
727
+
728
+
fn weights_to_evict(&self, counters: &EvictionCounters) -> u64 {
729
+
self.max_capacity
730
+
.map(|limit| counters.weighted_size.saturating_sub(limit))
731
+
.unwrap_or_default()
732
+
}
733
+
734
+
#[inline]
735
+
fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool {
736
+
if self.frequency_sketch_enabled.load(Ordering::Acquire) {
737
+
false
738
+
} else if let Some(max_cap) = self.max_capacity {
739
+
counters.weighted_size >= max_cap / 2
740
+
} else {
741
+
false
742
+
}
743
+
}
744
+
745
+
#[inline]
746
+
fn enable_frequency_sketch(&self, counters: &EvictionCounters) {
747
+
if let Some(max_cap) = self.max_capacity {
748
+
let c = counters;
749
+
let cap = if self.weigher.is_none() {
750
+
max_cap
751
+
} else {
752
+
(c.entry_count as f64 * (c.weighted_size as f64 / max_cap as f64)) as u64
753
+
};
754
+
self.do_enable_frequency_sketch(cap);
755
+
}
756
+
}
757
+
758
+
#[cfg(test)]
759
+
fn enable_frequency_sketch_for_testing(&self) {
760
+
if let Some(max_cap) = self.max_capacity {
761
+
self.do_enable_frequency_sketch(max_cap);
762
+
}
763
+
}
764
+
765
+
#[inline]
766
+
fn do_enable_frequency_sketch(&self, cache_capacity: u64) {
767
+
let skt_capacity = common::sketch_capacity(cache_capacity);
768
+
self.frequency_sketch
769
+
.write()
770
+
.expect("lock poisoned")
771
+
.ensure_capacity(skt_capacity);
772
+
self.frequency_sketch_enabled.store(true, Ordering::Release);
773
+
}
774
+
775
+
fn apply_reads(&self, deqs: &mut Deques<K>, count: usize) {
776
+
use ReadOp::*;
777
+
let mut freq = self.frequency_sketch.write().expect("lock poisoned");
778
+
let ch = &self.read_op_ch;
779
+
for _ in 0..count {
780
+
match ch.try_recv() {
781
+
Ok(Hit(hash, entry, timestamp)) => {
782
+
freq.increment(hash);
783
+
entry.set_last_accessed(timestamp);
784
+
if entry.is_admitted() {
785
+
deqs.move_to_back_ao(&entry);
786
+
}
787
+
}
788
+
Ok(Miss(hash)) => freq.increment(hash),
789
+
Err(_) => break,
790
+
}
791
+
}
792
+
}
793
+
794
+
fn apply_writes(&self, deqs: &mut Deques<K>, count: usize, counters: &mut EvictionCounters) {
795
+
use WriteOp::*;
796
+
let freq = self.frequency_sketch.read().expect("lock poisoned");
797
+
let ch = &self.write_op_ch;
798
+
799
+
for _ in 0..count {
800
+
match ch.try_recv() {
801
+
Ok(Upsert {
802
+
key_hash: kh,
803
+
value_entry: entry,
804
+
old_weight,
805
+
new_weight,
806
+
}) => self.handle_upsert(kh, entry, old_weight, new_weight, deqs, &freq, counters),
807
+
Ok(Remove(KvEntry { key: _key, entry })) => {
808
+
Self::handle_remove(deqs, entry, counters)
809
+
}
810
+
Err(_) => break,
811
+
};
812
+
}
813
+
}
814
+
815
+
#[allow(clippy::too_many_arguments)]
816
+
fn handle_upsert(
817
+
&self,
818
+
kh: KeyHash<K>,
819
+
entry: TrioArc<ValueEntry<K, V>>,
820
+
old_weight: u32,
821
+
new_weight: u32,
822
+
deqs: &mut Deques<K>,
823
+
freq: &FrequencySketch,
824
+
counters: &mut EvictionCounters,
825
+
) {
826
+
entry.set_dirty(false);
827
+
828
+
if entry.is_admitted() {
829
+
// The entry has been already admitted, so treat this as an update.
830
+
counters.saturating_sub(0, old_weight);
831
+
counters.saturating_add(0, new_weight);
832
+
deqs.move_to_back_ao(&entry);
833
+
deqs.move_to_back_wo(&entry);
834
+
return;
835
+
}
836
+
837
+
if self.has_enough_capacity(new_weight, counters) {
838
+
// There are enough room in the cache (or the cache is unbounded).
839
+
// Add the candidate to the deques.
840
+
self.handle_admit(kh, &entry, new_weight, deqs, counters);
841
+
return;
842
+
}
843
+
844
+
if let Some(max) = self.max_capacity {
845
+
if new_weight as u64 > max {
846
+
// The candidate is too big to fit in the cache. Reject it.
847
+
self.cache.remove(&Arc::clone(&kh.key));
848
+
return;
849
+
}
850
+
}
851
+
852
+
let skipped_nodes;
853
+
let mut candidate = EntrySizeAndFrequency::new(new_weight);
854
+
candidate.add_frequency(freq, kh.hash);
855
+
856
+
// Try to admit the candidate.
857
+
match Self::admit(&candidate, &self.cache, deqs, freq) {
858
+
AdmissionResult::Admitted {
859
+
victim_nodes,
860
+
skipped_nodes: mut skipped,
861
+
} => {
862
+
// Try to remove the victims from the cache (hash map).
863
+
for victim in victim_nodes {
864
+
if let Some((_vic_key, vic_entry)) =
865
+
self.cache.remove(unsafe { victim.as_ref().element.key() })
866
+
{
867
+
// And then remove the victim from the deques.
868
+
Self::handle_remove(deqs, vic_entry, counters);
869
+
} else {
870
+
// Could not remove the victim from the cache. Skip this
871
+
// victim node as its ValueEntry might have been
872
+
// invalidated. Add it to the skipped nodes.
873
+
skipped.push(victim);
874
+
}
875
+
}
876
+
skipped_nodes = skipped;
877
+
878
+
// Add the candidate to the deques.
879
+
self.handle_admit(kh, &entry, new_weight, deqs, counters);
880
+
}
881
+
AdmissionResult::Rejected { skipped_nodes: s } => {
882
+
skipped_nodes = s;
883
+
// Remove the candidate from the cache (hash map).
884
+
self.cache.remove(&Arc::clone(&kh.key));
885
+
}
886
+
};
887
+
888
+
// Move the skipped nodes to the back of the deque. We do not unlink (drop)
889
+
// them because ValueEntries in the write op queue should be pointing them.
890
+
for node in skipped_nodes {
891
+
unsafe { deqs.probation.move_to_back(node) };
892
+
}
893
+
}
894
+
895
+
/// Performs size-aware admission explained in the paper:
896
+
/// [Lightweight Robust Size Aware Cache Management][size-aware-cache-paper]
897
+
/// by Gil Einziger, Ohad Eytan, Roy Friedman, Ben Manes.
898
+
///
899
+
/// [size-aware-cache-paper]: https://arxiv.org/abs/2105.08770
900
+
///
901
+
/// There are some modifications in this implementation:
902
+
/// - To admit to the main space, candidate's frequency must be higher than
903
+
/// the aggregated frequencies of the potential victims. (In the paper,
904
+
/// `>=` operator is used rather than `>`) The `>` operator will do a better
905
+
/// job to prevent the main space from polluting.
906
+
/// - When a candidate is rejected, the potential victims will stay at the LRU
907
+
/// position of the probation access-order queue. (In the paper, they will be
908
+
/// promoted (to the MRU position?) to force the eviction policy to select a
909
+
/// different set of victims for the next candidate). We may implement the
910
+
/// paper's behavior later?
911
+
///
912
+
#[inline]
913
+
fn admit(
914
+
candidate: &EntrySizeAndFrequency,
915
+
cache: &CacheStore<K, V, S>,
916
+
deqs: &Deques<K>,
917
+
freq: &FrequencySketch,
918
+
) -> AdmissionResult<K> {
919
+
const MAX_CONSECUTIVE_RETRIES: usize = 5;
920
+
let mut retries = 0;
921
+
922
+
let mut victims = EntrySizeAndFrequency::default();
923
+
let mut victim_nodes = SmallVec::default();
924
+
let mut skipped_nodes = SmallVec::default();
925
+
926
+
// Get first potential victim at the LRU position.
927
+
let mut next_victim = deqs.probation.peek_front_ptr();
928
+
929
+
// Aggregate potential victims.
930
+
while victims.policy_weight < candidate.policy_weight {
931
+
if candidate.freq < victims.freq {
932
+
break;
933
+
}
934
+
if let Some(victim) = next_victim.take() {
935
+
next_victim = DeqNode::next_node_ptr(victim);
936
+
let vic_elem = &unsafe { victim.as_ref() }.element;
937
+
938
+
if let Some(vic_entry) = cache.get(vic_elem.key()) {
939
+
victims.add_policy_weight(vic_entry.policy_weight());
940
+
victims.add_frequency(freq, vic_elem.hash());
941
+
victim_nodes.push(victim);
942
+
retries = 0;
943
+
} else {
944
+
// Could not get the victim from the cache (hash map). Skip this node
945
+
// as its ValueEntry might have been invalidated.
946
+
skipped_nodes.push(victim);
947
+
948
+
retries += 1;
949
+
if retries > MAX_CONSECUTIVE_RETRIES {
950
+
break;
951
+
}
952
+
}
953
+
} else {
954
+
// No more potential victims.
955
+
break;
956
+
}
957
+
}
958
+
959
+
// Admit or reject the candidate.
960
+
961
+
// TODO: Implement some randomness to mitigate hash DoS attack.
962
+
// See Caffeine's implementation.
963
+
964
+
if victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq {
965
+
AdmissionResult::Admitted {
966
+
victim_nodes,
967
+
skipped_nodes,
968
+
}
969
+
} else {
970
+
AdmissionResult::Rejected { skipped_nodes }
971
+
}
972
+
}
973
+
974
+
fn handle_admit(
975
+
&self,
976
+
kh: KeyHash<K>,
977
+
entry: &TrioArc<ValueEntry<K, V>>,
978
+
policy_weight: u32,
979
+
deqs: &mut Deques<K>,
980
+
counters: &mut EvictionCounters,
981
+
) {
982
+
let key = Arc::clone(&kh.key);
983
+
counters.saturating_add(1, policy_weight);
984
+
deqs.push_back_ao(
985
+
CacheRegion::MainProbation,
986
+
KeyHashDate::new(kh, entry.entry_info()),
987
+
entry,
988
+
);
989
+
if self.is_write_order_queue_enabled() {
990
+
deqs.push_back_wo(KeyDate::new(key, entry.entry_info()), entry);
991
+
}
992
+
entry.set_admitted(true);
993
+
}
994
+
995
+
fn handle_remove(
996
+
deqs: &mut Deques<K>,
997
+
entry: TrioArc<ValueEntry<K, V>>,
998
+
counters: &mut EvictionCounters,
999
+
) {
1000
+
if entry.is_admitted() {
1001
+
entry.set_admitted(false);
1002
+
counters.saturating_sub(1, entry.policy_weight());
1003
+
// The following two unlink_* functions will unset the deq nodes.
1004
+
deqs.unlink_ao(&entry);
1005
+
Deques::unlink_wo(&mut deqs.write_order, &entry);
1006
+
} else {
1007
+
entry.unset_q_nodes();
1008
+
}
1009
+
}
1010
+
1011
+
fn handle_remove_with_deques(
1012
+
ao_deq_name: &str,
1013
+
ao_deq: &mut Deque<KeyHashDate<K>>,
1014
+
wo_deq: &mut Deque<KeyDate<K>>,
1015
+
entry: TrioArc<ValueEntry<K, V>>,
1016
+
counters: &mut EvictionCounters,
1017
+
) {
1018
+
if entry.is_admitted() {
1019
+
entry.set_admitted(false);
1020
+
counters.saturating_sub(1, entry.policy_weight());
1021
+
// The following two unlink_* functions will unset the deq nodes.
1022
+
Deques::unlink_ao_from_deque(ao_deq_name, ao_deq, &entry);
1023
+
Deques::unlink_wo(wo_deq, &entry);
1024
+
} else {
1025
+
entry.unset_q_nodes();
1026
+
}
1027
+
}
1028
+
1029
+
fn evict_expired(
1030
+
&self,
1031
+
deqs: &mut Deques<K>,
1032
+
batch_size: usize,
1033
+
counters: &mut EvictionCounters,
1034
+
) {
1035
+
let now = self.current_time_from_expiration_clock();
1036
+
1037
+
if self.is_write_order_queue_enabled() {
1038
+
self.remove_expired_wo(deqs, batch_size, now, counters);
1039
+
}
1040
+
1041
+
if self.time_to_idle.is_some() || self.has_valid_after() {
1042
+
let (window, probation, protected, wo) = (
1043
+
&mut deqs.window,
1044
+
&mut deqs.probation,
1045
+
&mut deqs.protected,
1046
+
&mut deqs.write_order,
1047
+
);
1048
+
1049
+
let mut rm_expired_ao =
1050
+
|name, deq| self.remove_expired_ao(name, deq, wo, batch_size, now, counters);
1051
+
1052
+
rm_expired_ao("window", window);
1053
+
rm_expired_ao("probation", probation);
1054
+
rm_expired_ao("protected", protected);
1055
+
}
1056
+
}
1057
+
1058
+
#[inline]
1059
+
fn remove_expired_ao(
1060
+
&self,
1061
+
deq_name: &str,
1062
+
deq: &mut Deque<KeyHashDate<K>>,
1063
+
write_order_deq: &mut Deque<KeyDate<K>>,
1064
+
batch_size: usize,
1065
+
now: Instant,
1066
+
counters: &mut EvictionCounters,
1067
+
) {
1068
+
let tti = &self.time_to_idle;
1069
+
let va = &self.valid_after();
1070
+
for _ in 0..batch_size {
1071
+
// Peek the front node of the deque and check if it is expired.
1072
+
let key = deq.peek_front().and_then(|node| {
1073
+
// TODO: Skip the entry if it is dirty. See `evict_lru_entries` method as an example.
1074
+
if is_expired_entry_ao(tti, va, node, now) {
1075
+
Some(Arc::clone(node.element.key()))
1076
+
} else {
1077
+
None
1078
+
}
1079
+
});
1080
+
1081
+
if key.is_none() {
1082
+
break;
1083
+
}
1084
+
1085
+
let key = key.as_ref().unwrap();
1086
+
1087
+
// Remove the key from the map only when the entry is really
1088
+
// expired. This check is needed because it is possible that the entry in
1089
+
// the map has been updated or deleted but its deque node we checked
1090
+
// above have not been updated yet.
1091
+
let maybe_entry = self
1092
+
.cache
1093
+
.remove_if(key, |_, v| is_expired_entry_ao(tti, va, v, now));
1094
+
1095
+
if let Some((_k, entry)) = maybe_entry {
1096
+
Self::handle_remove_with_deques(deq_name, deq, write_order_deq, entry, counters);
1097
+
} else if !self.try_skip_updated_entry(key, deq_name, deq, write_order_deq) {
1098
+
break;
1099
+
}
1100
+
}
1101
+
}
1102
+
1103
+
#[inline]
1104
+
fn try_skip_updated_entry(
1105
+
&self,
1106
+
key: &K,
1107
+
deq_name: &str,
1108
+
deq: &mut Deque<KeyHashDate<K>>,
1109
+
write_order_deq: &mut Deque<KeyDate<K>>,
1110
+
) -> bool {
1111
+
if let Some(entry) = self.cache.get(key) {
1112
+
if entry.is_dirty() {
1113
+
// The key exists and the entry has been updated.
1114
+
Deques::move_to_back_ao_in_deque(deq_name, deq, &entry);
1115
+
Deques::move_to_back_wo_in_deque(write_order_deq, &entry);
1116
+
true
1117
+
} else {
1118
+
// The key exists but something unexpected.
1119
+
false
1120
+
}
1121
+
} else {
1122
+
// Skip this entry as the key might have been invalidated. Since the
1123
+
// invalidated ValueEntry (which should be still in the write op
1124
+
// queue) has a pointer to this node, move the node to the back of
1125
+
// the deque instead of popping (dropping) it.
1126
+
deq.move_front_to_back();
1127
+
true
1128
+
}
1129
+
}
1130
+
1131
+
#[inline]
1132
+
fn remove_expired_wo(
1133
+
&self,
1134
+
deqs: &mut Deques<K>,
1135
+
batch_size: usize,
1136
+
now: Instant,
1137
+
counters: &mut EvictionCounters,
1138
+
) {
1139
+
let ttl = &self.time_to_live;
1140
+
let va = &self.valid_after();
1141
+
for _ in 0..batch_size {
1142
+
let key = deqs.write_order.peek_front().and_then(|node| {
1143
+
// TODO: Skip the entry if it is dirty. See `evict_lru_entries` method as an example.
1144
+
if is_expired_entry_wo(ttl, va, node, now) {
1145
+
Some(Arc::clone(node.element.key()))
1146
+
} else {
1147
+
None
1148
+
}
1149
+
});
1150
+
1151
+
if key.is_none() {
1152
+
break;
1153
+
}
1154
+
1155
+
let key = key.as_ref().unwrap();
1156
+
1157
+
let maybe_entry = self
1158
+
.cache
1159
+
.remove_if(key, |_, v| is_expired_entry_wo(ttl, va, v, now));
1160
+
1161
+
if let Some((_k, entry)) = maybe_entry {
1162
+
Self::handle_remove(deqs, entry, counters);
1163
+
} else if let Some(entry) = self.cache.get(key) {
1164
+
if entry.is_dirty() {
1165
+
deqs.move_to_back_ao(&entry);
1166
+
deqs.move_to_back_wo(&entry);
1167
+
} else {
1168
+
// The key exists but something unexpected. Break.
1169
+
break;
1170
+
}
1171
+
} else {
1172
+
// Skip this entry as the key might have been invalidated. Since the
1173
+
// invalidated ValueEntry (which should be still in the write op
1174
+
// queue) has a pointer to this node, move the node to the back of
1175
+
// the deque instead of popping (dropping) it.
1176
+
deqs.write_order.move_front_to_back();
1177
+
}
1178
+
}
1179
+
}
1180
+
1181
+
fn evict_lru_entries(
1182
+
&self,
1183
+
deqs: &mut Deques<K>,
1184
+
batch_size: usize,
1185
+
weights_to_evict: u64,
1186
+
counters: &mut EvictionCounters,
1187
+
) {
1188
+
const DEQ_NAME: &str = "probation";
1189
+
let mut evicted = 0u64;
1190
+
let (deq, write_order_deq) = (&mut deqs.probation, &mut deqs.write_order);
1191
+
1192
+
for _ in 0..batch_size {
1193
+
if evicted >= weights_to_evict {
1194
+
break;
1195
+
}
1196
+
1197
+
let maybe_key_and_ts = deq.peek_front().map(|node| {
1198
+
let entry_info = node.element.entry_info();
1199
+
(
1200
+
Arc::clone(node.element.key()),
1201
+
entry_info.is_dirty(),
1202
+
entry_info.last_modified(),
1203
+
)
1204
+
});
1205
+
1206
+
let (key, ts) = match maybe_key_and_ts {
1207
+
Some((key, false, Some(ts))) => (key, ts),
1208
+
// TODO: Remove the second pattern `Some((_key, false, None))` once we change
1209
+
// `last_modified` and `last_accessed` in `EntryInfo` from `Option<Instant>` to
1210
+
// `Instant`.
1211
+
Some((key, true, _)) | Some((key, false, None)) => {
1212
+
if self.try_skip_updated_entry(&key, DEQ_NAME, deq, write_order_deq) {
1213
+
continue;
1214
+
} else {
1215
+
break;
1216
+
}
1217
+
}
1218
+
None => break,
1219
+
};
1220
+
1221
+
let maybe_entry = self.cache.remove_if(&key, |_, v| {
1222
+
if let Some(lm) = v.last_modified() {
1223
+
lm == ts
1224
+
} else {
1225
+
false
1226
+
}
1227
+
});
1228
+
1229
+
if let Some((_k, entry)) = maybe_entry {
1230
+
let weight = entry.policy_weight();
1231
+
Self::handle_remove_with_deques(DEQ_NAME, deq, write_order_deq, entry, counters);
1232
+
evicted = evicted.saturating_add(weight as u64);
1233
+
} else if !self.try_skip_updated_entry(&key, DEQ_NAME, deq, write_order_deq) {
1234
+
break;
1235
+
}
1236
+
}
1237
+
}
1238
+
}
1239
+
1240
+
//
1241
+
// for testing
1242
+
//
1243
+
#[cfg(test)]
1244
+
impl<K, V, S> Inner<K, V, S>
1245
+
where
1246
+
K: Hash + Eq,
1247
+
S: BuildHasher + Clone,
1248
+
{
1249
+
fn set_expiration_clock(&self, clock: Option<Clock>) {
1250
+
let mut exp_clock = self.expiration_clock.write().expect("lock poisoned");
1251
+
if let Some(clock) = clock {
1252
+
*exp_clock = Some(clock);
1253
+
self.has_expiration_clock.store(true, Ordering::SeqCst);
1254
+
} else {
1255
+
self.has_expiration_clock.store(false, Ordering::SeqCst);
1256
+
*exp_clock = None;
1257
+
}
1258
+
}
1259
+
}
1260
+
1261
+
//
1262
+
// private free-standing functions
1263
+
//
1264
+
#[inline]
1265
+
fn is_expired_entry_ao(
1266
+
time_to_idle: &Option<Duration>,
1267
+
valid_after: &Option<Instant>,
1268
+
entry: &impl AccessTime,
1269
+
now: Instant,
1270
+
) -> bool {
1271
+
if let Some(ts) = entry.last_accessed() {
1272
+
if let Some(va) = valid_after {
1273
+
if ts < *va {
1274
+
return true;
1275
+
}
1276
+
}
1277
+
if let Some(tti) = time_to_idle {
1278
+
let checked_add = ts.checked_add(*tti);
1279
+
if checked_add.is_none() {
1280
+
panic!("ttl overflow")
1281
+
}
1282
+
return checked_add.unwrap() <= now;
1283
+
}
1284
+
}
1285
+
false
1286
+
}
1287
+
1288
+
#[inline]
1289
+
fn is_expired_entry_wo(
1290
+
time_to_live: &Option<Duration>,
1291
+
valid_after: &Option<Instant>,
1292
+
entry: &impl AccessTime,
1293
+
now: Instant,
1294
+
) -> bool {
1295
+
if let Some(ts) = entry.last_modified() {
1296
+
if let Some(va) = valid_after {
1297
+
if ts < *va {
1298
+
return true;
1299
+
}
1300
+
}
1301
+
if let Some(ttl) = time_to_live {
1302
+
let checked_add = ts.checked_add(*ttl);
1303
+
if checked_add.is_none() {
1304
+
panic!("ttl overflow");
1305
+
}
1306
+
return checked_add.unwrap() <= now;
1307
+
}
1308
+
}
1309
+
false
1310
+
}
1311
+
1312
+
#[cfg(test)]
mod tests {
    use super::BaseCache;

    // Verifies that the frequency-sketch table length is derived from the max
    // capacity without overflowing `usize`, on both 32-bit and 64-bit targets.
    #[cfg_attr(target_pointer_width = "16", ignore)]
    #[test]
    fn test_skt_capacity_will_not_overflow() {
        use std::collections::hash_map::RandomState;

        // power of two
        let pot = |exp| 2u64.pow(exp);

        // Builds a cache with the given max capacity and asserts the resulting
        // frequency-sketch table length.
        let ensure_sketch_len = |max_capacity, len, name| {
            let cache = BaseCache::<u8, u8>::new(
                Some(max_capacity),
                None,
                RandomState::default(),
                None,
                None,
                None,
            );
            cache.inner.enable_frequency_sketch_for_testing();
            assert_eq!(
                cache
                    .inner
                    .frequency_sketch
                    .read()
                    .expect("lock poisoned")
                    .table_len(),
                len as usize,
                "{}",
                name
            );
        };

        if cfg!(target_pointer_width = "32") {
            let pot24 = pot(24);
            let pot16 = pot(16);
            ensure_sketch_len(0, 128, "0");
            ensure_sketch_len(128, 128, "128");
            ensure_sketch_len(pot16, pot16, "pot16");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot24 - 1, pot24, "pot24 - 1");
            ensure_sketch_len(pot24, pot24, "pot24");
            ensure_sketch_len(pot(27), pot24, "pot(27)");
            ensure_sketch_len(u32::MAX as u64, pot24, "u32::MAX");
        } else {
            // target_pointer_width: 64 or larger.
            let pot30 = pot(30);
            let pot16 = pot(16);
            ensure_sketch_len(0, 128, "0");
            ensure_sketch_len(128, 128, "128");
            ensure_sketch_len(pot16, pot16, "pot16");
            // due to ceiling to next_power_of_two
            ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");

            // The following tests will allocate large memory (~8GiB).
            // Skip when running on Circle CI.
            if !cfg!(circleci) {
                // due to ceiling to next_power_of_two
                ensure_sketch_len(pot30 - 1, pot30, "pot30- 1");
                ensure_sketch_len(pot30, pot30, "pot30");
                ensure_sketch_len(u64::MAX, pot30, "u64::MAX");
            }
        };
    }
}
+249
crates/mini-moka-vendored/src/sync/builder.rs
+249
crates/mini-moka-vendored/src/sync/builder.rs
···
1
+
use super::Cache;
2
+
use crate::{common::builder_utils, common::concurrent::Weigher};
3
+
4
+
use std::{
5
+
collections::hash_map::RandomState,
6
+
hash::{BuildHasher, Hash},
7
+
marker::PhantomData,
8
+
sync::Arc,
9
+
time::Duration,
10
+
};
11
+
12
+
/// Builds a [`Cache`][cache-struct] with various configuration knobs.
13
+
///
14
+
/// [cache-struct]: ./struct.Cache.html
15
+
///
16
+
/// # Examples
17
+
///
18
+
/// ```rust
19
+
/// use mini_moka::sync::Cache;
20
+
/// use std::time::Duration;
21
+
///
22
+
/// let cache = Cache::builder()
23
+
/// // Max 10,000 entries
24
+
/// .max_capacity(10_000)
25
+
/// // Time to live (TTL): 30 minutes
26
+
/// .time_to_live(Duration::from_secs(30 * 60))
27
+
/// // Time to idle (TTI): 5 minutes
28
+
/// .time_to_idle(Duration::from_secs( 5 * 60))
29
+
/// // Create the cache.
30
+
/// .build();
31
+
///
32
+
/// // This entry will expire after 5 minutes (TTI) if there is no get().
33
+
/// cache.insert(0, "zero");
34
+
///
35
+
/// // This get() will extend the entry life for another 5 minutes.
36
+
/// cache.get(&0);
37
+
///
38
+
/// // Even though we keep calling get(), the entry will expire
39
+
/// // after 30 minutes (TTL) from the insert().
40
+
/// ```
41
+
///
42
+
#[must_use]
pub struct CacheBuilder<K, V, C> {
    // Max weighted capacity to pass to the cache; `None` when not configured.
    max_capacity: Option<u64>,
    // Initial capacity (number of entries) hint; `None` when not configured.
    initial_capacity: Option<usize>,
    // Optional closure computing the relative weight (`u32`) of each entry.
    weigher: Option<Weigher<K, V>>,
    // Time-to-live: expire entries this long after `insert`.
    time_to_live: Option<Duration>,
    // Time-to-idle: expire entries this long after the last `get`/`insert`.
    time_to_idle: Option<Duration>,
    // Marks which cache type `C` this builder produces; carries no runtime data.
    cache_type: PhantomData<C>,
}
51
+
52
+
impl<K, V> Default for CacheBuilder<K, V, Cache<K, V, RandomState>>
53
+
where
54
+
K: Eq + Hash + Send + Sync + 'static,
55
+
V: Clone + Send + Sync + 'static,
56
+
{
57
+
fn default() -> Self {
58
+
Self {
59
+
max_capacity: None,
60
+
initial_capacity: None,
61
+
weigher: None,
62
+
time_to_live: None,
63
+
time_to_idle: None,
64
+
cache_type: Default::default(),
65
+
}
66
+
}
67
+
}
68
+
69
+
impl<K, V> CacheBuilder<K, V, Cache<K, V, RandomState>>
70
+
where
71
+
K: Eq + Hash + Send + Sync + 'static,
72
+
V: Clone + Send + Sync + 'static,
73
+
{
74
+
/// Construct a new `CacheBuilder` that will be used to build a `Cache` or
75
+
/// `SegmentedCache` holding up to `max_capacity` entries.
76
+
pub fn new(max_capacity: u64) -> Self {
77
+
Self {
78
+
max_capacity: Some(max_capacity),
79
+
..Default::default()
80
+
}
81
+
}
82
+
83
+
/// Builds a `Cache<K, V>`.
84
+
///
85
+
/// If you want to build a `SegmentedCache<K, V>`, call `segments` method before
86
+
/// calling this method.
87
+
///
88
+
/// # Panics
89
+
///
90
+
/// Panics if configured with either `time_to_live` or `time_to_idle` higher than
91
+
/// 1000 years. This is done to protect against overflow when computing key
92
+
/// expiration.
93
+
pub fn build(self) -> Cache<K, V, RandomState> {
94
+
let build_hasher = RandomState::default();
95
+
builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle);
96
+
Cache::with_everything(
97
+
self.max_capacity,
98
+
self.initial_capacity,
99
+
build_hasher,
100
+
self.weigher,
101
+
self.time_to_live,
102
+
self.time_to_idle,
103
+
)
104
+
}
105
+
106
+
/// Builds a `Cache<K, V, S>`, with the given `hasher`.
107
+
///
108
+
/// If you want to build a `SegmentedCache<K, V>`, call `segments` method before
109
+
/// calling this method.
110
+
///
111
+
/// # Panics
112
+
///
113
+
/// Panics if configured with either `time_to_live` or `time_to_idle` higher than
114
+
/// 1000 years. This is done to protect against overflow when computing key
115
+
/// expiration.
116
+
pub fn build_with_hasher<S>(self, hasher: S) -> Cache<K, V, S>
117
+
where
118
+
S: BuildHasher + Clone + Send + Sync + 'static,
119
+
{
120
+
builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle);
121
+
Cache::with_everything(
122
+
self.max_capacity,
123
+
self.initial_capacity,
124
+
hasher,
125
+
self.weigher,
126
+
self.time_to_live,
127
+
self.time_to_idle,
128
+
)
129
+
}
130
+
}
131
+
132
+
impl<K, V, C> CacheBuilder<K, V, C> {
133
+
/// Sets the max capacity of the cache.
134
+
pub fn max_capacity(self, max_capacity: u64) -> Self {
135
+
Self {
136
+
max_capacity: Some(max_capacity),
137
+
..self
138
+
}
139
+
}
140
+
141
+
/// Sets the initial capacity (number of entries) of the cache.
142
+
pub fn initial_capacity(self, number_of_entries: usize) -> Self {
143
+
Self {
144
+
initial_capacity: Some(number_of_entries),
145
+
..self
146
+
}
147
+
}
148
+
149
+
/// Sets the weigher closure of the cache.
150
+
///
151
+
/// The closure should take `&K` and `&V` as the arguments and returns a `u32`
152
+
/// representing the relative size of the entry.
153
+
pub fn weigher(self, weigher: impl Fn(&K, &V) -> u32 + Send + Sync + 'static) -> Self {
154
+
Self {
155
+
weigher: Some(Arc::new(weigher)),
156
+
..self
157
+
}
158
+
}
159
+
160
+
/// Sets the time to live of the cache.
161
+
///
162
+
/// A cached entry will be expired after the specified duration past from
163
+
/// `insert`.
164
+
///
165
+
/// # Panics
166
+
///
167
+
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
168
+
/// than 1000 years. This is done to protect against overflow when computing key
169
+
/// expiration.
170
+
pub fn time_to_live(self, duration: Duration) -> Self {
171
+
Self {
172
+
time_to_live: Some(duration),
173
+
..self
174
+
}
175
+
}
176
+
177
+
/// Sets the time to idle of the cache.
178
+
///
179
+
/// A cached entry will be expired after the specified duration past from `get`
180
+
/// or `insert`.
181
+
///
182
+
/// # Panics
183
+
///
184
+
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
185
+
/// than 1000 years. This is done to protect against overflow when computing key
186
+
/// expiration.
187
+
pub fn time_to_idle(self, duration: Duration) -> Self {
188
+
Self {
189
+
time_to_idle: Some(duration),
190
+
..self
191
+
}
192
+
}
193
+
}
194
+
195
+
#[cfg(test)]
mod tests {
    use super::CacheBuilder;

    use std::time::Duration;

    // Verifies that builder settings are reflected in the built cache's policy
    // and that basic insert/get round-trips work.
    #[test]
    fn build_cache() {
        // Cache<char, String>
        let cache = CacheBuilder::new(100).build();
        let policy = cache.policy();

        assert_eq!(policy.max_capacity(), Some(100));
        assert_eq!(policy.time_to_live(), None);
        assert_eq!(policy.time_to_idle(), None);

        cache.insert('a', "Alice");
        assert_eq!(cache.get(&'a'), Some("Alice"));

        let cache = CacheBuilder::new(100)
            .time_to_live(Duration::from_secs(45 * 60))
            .time_to_idle(Duration::from_secs(15 * 60))
            .build();
        let policy = cache.policy();

        assert_eq!(policy.max_capacity(), Some(100));
        assert_eq!(policy.time_to_live(), Some(Duration::from_secs(45 * 60)));
        assert_eq!(policy.time_to_idle(), Some(Duration::from_secs(15 * 60)));

        cache.insert('a', "Alice");
        assert_eq!(cache.get(&'a'), Some("Alice"));
    }

    // A TTL of 1000 years + 1 second must be rejected at build time.
    #[test]
    #[should_panic(expected = "time_to_live is longer than 1000 years")]
    fn build_cache_too_long_ttl() {
        let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600;
        let builder: CacheBuilder<char, String, _> = CacheBuilder::new(100);
        let duration = Duration::from_secs(thousand_years_secs);
        builder
            .time_to_live(duration + Duration::from_secs(1))
            .build();
    }

    // A TTI of 1000 years + 1 second must be rejected at build time.
    #[test]
    #[should_panic(expected = "time_to_idle is longer than 1000 years")]
    fn build_cache_too_long_tti() {
        let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600;
        let builder: CacheBuilder<char, String, _> = CacheBuilder::new(100);
        let duration = Duration::from_secs(thousand_years_secs);
        builder
            .time_to_idle(duration + Duration::from_secs(1))
            .build();
    }
}
+1120
crates/mini-moka-vendored/src/sync/cache.rs
+1120
crates/mini-moka-vendored/src/sync/cache.rs
···
1
+
use super::{base_cache::BaseCache, CacheBuilder, ConcurrentCacheExt, EntryRef, Iter};
2
+
use crate::{
3
+
common::{
4
+
concurrent::{
5
+
constants::{MAX_SYNC_REPEATS, WRITE_RETRY_INTERVAL_MICROS},
6
+
housekeeper::{Housekeeper, InnerSync},
7
+
Weigher, WriteOp,
8
+
},
9
+
time::Instant,
10
+
},
11
+
Policy,
12
+
};
13
+
14
+
use crossbeam_channel::{Sender, TrySendError};
15
+
use std::{
16
+
borrow::Borrow,
17
+
collections::hash_map::RandomState,
18
+
fmt,
19
+
hash::{BuildHasher, Hash},
20
+
sync::Arc,
21
+
time::Duration,
22
+
};
23
+
24
+
/// A thread-safe concurrent in-memory cache built upon [`dashmap::DashMap`][dashmap].
25
+
///
26
+
/// The `Cache` uses `DashMap` as the central key-value storage. It performs a
27
+
/// best-effort bounding of the map using an entry replacement algorithm to determine
28
+
/// which entries to evict when the capacity is exceeded.
29
+
///
30
+
/// To use this cache, enable a crate feature called "dash" in your Cargo.toml.
31
+
/// Please note that the API of `dash` cache will _be changed very often_ in next few
32
+
/// releases as this is yet an experimental component.
33
+
///
34
+
/// # Examples
35
+
///
36
+
/// Cache entries are manually added using [`insert`](#method.insert) method, and are
37
+
/// stored in the cache until either evicted or manually invalidated.
38
+
///
39
+
/// Here's an example of reading and updating a cache by using multiple threads:
40
+
///
41
+
/// ```rust
42
+
/// use mini_moka::sync::Cache;
43
+
///
44
+
/// use std::thread;
45
+
///
46
+
/// fn value(n: usize) -> String {
47
+
/// format!("value {}", n)
48
+
/// }
49
+
///
50
+
/// const NUM_THREADS: usize = 16;
51
+
/// const NUM_KEYS_PER_THREAD: usize = 64;
52
+
///
53
+
/// // Create a cache that can store up to 10,000 entries.
54
+
/// let cache = Cache::new(10_000);
55
+
///
56
+
/// // Spawn threads and read and update the cache simultaneously.
57
+
/// let threads: Vec<_> = (0..NUM_THREADS)
58
+
/// .map(|i| {
59
+
/// // To share the same cache across the threads, clone it.
60
+
/// // This is a cheap operation.
61
+
/// let my_cache = cache.clone();
62
+
/// let start = i * NUM_KEYS_PER_THREAD;
63
+
/// let end = (i + 1) * NUM_KEYS_PER_THREAD;
64
+
///
65
+
/// thread::spawn(move || {
66
+
/// // Insert 64 entries. (NUM_KEYS_PER_THREAD = 64)
67
+
/// for key in start..end {
68
+
/// my_cache.insert(key, value(key));
69
+
/// // get() returns Option<String>, a clone of the stored value.
70
+
/// assert_eq!(my_cache.get(&key), Some(value(key)));
71
+
/// }
72
+
///
73
+
/// // Invalidate every 4 element of the inserted entries.
74
+
/// for key in (start..end).step_by(4) {
75
+
/// my_cache.invalidate(&key);
76
+
/// }
77
+
/// })
78
+
/// })
79
+
/// .collect();
80
+
///
81
+
/// // Wait for all threads to complete.
82
+
/// threads.into_iter().for_each(|t| t.join().expect("Failed"));
83
+
///
84
+
/// // Verify the result.
85
+
/// for key in 0..(NUM_THREADS * NUM_KEYS_PER_THREAD) {
86
+
/// if key % 4 == 0 {
87
+
/// assert_eq!(cache.get(&key), None);
88
+
/// } else {
89
+
/// assert_eq!(cache.get(&key), Some(value(key)));
90
+
/// }
91
+
/// }
92
+
/// ```
93
+
///
94
+
/// # Avoiding to clone the value at `get`
95
+
///
96
+
/// The return type of `get` method is `Option<V>` instead of `Option<&V>`. Every
97
+
/// time `get` is called for an existing key, it creates a clone of the stored value
98
+
/// `V` and returns it. This is because the `Cache` allows concurrent updates from
99
+
/// threads so a value stored in the cache can be dropped or replaced at any time by
100
+
/// any other thread. `get` cannot return a reference `&V` as it is impossible to
101
+
/// guarantee the value outlives the reference.
102
+
///
103
+
/// If you want to store values that will be expensive to clone, wrap them by
104
+
/// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a
105
+
/// thread-safe reference-counted pointer and its `clone()` method is cheap.
106
+
///
107
+
/// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
108
+
///
109
+
/// # Size-based Eviction
110
+
///
111
+
/// ```rust
112
+
/// use std::convert::TryInto;
113
+
/// use mini_moka::sync::Cache;
114
+
///
115
+
/// // Evict based on the number of entries in the cache.
116
+
/// let cache = Cache::builder()
117
+
/// // Up to 10,000 entries.
118
+
/// .max_capacity(10_000)
119
+
/// // Create the cache.
120
+
/// .build();
121
+
/// cache.insert(1, "one".to_string());
122
+
///
123
+
/// // Evict based on the byte length of strings in the cache.
124
+
/// let cache = Cache::builder()
125
+
/// // A weigher closure takes &K and &V and returns a u32
126
+
/// // representing the relative size of the entry.
127
+
/// .weigher(|_key, value: &String| -> u32 {
128
+
/// value.len().try_into().unwrap_or(u32::MAX)
129
+
/// })
130
+
/// // This cache will hold up to 32MiB of values.
131
+
/// .max_capacity(32 * 1024 * 1024)
132
+
/// .build();
133
+
/// cache.insert(2, "two".to_string());
134
+
/// ```
135
+
///
136
+
/// If your cache should not grow beyond a certain size, use the `max_capacity`
137
+
/// method of the [`CacheBuilder`][builder-struct] to set the upper bound. The cache
138
+
/// will try to evict entries that have not been used recently or very often.
139
+
///
140
+
/// At the cache creation time, a weigher closure can be set by the `weigher` method
141
+
/// of the `CacheBuilder`. A weigher closure takes `&K` and `&V` as the arguments and
142
+
/// returns a `u32` representing the relative size of the entry:
143
+
///
144
+
/// - If the `weigher` is _not_ set, the cache will treat each entry as having the same
145
+
/// size of `1`. This means the cache will be bounded by the number of entries.
146
+
/// - If the `weigher` is set, the cache will call the weigher to calculate the
147
+
/// weighted size (relative size) on an entry. This means the cache will be bounded
148
+
/// by the total weighted size of entries.
149
+
///
150
+
/// Note that weighted sizes are not used when making eviction selections.
151
+
///
152
+
/// [builder-struct]: ./struct.CacheBuilder.html
153
+
///
154
+
/// # Time-based Expirations
155
+
///
156
+
/// `Cache` supports the following expiration policies:
157
+
///
158
+
/// - **Time to live**: A cached entry will be expired after the specified duration
159
+
/// past from `insert`.
160
+
/// - **Time to idle**: A cached entry will be expired after the specified duration
161
+
/// past from `get` or `insert`.
162
+
///
163
+
/// ```rust
164
+
/// use mini_moka::sync::Cache;
165
+
/// use std::time::Duration;
166
+
///
167
+
/// let cache = Cache::builder()
168
+
/// // Time to live (TTL): 30 minutes
169
+
/// .time_to_live(Duration::from_secs(30 * 60))
170
+
/// // Time to idle (TTI): 5 minutes
171
+
/// .time_to_idle(Duration::from_secs( 5 * 60))
172
+
/// // Create the cache.
173
+
/// .build();
174
+
///
175
+
/// // This entry will expire after 5 minutes (TTI) if there is no get().
176
+
/// cache.insert(0, "zero");
177
+
///
178
+
/// // This get() will extend the entry life for another 5 minutes.
179
+
/// cache.get(&0);
180
+
///
181
+
/// // Even though we keep calling get(), the entry will expire
182
+
/// // after 30 minutes (TTL) from the insert().
183
+
/// ```
184
+
///
185
+
/// # Thread Safety
186
+
///
187
+
/// All methods provided by the `Cache` are considered thread-safe, and can be safely
188
+
/// accessed by multiple concurrent threads.
189
+
///
190
+
/// - `Cache<K, V, S>` requires trait bounds `Send`, `Sync` and `'static` for `K`
191
+
/// (key), `V` (value) and `S` (hasher state).
192
+
/// - `Cache<K, V, S>` will implement `Send` and `Sync`.
193
+
///
194
+
/// # Sharing a cache across threads
195
+
///
196
+
/// To share a cache across threads, do one of the followings:
197
+
///
198
+
/// - Create a clone of the cache by calling its `clone` method and pass it to other
199
+
/// thread.
200
+
/// - Wrap the cache by a `sync::OnceCell` or `sync::Lazy` from
201
+
/// [once_cell][once-cell-crate] crate, and set it to a `static` variable.
202
+
///
203
+
/// Cloning is a cheap operation for `Cache` as it only creates thread-safe
204
+
/// reference-counted pointers to the internal data structures.
205
+
///
206
+
/// [once-cell-crate]: https://crates.io/crates/once_cell
207
+
///
208
+
/// # Hashing Algorithm
209
+
///
210
+
/// By default, `Cache` uses a hashing algorithm selected to provide resistance
211
+
/// against HashDoS attacks. It will be the same one used by
212
+
/// `std::collections::HashMap`, which is currently SipHash 1-3.
213
+
///
214
+
/// While SipHash's performance is very competitive for medium sized keys, other
215
+
/// hashing algorithms will outperform it for small keys such as integers as well as
216
+
/// large keys such as long strings. However those algorithms will typically not
217
+
/// protect against attacks such as HashDoS.
218
+
///
219
+
/// The hashing algorithm can be replaced on a per-`Cache` basis using the
220
+
/// [`build_with_hasher`][build-with-hasher-method] method of the
221
+
/// `CacheBuilder`. Many alternative algorithms are available on crates.io, such
222
+
/// as the [aHash][ahash-crate] crate.
223
+
///
224
+
/// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher
225
+
/// [ahash-crate]: https://crates.io/crates/ahash
226
+
///
227
+
pub struct Cache<K, V, S = RandomState> {
    // All shared state lives in `BaseCache`; `Cache` itself is a thin handle,
    // which is why `clone` is cheap (see the `Clone` impl below).
    base: BaseCache<K, V, S>,
}
230
+
231
+
// TODO: https://github.com/moka-rs/moka/issues/54
#[allow(clippy::non_send_fields_in_send_ty)]
// SAFETY(review): mirrors upstream moka — asserts that `BaseCache`'s internals
// are safe to move across threads when `K`/`V` are `Send + Sync` and `S: Send`.
// See the linked issue above; confirm against `BaseCache`'s field types.
unsafe impl<K, V, S> Send for Cache<K, V, S>
where
    K: Send + Sync,
    V: Send + Sync,
    S: Send,
{
}
240
+
241
+
// SAFETY(review): mirrors upstream moka — asserts that sharing `&Cache` across
// threads is sound when `K`/`V` are `Send + Sync` and `S: Sync`. Confirm
// against `BaseCache`'s field types.
unsafe impl<K, V, S> Sync for Cache<K, V, S>
where
    K: Send + Sync,
    V: Send + Sync,
    S: Sync,
{
}
248
+
249
+
// NOTE: We cannot do `#[derive(Clone)]` because it will add `Clone` bound to `K`.
250
+
impl<K, V, S> Clone for Cache<K, V, S> {
251
+
/// Makes a clone of this shared cache.
252
+
///
253
+
/// This operation is cheap as it only creates thread-safe reference counted
254
+
/// pointers to the shared internal data structures.
255
+
fn clone(&self) -> Self {
256
+
Self {
257
+
base: self.base.clone(),
258
+
}
259
+
}
260
+
}
261
+
262
+
impl<K, V, S> fmt::Debug for Cache<K, V, S>
263
+
where
264
+
K: Eq + Hash + fmt::Debug,
265
+
V: fmt::Debug,
266
+
S: BuildHasher + Clone,
267
+
{
268
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
269
+
let mut d_map = f.debug_map();
270
+
271
+
for r in self.iter() {
272
+
let (k, v) = r.pair();
273
+
d_map.entry(k, v);
274
+
}
275
+
276
+
d_map.finish()
277
+
}
278
+
}
279
+
280
+
impl<K, V> Cache<K, V, RandomState>
281
+
where
282
+
K: Hash + Eq + Send + Sync + 'static,
283
+
V: Clone + Send + Sync + 'static,
284
+
{
285
+
/// Constructs a new `Cache<K, V>` that will store up to the `max_capacity`.
286
+
///
287
+
/// To adjust various configuration knobs such as `initial_capacity` or
288
+
/// `time_to_live`, use the [`CacheBuilder`][builder-struct].
289
+
///
290
+
/// [builder-struct]: ./struct.CacheBuilder.html
291
+
pub fn new(max_capacity: u64) -> Self {
292
+
let build_hasher = RandomState::default();
293
+
Self::with_everything(Some(max_capacity), None, build_hasher, None, None, None)
294
+
}
295
+
296
+
/// Returns a [`CacheBuilder`][builder-struct], which can builds a `Cache` with
297
+
/// various configuration knobs.
298
+
///
299
+
/// [builder-struct]: ./struct.CacheBuilder.html
300
+
pub fn builder() -> CacheBuilder<K, V, Cache<K, V, RandomState>> {
301
+
CacheBuilder::default()
302
+
}
303
+
}
304
+
305
+
impl<K, V, S> Cache<K, V, S> {
    /// Returns a read-only cache policy of this cache.
    ///
    /// At this time, cache policy cannot be modified after cache creation.
    /// A future version may support to modify it.
    pub fn policy(&self) -> Policy {
        self.base.policy()
    }

    /// Returns an approximate number of entries in this cache.
    ///
    /// The value returned is _an estimate_; the actual count may differ if there are
    /// concurrent insertions or removals, or if some entries are pending removal due
    /// to expiration. This inaccuracy can be mitigated by performing a `sync()`
    /// first.
    ///
    /// # Example
    ///
    /// ```rust
    /// use mini_moka::sync::Cache;
    ///
    /// let cache = Cache::new(10);
    /// cache.insert('n', "Netherland Dwarf");
    /// cache.insert('l', "Lop Eared");
    /// cache.insert('d', "Dutch");
    ///
    /// // Ensure an entry exists.
    /// assert!(cache.contains_key(&'n'));
    ///
    /// // However, followings may print stale number zeros instead of threes.
    /// println!("{}", cache.entry_count());   // -> 0
    /// println!("{}", cache.weighted_size()); // -> 0
    ///
    /// // To mitigate the inaccuracy, bring `ConcurrentCacheExt` trait to
    /// // the scope so we can use `sync` method.
    /// use mini_moka::sync::ConcurrentCacheExt;
    /// // Call `sync` to run pending internal tasks.
    /// cache.sync();
    ///
    /// // Followings will print the actual numbers.
    /// println!("{}", cache.entry_count());   // -> 3
    /// println!("{}", cache.weighted_size()); // -> 3
    /// ```
    ///
    pub fn entry_count(&self) -> u64 {
        self.base.entry_count()
    }

    /// Returns an approximate total weighted size of entries in this cache.
    ///
    /// The value returned is _an estimate_; the actual size may differ if there are
    /// concurrent insertions or removals, or if some entries are pending removal due
    /// to expiration. This inaccuracy can be mitigated by performing a `sync()`
    /// first. See [`entry_count`](#method.entry_count) for a sample code.
    pub fn weighted_size(&self) -> u64 {
        self.base.weighted_size()
    }
}
363
+
364
+
impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Clone + Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    // Crate-internal constructor; the public `new` and the builder's `build*`
    // methods all funnel through here.
    pub(crate) fn with_everything(
        max_capacity: Option<u64>,
        initial_capacity: Option<usize>,
        build_hasher: S,
        weigher: Option<Weigher<K, V>>,
        time_to_live: Option<Duration>,
        time_to_idle: Option<Duration>,
    ) -> Self {
        Self {
            base: BaseCache::new(
                max_capacity,
                initial_capacity,
                build_hasher,
                weigher,
                time_to_live,
                time_to_idle,
            ),
        }
    }

    /// Returns `true` if the cache contains a value for the key.
    ///
    /// Unlike the `get` method, this method is not considered a cache read operation,
    /// so it does not update the historic popularity estimator or reset the idle
    /// timer for the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
    /// on the borrowed form _must_ match those for the key type.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        Arc<K>: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        self.base.contains_key(key)
    }

    /// Returns a _clone_ of the value corresponding to the key.
    ///
    /// If you want to store values that will be expensive to clone, wrap them by
    /// `std::sync::Arc` before storing in a cache. [`Arc`][rustdoc-std-arc] is a
    /// thread-safe reference-counted pointer and its `clone()` method is cheap.
    ///
    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
    /// on the borrowed form _must_ match those for the key type.
    ///
    /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        Arc<K>: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        self.base.get_with_hash(key, self.base.hash(key))
    }

    /// Deprecated, replaced with [`get`](#method.get)
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "Replaced with `get`")]
    pub fn get_if_present<Q>(&self, key: &Q) -> Option<V>
    where
        Arc<K>: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        self.get(key)
    }

    /// Inserts a key-value pair into the cache.
    ///
    /// If the cache has this key present, the value is updated.
    pub fn insert(&self, key: K, value: V) {
        let hash = self.base.hash(&key);
        let key = Arc::new(key);
        self.insert_with_hash(key, hash, value)
    }

    // Records the insert in the base cache, then queues the resulting write op
    // so the housekeeper can update the eviction policy data structures.
    pub(crate) fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
        let (op, now) = self.base.do_insert_with_hash(key, hash, value);
        let hk = self.base.housekeeper.as_ref();
        Self::schedule_write_op(
            self.base.inner.as_ref(),
            &self.base.write_op_ch,
            op,
            now,
            hk,
        )
        .expect("Failed to insert");
    }

    /// Discards any cached value for the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
    /// on the borrowed form _must_ match those for the key type.
    pub fn invalidate<Q>(&self, key: &Q)
    where
        Arc<K>: Borrow<Q>,
        Q: Hash + Eq + ?Sized,
    {
        // Remove from the map first, then queue a `Remove` op so the policy
        // deques are updated by the housekeeper.
        if let Some(kv) = self.base.remove_entry(key) {
            let op = WriteOp::Remove(kv);
            let now = self.base.current_time_from_expiration_clock();
            let hk = self.base.housekeeper.as_ref();
            Self::schedule_write_op(
                self.base.inner.as_ref(),
                &self.base.write_op_ch,
                op,
                now,
                hk,
            )
            .expect("Failed to remove");
        }
    }

    /// Discards all cached values.
    ///
    /// This method returns immediately and a background thread will evict all the
    /// cached values inserted before the time when this method was called. It is
    /// guaranteed that the `get` method must not return these invalidated values
    /// even if they have not been evicted.
    ///
    /// Like the `invalidate` method, this method does not clear the historic
    /// popularity estimator of keys so that it retains the client activities of
    /// trying to retrieve an item.
    pub fn invalidate_all(&self) {
        self.base.invalidate_all();
    }
}
495
+
496
+
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
// This seems a false positive. The lifetimes are used in the trait bounds.
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
#[allow(clippy::extra_unused_lifetimes)]
impl<'a, K, V, S> Cache<K, V, S>
where
    K: 'a + Eq + Hash,
    V: 'a,
    S: BuildHasher + Clone,
{
    /// Creates an iterator visiting all key-value pairs in arbitrary order. The
    /// iterator element type is [`EntryRef<'a, K, V, S>`][moka-entry-ref].
    ///
    /// Unlike the `get` method, visiting entries via an iterator does not update the
    /// historic popularity estimator or reset idle timers for keys.
    ///
    /// # Locking behavior
    ///
    /// This iterator relies on the iterator of [`dashmap::DashMap`][dashmap-iter],
    /// which employs read-write locks. May deadlock if the thread holding an
    /// iterator attempts to update the cache.
    ///
    /// [moka-entry-ref]: ./struct.EntryRef.html
    /// [dashmap-iter]: <https://docs.rs/dashmap/*/dashmap/struct.DashMap.html#method.iter>
    ///
    /// # Examples
    ///
    /// ```rust
    /// use mini_moka::sync::Cache;
    ///
    /// let cache = Cache::new(100);
    /// cache.insert("Julia", 14);
    ///
    /// let mut iter = cache.iter();
    /// let entry_ref = iter.next().unwrap();
    /// assert_eq!(entry_ref.pair(), (&"Julia", &14));
    /// assert_eq!(entry_ref.key(), &"Julia");
    /// assert_eq!(entry_ref.value(), &14);
    /// assert_eq!(*entry_ref, 14);
    ///
    /// assert!(iter.next().is_none());
    /// ```
    ///
    pub fn iter(&self) -> Iter<'_, K, V, S> {
        self.base.iter()
    }
}
543
+
544
+
impl<K, V, S> ConcurrentCacheExt<K, V> for Cache<K, V, S>
where
    K: Hash + Eq + Send + Sync + 'static,
    V: Send + Sync + 'static,
    S: BuildHasher + Clone + Send + Sync + 'static,
{
    /// Runs pending internal maintenance tasks synchronously by delegating to
    /// the inner cache's `sync`, repeating at most `MAX_SYNC_REPEATS` times.
    fn sync(&self) {
        self.base.inner.sync(MAX_SYNC_REPEATS);
    }
}
554
+
555
+
impl<'a, K, V, S> IntoIterator for &'a Cache<K, V, S>
556
+
where
557
+
K: 'a + Eq + Hash,
558
+
V: 'a,
559
+
S: BuildHasher + Clone,
560
+
{
561
+
type Item = EntryRef<'a, K, V>;
562
+
563
+
type IntoIter = Iter<'a, K, V, S>;
564
+
565
+
fn into_iter(self) -> Self::IntoIter {
566
+
self.iter()
567
+
}
568
+
}
569
+
570
+
// private methods
571
+
impl<K, V, S> Cache<K, V, S>
572
+
where
573
+
K: Hash + Eq + Send + Sync + 'static,
574
+
V: Clone + Send + Sync + 'static,
575
+
S: BuildHasher + Clone + Send + Sync + 'static,
576
+
{
577
+
#[inline]
578
+
fn schedule_write_op(
579
+
inner: &impl InnerSync,
580
+
ch: &Sender<WriteOp<K, V>>,
581
+
op: WriteOp<K, V>,
582
+
now: Instant,
583
+
housekeeper: Option<&Arc<Housekeeper>>,
584
+
) -> Result<(), TrySendError<WriteOp<K, V>>> {
585
+
let mut op = op;
586
+
587
+
// NOTES:
588
+
// - This will block when the channel is full.
589
+
// - We are doing a busy-loop here. We were originally calling `ch.send(op)?`,
590
+
// but we got a notable performance degradation.
591
+
loop {
592
+
BaseCache::<K, V, S>::apply_reads_writes_if_needed(inner, ch, now, housekeeper);
593
+
match ch.try_send(op) {
594
+
Ok(()) => break,
595
+
Err(TrySendError::Full(op1)) => {
596
+
op = op1;
597
+
std::thread::sleep(Duration::from_micros(WRITE_RETRY_INTERVAL_MICROS));
598
+
}
599
+
Err(e @ TrySendError::Disconnected(_)) => return Err(e),
600
+
}
601
+
}
602
+
Ok(())
603
+
}
604
+
}
605
+
606
+
// For unit tests.
607
+
#[cfg(test)]
608
+
impl<K, V, S> Cache<K, V, S>
609
+
where
610
+
K: Hash + Eq + Send + Sync + 'static,
611
+
V: Clone + Send + Sync + 'static,
612
+
S: BuildHasher + Clone + Send + Sync + 'static,
613
+
{
614
+
pub(crate) fn is_table_empty(&self) -> bool {
615
+
self.entry_count() == 0
616
+
}
617
+
618
+
pub(crate) fn reconfigure_for_testing(&mut self) {
619
+
self.base.reconfigure_for_testing();
620
+
}
621
+
622
+
pub(crate) fn set_expiration_clock(&self, clock: Option<crate::common::time::Clock>) {
623
+
self.base.set_expiration_clock(clock);
624
+
}
625
+
}
626
+
627
+
// To see the debug prints, run test as `cargo test -- --nocapture`
628
+
#[cfg(test)]
629
+
mod tests {
630
+
use super::{Cache, ConcurrentCacheExt};
631
+
use crate::common::time::Clock;
632
+
633
+
use std::{sync::Arc, time::Duration};
634
+
635
+
#[test]
636
+
fn basic_single_thread() {
637
+
let mut cache = Cache::new(3);
638
+
cache.reconfigure_for_testing();
639
+
640
+
// Make the cache exterior immutable.
641
+
let cache = cache;
642
+
643
+
cache.insert("a", "alice");
644
+
cache.insert("b", "bob");
645
+
assert_eq!(cache.get(&"a"), Some("alice"));
646
+
assert!(cache.contains_key(&"a"));
647
+
assert!(cache.contains_key(&"b"));
648
+
assert_eq!(cache.get(&"b"), Some("bob"));
649
+
cache.sync();
650
+
// counts: a -> 1, b -> 1
651
+
652
+
cache.insert("c", "cindy");
653
+
assert_eq!(cache.get(&"c"), Some("cindy"));
654
+
assert!(cache.contains_key(&"c"));
655
+
// counts: a -> 1, b -> 1, c -> 1
656
+
cache.sync();
657
+
658
+
assert!(cache.contains_key(&"a"));
659
+
assert_eq!(cache.get(&"a"), Some("alice"));
660
+
assert_eq!(cache.get(&"b"), Some("bob"));
661
+
assert!(cache.contains_key(&"b"));
662
+
cache.sync();
663
+
// counts: a -> 2, b -> 2, c -> 1
664
+
665
+
// "d" should not be admitted because its frequency is too low.
666
+
cache.insert("d", "david"); // count: d -> 0
667
+
cache.sync();
668
+
assert_eq!(cache.get(&"d"), None); // d -> 1
669
+
assert!(!cache.contains_key(&"d"));
670
+
671
+
cache.insert("d", "david");
672
+
cache.sync();
673
+
assert!(!cache.contains_key(&"d"));
674
+
assert_eq!(cache.get(&"d"), None); // d -> 2
675
+
676
+
// "d" should be admitted and "c" should be evicted
677
+
// because d's frequency is higher than c's.
678
+
cache.insert("d", "dennis");
679
+
cache.sync();
680
+
assert_eq!(cache.get(&"a"), Some("alice"));
681
+
assert_eq!(cache.get(&"b"), Some("bob"));
682
+
assert_eq!(cache.get(&"c"), None);
683
+
assert_eq!(cache.get(&"d"), Some("dennis"));
684
+
assert!(cache.contains_key(&"a"));
685
+
assert!(cache.contains_key(&"b"));
686
+
assert!(!cache.contains_key(&"c"));
687
+
assert!(cache.contains_key(&"d"));
688
+
689
+
cache.invalidate(&"b");
690
+
assert_eq!(cache.get(&"b"), None);
691
+
assert!(!cache.contains_key(&"b"));
692
+
}
693
+
694
+
#[test]
695
+
fn size_aware_eviction() {
696
+
let weigher = |_k: &&str, v: &(&str, u32)| v.1;
697
+
698
+
let alice = ("alice", 10);
699
+
let bob = ("bob", 15);
700
+
let bill = ("bill", 20);
701
+
let cindy = ("cindy", 5);
702
+
let david = ("david", 15);
703
+
let dennis = ("dennis", 15);
704
+
705
+
let mut cache = Cache::builder().max_capacity(31).weigher(weigher).build();
706
+
cache.reconfigure_for_testing();
707
+
708
+
// Make the cache exterior immutable.
709
+
let cache = cache;
710
+
711
+
cache.insert("a", alice);
712
+
cache.insert("b", bob);
713
+
assert_eq!(cache.get(&"a"), Some(alice));
714
+
assert!(cache.contains_key(&"a"));
715
+
assert!(cache.contains_key(&"b"));
716
+
assert_eq!(cache.get(&"b"), Some(bob));
717
+
cache.sync();
718
+
// order (LRU -> MRU) and counts: a -> 1, b -> 1
719
+
720
+
cache.insert("c", cindy);
721
+
assert_eq!(cache.get(&"c"), Some(cindy));
722
+
assert!(cache.contains_key(&"c"));
723
+
// order and counts: a -> 1, b -> 1, c -> 1
724
+
cache.sync();
725
+
726
+
assert!(cache.contains_key(&"a"));
727
+
assert_eq!(cache.get(&"a"), Some(alice));
728
+
assert_eq!(cache.get(&"b"), Some(bob));
729
+
assert!(cache.contains_key(&"b"));
730
+
cache.sync();
731
+
// order and counts: c -> 1, a -> 2, b -> 2
732
+
733
+
// To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10).
734
+
// "d" must have higher count than 3, which is the aggregated count
735
+
// of "a" and "c".
736
+
cache.insert("d", david); // count: d -> 0
737
+
cache.sync();
738
+
assert_eq!(cache.get(&"d"), None); // d -> 1
739
+
assert!(!cache.contains_key(&"d"));
740
+
741
+
cache.insert("d", david);
742
+
cache.sync();
743
+
assert!(!cache.contains_key(&"d"));
744
+
assert_eq!(cache.get(&"d"), None); // d -> 2
745
+
746
+
cache.insert("d", david);
747
+
cache.sync();
748
+
assert_eq!(cache.get(&"d"), None); // d -> 3
749
+
assert!(!cache.contains_key(&"d"));
750
+
751
+
cache.insert("d", david);
752
+
cache.sync();
753
+
assert!(!cache.contains_key(&"d"));
754
+
assert_eq!(cache.get(&"d"), None); // d -> 4
755
+
756
+
// Finally "d" should be admitted by evicting "c" and "a".
757
+
cache.insert("d", dennis);
758
+
cache.sync();
759
+
assert_eq!(cache.get(&"a"), None);
760
+
assert_eq!(cache.get(&"b"), Some(bob));
761
+
assert_eq!(cache.get(&"c"), None);
762
+
assert_eq!(cache.get(&"d"), Some(dennis));
763
+
assert!(!cache.contains_key(&"a"));
764
+
assert!(cache.contains_key(&"b"));
765
+
assert!(!cache.contains_key(&"c"));
766
+
assert!(cache.contains_key(&"d"));
767
+
768
+
// Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15).
769
+
cache.insert("b", bill);
770
+
cache.sync();
771
+
assert_eq!(cache.get(&"b"), Some(bill));
772
+
assert_eq!(cache.get(&"d"), None);
773
+
assert!(cache.contains_key(&"b"));
774
+
assert!(!cache.contains_key(&"d"));
775
+
776
+
// Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15).
777
+
cache.insert("a", alice);
778
+
cache.insert("b", bob);
779
+
cache.sync();
780
+
assert_eq!(cache.get(&"a"), Some(alice));
781
+
assert_eq!(cache.get(&"b"), Some(bob));
782
+
assert_eq!(cache.get(&"d"), None);
783
+
assert!(cache.contains_key(&"a"));
784
+
assert!(cache.contains_key(&"b"));
785
+
assert!(!cache.contains_key(&"d"));
786
+
787
+
// Verify the sizes.
788
+
assert_eq!(cache.entry_count(), 2);
789
+
assert_eq!(cache.weighted_size(), 25);
790
+
}
791
+
792
+
#[test]
793
+
fn basic_multi_threads() {
794
+
let num_threads = 4;
795
+
let cache = Cache::new(100);
796
+
797
+
// https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect
798
+
#[allow(clippy::needless_collect)]
799
+
let handles = (0..num_threads)
800
+
.map(|id| {
801
+
let cache = cache.clone();
802
+
std::thread::spawn(move || {
803
+
cache.insert(10, format!("{}-100", id));
804
+
cache.get(&10);
805
+
cache.insert(20, format!("{}-200", id));
806
+
cache.invalidate(&10);
807
+
})
808
+
})
809
+
.collect::<Vec<_>>();
810
+
811
+
handles.into_iter().for_each(|h| h.join().expect("Failed"));
812
+
813
+
assert!(cache.get(&10).is_none());
814
+
assert!(cache.get(&20).is_some());
815
+
assert!(!cache.contains_key(&10));
816
+
assert!(cache.contains_key(&20));
817
+
}
818
+
819
+
#[test]
820
+
fn invalidate_all() {
821
+
let mut cache = Cache::new(100);
822
+
cache.reconfigure_for_testing();
823
+
824
+
// Make the cache exterior immutable.
825
+
let cache = cache;
826
+
827
+
cache.insert("a", "alice");
828
+
cache.insert("b", "bob");
829
+
cache.insert("c", "cindy");
830
+
assert_eq!(cache.get(&"a"), Some("alice"));
831
+
assert_eq!(cache.get(&"b"), Some("bob"));
832
+
assert_eq!(cache.get(&"c"), Some("cindy"));
833
+
assert!(cache.contains_key(&"a"));
834
+
assert!(cache.contains_key(&"b"));
835
+
assert!(cache.contains_key(&"c"));
836
+
837
+
// `cache.sync()` is no longer needed here before invalidating. The last
838
+
// modified timestamp of the entries were updated when they were inserted.
839
+
// https://github.com/moka-rs/moka/issues/155
840
+
841
+
cache.invalidate_all();
842
+
cache.sync();
843
+
844
+
cache.insert("d", "david");
845
+
cache.sync();
846
+
847
+
assert!(cache.get(&"a").is_none());
848
+
assert!(cache.get(&"b").is_none());
849
+
assert!(cache.get(&"c").is_none());
850
+
assert_eq!(cache.get(&"d"), Some("david"));
851
+
assert!(!cache.contains_key(&"a"));
852
+
assert!(!cache.contains_key(&"b"));
853
+
assert!(!cache.contains_key(&"c"));
854
+
assert!(cache.contains_key(&"d"));
855
+
}
856
+
857
+
#[test]
858
+
fn time_to_live() {
859
+
let mut cache = Cache::builder()
860
+
.max_capacity(100)
861
+
.time_to_live(Duration::from_secs(10))
862
+
.build();
863
+
864
+
cache.reconfigure_for_testing();
865
+
866
+
let (clock, mock) = Clock::mock();
867
+
cache.set_expiration_clock(Some(clock));
868
+
869
+
// Make the cache exterior immutable.
870
+
let cache = cache;
871
+
872
+
cache.insert("a", "alice");
873
+
cache.sync();
874
+
875
+
mock.increment(Duration::from_secs(5)); // 5 secs from the start.
876
+
cache.sync();
877
+
878
+
assert_eq!(cache.get(&"a"), Some("alice"));
879
+
assert!(cache.contains_key(&"a"));
880
+
881
+
mock.increment(Duration::from_secs(5)); // 10 secs.
882
+
assert_eq!(cache.get(&"a"), None);
883
+
assert!(!cache.contains_key(&"a"));
884
+
885
+
assert_eq!(cache.iter().count(), 0);
886
+
887
+
cache.sync();
888
+
assert!(cache.is_table_empty());
889
+
890
+
cache.insert("b", "bob");
891
+
cache.sync();
892
+
893
+
assert_eq!(cache.entry_count(), 1);
894
+
895
+
mock.increment(Duration::from_secs(5)); // 15 secs.
896
+
cache.sync();
897
+
898
+
assert_eq!(cache.get(&"b"), Some("bob"));
899
+
assert!(cache.contains_key(&"b"));
900
+
assert_eq!(cache.entry_count(), 1);
901
+
902
+
cache.insert("b", "bill");
903
+
cache.sync();
904
+
905
+
mock.increment(Duration::from_secs(5)); // 20 secs
906
+
cache.sync();
907
+
908
+
assert_eq!(cache.get(&"b"), Some("bill"));
909
+
assert!(cache.contains_key(&"b"));
910
+
assert_eq!(cache.entry_count(), 1);
911
+
912
+
mock.increment(Duration::from_secs(5)); // 25 secs
913
+
assert_eq!(cache.get(&"a"), None);
914
+
assert_eq!(cache.get(&"b"), None);
915
+
assert!(!cache.contains_key(&"a"));
916
+
assert!(!cache.contains_key(&"b"));
917
+
918
+
assert_eq!(cache.iter().count(), 0);
919
+
920
+
cache.sync();
921
+
assert!(cache.is_table_empty());
922
+
}
923
+
924
+
#[test]
925
+
fn time_to_idle() {
926
+
let mut cache = Cache::builder()
927
+
.max_capacity(100)
928
+
.time_to_idle(Duration::from_secs(10))
929
+
.build();
930
+
931
+
cache.reconfigure_for_testing();
932
+
933
+
let (clock, mock) = Clock::mock();
934
+
cache.set_expiration_clock(Some(clock));
935
+
936
+
// Make the cache exterior immutable.
937
+
let cache = cache;
938
+
939
+
cache.insert("a", "alice");
940
+
cache.sync();
941
+
942
+
mock.increment(Duration::from_secs(5)); // 5 secs from the start.
943
+
cache.sync();
944
+
945
+
assert_eq!(cache.get(&"a"), Some("alice"));
946
+
947
+
mock.increment(Duration::from_secs(5)); // 10 secs.
948
+
cache.sync();
949
+
950
+
cache.insert("b", "bob");
951
+
cache.sync();
952
+
953
+
assert_eq!(cache.entry_count(), 2);
954
+
955
+
mock.increment(Duration::from_secs(2)); // 12 secs.
956
+
cache.sync();
957
+
958
+
// contains_key does not reset the idle timer for the key.
959
+
assert!(cache.contains_key(&"a"));
960
+
assert!(cache.contains_key(&"b"));
961
+
cache.sync();
962
+
963
+
assert_eq!(cache.entry_count(), 2);
964
+
965
+
mock.increment(Duration::from_secs(3)); // 15 secs.
966
+
assert_eq!(cache.get(&"a"), None);
967
+
assert_eq!(cache.get(&"b"), Some("bob"));
968
+
assert!(!cache.contains_key(&"a"));
969
+
assert!(cache.contains_key(&"b"));
970
+
971
+
assert_eq!(cache.iter().count(), 1);
972
+
973
+
cache.sync();
974
+
assert_eq!(cache.entry_count(), 1);
975
+
976
+
mock.increment(Duration::from_secs(10)); // 25 secs
977
+
assert_eq!(cache.get(&"a"), None);
978
+
assert_eq!(cache.get(&"b"), None);
979
+
assert!(!cache.contains_key(&"a"));
980
+
assert!(!cache.contains_key(&"b"));
981
+
982
+
assert_eq!(cache.iter().count(), 0);
983
+
984
+
cache.sync();
985
+
assert!(cache.is_table_empty());
986
+
}
987
+
988
+
#[test]
989
+
fn test_iter() {
990
+
const NUM_KEYS: usize = 50;
991
+
992
+
fn make_value(key: usize) -> String {
993
+
format!("val: {}", key)
994
+
}
995
+
996
+
let cache = Cache::builder()
997
+
.max_capacity(100)
998
+
.time_to_idle(Duration::from_secs(10))
999
+
.build();
1000
+
1001
+
for key in 0..NUM_KEYS {
1002
+
cache.insert(key, make_value(key));
1003
+
}
1004
+
1005
+
let mut key_set = std::collections::HashSet::new();
1006
+
1007
+
for entry in &cache {
1008
+
let (key, value) = entry.pair();
1009
+
assert_eq!(value, &make_value(*key));
1010
+
1011
+
key_set.insert(*key);
1012
+
}
1013
+
1014
+
// Ensure there are no missing or duplicate keys in the iteration.
1015
+
assert_eq!(key_set.len(), NUM_KEYS);
1016
+
1017
+
// DO NOT REMOVE THE COMMENT FROM THIS BLOCK.
1018
+
// This block demonstrates how you can write a code to get a deadlock.
1019
+
// {
1020
+
// let mut iter = cache.iter();
1021
+
// let _ = iter.next();
1022
+
1023
+
// for key in 0..NUM_KEYS {
1024
+
// cache.insert(key, make_value(key));
1025
+
// println!("{}", key);
1026
+
// }
1027
+
1028
+
// let _ = iter.next();
1029
+
// }
1030
+
}
1031
+
1032
+
/// Runs 16 threads at the same time and ensures no deadlock occurs.
1033
+
///
1034
+
/// - Eight of the threads will update key-values in the cache.
1035
+
/// - Eight others will iterate the cache.
1036
+
///
1037
+
#[test]
1038
+
fn test_iter_multi_threads() {
1039
+
use std::collections::HashSet;
1040
+
1041
+
const NUM_KEYS: usize = 1024;
1042
+
const NUM_THREADS: usize = 16;
1043
+
1044
+
fn make_value(key: usize) -> String {
1045
+
format!("val: {}", key)
1046
+
}
1047
+
1048
+
let cache = Cache::builder()
1049
+
.max_capacity(2048)
1050
+
.time_to_idle(Duration::from_secs(10))
1051
+
.build();
1052
+
1053
+
// Initialize the cache.
1054
+
for key in 0..NUM_KEYS {
1055
+
cache.insert(key, make_value(key));
1056
+
}
1057
+
1058
+
let rw_lock = Arc::new(std::sync::RwLock::<()>::default());
1059
+
let write_lock = rw_lock.write().unwrap();
1060
+
1061
+
// https://rust-lang.github.io/rust-clippy/master/index.html#needless_collect
1062
+
#[allow(clippy::needless_collect)]
1063
+
let handles = (0..NUM_THREADS)
1064
+
.map(|n| {
1065
+
let cache = cache.clone();
1066
+
let rw_lock = Arc::clone(&rw_lock);
1067
+
1068
+
if n % 2 == 0 {
1069
+
// This thread will update the cache.
1070
+
std::thread::spawn(move || {
1071
+
let read_lock = rw_lock.read().unwrap();
1072
+
for key in 0..NUM_KEYS {
1073
+
// TODO: Update keys in a random order?
1074
+
cache.insert(key, make_value(key));
1075
+
}
1076
+
std::mem::drop(read_lock);
1077
+
})
1078
+
} else {
1079
+
// This thread will iterate the cache.
1080
+
std::thread::spawn(move || {
1081
+
let read_lock = rw_lock.read().unwrap();
1082
+
let mut key_set = HashSet::new();
1083
+
for entry in &cache {
1084
+
let (key, value) = entry.pair();
1085
+
assert_eq!(value, &make_value(*key));
1086
+
key_set.insert(*key);
1087
+
}
1088
+
// Ensure there are no missing or duplicate keys in the iteration.
1089
+
assert_eq!(key_set.len(), NUM_KEYS);
1090
+
std::mem::drop(read_lock);
1091
+
})
1092
+
}
1093
+
})
1094
+
.collect::<Vec<_>>();
1095
+
1096
+
// Let these threads to run by releasing the write lock.
1097
+
std::mem::drop(write_lock);
1098
+
1099
+
handles.into_iter().for_each(|h| h.join().expect("Failed"));
1100
+
1101
+
// Ensure there are no missing or duplicate keys in the iteration.
1102
+
let key_set = cache.iter().map(|ent| *ent.key()).collect::<HashSet<_>>();
1103
+
assert_eq!(key_set.len(), NUM_KEYS);
1104
+
}
1105
+
1106
+
#[test]
1107
+
fn test_debug_format() {
1108
+
let cache = Cache::new(10);
1109
+
cache.insert('a', "alice");
1110
+
cache.insert('b', "bob");
1111
+
cache.insert('c', "cindy");
1112
+
1113
+
let debug_str = format!("{:?}", cache);
1114
+
assert!(debug_str.starts_with('{'));
1115
+
assert!(debug_str.contains(r#"'a': "alice""#));
1116
+
assert!(debug_str.contains(r#"'b': "bob""#));
1117
+
assert!(debug_str.contains(r#"'c': "cindy""#));
1118
+
assert!(debug_str.ends_with('}'));
1119
+
}
1120
+
}
+64
crates/mini-moka-vendored/src/sync/iter.rs
+64
crates/mini-moka-vendored/src/sync/iter.rs
···
1
+
use super::{base_cache::BaseCache, mapref::EntryRef};
2
+
use crate::common::concurrent::ValueEntry;
3
+
4
+
use std::{
5
+
hash::{BuildHasher, Hash},
6
+
sync::Arc,
7
+
};
8
+
use triomphe::Arc as TrioArc;
9
+
10
+
pub(crate) type DashMapIter<'a, K, V, S> =
11
+
dashmap::iter::Iter<'a, Arc<K>, TrioArc<ValueEntry<K, V>>, S>;
12
+
13
+
pub struct Iter<'a, K, V, S> {
14
+
cache: &'a BaseCache<K, V, S>,
15
+
map_iter: DashMapIter<'a, K, V, S>,
16
+
}
17
+
18
+
impl<'a, K, V, S> Iter<'a, K, V, S> {
19
+
pub(crate) fn new(cache: &'a BaseCache<K, V, S>, map_iter: DashMapIter<'a, K, V, S>) -> Self {
20
+
Self { cache, map_iter }
21
+
}
22
+
}
23
+
24
+
impl<'a, K, V, S> Iterator for Iter<'a, K, V, S>
25
+
where
26
+
K: Eq + Hash,
27
+
S: BuildHasher + Clone,
28
+
{
29
+
type Item = EntryRef<'a, K, V>;
30
+
31
+
fn next(&mut self) -> Option<Self::Item> {
32
+
for map_ref in &mut self.map_iter {
33
+
if !self.cache.is_expired_entry(map_ref.value()) {
34
+
return Some(EntryRef::new(map_ref));
35
+
}
36
+
}
37
+
38
+
None
39
+
}
40
+
}
41
+
42
+
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
43
+
// This seems a false positive. The lifetimes are used in the trait bounds.
44
+
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
45
+
#[allow(clippy::extra_unused_lifetimes)]
46
+
unsafe impl<'a, K, V, S> Send for Iter<'_, K, V, S>
47
+
where
48
+
K: 'a + Eq + Hash + Send,
49
+
V: 'a + Send,
50
+
S: 'a + BuildHasher + Clone,
51
+
{
52
+
}
53
+
54
+
// Clippy beta 0.1.83 (f41c7ed9889 2024-10-31) warns about unused lifetimes on 'a.
55
+
// This seems a false positive. The lifetimes are used in the trait bounds.
56
+
// https://rust-lang.github.io/rust-clippy/master/index.html#extra_unused_lifetimes
57
+
#[allow(clippy::extra_unused_lifetimes)]
58
+
unsafe impl<'a, K, V, S> Sync for Iter<'_, K, V, S>
59
+
where
60
+
K: 'a + Eq + Hash + Sync,
61
+
V: 'a + Sync,
62
+
S: 'a + BuildHasher + Clone,
63
+
{
64
+
}
+48
crates/mini-moka-vendored/src/sync/mapref.rs
+48
crates/mini-moka-vendored/src/sync/mapref.rs
···
1
+
use crate::common::concurrent::ValueEntry;
2
+
3
+
use std::{hash::Hash, sync::Arc};
4
+
use triomphe::Arc as TrioArc;
5
+
6
+
type DashMapRef<'a, K, V> =
7
+
dashmap::mapref::multiple::RefMulti<'a, Arc<K>, TrioArc<ValueEntry<K, V>>>;
8
+
9
+
pub struct EntryRef<'a, K, V>(DashMapRef<'a, K, V>);
10
+
11
+
unsafe impl<K, V> Sync for EntryRef<'_, K, V>
12
+
where
13
+
K: Eq + Hash + Send + Sync,
14
+
V: Send + Sync,
15
+
{
16
+
}
17
+
18
+
impl<'a, K, V> EntryRef<'a, K, V>
19
+
where
20
+
K: Eq + Hash,
21
+
{
22
+
pub(crate) fn new(map_ref: DashMapRef<'a, K, V>) -> Self {
23
+
Self(map_ref)
24
+
}
25
+
26
+
pub fn key(&self) -> &K {
27
+
self.0.key()
28
+
}
29
+
30
+
pub fn value(&self) -> &V {
31
+
&self.0.value().value
32
+
}
33
+
34
+
pub fn pair(&self) -> (&K, &V) {
35
+
(self.key(), self.value())
36
+
}
37
+
}
38
+
39
+
impl<K, V> std::ops::Deref for EntryRef<'_, K, V>
40
+
where
41
+
K: Eq + Hash,
42
+
{
43
+
type Target = V;
44
+
45
+
fn deref(&self) -> &V {
46
+
self.value()
47
+
}
48
+
}
+202
crates/mini-moka-vendored/src/unsync.rs
+202
crates/mini-moka-vendored/src/unsync.rs
···
1
+
//! Provides a *not* thread-safe cache implementation built upon
2
+
//! [`std::collections::HashMap`][std-hashmap].
3
+
//!
4
+
//! [std-hashmap]: https://doc.rust-lang.org/std/collections/struct.HashMap.html
5
+
6
+
mod builder;
7
+
mod cache;
8
+
mod deques;
9
+
mod iter;
10
+
11
+
use std::{ptr::NonNull, rc::Rc};
12
+
use tagptr::TagNonNull;
13
+
14
+
pub use builder::CacheBuilder;
15
+
pub use cache::Cache;
16
+
pub use iter::Iter;
17
+
18
+
use crate::common::{deque::DeqNode, time::Instant};
19
+
20
+
pub(crate) type Weigher<K, V> = Box<dyn FnMut(&K, &V) -> u32>;
21
+
22
+
pub(crate) trait AccessTime {
23
+
fn last_accessed(&self) -> Option<Instant>;
24
+
fn set_last_accessed(&mut self, timestamp: Instant);
25
+
fn last_modified(&self) -> Option<Instant>;
26
+
fn set_last_modified(&mut self, timestamp: Instant);
27
+
}
28
+
29
+
pub(crate) struct KeyDate<K> {
30
+
pub(crate) key: Rc<K>,
31
+
pub(crate) timestamp: Option<Instant>,
32
+
}
33
+
34
+
impl<K> KeyDate<K> {
35
+
pub(crate) fn new(key: Rc<K>, timestamp: Option<Instant>) -> Self {
36
+
Self { key, timestamp }
37
+
}
38
+
}
39
+
40
+
pub(crate) struct KeyHashDate<K> {
41
+
pub(crate) key: Rc<K>,
42
+
pub(crate) hash: u64,
43
+
pub(crate) timestamp: Option<Instant>,
44
+
}
45
+
46
+
impl<K> KeyHashDate<K> {
47
+
pub(crate) fn new(key: Rc<K>, hash: u64, timestamp: Option<Instant>) -> Self {
48
+
Self {
49
+
key,
50
+
hash,
51
+
timestamp,
52
+
}
53
+
}
54
+
}
55
+
56
+
// DeqNode for an access order queue.
57
+
type KeyDeqNodeAo<K> = TagNonNull<DeqNode<KeyHashDate<K>>, 2>;
58
+
59
+
// DeqNode for the write order queue.
60
+
type KeyDeqNodeWo<K> = NonNull<DeqNode<KeyDate<K>>>;
61
+
62
+
struct EntryInfo<K> {
63
+
access_order_q_node: Option<KeyDeqNodeAo<K>>,
64
+
write_order_q_node: Option<KeyDeqNodeWo<K>>,
65
+
policy_weight: u32,
66
+
}
67
+
68
+
pub(crate) struct ValueEntry<K, V> {
69
+
pub(crate) value: V,
70
+
info: EntryInfo<K>,
71
+
}
72
+
73
+
impl<K, V> ValueEntry<K, V> {
74
+
pub(crate) fn new(value: V, policy_weight: u32) -> Self {
75
+
Self {
76
+
value,
77
+
info: EntryInfo {
78
+
access_order_q_node: None,
79
+
write_order_q_node: None,
80
+
policy_weight,
81
+
},
82
+
}
83
+
}
84
+
85
+
#[inline]
86
+
pub(crate) fn replace_deq_nodes_with(&mut self, mut other: Self) {
87
+
self.info.access_order_q_node = other.info.access_order_q_node.take();
88
+
self.info.write_order_q_node = other.info.write_order_q_node.take();
89
+
}
90
+
91
+
#[inline]
92
+
pub(crate) fn access_order_q_node(&self) -> Option<KeyDeqNodeAo<K>> {
93
+
self.info.access_order_q_node
94
+
}
95
+
96
+
#[inline]
97
+
pub(crate) fn set_access_order_q_node(&mut self, node: Option<KeyDeqNodeAo<K>>) {
98
+
self.info.access_order_q_node = node;
99
+
}
100
+
101
+
#[inline]
102
+
pub(crate) fn take_access_order_q_node(&mut self) -> Option<KeyDeqNodeAo<K>> {
103
+
self.info.access_order_q_node.take()
104
+
}
105
+
106
+
#[inline]
107
+
pub(crate) fn write_order_q_node(&self) -> Option<KeyDeqNodeWo<K>> {
108
+
self.info.write_order_q_node
109
+
}
110
+
111
+
#[inline]
112
+
pub(crate) fn set_write_order_q_node(&mut self, node: Option<KeyDeqNodeWo<K>>) {
113
+
self.info.write_order_q_node = node;
114
+
}
115
+
116
+
#[inline]
117
+
pub(crate) fn take_write_order_q_node(&mut self) -> Option<KeyDeqNodeWo<K>> {
118
+
self.info.write_order_q_node.take()
119
+
}
120
+
121
+
#[inline]
122
+
pub(crate) fn policy_weight(&self) -> u32 {
123
+
self.info.policy_weight
124
+
}
125
+
126
+
#[inline]
127
+
pub(crate) fn set_policy_weight(&mut self, policy_weight: u32) {
128
+
self.info.policy_weight = policy_weight;
129
+
}
130
+
}
131
+
132
+
impl<K, V> AccessTime for ValueEntry<K, V> {
133
+
#[inline]
134
+
fn last_accessed(&self) -> Option<Instant> {
135
+
self.access_order_q_node()
136
+
.and_then(|node| unsafe { node.as_ref() }.element.timestamp)
137
+
}
138
+
139
+
#[inline]
140
+
fn set_last_accessed(&mut self, timestamp: Instant) {
141
+
if let Some(mut node) = self.info.access_order_q_node {
142
+
unsafe { node.as_mut() }.set_last_accessed(timestamp);
143
+
}
144
+
}
145
+
146
+
#[inline]
147
+
fn last_modified(&self) -> Option<Instant> {
148
+
self.write_order_q_node()
149
+
.and_then(|node| unsafe { node.as_ref() }.element.timestamp)
150
+
}
151
+
152
+
#[inline]
153
+
fn set_last_modified(&mut self, timestamp: Instant) {
154
+
if let Some(mut node) = self.info.write_order_q_node {
155
+
unsafe { node.as_mut() }.set_last_modified(timestamp);
156
+
}
157
+
}
158
+
}
159
+
160
+
impl<K> AccessTime for DeqNode<KeyDate<K>> {
161
+
#[inline]
162
+
fn last_accessed(&self) -> Option<Instant> {
163
+
None
164
+
}
165
+
166
+
#[inline]
167
+
fn set_last_accessed(&mut self, _timestamp: Instant) {
168
+
unreachable!();
169
+
}
170
+
171
+
#[inline]
172
+
fn last_modified(&self) -> Option<Instant> {
173
+
self.element.timestamp
174
+
}
175
+
176
+
#[inline]
177
+
fn set_last_modified(&mut self, timestamp: Instant) {
178
+
self.element.timestamp = Some(timestamp);
179
+
}
180
+
}
181
+
182
+
impl<K> AccessTime for DeqNode<KeyHashDate<K>> {
183
+
#[inline]
184
+
fn last_accessed(&self) -> Option<Instant> {
185
+
self.element.timestamp
186
+
}
187
+
188
+
#[inline]
189
+
fn set_last_accessed(&mut self, timestamp: Instant) {
190
+
self.element.timestamp = Some(timestamp);
191
+
}
192
+
193
+
#[inline]
194
+
fn last_modified(&self) -> Option<Instant> {
195
+
None
196
+
}
197
+
198
+
#[inline]
199
+
fn set_last_modified(&mut self, _timestamp: Instant) {
200
+
unreachable!();
201
+
}
202
+
}
+243
crates/mini-moka-vendored/src/unsync/builder.rs
+243
crates/mini-moka-vendored/src/unsync/builder.rs
···
1
+
use super::{Cache, Weigher};
2
+
use crate::common::builder_utils;
3
+
4
+
use std::{
5
+
collections::hash_map::RandomState,
6
+
hash::{BuildHasher, Hash},
7
+
marker::PhantomData,
8
+
time::Duration,
9
+
};
10
+
11
+
/// Builds a [`Cache`][cache-struct] with various configuration knobs.
12
+
///
13
+
/// [cache-struct]: ./struct.Cache.html
14
+
///
15
+
/// # Examples
16
+
///
17
+
/// ```rust
18
+
/// use mini_moka::unsync::Cache;
19
+
/// use std::time::Duration;
20
+
///
21
+
/// let mut cache = Cache::builder()
22
+
/// // Max 10,000 elements
23
+
/// .max_capacity(10_000)
24
+
/// // Time to live (TTL): 30 minutes
25
+
/// .time_to_live(Duration::from_secs(30 * 60))
26
+
/// // Time to idle (TTI): 5 minutes
27
+
/// .time_to_idle(Duration::from_secs( 5 * 60))
28
+
/// // Create the cache.
29
+
/// .build();
30
+
///
31
+
/// // This entry will expire after 5 minutes (TTI) if there is no get().
32
+
/// cache.insert(0, "zero");
33
+
///
34
+
/// // This get() will extend the entry life for another 5 minutes.
35
+
/// cache.get(&0);
36
+
///
37
+
/// // Even though we keep calling get(), the entry will expire
38
+
/// // after 30 minutes (TTL) from the insert().
39
+
/// ```
40
+
///
41
+
#[must_use]
42
+
pub struct CacheBuilder<K, V, C> {
43
+
max_capacity: Option<u64>,
44
+
initial_capacity: Option<usize>,
45
+
weigher: Option<Weigher<K, V>>,
46
+
time_to_live: Option<Duration>,
47
+
time_to_idle: Option<Duration>,
48
+
cache_type: PhantomData<C>,
49
+
}
50
+
51
+
impl<K, V> Default for CacheBuilder<K, V, Cache<K, V, RandomState>>
52
+
where
53
+
K: Eq + Hash,
54
+
{
55
+
fn default() -> Self {
56
+
Self {
57
+
max_capacity: None,
58
+
initial_capacity: None,
59
+
weigher: None,
60
+
time_to_live: None,
61
+
time_to_idle: None,
62
+
cache_type: Default::default(),
63
+
}
64
+
}
65
+
}
66
+
67
+
impl<K, V> CacheBuilder<K, V, Cache<K, V, RandomState>>
68
+
where
69
+
K: Eq + Hash,
70
+
{
71
+
/// Construct a new `CacheBuilder` that will be used to build a `Cache` holding
72
+
/// up to `max_capacity` entries.
73
+
pub fn new(max_capacity: u64) -> Self {
74
+
Self {
75
+
max_capacity: Some(max_capacity),
76
+
..Default::default()
77
+
}
78
+
}
79
+
80
+
/// Builds a `Cache<K, V>`.
81
+
///
82
+
/// # Panics
83
+
///
84
+
/// Panics if configured with either `time_to_live` or `time_to_idle` higher than
85
+
/// 1000 years. This is done to protect against overflow when computing key
86
+
/// expiration.
87
+
pub fn build(self) -> Cache<K, V, RandomState> {
88
+
let build_hasher = RandomState::default();
89
+
builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle);
90
+
Cache::with_everything(
91
+
self.max_capacity,
92
+
self.initial_capacity,
93
+
build_hasher,
94
+
self.weigher,
95
+
self.time_to_live,
96
+
self.time_to_idle,
97
+
)
98
+
}
99
+
100
+
/// Builds a `Cache<K, V, S>`, with the given `hasher`.
101
+
///
102
+
/// # Panics
103
+
///
104
+
/// Panics if configured with either `time_to_live` or `time_to_idle` higher than
105
+
/// 1000 years. This is done to protect against overflow when computing key
106
+
/// expiration.
107
+
pub fn build_with_hasher<S>(self, hasher: S) -> Cache<K, V, S>
108
+
where
109
+
S: BuildHasher + Clone,
110
+
{
111
+
builder_utils::ensure_expirations_or_panic(self.time_to_live, self.time_to_idle);
112
+
Cache::with_everything(
113
+
self.max_capacity,
114
+
self.initial_capacity,
115
+
hasher,
116
+
self.weigher,
117
+
self.time_to_live,
118
+
self.time_to_idle,
119
+
)
120
+
}
121
+
}
122
+
123
+
impl<K, V, C> CacheBuilder<K, V, C> {
124
+
/// Sets the max capacity of the cache.
125
+
pub fn max_capacity(self, max_capacity: u64) -> Self {
126
+
Self {
127
+
max_capacity: Some(max_capacity),
128
+
..self
129
+
}
130
+
}
131
+
132
+
/// Sets the initial capacity (number of entries) of the cache.
133
+
pub fn initial_capacity(self, number_of_entries: usize) -> Self {
134
+
Self {
135
+
initial_capacity: Some(number_of_entries),
136
+
..self
137
+
}
138
+
}
139
+
140
+
/// Sets the weigher closure of the cache.
141
+
///
142
+
/// The closure should take `&K` and `&V` as the arguments and returns a `u32`
143
+
/// representing the relative size of the entry.
144
+
pub fn weigher(self, weigher: impl FnMut(&K, &V) -> u32 + 'static) -> Self {
145
+
Self {
146
+
weigher: Some(Box::new(weigher)),
147
+
..self
148
+
}
149
+
}
150
+
151
+
/// Sets the time to live of the cache.
152
+
///
153
+
/// A cached entry will be expired after the specified duration past from
154
+
/// `insert`.
155
+
///
156
+
/// # Panics
157
+
///
158
+
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
159
+
/// than 1000 years. This is done to protect against overflow when computing key
160
+
/// expiration.
161
+
pub fn time_to_live(self, duration: Duration) -> Self {
162
+
Self {
163
+
time_to_live: Some(duration),
164
+
..self
165
+
}
166
+
}
167
+
168
+
/// Sets the time to idle of the cache.
169
+
///
170
+
/// A cached entry will be expired after the specified duration past from `get`
171
+
/// or `insert`.
172
+
///
173
+
/// # Panics
174
+
///
175
+
/// `CacheBuilder::build*` methods will panic if the given `duration` is longer
176
+
/// than 1000 years. This is done to protect against overflow when computing key
177
+
/// expiration.
178
+
pub fn time_to_idle(self, duration: Duration) -> Self {
179
+
Self {
180
+
time_to_idle: Some(duration),
181
+
..self
182
+
}
183
+
}
184
+
}
185
+
186
+
#[cfg(test)]
187
+
mod tests {
188
+
use super::CacheBuilder;
189
+
use std::time::Duration;
190
+
use wasm_bindgen_test::wasm_bindgen_test;
191
+
192
+
#[test]
193
+
#[wasm_bindgen_test]
194
+
fn build_cache() {
195
+
// Cache<char, String>
196
+
let mut cache = CacheBuilder::new(100).build();
197
+
let policy = cache.policy();
198
+
199
+
assert_eq!(policy.max_capacity(), Some(100));
200
+
assert_eq!(policy.time_to_live(), None);
201
+
assert_eq!(policy.time_to_idle(), None);
202
+
203
+
cache.insert('a', "Alice");
204
+
assert_eq!(cache.get(&'a'), Some(&"Alice"));
205
+
206
+
let mut cache = CacheBuilder::new(100)
207
+
.time_to_live(Duration::from_secs(45 * 60))
208
+
.time_to_idle(Duration::from_secs(15 * 60))
209
+
.build();
210
+
let policy = cache.policy();
211
+
212
+
assert_eq!(policy.max_capacity(), Some(100));
213
+
assert_eq!(policy.time_to_live(), Some(Duration::from_secs(45 * 60)));
214
+
assert_eq!(policy.time_to_idle(), Some(Duration::from_secs(15 * 60)));
215
+
216
+
cache.insert('a', "Alice");
217
+
assert_eq!(cache.get(&'a'), Some(&"Alice"));
218
+
}
219
+
220
+
#[test]
221
+
#[wasm_bindgen_test]
222
+
#[should_panic(expected = "time_to_live is longer than 1000 years")]
223
+
fn build_cache_too_long_ttl() {
224
+
let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600;
225
+
let builder: CacheBuilder<char, String, _> = CacheBuilder::new(100);
226
+
let duration = Duration::from_secs(thousand_years_secs);
227
+
builder
228
+
.time_to_live(duration + Duration::from_secs(1))
229
+
.build();
230
+
}
231
+
232
+
#[test]
233
+
#[wasm_bindgen_test]
234
+
#[should_panic(expected = "time_to_idle is longer than 1000 years")]
235
+
fn build_cache_too_long_tti() {
236
+
let thousand_years_secs: u64 = 1000 * 365 * 24 * 3600;
237
+
let builder: CacheBuilder<char, String, _> = CacheBuilder::new(100);
238
+
let duration = Duration::from_secs(thousand_years_secs);
239
+
builder
240
+
.time_to_idle(duration + Duration::from_secs(1))
241
+
.build();
242
+
}
243
+
}
+1468
crates/mini-moka-vendored/src/unsync/cache.rs
+1468
crates/mini-moka-vendored/src/unsync/cache.rs
···
1
+
use super::{
2
+
deques::Deques, AccessTime, CacheBuilder, Iter, KeyDate, KeyHashDate, ValueEntry, Weigher,
3
+
};
4
+
use crate::{
5
+
common::{
6
+
self,
7
+
deque::{DeqNode, Deque},
8
+
frequency_sketch::FrequencySketch,
9
+
time::{CheckedTimeOps, Clock, Instant},
10
+
CacheRegion,
11
+
},
12
+
Policy,
13
+
};
14
+
15
+
use smallvec::SmallVec;
16
+
use std::{
17
+
borrow::Borrow,
18
+
collections::{hash_map::RandomState, HashMap},
19
+
fmt,
20
+
hash::{BuildHasher, Hash},
21
+
ptr::NonNull,
22
+
rc::Rc,
23
+
time::Duration,
24
+
};
25
+
26
+
const EVICTION_BATCH_SIZE: usize = 100;
27
+
28
+
type CacheStore<K, V, S> = std::collections::HashMap<Rc<K>, ValueEntry<K, V>, S>;
29
+
30
+
/// An in-memory cache that is _not_ thread-safe.
31
+
///
32
+
/// `Cache` utilizes a hash table [`std::collections::HashMap`][std-hashmap] from the
33
+
/// standard library for the central key-value storage. `Cache` performs a
34
+
/// best-effort bounding of the map using an entry replacement algorithm to determine
35
+
/// which entries to evict when the capacity is exceeded.
36
+
///
37
+
/// [std-hashmap]: https://doc.rust-lang.org/std/collections/struct.HashMap.html
38
+
///
39
+
/// # Characteristic difference between `unsync` and `sync`/`future` caches
40
+
///
41
+
/// If you use a cache from a single thread application, `unsync::Cache` may
42
+
/// outperform other caches for updates and retrievals because other caches have some
43
+
/// overhead on syncing internal data structures between threads.
44
+
///
45
+
/// However, other caches may outperform `unsync::Cache` on the same operations when
46
+
/// expiration polices are configured on a multi-core system. `unsync::Cache` evicts
47
+
/// expired entries as a part of update and retrieval operations while others evict
48
+
/// them using a dedicated background thread.
49
+
///
50
+
/// # Examples
51
+
///
52
+
/// Cache entries are manually added using the insert method, and are stored in the
53
+
/// cache until either evicted or manually invalidated.
54
+
///
55
+
/// Here's an example of reading and updating a cache by using the main thread:
56
+
///
57
+
///```rust
58
+
/// use mini_moka::unsync::Cache;
59
+
///
60
+
/// const NUM_KEYS: usize = 64;
61
+
///
62
+
/// fn value(n: usize) -> String {
63
+
/// format!("value {}", n)
64
+
/// }
65
+
///
66
+
/// // Create a cache that can store up to 10,000 entries.
67
+
/// let mut cache = Cache::new(10_000);
68
+
///
69
+
/// // Insert 64 entries.
70
+
/// for key in 0..NUM_KEYS {
71
+
/// cache.insert(key, value(key));
72
+
/// }
73
+
///
74
+
/// // Invalidate every 4 element of the inserted entries.
75
+
/// for key in (0..NUM_KEYS).step_by(4) {
76
+
/// cache.invalidate(&key);
77
+
/// }
78
+
///
79
+
/// // Verify the result.
80
+
/// for key in 0..NUM_KEYS {
81
+
/// if key % 4 == 0 {
82
+
/// assert_eq!(cache.get(&key), None);
83
+
/// } else {
84
+
/// assert_eq!(cache.get(&key), Some(&value(key)));
85
+
/// }
86
+
/// }
87
+
/// ```
88
+
///
89
+
/// # Size-based Eviction
90
+
///
91
+
/// ```rust
92
+
/// use std::convert::TryInto;
93
+
/// use mini_moka::unsync::Cache;
94
+
///
95
+
/// // Evict based on the number of entries in the cache.
96
+
/// let mut cache = Cache::builder()
97
+
/// // Up to 10,000 entries.
98
+
/// .max_capacity(10_000)
99
+
/// // Create the cache.
100
+
/// .build();
101
+
/// cache.insert(1, "one".to_string());
102
+
///
103
+
/// // Evict based on the byte length of strings in the cache.
104
+
/// let mut cache = Cache::builder()
105
+
/// // A weigher closure takes &K and &V and returns a u32
106
+
/// // representing the relative size of the entry.
107
+
/// .weigher(|_key, value: &String| -> u32 {
108
+
/// value.len().try_into().unwrap_or(u32::MAX)
109
+
/// })
110
+
/// // This cache will hold up to 32MiB of values.
111
+
/// .max_capacity(32 * 1024 * 1024)
112
+
/// .build();
113
+
/// cache.insert(2, "two".to_string());
114
+
/// ```
115
+
///
116
+
/// If your cache should not grow beyond a certain size, use the `max_capacity`
117
+
/// method of the [`CacheBuilder`][builder-struct] to set the upper bound. The cache
118
+
/// will try to evict entries that have not been used recently or very often.
119
+
///
120
+
/// At the cache creation time, a weigher closure can be set by the `weigher` method
121
+
/// of the `CacheBuilder`. A weigher closure takes `&K` and `&V` as the arguments and
122
+
/// returns a `u32` representing the relative size of the entry:
123
+
///
124
+
/// - If the `weigher` is _not_ set, the cache will treat each entry has the same
125
+
/// size of `1`. This means the cache will be bounded by the number of entries.
126
+
/// - If the `weigher` is set, the cache will call the weigher to calculate the
127
+
/// weighted size (relative size) on an entry. This means the cache will be bounded
128
+
/// by the total weighted size of entries.
129
+
///
130
+
/// Note that weighted sizes are not used when making eviction selections.
131
+
///
132
+
/// [builder-struct]: ./struct.CacheBuilder.html
133
+
///
134
+
/// # Time-based Expirations
135
+
///
136
+
/// `Cache` supports the following expiration policies:
137
+
///
138
+
/// - **Time to live**: A cached entry will be expired after the specified duration
139
+
/// past from `insert`.
140
+
/// - **Time to idle**: A cached entry will be expired after the specified duration
141
+
/// past from `get` or `insert`.
142
+
///
143
+
/// See the [`CacheBuilder`][builder-struct]'s doc for how to configure a cache
144
+
/// with them.
145
+
///
146
+
/// [builder-struct]: ./struct.CacheBuilder.html
147
+
///
148
+
/// # Hashing Algorithm
149
+
///
150
+
/// By default, `Cache` uses a hashing algorithm selected to provide resistance
151
+
/// against HashDoS attacks. It will the same one used by
152
+
/// `std::collections::HashMap`, which is currently SipHash 1-3.
153
+
///
154
+
/// While SipHash's performance is very competitive for medium sized keys, other
155
+
/// hashing algorithms will outperform it for small keys such as integers as well as
156
+
/// large keys such as long strings. However those algorithms will typically not
157
+
/// protect against attacks such as HashDoS.
158
+
///
159
+
/// The hashing algorithm can be replaced on a per-`Cache` basis using the
160
+
/// [`build_with_hasher`][build-with-hasher-method] method of the
161
+
/// `CacheBuilder`. Many alternative algorithms are available on crates.io, such
162
+
/// as the [aHash][ahash-crate] crate.
163
+
///
164
+
/// [build-with-hasher-method]: ./struct.CacheBuilder.html#method.build_with_hasher
165
+
/// [ahash-crate]: https://crates.io/crates/ahash
166
+
///
167
+
pub struct Cache<K, V, S = RandomState> {
168
+
max_capacity: Option<u64>,
169
+
entry_count: u64,
170
+
weighted_size: u64,
171
+
cache: CacheStore<K, V, S>,
172
+
build_hasher: S,
173
+
weigher: Option<Weigher<K, V>>,
174
+
deques: Deques<K>,
175
+
frequency_sketch: FrequencySketch,
176
+
frequency_sketch_enabled: bool,
177
+
time_to_live: Option<Duration>,
178
+
time_to_idle: Option<Duration>,
179
+
expiration_clock: Option<Clock>,
180
+
}
181
+
182
+
impl<K, V, S> fmt::Debug for Cache<K, V, S>
183
+
where
184
+
K: fmt::Debug + Eq + Hash,
185
+
V: fmt::Debug,
186
+
// TODO: Remove these bounds from S.
187
+
S: BuildHasher + Clone,
188
+
{
189
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
190
+
let mut d_map = f.debug_map();
191
+
192
+
for (k, v) in self.iter() {
193
+
d_map.entry(&k, &v);
194
+
}
195
+
196
+
d_map.finish()
197
+
}
198
+
}
199
+
200
+
impl<K, V> Cache<K, V, RandomState>
201
+
where
202
+
K: Hash + Eq,
203
+
{
204
+
/// Constructs a new `Cache<K, V>` that will store up to the `max_capacity` entries.
205
+
///
206
+
/// To adjust various configuration knobs such as `initial_capacity` or
207
+
/// `time_to_live`, use the [`CacheBuilder`][builder-struct].
208
+
///
209
+
/// [builder-struct]: ./struct.CacheBuilder.html
210
+
pub fn new(max_capacity: u64) -> Self {
211
+
let build_hasher = RandomState::default();
212
+
Self::with_everything(Some(max_capacity), None, build_hasher, None, None, None)
213
+
}
214
+
215
+
/// Returns a [`CacheBuilder`][builder-struct], which can builds a `Cache` with
216
+
/// various configuration knobs.
217
+
///
218
+
/// [builder-struct]: ./struct.CacheBuilder.html
219
+
pub fn builder() -> CacheBuilder<K, V, Cache<K, V, RandomState>> {
220
+
CacheBuilder::default()
221
+
}
222
+
}
223
+
224
+
//
225
+
// public
226
+
//
227
+
impl<K, V, S> Cache<K, V, S> {
228
+
/// Returns a read-only cache policy of this cache.
229
+
///
230
+
/// At this time, cache policy cannot be modified after cache creation.
231
+
/// A future version may support to modify it.
232
+
pub fn policy(&self) -> Policy {
233
+
Policy::new(self.max_capacity, self.time_to_live, self.time_to_idle)
234
+
}
235
+
236
+
/// Returns the number of entries in this cache.
237
+
///
238
+
/// # Example
239
+
///
240
+
/// ```rust
241
+
/// use mini_moka::unsync::Cache;
242
+
///
243
+
/// let mut cache = Cache::new(10);
244
+
/// cache.insert('n', "Netherland Dwarf");
245
+
/// cache.insert('l', "Lop Eared");
246
+
/// cache.insert('d', "Dutch");
247
+
///
248
+
/// // Ensure an entry exists.
249
+
/// assert!(cache.contains_key(&'n'));
250
+
///
251
+
/// // Followings will print the actual numbers.
252
+
/// println!("{}", cache.entry_count()); // -> 3
253
+
/// println!("{}", cache.weighted_size()); // -> 3
254
+
/// ```
255
+
///
256
+
pub fn entry_count(&self) -> u64 {
257
+
self.entry_count
258
+
}
259
+
260
+
/// Returns the total weighted size of entries in this cache.
261
+
///
262
+
/// See [`entry_count`](#method.entry_count) for a sample code.
263
+
pub fn weighted_size(&self) -> u64 {
264
+
self.weighted_size
265
+
}
266
+
}
267
+
268
+
impl<K, V, S> Cache<K, V, S>
269
+
where
270
+
K: Hash + Eq,
271
+
S: BuildHasher + Clone,
272
+
{
273
+
pub(crate) fn with_everything(
274
+
max_capacity: Option<u64>,
275
+
initial_capacity: Option<usize>,
276
+
build_hasher: S,
277
+
weigher: Option<Weigher<K, V>>,
278
+
time_to_live: Option<Duration>,
279
+
time_to_idle: Option<Duration>,
280
+
) -> Self {
281
+
let cache = HashMap::with_capacity_and_hasher(
282
+
initial_capacity.unwrap_or_default(),
283
+
build_hasher.clone(),
284
+
);
285
+
286
+
Self {
287
+
max_capacity,
288
+
entry_count: 0,
289
+
weighted_size: 0,
290
+
cache,
291
+
build_hasher,
292
+
weigher,
293
+
deques: Default::default(),
294
+
frequency_sketch: Default::default(),
295
+
frequency_sketch_enabled: false,
296
+
time_to_live,
297
+
time_to_idle,
298
+
expiration_clock: None,
299
+
}
300
+
}
301
+
302
+
/// Returns `true` if the cache contains a value for the key.
303
+
///
304
+
/// Unlike the `get` method, this method is not considered a cache read operation,
305
+
/// so it does not update the historic popularity estimator or reset the idle
306
+
/// timer for the key.
307
+
///
308
+
/// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
309
+
/// on the borrowed form _must_ match those for the key type.
310
+
pub fn contains_key<Q>(&mut self, key: &Q) -> bool
311
+
where
312
+
Rc<K>: Borrow<Q>,
313
+
Q: Hash + Eq + ?Sized,
314
+
{
315
+
let timestamp = self.evict_expired_if_needed();
316
+
self.evict_lru_entries();
317
+
318
+
match (self.cache.get(key), timestamp) {
319
+
// Value not found.
320
+
(None, _) => false,
321
+
// Value found, no expiry.
322
+
(Some(_), None) => true,
323
+
// Value found, check if expired.
324
+
(Some(entry), Some(ts)) => {
325
+
!Self::is_expired_entry_wo(&self.time_to_live, entry, ts)
326
+
&& !Self::is_expired_entry_ao(&self.time_to_idle, entry, ts)
327
+
}
328
+
}
329
+
}
330
+
331
+
/// Returns an immutable reference of the value corresponding to the key.
332
+
///
333
+
/// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
334
+
/// on the borrowed form _must_ match those for the key type.
335
+
pub fn get<Q>(&mut self, key: &Q) -> Option<&V>
336
+
where
337
+
Rc<K>: Borrow<Q>,
338
+
Q: Hash + Eq + ?Sized,
339
+
{
340
+
let timestamp = self.evict_expired_if_needed();
341
+
self.evict_lru_entries();
342
+
self.frequency_sketch.increment(self.hash(key));
343
+
344
+
match (self.cache.get_mut(key), timestamp, &mut self.deques) {
345
+
// Value not found.
346
+
(None, _, _) => None,
347
+
// Value found, no expiry.
348
+
(Some(entry), None, deqs) => {
349
+
Self::record_hit(deqs, entry, None);
350
+
Some(&entry.value)
351
+
}
352
+
// Value found, check if expired.
353
+
(Some(entry), Some(ts), deqs) => {
354
+
if Self::is_expired_entry_wo(&self.time_to_live, entry, ts)
355
+
|| Self::is_expired_entry_ao(&self.time_to_idle, entry, ts)
356
+
{
357
+
None
358
+
} else {
359
+
Self::record_hit(deqs, entry, timestamp);
360
+
Some(&entry.value)
361
+
}
362
+
}
363
+
}
364
+
}
365
+
366
+
pub(crate) fn is_expired_entry(&self, entry: &ValueEntry<K, V>) -> bool {
367
+
let now = self.current_time_from_expiration_clock();
368
+
Self::is_expired_entry_wo(&self.time_to_live, entry, now)
369
+
|| Self::is_expired_entry_ao(&self.time_to_idle, entry, now)
370
+
}
371
+
372
+
/// Inserts a key-value pair into the cache.
373
+
///
374
+
/// If the cache has this key present, the value is updated.
375
+
pub fn insert(&mut self, key: K, value: V) {
376
+
let timestamp = self.evict_expired_if_needed();
377
+
self.evict_lru_entries();
378
+
let policy_weight = weigh(&mut self.weigher, &key, &value);
379
+
let key = Rc::new(key);
380
+
let entry = ValueEntry::new(value, policy_weight);
381
+
382
+
if let Some(old_entry) = self.cache.insert(Rc::clone(&key), entry) {
383
+
self.handle_update(key, timestamp, policy_weight, old_entry);
384
+
} else {
385
+
let hash = self.hash(&key);
386
+
self.handle_insert(key, hash, policy_weight, timestamp);
387
+
}
388
+
}
389
+
390
+
/// Discards any cached value for the key.
391
+
///
392
+
/// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
393
+
/// on the borrowed form _must_ match those for the key type.
394
+
pub fn invalidate<Q>(&mut self, key: &Q)
395
+
where
396
+
Rc<K>: Borrow<Q>,
397
+
Q: Hash + Eq + ?Sized,
398
+
{
399
+
self.evict_expired_if_needed();
400
+
self.evict_lru_entries();
401
+
402
+
if let Some(mut entry) = self.cache.remove(key) {
403
+
let weight = entry.policy_weight();
404
+
self.deques.unlink_ao(&mut entry);
405
+
Deques::unlink_wo(&mut self.deques.write_order, &mut entry);
406
+
self.saturating_sub_from_total_weight(weight as u64);
407
+
}
408
+
}
409
+
410
+
/// Discards any cached value for the key, returning the cached value.
411
+
///
412
+
/// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq`
413
+
/// on the borrowed form _must_ match those for the key type.
414
+
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
415
+
where
416
+
Rc<K>: Borrow<Q>,
417
+
Q: Hash + Eq + ?Sized,
418
+
{
419
+
self.evict_expired_if_needed();
420
+
self.evict_lru_entries();
421
+
422
+
if let Some(mut entry) = self.cache.remove(key) {
423
+
let weight = entry.policy_weight();
424
+
self.deques.unlink_ao(&mut entry);
425
+
crate::unsync::deques::Deques::unlink_wo(&mut self.deques.write_order, &mut entry);
426
+
self.saturating_sub_from_total_weight(weight as u64);
427
+
Some(entry.value)
428
+
} else {
429
+
None
430
+
}
431
+
}
432
+
433
+
/// Discards all cached values.
434
+
///
435
+
/// Like the `invalidate` method, this method does not clear the historic
436
+
/// popularity estimator of keys so that it retains the client activities of
437
+
/// trying to retrieve an item.
438
+
pub fn invalidate_all(&mut self) {
439
+
self.cache.clear();
440
+
self.deques.clear();
441
+
self.weighted_size = 0;
442
+
}
443
+
444
+
/// Discards cached values that satisfy a predicate.
445
+
///
446
+
/// `invalidate_entries_if` takes a closure that returns `true` or `false`.
447
+
/// `invalidate_entries_if` will apply the closure to each cached value,
448
+
/// and if the closure returns `true`, the value will be invalidated.
449
+
///
450
+
/// Like the `invalidate` method, this method does not clear the historic
451
+
/// popularity estimator of keys so that it retains the client activities of
452
+
/// trying to retrieve an item.
453
+
// -----------------------------------------------------------------------
454
+
// (The followings are not doc comments)
455
+
// We need this #[allow(...)] to avoid a false Clippy warning about needless
456
+
// collect to create keys_to_invalidate.
457
+
// clippy 0.1.52 (9a1dfd2dc5c 2021-04-30) in Rust 1.52.0-beta.7
458
+
#[allow(clippy::needless_collect)]
459
+
pub fn invalidate_entries_if(&mut self, mut predicate: impl FnMut(&K, &V) -> bool) {
460
+
let Self { cache, deques, .. } = self;
461
+
462
+
// Since we can't do cache.iter() and cache.remove() at the same time,
463
+
// invalidation needs to run in two steps:
464
+
// 1. Examine all entries in this cache and collect keys to invalidate.
465
+
// 2. Remove entries for the keys.
466
+
467
+
let keys_to_invalidate = cache
468
+
.iter()
469
+
.filter(|(key, entry)| (predicate)(key, &entry.value))
470
+
.map(|(key, _)| Rc::clone(key))
471
+
.collect::<Vec<_>>();
472
+
473
+
let mut invalidated = 0u64;
474
+
475
+
keys_to_invalidate.into_iter().for_each(|k| {
476
+
if let Some(mut entry) = cache.remove(&k) {
477
+
let weight = entry.policy_weight();
478
+
deques.unlink_ao(&mut entry);
479
+
Deques::unlink_wo(&mut deques.write_order, &mut entry);
480
+
invalidated = invalidated.saturating_sub(weight as u64);
481
+
}
482
+
});
483
+
self.saturating_sub_from_total_weight(invalidated);
484
+
}
485
+
486
+
/// Creates an iterator visiting all key-value pairs in arbitrary order. The
487
+
/// iterator element type is `(&K, &V)`.
488
+
///
489
+
/// Unlike the `get` method, visiting entries via an iterator do not update the
490
+
/// historic popularity estimator or reset idle timers for keys.
491
+
///
492
+
/// # Examples
493
+
///
494
+
/// ```rust
495
+
/// use mini_moka::unsync::Cache;
496
+
///
497
+
/// let mut cache = Cache::new(100);
498
+
/// cache.insert("Julia", 14);
499
+
///
500
+
/// let mut iter = cache.iter();
501
+
/// let (k, v) = iter.next().unwrap(); // (&K, &V)
502
+
/// assert_eq!(k, &"Julia");
503
+
/// assert_eq!(v, &14);
504
+
///
505
+
/// assert!(iter.next().is_none());
506
+
/// ```
507
+
///
508
+
pub fn iter(&self) -> Iter<'_, K, V, S> {
509
+
Iter::new(self, self.cache.iter())
510
+
}
511
+
}
512
+
513
+
//
514
+
// private
515
+
//
516
+
impl<K, V, S> Cache<K, V, S>
517
+
where
518
+
K: Hash + Eq,
519
+
S: BuildHasher + Clone,
520
+
{
521
+
#[inline]
522
+
fn hash<Q>(&self, key: &Q) -> u64
523
+
where
524
+
Rc<K>: Borrow<Q>,
525
+
Q: Hash + Eq + ?Sized,
526
+
{
527
+
self.build_hasher.hash_one(key)
528
+
}
529
+
530
+
#[inline]
531
+
fn has_expiry(&self) -> bool {
532
+
self.time_to_live.is_some() || self.time_to_idle.is_some()
533
+
}
534
+
535
+
#[inline]
536
+
fn evict_expired_if_needed(&mut self) -> Option<Instant> {
537
+
if self.has_expiry() {
538
+
let ts = self.current_time_from_expiration_clock();
539
+
self.evict_expired(ts);
540
+
Some(ts)
541
+
} else {
542
+
None
543
+
}
544
+
}
545
+
546
+
#[inline]
547
+
fn current_time_from_expiration_clock(&self) -> Instant {
548
+
if let Some(clock) = &self.expiration_clock {
549
+
Instant::new(clock.now())
550
+
} else {
551
+
Instant::now()
552
+
}
553
+
}
554
+
555
+
#[inline]
556
+
fn is_expired_entry_ao(
557
+
time_to_idle: &Option<Duration>,
558
+
entry: &impl AccessTime,
559
+
now: Instant,
560
+
) -> bool {
561
+
if let (Some(ts), Some(tti)) = (entry.last_accessed(), time_to_idle) {
562
+
let checked_add = ts.checked_add(*tti);
563
+
if checked_add.is_none() {
564
+
panic!("ttl overflow")
565
+
}
566
+
return checked_add.unwrap() <= now;
567
+
}
568
+
false
569
+
}
570
+
571
+
#[inline]
572
+
fn is_expired_entry_wo(
573
+
time_to_live: &Option<Duration>,
574
+
entry: &impl AccessTime,
575
+
now: Instant,
576
+
) -> bool {
577
+
if let (Some(ts), Some(ttl)) = (entry.last_modified(), time_to_live) {
578
+
let checked_add = ts.checked_add(*ttl);
579
+
if checked_add.is_none() {
580
+
panic!("ttl overflow")
581
+
}
582
+
return checked_add.unwrap() <= now;
583
+
}
584
+
false
585
+
}
586
+
587
+
fn record_hit(deques: &mut Deques<K>, entry: &mut ValueEntry<K, V>, ts: Option<Instant>) {
588
+
if let Some(ts) = ts {
589
+
entry.set_last_accessed(ts);
590
+
}
591
+
deques.move_to_back_ao(entry)
592
+
}
593
+
594
+
fn has_enough_capacity(&self, candidate_weight: u32, ws: u64) -> bool {
595
+
self.max_capacity
596
+
.map(|limit| ws + candidate_weight as u64 <= limit)
597
+
.unwrap_or(true)
598
+
}
599
+
600
+
fn weights_to_evict(&self) -> u64 {
601
+
self.max_capacity
602
+
.map(|limit| self.weighted_size.saturating_sub(limit))
603
+
.unwrap_or_default()
604
+
}
605
+
606
+
#[inline]
607
+
fn should_enable_frequency_sketch(&self) -> bool {
608
+
if self.frequency_sketch_enabled {
609
+
false
610
+
} else if let Some(max_cap) = self.max_capacity {
611
+
self.weighted_size >= max_cap / 2
612
+
} else {
613
+
false
614
+
}
615
+
}
616
+
617
+
#[inline]
618
+
fn enable_frequency_sketch(&mut self) {
619
+
if let Some(max_cap) = self.max_capacity {
620
+
let cap = if self.weigher.is_none() {
621
+
max_cap
622
+
} else {
623
+
(self.entry_count as f64 * (self.weighted_size as f64 / max_cap as f64)) as u64
624
+
};
625
+
self.do_enable_frequency_sketch(cap);
626
+
}
627
+
}
628
+
629
+
#[cfg(test)]
630
+
fn enable_frequency_sketch_for_testing(&mut self) {
631
+
if let Some(max_cap) = self.max_capacity {
632
+
self.do_enable_frequency_sketch(max_cap);
633
+
}
634
+
}
635
+
636
+
#[inline]
637
+
fn do_enable_frequency_sketch(&mut self, cache_capacity: u64) {
638
+
let skt_capacity = common::sketch_capacity(cache_capacity);
639
+
self.frequency_sketch.ensure_capacity(skt_capacity);
640
+
self.frequency_sketch_enabled = true;
641
+
}
642
+
643
+
fn saturating_add_to_total_weight(&mut self, weight: u64) {
644
+
let total = &mut self.weighted_size;
645
+
*total = total.saturating_add(weight);
646
+
}
647
+
648
+
fn saturating_sub_from_total_weight(&mut self, weight: u64) {
649
+
let total = &mut self.weighted_size;
650
+
*total = total.saturating_sub(weight);
651
+
}
652
+
653
+
#[inline]
654
+
fn handle_insert(
655
+
&mut self,
656
+
key: Rc<K>,
657
+
hash: u64,
658
+
policy_weight: u32,
659
+
timestamp: Option<Instant>,
660
+
) {
661
+
let has_free_space = self.has_enough_capacity(policy_weight, self.weighted_size);
662
+
let (cache, deqs, freq) = (&mut self.cache, &mut self.deques, &self.frequency_sketch);
663
+
664
+
if has_free_space {
665
+
// Add the candidate to the deque.
666
+
let key = Rc::clone(&key);
667
+
let entry = cache.get_mut(&key).unwrap();
668
+
deqs.push_back_ao(
669
+
CacheRegion::MainProbation,
670
+
KeyHashDate::new(Rc::clone(&key), hash, timestamp),
671
+
entry,
672
+
);
673
+
if self.time_to_live.is_some() {
674
+
deqs.push_back_wo(KeyDate::new(key, timestamp), entry);
675
+
}
676
+
self.entry_count += 1;
677
+
self.saturating_add_to_total_weight(policy_weight as u64);
678
+
679
+
if self.should_enable_frequency_sketch() {
680
+
self.enable_frequency_sketch();
681
+
}
682
+
683
+
return;
684
+
}
685
+
686
+
if let Some(max) = self.max_capacity {
687
+
if policy_weight as u64 > max {
688
+
// The candidate is too big to fit in the cache. Reject it.
689
+
cache.remove(&Rc::clone(&key));
690
+
return;
691
+
}
692
+
}
693
+
694
+
let mut candidate = EntrySizeAndFrequency::new(policy_weight as u64);
695
+
candidate.add_frequency(freq, hash);
696
+
697
+
match Self::admit(&candidate, cache, deqs, freq, &mut self.weigher) {
698
+
AdmissionResult::Admitted {
699
+
victim_nodes,
700
+
victims_weight,
701
+
} => {
702
+
// Remove the victims from the cache (hash map) and deque.
703
+
for victim in victim_nodes {
704
+
// Remove the victim from the hash map.
705
+
let mut vic_entry = cache
706
+
.remove(unsafe { &victim.as_ref().element.key })
707
+
.expect("Cannot remove a victim from the hash map");
708
+
// And then remove the victim from the deques.
709
+
deqs.unlink_ao(&mut vic_entry);
710
+
Deques::unlink_wo(&mut deqs.write_order, &mut vic_entry);
711
+
self.entry_count -= 1;
712
+
}
713
+
714
+
// Add the candidate to the deque.
715
+
let entry = cache.get_mut(&key).unwrap();
716
+
let key = Rc::clone(&key);
717
+
deqs.push_back_ao(
718
+
CacheRegion::MainProbation,
719
+
KeyHashDate::new(Rc::clone(&key), hash, timestamp),
720
+
entry,
721
+
);
722
+
if self.time_to_live.is_some() {
723
+
deqs.push_back_wo(KeyDate::new(key, timestamp), entry);
724
+
}
725
+
726
+
self.entry_count += 1;
727
+
Self::saturating_sub_from_total_weight(self, victims_weight);
728
+
Self::saturating_add_to_total_weight(self, policy_weight as u64);
729
+
730
+
if self.should_enable_frequency_sketch() {
731
+
self.enable_frequency_sketch();
732
+
}
733
+
}
734
+
AdmissionResult::Rejected => {
735
+
// Remove the candidate from the cache.
736
+
cache.remove(&key);
737
+
}
738
+
}
739
+
}
740
+
741
+
/// Performs size-aware admission explained in the paper:
742
+
/// [Lightweight Robust Size Aware Cache Management][size-aware-cache-paper]
743
+
/// by Gil Einziger, Ohad Eytan, Roy Friedman, Ben Manes.
744
+
///
745
+
/// [size-aware-cache-paper]: https://arxiv.org/abs/2105.08770
746
+
///
747
+
/// There are some modifications in this implementation:
748
+
/// - To admit to the main space, candidate's frequency must be higher than
749
+
/// the aggregated frequencies of the potential victims. (In the paper,
750
+
/// `>=` operator is used rather than `>`) The `>` operator will do a better
751
+
/// job to prevent the main space from polluting.
752
+
/// - When a candidate is rejected, the potential victims will stay at the LRU
753
+
/// position of the probation access-order queue. (In the paper, they will be
754
+
/// promoted (to the MRU position?) to force the eviction policy to select a
755
+
/// different set of victims for the next candidate). We may implement the
756
+
/// paper's behavior later?
757
+
///
758
+
#[inline]
759
+
fn admit(
760
+
candidate: &EntrySizeAndFrequency,
761
+
cache: &CacheStore<K, V, S>,
762
+
deqs: &Deques<K>,
763
+
freq: &FrequencySketch,
764
+
weigher: &mut Option<Weigher<K, V>>,
765
+
) -> AdmissionResult<K> {
766
+
let mut victims = EntrySizeAndFrequency::default();
767
+
let mut victim_nodes = SmallVec::default();
768
+
769
+
// Get first potential victim at the LRU position.
770
+
let mut next_victim = deqs.probation.peek_front_ptr();
771
+
772
+
// Aggregate potential victims.
773
+
while victims.weight < candidate.weight {
774
+
if candidate.freq < victims.freq {
775
+
break;
776
+
}
777
+
if let Some(victim) = next_victim.take() {
778
+
next_victim = DeqNode::next_node_ptr(victim);
779
+
let vic_elem = &unsafe { victim.as_ref() }.element;
780
+
781
+
let vic_entry = cache
782
+
.get(&vic_elem.key)
783
+
.expect("Cannot get an victim entry");
784
+
victims.add_policy_weight(vic_elem.key.as_ref(), &vic_entry.value, weigher);
785
+
victims.add_frequency(freq, vic_elem.hash);
786
+
victim_nodes.push(victim);
787
+
} else {
788
+
// No more potential victims.
789
+
break;
790
+
}
791
+
}
792
+
793
+
// Admit or reject the candidate.
794
+
795
+
// TODO: Implement some randomness to mitigate hash DoS attack.
796
+
// See Caffeine's implementation.
797
+
798
+
if victims.weight >= candidate.weight && candidate.freq > victims.freq {
799
+
AdmissionResult::Admitted {
800
+
victim_nodes,
801
+
victims_weight: victims.weight,
802
+
}
803
+
} else {
804
+
AdmissionResult::Rejected
805
+
}
806
+
}
807
+
808
+
fn handle_update(
809
+
&mut self,
810
+
key: Rc<K>,
811
+
timestamp: Option<Instant>,
812
+
policy_weight: u32,
813
+
old_entry: ValueEntry<K, V>,
814
+
) {
815
+
let old_policy_weight = old_entry.policy_weight();
816
+
817
+
let entry = self.cache.get_mut(&key).unwrap();
818
+
entry.replace_deq_nodes_with(old_entry);
819
+
if let Some(ts) = timestamp {
820
+
entry.set_last_accessed(ts);
821
+
entry.set_last_modified(ts);
822
+
}
823
+
entry.set_policy_weight(policy_weight);
824
+
825
+
let deqs = &mut self.deques;
826
+
deqs.move_to_back_ao(entry);
827
+
if self.time_to_live.is_some() {
828
+
deqs.move_to_back_wo(entry);
829
+
}
830
+
831
+
self.saturating_sub_from_total_weight(old_policy_weight as u64);
832
+
self.saturating_add_to_total_weight(policy_weight as u64);
833
+
}
834
+
835
+
fn evict_expired(&mut self, now: Instant) {
836
+
if self.time_to_live.is_some() {
837
+
let (count, weight) = self.remove_expired_wo(EVICTION_BATCH_SIZE, now);
838
+
self.entry_count -= count;
839
+
self.saturating_sub_from_total_weight(weight);
840
+
}
841
+
842
+
if self.time_to_idle.is_some() {
843
+
let deqs = &mut self.deques;
844
+
let (window, probation, protected, wo, cache, time_to_idle) = (
845
+
&mut deqs.window,
846
+
&mut deqs.probation,
847
+
&mut deqs.protected,
848
+
&mut deqs.write_order,
849
+
&mut self.cache,
850
+
&self.time_to_idle,
851
+
);
852
+
853
+
let mut rm_expired_ao = |name, deq| {
854
+
Self::remove_expired_ao(
855
+
name,
856
+
deq,
857
+
wo,
858
+
cache,
859
+
time_to_idle,
860
+
EVICTION_BATCH_SIZE,
861
+
now,
862
+
)
863
+
};
864
+
865
+
let (count1, weight1) = rm_expired_ao("window", window);
866
+
let (count2, weight2) = rm_expired_ao("probation", probation);
867
+
let (count3, weight3) = rm_expired_ao("protected", protected);
868
+
869
+
self.entry_count -= count1 + count2 + count3;
870
+
self.saturating_sub_from_total_weight(weight1);
871
+
self.saturating_sub_from_total_weight(weight2);
872
+
self.saturating_sub_from_total_weight(weight3);
873
+
}
874
+
}
875
+
876
+
// Returns (u64, u64) where (evicted_entry_count, evicted_policy_weight).
877
+
#[inline]
878
+
fn remove_expired_ao(
879
+
deq_name: &str,
880
+
deq: &mut Deque<KeyHashDate<K>>,
881
+
write_order_deq: &mut Deque<KeyDate<K>>,
882
+
cache: &mut CacheStore<K, V, S>,
883
+
time_to_idle: &Option<Duration>,
884
+
batch_size: usize,
885
+
now: Instant,
886
+
) -> (u64, u64) {
887
+
let mut evicted_entry_count = 0u64;
888
+
let mut evicted_policy_weight = 0u64;
889
+
890
+
for _ in 0..batch_size {
891
+
let key = deq
892
+
.peek_front()
893
+
.and_then(|node| {
894
+
if Self::is_expired_entry_ao(time_to_idle, node, now) {
895
+
Some(Some(Rc::clone(&node.element.key)))
896
+
} else {
897
+
None
898
+
}
899
+
})
900
+
.unwrap_or_default();
901
+
902
+
if key.is_none() {
903
+
break;
904
+
}
905
+
906
+
let key = key.unwrap();
907
+
908
+
if let Some(mut entry) = cache.remove(&key) {
909
+
let weight = entry.policy_weight();
910
+
Deques::unlink_ao_from_deque(deq_name, deq, &mut entry);
911
+
Deques::unlink_wo(write_order_deq, &mut entry);
912
+
evicted_entry_count += 1;
913
+
evicted_policy_weight = evicted_policy_weight.saturating_add(weight as u64);
914
+
} else {
915
+
deq.pop_front();
916
+
}
917
+
}
918
+
919
+
(evicted_entry_count, evicted_policy_weight)
920
+
}
921
+
922
+
// Returns (u64, u64) where (evicted_entry_count, evicted_policy_weight).
923
+
#[inline]
924
+
fn remove_expired_wo(&mut self, batch_size: usize, now: Instant) -> (u64, u64) {
925
+
let mut evicted_entry_count = 0u64;
926
+
let mut evicted_policy_weight = 0u64;
927
+
let time_to_live = &self.time_to_live;
928
+
929
+
for _ in 0..batch_size {
930
+
let key = self
931
+
.deques
932
+
.write_order
933
+
.peek_front()
934
+
.and_then(|node| {
935
+
if Self::is_expired_entry_wo(time_to_live, node, now) {
936
+
Some(Some(Rc::clone(&node.element.key)))
937
+
} else {
938
+
None
939
+
}
940
+
})
941
+
.unwrap_or_default();
942
+
943
+
if key.is_none() {
944
+
break;
945
+
}
946
+
947
+
let key = key.unwrap();
948
+
949
+
if let Some(mut entry) = self.cache.remove(&key) {
950
+
let weight = entry.policy_weight();
951
+
self.deques.unlink_ao(&mut entry);
952
+
Deques::unlink_wo(&mut self.deques.write_order, &mut entry);
953
+
evicted_entry_count += 1;
954
+
evicted_policy_weight = evicted_policy_weight.saturating_sub(weight as u64);
955
+
} else {
956
+
self.deques.write_order.pop_front();
957
+
}
958
+
}
959
+
960
+
(evicted_entry_count, evicted_policy_weight)
961
+
}
962
+
963
+
#[inline]
964
+
fn evict_lru_entries(&mut self) {
965
+
const DEQ_NAME: &str = "probation";
966
+
967
+
let weights_to_evict = self.weights_to_evict();
968
+
let mut evicted_count = 0u64;
969
+
let mut evicted_policy_weight = 0u64;
970
+
971
+
{
972
+
let deqs = &mut self.deques;
973
+
let (probation, wo, cache) =
974
+
(&mut deqs.probation, &mut deqs.write_order, &mut self.cache);
975
+
976
+
for _ in 0..EVICTION_BATCH_SIZE {
977
+
if evicted_policy_weight >= weights_to_evict {
978
+
break;
979
+
}
980
+
981
+
// clippy::map_clone will give us a false positive warning here.
982
+
// Version: clippy 0.1.77 (f2048098a1c 2024-02-09) in Rust 1.77.0-beta.2
983
+
#[allow(clippy::map_clone)]
984
+
let key = probation
985
+
.peek_front()
986
+
.map(|node| Rc::clone(&node.element.key));
987
+
988
+
if key.is_none() {
989
+
break;
990
+
}
991
+
let key = key.unwrap();
992
+
993
+
if let Some(mut entry) = cache.remove(&key) {
994
+
let weight = entry.policy_weight();
995
+
Deques::unlink_ao_from_deque(DEQ_NAME, probation, &mut entry);
996
+
Deques::unlink_wo(wo, &mut entry);
997
+
evicted_count += 1;
998
+
evicted_policy_weight = evicted_policy_weight.saturating_add(weight as u64);
999
+
} else {
1000
+
probation.pop_front();
1001
+
}
1002
+
}
1003
+
}
1004
+
1005
+
self.entry_count -= evicted_count;
1006
+
self.saturating_sub_from_total_weight(evicted_policy_weight);
1007
+
}
1008
+
}
1009
+
1010
+
//
// for testing
//
#[cfg(test)]
impl<K, V, S> Cache<K, V, S>
where
    K: Hash + Eq,
    S: BuildHasher + Clone,
{
    /// Replaces the clock used for expiration checks, letting tests drive
    /// time forward deterministically via a mock clock.
    fn set_expiration_clock(&mut self, clock: Option<crate::common::time::Clock>) {
        self.expiration_clock = clock;
    }
}
1023
+
1024
+
/// Aggregated policy weight and estimated access frequency of one or more
/// entries, used when weighing an admission candidate against its victims.
#[derive(Default)]
struct EntrySizeAndFrequency {
    // Total policy weight of the aggregated entries.
    weight: u64,
    // Sum of the frequency-sketch estimates for the aggregated entries.
    freq: u32,
}
1029
+
1030
+
impl EntrySizeAndFrequency {
1031
+
fn new(policy_weight: u64) -> Self {
1032
+
Self {
1033
+
weight: policy_weight,
1034
+
..Default::default()
1035
+
}
1036
+
}
1037
+
1038
+
fn add_policy_weight<K, V>(&mut self, key: &K, value: &V, weigher: &mut Option<Weigher<K, V>>) {
1039
+
self.weight += weigh(weigher, key, value) as u64;
1040
+
}
1041
+
1042
+
fn add_frequency(&mut self, freq: &FrequencySketch, hash: u64) {
1043
+
self.freq += freq.frequency(hash) as u32;
1044
+
}
1045
+
}
1046
+
1047
+
// Access-Order Queue Node
1048
+
type AoqNode<K> = NonNull<DeqNode<KeyHashDate<K>>>;
1049
+
1050
+
enum AdmissionResult<K> {
1051
+
Admitted {
1052
+
victim_nodes: SmallVec<[AoqNode<K>; 8]>,
1053
+
victims_weight: u64,
1054
+
},
1055
+
Rejected,
1056
+
}
1057
+
1058
+
//
1059
+
// private free-standing functions
1060
+
//
1061
+
#[inline]
1062
+
fn weigh<K, V>(weigher: &mut Option<Weigher<K, V>>, key: &K, value: &V) -> u32 {
1063
+
weigher.as_mut().map(|w| w(key, value)).unwrap_or(1)
1064
+
}
1065
+
1066
+
// To see the debug prints, run test as `cargo test -- --nocapture`
1067
+
#[cfg(test)]
1068
+
mod tests {
1069
+
use wasm_bindgen_test::wasm_bindgen_test;
1070
+
1071
+
use super::Cache;
1072
+
use crate::common::time::Clock;
1073
+
1074
+
use std::time::Duration;
1075
+
1076
+
#[test]
1077
+
#[wasm_bindgen_test]
1078
+
fn basic_single_thread() {
1079
+
let mut cache = Cache::new(3);
1080
+
cache.enable_frequency_sketch_for_testing();
1081
+
1082
+
cache.insert("a", "alice");
1083
+
cache.insert("b", "bob");
1084
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1085
+
assert!(cache.contains_key(&"a"));
1086
+
assert!(cache.contains_key(&"b"));
1087
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1088
+
// counts: a -> 1, b -> 1
1089
+
1090
+
cache.insert("c", "cindy");
1091
+
assert_eq!(cache.get(&"c"), Some(&"cindy"));
1092
+
assert!(cache.contains_key(&"c"));
1093
+
// counts: a -> 1, b -> 1, c -> 1
1094
+
1095
+
assert!(cache.contains_key(&"a"));
1096
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1097
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1098
+
assert!(cache.contains_key(&"b"));
1099
+
// counts: a -> 2, b -> 2, c -> 1
1100
+
1101
+
// "d" should not be admitted because its frequency is too low.
1102
+
cache.insert("d", "david"); // count: d -> 0
1103
+
assert_eq!(cache.get(&"d"), None); // d -> 1
1104
+
assert!(!cache.contains_key(&"d"));
1105
+
1106
+
cache.insert("d", "david");
1107
+
assert!(!cache.contains_key(&"d"));
1108
+
assert_eq!(cache.get(&"d"), None); // d -> 2
1109
+
1110
+
// "d" should be admitted and "c" should be evicted
1111
+
// because d's frequency is higher than c's.
1112
+
cache.insert("d", "dennis");
1113
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1114
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1115
+
assert_eq!(cache.get(&"c"), None);
1116
+
assert_eq!(cache.get(&"d"), Some(&"dennis"));
1117
+
assert!(cache.contains_key(&"a"));
1118
+
assert!(cache.contains_key(&"b"));
1119
+
assert!(!cache.contains_key(&"c"));
1120
+
assert!(cache.contains_key(&"d"));
1121
+
1122
+
cache.invalidate(&"b");
1123
+
assert_eq!(cache.get(&"b"), None);
1124
+
assert!(!cache.contains_key(&"b"));
1125
+
}
1126
+
1127
+
#[test]
1128
+
#[wasm_bindgen_test]
1129
+
fn size_aware_eviction() {
1130
+
let weigher = |_k: &&str, v: &(&str, u32)| v.1;
1131
+
1132
+
let alice = ("alice", 10);
1133
+
let bob = ("bob", 15);
1134
+
let bill = ("bill", 20);
1135
+
let cindy = ("cindy", 5);
1136
+
let david = ("david", 15);
1137
+
let dennis = ("dennis", 15);
1138
+
1139
+
let mut cache = Cache::builder().max_capacity(31).weigher(weigher).build();
1140
+
cache.enable_frequency_sketch_for_testing();
1141
+
1142
+
cache.insert("a", alice);
1143
+
cache.insert("b", bob);
1144
+
assert_eq!(cache.get(&"a"), Some(&alice));
1145
+
assert!(cache.contains_key(&"a"));
1146
+
assert!(cache.contains_key(&"b"));
1147
+
assert_eq!(cache.get(&"b"), Some(&bob));
1148
+
// order (LRU -> MRU) and counts: a -> 1, b -> 1
1149
+
1150
+
cache.insert("c", cindy);
1151
+
assert_eq!(cache.get(&"c"), Some(&cindy));
1152
+
assert!(cache.contains_key(&"c"));
1153
+
// order and counts: a -> 1, b -> 1, c -> 1
1154
+
1155
+
assert!(cache.contains_key(&"a"));
1156
+
assert_eq!(cache.get(&"a"), Some(&alice));
1157
+
assert_eq!(cache.get(&"b"), Some(&bob));
1158
+
assert!(cache.contains_key(&"b"));
1159
+
// order and counts: c -> 1, a -> 2, b -> 2
1160
+
1161
+
// To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10).
1162
+
// "d" must have higher count than 3, which is the aggregated count
1163
+
// of "a" and "c".
1164
+
cache.insert("d", david); // count: d -> 0
1165
+
assert_eq!(cache.get(&"d"), None); // d -> 1
1166
+
assert!(!cache.contains_key(&"d"));
1167
+
1168
+
cache.insert("d", david);
1169
+
assert!(!cache.contains_key(&"d"));
1170
+
assert_eq!(cache.get(&"d"), None); // d -> 2
1171
+
1172
+
cache.insert("d", david);
1173
+
assert_eq!(cache.get(&"d"), None); // d -> 3
1174
+
assert!(!cache.contains_key(&"d"));
1175
+
1176
+
cache.insert("d", david);
1177
+
assert!(!cache.contains_key(&"d"));
1178
+
assert_eq!(cache.get(&"d"), None); // d -> 4
1179
+
1180
+
// Finally "d" should be admitted by evicting "c" and "a".
1181
+
cache.insert("d", dennis);
1182
+
assert_eq!(cache.get(&"a"), None);
1183
+
assert_eq!(cache.get(&"b"), Some(&bob));
1184
+
assert_eq!(cache.get(&"c"), None);
1185
+
assert_eq!(cache.get(&"d"), Some(&dennis));
1186
+
assert!(!cache.contains_key(&"a"));
1187
+
assert!(cache.contains_key(&"b"));
1188
+
assert!(!cache.contains_key(&"c"));
1189
+
assert!(cache.contains_key(&"d"));
1190
+
1191
+
// Update "b" with "bill" (w: 15 -> 20). This should evict "d" (w: 15).
1192
+
cache.insert("b", bill);
1193
+
assert_eq!(cache.get(&"b"), Some(&bill));
1194
+
assert_eq!(cache.get(&"d"), None);
1195
+
assert!(cache.contains_key(&"b"));
1196
+
assert!(!cache.contains_key(&"d"));
1197
+
1198
+
// Re-add "a" (w: 10) and update "b" with "bob" (w: 20 -> 15).
1199
+
cache.insert("a", alice);
1200
+
cache.insert("b", bob);
1201
+
assert_eq!(cache.get(&"a"), Some(&alice));
1202
+
assert_eq!(cache.get(&"b"), Some(&bob));
1203
+
assert_eq!(cache.get(&"d"), None);
1204
+
assert!(cache.contains_key(&"a"));
1205
+
assert!(cache.contains_key(&"b"));
1206
+
assert!(!cache.contains_key(&"d"));
1207
+
1208
+
// Verify the sizes.
1209
+
assert_eq!(cache.entry_count(), 2);
1210
+
assert_eq!(cache.weighted_size(), 25);
1211
+
}
1212
+
1213
+
#[test]
1214
+
#[wasm_bindgen_test]
1215
+
fn invalidate_all() {
1216
+
let mut cache = Cache::new(100);
1217
+
cache.enable_frequency_sketch_for_testing();
1218
+
1219
+
cache.insert("a", "alice");
1220
+
cache.insert("b", "bob");
1221
+
cache.insert("c", "cindy");
1222
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1223
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1224
+
assert_eq!(cache.get(&"c"), Some(&"cindy"));
1225
+
assert!(cache.contains_key(&"a"));
1226
+
assert!(cache.contains_key(&"b"));
1227
+
assert!(cache.contains_key(&"c"));
1228
+
1229
+
cache.invalidate_all();
1230
+
1231
+
cache.insert("d", "david");
1232
+
1233
+
assert!(cache.get(&"a").is_none());
1234
+
assert!(cache.get(&"b").is_none());
1235
+
assert!(cache.get(&"c").is_none());
1236
+
assert_eq!(cache.get(&"d"), Some(&"david"));
1237
+
assert!(!cache.contains_key(&"a"));
1238
+
assert!(!cache.contains_key(&"b"));
1239
+
assert!(!cache.contains_key(&"c"));
1240
+
assert!(cache.contains_key(&"d"));
1241
+
}
1242
+
1243
+
#[test]
1244
+
#[wasm_bindgen_test]
1245
+
fn invalidate_entries_if() {
1246
+
use std::collections::HashSet;
1247
+
1248
+
let mut cache = Cache::new(100);
1249
+
cache.enable_frequency_sketch_for_testing();
1250
+
1251
+
let (clock, mock) = Clock::mock();
1252
+
cache.set_expiration_clock(Some(clock));
1253
+
1254
+
cache.insert(0, "alice");
1255
+
cache.insert(1, "bob");
1256
+
cache.insert(2, "alex");
1257
+
1258
+
mock.increment(Duration::from_secs(5)); // 5 secs from the start.
1259
+
1260
+
assert_eq!(cache.get(&0), Some(&"alice"));
1261
+
assert_eq!(cache.get(&1), Some(&"bob"));
1262
+
assert_eq!(cache.get(&2), Some(&"alex"));
1263
+
assert!(cache.contains_key(&0));
1264
+
assert!(cache.contains_key(&1));
1265
+
assert!(cache.contains_key(&2));
1266
+
1267
+
let names = ["alice", "alex"].iter().cloned().collect::<HashSet<_>>();
1268
+
cache.invalidate_entries_if(move |_k, &v| names.contains(v));
1269
+
1270
+
mock.increment(Duration::from_secs(5)); // 10 secs from the start.
1271
+
1272
+
cache.insert(3, "alice");
1273
+
1274
+
assert!(cache.get(&0).is_none());
1275
+
assert!(cache.get(&2).is_none());
1276
+
assert_eq!(cache.get(&1), Some(&"bob"));
1277
+
// This should survive as it was inserted after calling invalidate_entries_if.
1278
+
assert_eq!(cache.get(&3), Some(&"alice"));
1279
+
1280
+
assert!(!cache.contains_key(&0));
1281
+
assert!(cache.contains_key(&1));
1282
+
assert!(!cache.contains_key(&2));
1283
+
assert!(cache.contains_key(&3));
1284
+
1285
+
assert_eq!(cache.cache.len(), 2);
1286
+
1287
+
mock.increment(Duration::from_secs(5)); // 15 secs from the start.
1288
+
1289
+
cache.invalidate_entries_if(|_k, &v| v == "alice");
1290
+
cache.invalidate_entries_if(|_k, &v| v == "bob");
1291
+
1292
+
assert!(cache.get(&1).is_none());
1293
+
assert!(cache.get(&3).is_none());
1294
+
1295
+
assert!(!cache.contains_key(&1));
1296
+
assert!(!cache.contains_key(&3));
1297
+
1298
+
assert_eq!(cache.cache.len(), 0);
1299
+
}
1300
+
1301
+
#[test]
1302
+
#[wasm_bindgen_test]
1303
+
fn time_to_live() {
1304
+
let mut cache = Cache::builder()
1305
+
.max_capacity(100)
1306
+
.time_to_live(Duration::from_secs(10))
1307
+
.build();
1308
+
cache.enable_frequency_sketch_for_testing();
1309
+
1310
+
let (clock, mock) = Clock::mock();
1311
+
cache.set_expiration_clock(Some(clock));
1312
+
1313
+
cache.insert("a", "alice");
1314
+
1315
+
mock.increment(Duration::from_secs(5)); // 5 secs from the start.
1316
+
1317
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1318
+
assert!(cache.contains_key(&"a"));
1319
+
1320
+
mock.increment(Duration::from_secs(5)); // 10 secs.
1321
+
1322
+
assert_eq!(cache.get(&"a"), None);
1323
+
assert!(!cache.contains_key(&"a"));
1324
+
assert_eq!(cache.iter().count(), 0);
1325
+
assert!(cache.cache.is_empty());
1326
+
1327
+
cache.insert("b", "bob");
1328
+
1329
+
assert_eq!(cache.cache.len(), 1);
1330
+
1331
+
mock.increment(Duration::from_secs(5)); // 15 secs.
1332
+
1333
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1334
+
assert!(cache.contains_key(&"b"));
1335
+
assert_eq!(cache.cache.len(), 1);
1336
+
1337
+
cache.insert("b", "bill");
1338
+
1339
+
mock.increment(Duration::from_secs(5)); // 20 secs
1340
+
1341
+
assert_eq!(cache.get(&"b"), Some(&"bill"));
1342
+
assert!(cache.contains_key(&"b"));
1343
+
assert_eq!(cache.cache.len(), 1);
1344
+
1345
+
mock.increment(Duration::from_secs(5)); // 25 secs
1346
+
1347
+
assert_eq!(cache.get(&"a"), None);
1348
+
assert_eq!(cache.get(&"b"), None);
1349
+
assert!(!cache.contains_key(&"a"));
1350
+
assert!(!cache.contains_key(&"b"));
1351
+
assert_eq!(cache.iter().count(), 0);
1352
+
assert!(cache.cache.is_empty());
1353
+
}
1354
+
1355
+
#[test]
1356
+
#[wasm_bindgen_test]
1357
+
fn time_to_idle() {
1358
+
let mut cache = Cache::builder()
1359
+
.max_capacity(100)
1360
+
.time_to_idle(Duration::from_secs(10))
1361
+
.build();
1362
+
cache.enable_frequency_sketch_for_testing();
1363
+
1364
+
let (clock, mock) = Clock::mock();
1365
+
cache.set_expiration_clock(Some(clock));
1366
+
1367
+
cache.insert("a", "alice");
1368
+
1369
+
mock.increment(Duration::from_secs(5)); // 5 secs from the start.
1370
+
1371
+
assert_eq!(cache.get(&"a"), Some(&"alice"));
1372
+
1373
+
mock.increment(Duration::from_secs(5)); // 10 secs.
1374
+
1375
+
cache.insert("b", "bob");
1376
+
1377
+
assert_eq!(cache.cache.len(), 2);
1378
+
1379
+
mock.increment(Duration::from_secs(2)); // 12 secs.
1380
+
1381
+
// contains_key does not reset the idle timer for the key.
1382
+
assert!(cache.contains_key(&"a"));
1383
+
assert!(cache.contains_key(&"b"));
1384
+
1385
+
assert_eq!(cache.cache.len(), 2);
1386
+
1387
+
mock.increment(Duration::from_secs(3)); // 15 secs.
1388
+
1389
+
assert_eq!(cache.get(&"a"), None);
1390
+
assert_eq!(cache.get(&"b"), Some(&"bob"));
1391
+
assert!(!cache.contains_key(&"a"));
1392
+
assert!(cache.contains_key(&"b"));
1393
+
assert_eq!(cache.iter().count(), 1);
1394
+
assert_eq!(cache.cache.len(), 1);
1395
+
1396
+
mock.increment(Duration::from_secs(10)); // 25 secs
1397
+
1398
+
assert_eq!(cache.get(&"a"), None);
1399
+
assert_eq!(cache.get(&"b"), None);
1400
+
assert!(!cache.contains_key(&"a"));
1401
+
assert!(!cache.contains_key(&"b"));
1402
+
assert_eq!(cache.iter().count(), 0);
1403
+
assert!(cache.cache.is_empty());
1404
+
}
1405
+
1406
+
#[cfg_attr(target_pointer_width = "16", ignore)]
1407
+
#[test]
1408
+
#[wasm_bindgen_test]
1409
+
fn test_skt_capacity_will_not_overflow() {
1410
+
// power of two
1411
+
let pot = |exp| 2u64.pow(exp);
1412
+
1413
+
let ensure_sketch_len = |max_capacity, len, name| {
1414
+
let mut cache = Cache::<u8, u8>::new(max_capacity);
1415
+
cache.enable_frequency_sketch_for_testing();
1416
+
assert_eq!(cache.frequency_sketch.table_len(), len as usize, "{}", name);
1417
+
};
1418
+
1419
+
if cfg!(target_pointer_width = "32") {
1420
+
let pot24 = pot(24);
1421
+
let pot16 = pot(16);
1422
+
ensure_sketch_len(0, 128, "0");
1423
+
ensure_sketch_len(128, 128, "128");
1424
+
ensure_sketch_len(pot16, pot16, "pot16");
1425
+
// due to ceiling to next_power_of_two
1426
+
ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");
1427
+
// due to ceiling to next_power_of_two
1428
+
ensure_sketch_len(pot24 - 1, pot24, "pot24 - 1");
1429
+
ensure_sketch_len(pot24, pot24, "pot24");
1430
+
ensure_sketch_len(pot(27), pot24, "pot(27)");
1431
+
ensure_sketch_len(u32::MAX as u64, pot24, "u32::MAX");
1432
+
} else {
1433
+
// target_pointer_width: 64 or larger.
1434
+
let pot30 = pot(30);
1435
+
let pot16 = pot(16);
1436
+
ensure_sketch_len(0, 128, "0");
1437
+
ensure_sketch_len(128, 128, "128");
1438
+
ensure_sketch_len(pot16, pot16, "pot16");
1439
+
// due to ceiling to next_power_of_two
1440
+
ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1");
1441
+
1442
+
// The following tests will allocate large memory (~8GiB).
1443
+
// Skip when running on Circle CI.
1444
+
if !cfg!(circleci) {
1445
+
// due to ceiling to next_power_of_two
1446
+
ensure_sketch_len(pot30 - 1, pot30, "pot30- 1");
1447
+
ensure_sketch_len(pot30, pot30, "pot30");
1448
+
ensure_sketch_len(u64::MAX, pot30, "u64::MAX");
1449
+
}
1450
+
};
1451
+
}
1452
+
1453
+
#[test]
1454
+
#[wasm_bindgen_test]
1455
+
fn test_debug_format() {
1456
+
let mut cache = Cache::new(10);
1457
+
cache.insert('a', "alice");
1458
+
cache.insert('b', "bob");
1459
+
cache.insert('c', "cindy");
1460
+
1461
+
let debug_str = format!("{:?}", cache);
1462
+
assert!(debug_str.starts_with('{'));
1463
+
assert!(debug_str.contains(r#"'a': "alice""#));
1464
+
assert!(debug_str.contains(r#"'b': "bob""#));
1465
+
assert!(debug_str.contains(r#"'c': "cindy""#));
1466
+
assert!(debug_str.ends_with('}'));
1467
+
}
1468
+
}
+157
crates/mini-moka-vendored/src/unsync/deques.rs
+157
crates/mini-moka-vendored/src/unsync/deques.rs
···
1
+
use super::{KeyDate, KeyHashDate, ValueEntry};
2
+
use crate::common::{
3
+
deque::{DeqNode, Deque},
4
+
CacheRegion,
5
+
};
6
+
7
+
use std::ptr::NonNull;
8
+
use tagptr::TagNonNull;
9
+
10
+
pub(crate) struct Deques<K> {
11
+
pub(crate) window: Deque<KeyHashDate<K>>, // Not used yet.
12
+
pub(crate) probation: Deque<KeyHashDate<K>>,
13
+
pub(crate) protected: Deque<KeyHashDate<K>>, // Not used yet.
14
+
pub(crate) write_order: Deque<KeyDate<K>>,
15
+
}
16
+
17
+
impl<K> Default for Deques<K> {
18
+
fn default() -> Self {
19
+
Self {
20
+
window: Deque::new(CacheRegion::Window),
21
+
probation: Deque::new(CacheRegion::MainProbation),
22
+
protected: Deque::new(CacheRegion::MainProtected),
23
+
write_order: Deque::new(CacheRegion::Other),
24
+
}
25
+
}
26
+
}
27
+
28
+
impl<K> Deques<K> {
29
+
pub(crate) fn clear(&mut self) {
30
+
self.window = Deque::new(CacheRegion::Window);
31
+
self.probation = Deque::new(CacheRegion::MainProbation);
32
+
self.protected = Deque::new(CacheRegion::MainProtected);
33
+
self.write_order = Deque::new(CacheRegion::Other);
34
+
}
35
+
36
+
pub(crate) fn push_back_ao<V>(
37
+
&mut self,
38
+
region: CacheRegion,
39
+
kh: KeyHashDate<K>,
40
+
entry: &mut ValueEntry<K, V>,
41
+
) {
42
+
let node = Box::new(DeqNode::new(kh));
43
+
let node = match region {
44
+
CacheRegion::Window => self.window.push_back(node),
45
+
CacheRegion::MainProbation => self.probation.push_back(node),
46
+
CacheRegion::MainProtected => self.protected.push_back(node),
47
+
CacheRegion::Other => unreachable!(),
48
+
};
49
+
let tagged_node = TagNonNull::compose(node, region as usize);
50
+
entry.set_access_order_q_node(Some(tagged_node));
51
+
}
52
+
53
+
pub(crate) fn push_back_wo<V>(&mut self, kh: KeyDate<K>, entry: &mut ValueEntry<K, V>) {
54
+
let node = Box::new(DeqNode::new(kh));
55
+
let node = self.write_order.push_back(node);
56
+
entry.set_write_order_q_node(Some(node));
57
+
}
58
+
59
+
pub(crate) fn move_to_back_ao<V>(&mut self, entry: &ValueEntry<K, V>) {
60
+
if let Some(tagged_node) = entry.access_order_q_node() {
61
+
let (node, tag) = tagged_node.decompose();
62
+
let p = unsafe { node.as_ref() };
63
+
match tag.into() {
64
+
CacheRegion::Window if self.window.contains(p) => {
65
+
unsafe { self.window.move_to_back(node) };
66
+
}
67
+
CacheRegion::MainProbation if self.probation.contains(p) => {
68
+
unsafe { self.probation.move_to_back(node) };
69
+
}
70
+
CacheRegion::MainProtected if self.protected.contains(p) => {
71
+
unsafe { self.protected.move_to_back(node) };
72
+
}
73
+
_ => unreachable!(),
74
+
}
75
+
}
76
+
}
77
+
78
+
pub(crate) fn move_to_back_wo<V>(&mut self, entry: &ValueEntry<K, V>) {
79
+
let node = entry.write_order_q_node().unwrap();
80
+
let p = unsafe { node.as_ref() };
81
+
if self.write_order.contains(p) {
82
+
unsafe { self.write_order.move_to_back(node) };
83
+
}
84
+
}
85
+
86
+
pub(crate) fn unlink_ao<V>(&mut self, entry: &mut ValueEntry<K, V>) {
87
+
if let Some(node) = entry.take_access_order_q_node() {
88
+
self.unlink_node_ao(node);
89
+
}
90
+
}
91
+
92
+
pub(crate) fn unlink_ao_from_deque<V>(
93
+
deq_name: &str,
94
+
deq: &mut Deque<KeyHashDate<K>>,
95
+
entry: &mut ValueEntry<K, V>,
96
+
) {
97
+
if let Some(node) = entry.take_access_order_q_node() {
98
+
unsafe { Self::unlink_node_ao_from_deque(deq_name, deq, node) };
99
+
}
100
+
}
101
+
102
+
pub(crate) fn unlink_wo<V>(deq: &mut Deque<KeyDate<K>>, entry: &mut ValueEntry<K, V>) {
103
+
if let Some(node) = entry.take_write_order_q_node() {
104
+
Self::unlink_node_wo(deq, node);
105
+
}
106
+
}
107
+
108
+
pub(crate) fn unlink_node_ao(&mut self, tagged_node: TagNonNull<DeqNode<KeyHashDate<K>>, 2>) {
109
+
unsafe {
110
+
match tagged_node.decompose_tag().into() {
111
+
CacheRegion::Window => {
112
+
Self::unlink_node_ao_from_deque("window", &mut self.window, tagged_node)
113
+
}
114
+
CacheRegion::MainProbation => {
115
+
Self::unlink_node_ao_from_deque("probation", &mut self.probation, tagged_node)
116
+
}
117
+
CacheRegion::MainProtected => {
118
+
Self::unlink_node_ao_from_deque("protected", &mut self.protected, tagged_node)
119
+
}
120
+
_ => unreachable!(),
121
+
}
122
+
}
123
+
}
124
+
125
+
unsafe fn unlink_node_ao_from_deque(
126
+
deq_name: &str,
127
+
deq: &mut Deque<KeyHashDate<K>>,
128
+
tagged_node: TagNonNull<DeqNode<KeyHashDate<K>>, 2>,
129
+
) {
130
+
let (node, tag) = tagged_node.decompose();
131
+
if deq.region() == tag && deq.contains(node.as_ref()) {
132
+
// https://github.com/moka-rs/moka/issues/64
133
+
deq.unlink_and_drop(node);
134
+
} else {
135
+
panic!(
136
+
"unlink_node - node is not a member of {} deque. {:?}",
137
+
deq_name,
138
+
node.as_ref()
139
+
)
140
+
}
141
+
}
142
+
143
+
pub(crate) fn unlink_node_wo(deq: &mut Deque<KeyDate<K>>, node: NonNull<DeqNode<KeyDate<K>>>) {
144
+
unsafe {
145
+
let p = node.as_ref();
146
+
if deq.contains(p) {
147
+
// https://github.com/moka-rs/moka/issues/64
148
+
deq.unlink_and_drop(node);
149
+
} else {
150
+
panic!(
151
+
"unlink_node - node is not a member of write_order deque. {:?}",
152
+
p
153
+
)
154
+
}
155
+
}
156
+
}
157
+
}
+36
crates/mini-moka-vendored/src/unsync/iter.rs
+36
crates/mini-moka-vendored/src/unsync/iter.rs
···
1
+
use super::{Cache, ValueEntry};
2
+
3
+
use std::{
4
+
hash::{BuildHasher, Hash},
5
+
rc::Rc,
6
+
};
7
+
8
+
type HashMapIter<'i, K, V> = std::collections::hash_map::Iter<'i, Rc<K>, ValueEntry<K, V>>;
9
+
10
+
pub struct Iter<'i, K, V, S> {
11
+
cache: &'i Cache<K, V, S>,
12
+
iter: HashMapIter<'i, K, V>,
13
+
}
14
+
15
+
impl<'i, K, V, S> Iter<'i, K, V, S> {
16
+
pub(crate) fn new(cache: &'i Cache<K, V, S>, iter: HashMapIter<'i, K, V>) -> Self {
17
+
Self { cache, iter }
18
+
}
19
+
}
20
+
21
+
impl<'i, K, V, S> Iterator for Iter<'i, K, V, S>
22
+
where
23
+
K: Hash + Eq,
24
+
S: BuildHasher + Clone,
25
+
{
26
+
type Item = (&'i K, &'i V);
27
+
28
+
fn next(&mut self) -> Option<Self::Item> {
29
+
for (k, entry) in self.iter.by_ref() {
30
+
if !self.cache.is_expired_entry(entry) {
31
+
return Some((k, &entry.value));
32
+
}
33
+
}
34
+
None
35
+
}
36
+
}
+64
crates/mini-moka-vendored/tests/compile_tests/sync/clone/sync_cache_clone.rs
+64
crates/mini-moka-vendored/tests/compile_tests/sync/clone/sync_cache_clone.rs
···
1
+
// https://github.com/moka-rs/moka/issues/131
2
+
3
+
use std::{collections::hash_map::DefaultHasher, hash::BuildHasher, sync::Arc};
4
+
5
+
use mini_moka::sync::Cache;
6
+
7
+
fn main() {
8
+
f1_fail();
9
+
f2_pass();
10
+
f3_fail();
11
+
f4_pass();
12
+
}
13
+
14
+
const CAP: u64 = 100;
15
+
16
+
fn f1_fail() {
17
+
// This should fail because V is not Clone.
18
+
let _cache: Cache<MyKey, MyValue> = Cache::new(CAP);
19
+
}
20
+
21
+
fn f2_pass() {
22
+
let cache: Cache<MyKey, Arc<MyValue>> = Cache::new(CAP);
23
+
let _ = cache.clone();
24
+
}
25
+
26
+
fn f3_fail() {
27
+
// This should fail because S is not Clone.
28
+
let _cache: Cache<MyKey, Arc<MyValue>, _> = Cache::builder().build_with_hasher(MyBuildHasher1);
29
+
}
30
+
31
+
fn f4_pass() {
32
+
let cache: Cache<MyKey, Arc<MyValue>, _> = Cache::builder().build_with_hasher(MyBuildHasher2);
33
+
let _ = cache.clone();
34
+
}
35
+
36
+
// MyKey is not Clone.
37
+
#[derive(Hash, PartialEq, Eq)]
38
+
pub struct MyKey(i32);
39
+
40
+
// MyValue is not Clone.
41
+
pub struct MyValue(i32);
42
+
43
+
// MyBuildHasher1 is not Clone.
44
+
pub struct MyBuildHasher1;
45
+
46
+
impl BuildHasher for MyBuildHasher1 {
47
+
type Hasher = DefaultHasher;
48
+
49
+
fn build_hasher(&self) -> Self::Hasher {
50
+
unimplemented!()
51
+
}
52
+
}
53
+
54
+
// MyBuildHasher1 is Clone.
55
+
#[derive(Clone)]
56
+
pub struct MyBuildHasher2;
57
+
58
+
impl BuildHasher for MyBuildHasher2 {
59
+
type Hasher = DefaultHasher;
60
+
61
+
fn build_hasher(&self) -> Self::Hasher {
62
+
unimplemented!()
63
+
}
64
+
}
+33
crates/mini-moka-vendored/tests/compile_tests/sync/clone/sync_cache_clone.stderr
+33
crates/mini-moka-vendored/tests/compile_tests/sync/clone/sync_cache_clone.stderr
···
1
+
error[E0277]: the trait bound `MyValue: Clone` is not satisfied
2
+
--> tests/compile_tests/sync/clone/sync_cache_clone.rs:18:41
3
+
|
4
+
18 | let _cache: Cache<MyKey, MyValue> = Cache::new(CAP);
5
+
| ^^^^^^^^^^ the trait `Clone` is not implemented for `MyValue`
6
+
|
7
+
note: required by a bound in `mini_moka::sync::Cache::<K, V>::new`
8
+
--> src/sync/cache.rs
9
+
|
10
+
| V: Clone + Send + Sync + 'static,
11
+
| ^^^^^ required by this bound in `mini_moka::sync::Cache::<K, V>::new`
12
+
help: consider annotating `MyValue` with `#[derive(Clone)]`
13
+
|
14
+
41 | #[derive(Clone)]
15
+
|
16
+
17
+
error[E0277]: the trait bound `MyBuildHasher1: Clone` is not satisfied
18
+
--> tests/compile_tests/sync/clone/sync_cache_clone.rs:28:84
19
+
|
20
+
28 | let _cache: Cache<MyKey, Arc<MyValue>, _> = Cache::builder().build_with_hasher(MyBuildHasher1);
21
+
| ----------------- ^^^^^^^^^^^^^^ the trait `Clone` is not implemented for `MyBuildHasher1`
22
+
| |
23
+
| required by a bound introduced by this call
24
+
|
25
+
note: required by a bound in `mini_moka::sync::CacheBuilder::<K, V, mini_moka::sync::Cache<K, V>>::build_with_hasher`
26
+
--> src/sync/builder.rs
27
+
|
28
+
| S: BuildHasher + Clone + Send + Sync + 'static,
29
+
| ^^^^^ required by this bound in `mini_moka::sync::CacheBuilder::<K, V, mini_moka::sync::Cache<K, V>>::build_with_hasher`
30
+
help: consider annotating `MyBuildHasher1` with `#[derive(Clone)]`
31
+
|
32
+
44 | #[derive(Clone)]
33
+
|