+2 -23 .github/actions/setup-nix/action.yml
···
3
3
description: |
4
4
Sets up the Nix environment for wire, removing unnecessary bloat and installing Nix along with proper
5
5
substituters being set
6
-
inputs:
7
-
withQEMU:
8
-
description: Enable QEMU
9
-
default: false
10
6
runs:
11
7
using: "composite"
12
8
steps:
13
-
- uses: wimpysworld/nothing-but-nix@main
14
-
with:
15
-
hatchet-protocol: "carve"
16
9
- name: Generate nix.conf
17
10
shell: bash
18
11
id: config
···
21
14
echo 'config<<EOF'
22
15
echo "system-features = nixos-test benchmark big-parallel kvm"
23
16
24
-
if [ "${{ inputs.withQEMU }}" = "true" ]; then
25
-
echo "extra-platforms = aarch64-linux i686-linux"
26
-
fi
27
-
28
17
echo "substituters = https://cache.nixos.org?priority=1 https://cache.althaea.zone?priority=2 https://cache.garnix.io?priority=3"
29
18
echo "trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= cache.garnix.io:CTFPyKSLcx5RMJKfLo5EEPUObbA78b0YQ2DTCJXqr9g= cache.althaea.zone:BelRpa863X9q3Y+AOnl5SM7QFzre3qb+5I7g2s/mqHI="
30
19
31
20
echo EOF
32
21
} >> "$GITHUB_OUTPUT"
33
-
- uses: cachix/install-nix-action@v31
22
+
- uses: cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15
34
23
with:
35
24
nix_path: nixpkgs=channel:nixos-unstable
36
25
extra_nix_config: ${{ steps.config.outputs.config }}
37
26
- name: Sanity check nix.conf
38
-
if: ${{ inputs.withQEMU == 'true' && runner.debug == '1' }}
27
+
if: ${{ runner.debug == '1' }}
39
28
shell: bash
40
29
run: cat /etc/nix/nix.conf
41
-
- name: Register binfmt
42
-
if: ${{ inputs.withQEMU == 'true' }}
43
-
shell: bash
44
-
run: |
45
-
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
46
-
- name: Sanity check binfmt
47
-
if: ${{ inputs.withQEMU == 'true' && runner.debug == '1' }}
48
-
shell: bash
49
-
run: |
50
-
cat /proc/sys/fs/binfmt_misc/qemu-aarch64
+21 -6 .github/workflows/autofix.yml
···
3
3
on:
4
4
pull_request:
5
5
push:
6
-
branches: ["trunk"]
7
6
8
7
permissions:
9
8
contents: read
···
14
13
outputs:
15
14
docs-pnpm: ${{ steps.filter.outputs.docs-pnpm }}
16
15
steps:
17
-
- uses: actions/checkout@v6
18
-
- uses: dorny/paths-filter@v3
16
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
17
+
with:
18
+
persist-credentials: false
19
+
- uses: dorny/paths-filter@668c092af3649c4b664c54e4b704aa46782f6f7c
19
20
id: filter
20
21
with:
21
22
filters: |
22
23
docs-pnpm:
23
24
- 'doc/pnpm-lock.yaml'
24
25
autofix:
25
-
runs-on: ubuntu-latest
26
+
runs-on: blacksmith-2vcpu-ubuntu-2404
26
27
needs: check-changes
28
+
env:
29
+
UV_CACHE_DIR: /tmp/.uv-cache
27
30
steps:
28
-
- uses: actions/checkout@v6
31
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
32
+
with:
33
+
persist-credentials: false
29
34
- uses: ./.github/actions/setup-nix
30
35
- name: Cache Cargo
31
36
uses: actions/cache@v5
···
36
41
~/.cargo/registry/cache/
37
42
~/.cargo/git/db/
38
43
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
44
+
- name: Cache uv
45
+
uses: actions/cache@v5
46
+
with:
47
+
path: /tmp/.uv-cache
48
+
key: uv-${{ runner.os }}-${{ hashFiles('**/uv.lock') }}
49
+
restore-keys: |
50
+
uv-${{ runner.os }}-${{ hashFiles('**/uv.lock') }}
51
+
uv-${{ runner.os }}
39
52
- name: setup sqlx
40
-
run: nix develop -L -v -c sqlx database setup --source ./wire/lib/src/cache/migrations/
53
+
run: nix develop -L -v -c sqlx database setup --source ./crates/core/src/cache/migrations/
41
54
- name: autofix sqlx
42
55
run: nix develop -L -v -c cargo sqlx prepare --workspace
43
56
- name: clippy --fix
···
48
61
- name: Upgrade Hash
49
62
if: ${{ needs.check-changes.outputs.docs-pnpm == 'true' }}
50
63
run: bash ./doc/upgrade.sh
64
+
- name: Minimise uv cache
65
+
run: nix develop -L -v -c uv cache prune --ci
51
66
- uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27
+10 -4 .github/workflows/follow-nixpkgs.yml
···
8
8
pre-job:
9
9
continue-on-error: true
10
10
runs-on: ubuntu-latest
11
+
permissions: {}
11
12
outputs:
12
13
number: ${{ steps.skip_check.outputs.number }}
13
14
steps:
14
-
- uses: actions/checkout@v6
15
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
16
+
with:
17
+
persist-credentials: false
15
18
- id: skip_check
16
19
run: |
17
20
echo "number=$(gh pr list --label flake-lock-update --state open --json id | jq 'length')" >> "$GITHUB_OUTPUT"
···
19
22
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
20
23
update:
21
24
runs-on: ubuntu-latest
25
+
permissions: {}
22
26
needs: pre-job
23
27
if: needs.pre-job.outputs.number == '0'
24
28
steps:
25
-
- uses: actions/checkout@v6
29
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
26
30
with:
27
31
ssh-key: ${{ secrets.FOLLOW_NIXPKGS_PRIVATE_KEY }}
28
32
ref: main
33
+
persist-credentials: false
29
34
- uses: ./.github/actions/setup-nix
30
35
- run: |
31
36
git config user.name 'github-actions[bot]'
···
36
41
run: echo "date=$(date +'%Y-%m-%d')" >> "$GITHUB_OUTPUT"
37
42
- name: Create Pull Request
38
43
id: cpr
39
-
uses: peter-evans/create-pull-request@v8
44
+
uses: peter-evans/create-pull-request@0979079bc20c05bbbb590a56c21c4e2b1d1f1bbe
40
45
with:
41
46
title: Update flake.lock ${{ steps.date.outputs.date }}
42
47
labels: flake-lock-update
43
48
branch: ci/flake-update
44
49
- name: Enable automerge
45
50
if: steps.cpr.outputs.pull-request-number
46
-
run: gh pr merge --squash --auto "${{ steps.cpr.outputs.pull-request-number }}"
51
+
run: gh pr merge --squash --auto "${STEPS_CPR_OUTPUTS_PULL_REQUEST_NUMBER}"
47
52
env:
48
53
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
54
+
STEPS_CPR_OUTPUTS_PULL_REQUEST_NUMBER: ${{ steps.cpr.outputs.pull-request-number }}
+1 -1 .github/workflows/labeler.yml
+7 -3 .github/workflows/pages.yml
···
10
10
pre-job:
11
11
continue-on-error: true
12
12
runs-on: ubuntu-latest
13
+
permissions: {}
13
14
outputs:
14
15
should_skip: ${{ steps.skip_check.outputs.should_skip }}
15
16
steps:
16
17
- id: skip_check
17
-
uses: fkirc/skip-duplicate-actions@v5
18
+
uses: fkirc/skip-duplicate-actions@04a1aebece824b56e6ad6a401d015479cd1c50b3
18
19
deploy:
19
20
runs-on: ubuntu-latest
21
+
permissions: {}
20
22
environment:
21
23
name: production
22
24
url: https://wire.althaea.zone/
23
25
if: github.actor != 'dependabot[bot]' && needs.pre-job.outputs.should_skip != 'true'
24
26
steps:
25
-
- uses: actions/checkout@v6
27
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
28
+
with:
29
+
persist-credentials: false
26
30
- uses: ./.github/actions/setup-nix
27
31
- run: nix build .#docs
28
32
if: github.ref == 'refs/heads/stable'
···
30
34
if: github.ref != 'refs/heads/stable'
31
35
- name: Deploy to Cloudflare Pages
32
36
id: deployment
33
-
uses: cloudflare/wrangler-action@v3
37
+
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65
34
38
with:
35
39
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
36
40
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+17 -6 .github/workflows/pr-preview.yml
···
7
7
runs-on: ubuntu-latest
8
8
outputs:
9
9
number: ${{ steps.find-pr.outputs.number }}
10
+
permissions: {}
10
11
if: ${{ github.actor != 'dependabot[bot]' }}
11
12
steps:
12
-
- uses: jwalton/gh-find-current-pr@master
13
+
- uses: jwalton/gh-find-current-pr@89ee5799558265a1e0e31fab792ebb4ee91c016b
13
14
id: find-pr
14
15
with:
15
16
state: all
16
17
base-ref:
17
18
runs-on: ubuntu-latest
19
+
permissions: {}
18
20
needs: get-pr
19
21
outputs:
20
22
base-ref: ${{ steps.base-ref.outputs.base-ref }}
···
24
26
- name: Locate Base Ref
25
27
id: base-ref
26
28
run: |
27
-
echo "base-ref=$(gh api /repos/${{ github.repository }}/pulls/${{ needs.get-pr.outputs.number }} | jq -r '.base.ref')" >> "$GITHUB_OUTPUT"
29
+
echo "base-ref=$(gh api /repos/${{ github.repository }}/pulls/${NEEDS_GET_PR_OUTPUTS_NUMBER} | jq -r '.base.ref')" >> "$GITHUB_OUTPUT"
28
30
env:
29
31
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
32
+
NEEDS_GET_PR_OUTPUTS_NUMBER: ${{ needs.get-pr.outputs.number }}
30
33
eval-base:
31
34
runs-on: ubuntu-latest
35
+
permissions: {}
32
36
needs: base-ref
33
37
outputs:
34
38
drv: ${{ steps.drv.outputs.drv }}
35
39
steps:
36
-
- uses: actions/checkout@v6
40
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
37
41
with:
38
42
ref: ${{ needs.base-ref.outputs.base-ref }}
43
+
persist-credentials: false
39
44
- uses: ./.github/actions/setup-nix
40
45
- id: drv
41
46
run: echo "drv=$(nix eval .#docs --json)" >> "$GITHUB_OUTPUT"
42
47
eval-head:
43
48
runs-on: ubuntu-latest
49
+
permissions: {}
44
50
needs: get-pr
45
51
outputs:
46
52
drv: ${{ steps.drv.outputs.drv }}
47
53
if: ${{ needs.get-pr.outputs.number != '' }}
48
54
steps:
49
-
- uses: actions/checkout@v6
55
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
56
+
with:
57
+
persist-credentials: false
50
58
- uses: ./.github/actions/setup-nix
51
59
- id: drv
52
60
run: echo "drv=$(nix eval .#docs --json)" >> "$GITHUB_OUTPUT"
53
61
deploy:
54
62
runs-on: ubuntu-latest
63
+
permissions: {}
55
64
needs:
56
65
- eval-head
57
66
- eval-base
···
62
71
# skip if nothing changed in the docs package
63
72
if: needs.eval-head.outputs.drv != needs.eval-base.outputs.drv
64
73
steps:
65
-
- uses: actions/checkout@v6
74
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
75
+
with:
76
+
persist-credentials: false
66
77
- uses: ./.github/actions/setup-nix
67
78
- run: nix build .#docs
68
79
- name: Deploy to Cloudflare Pages
69
80
id: deployment
70
-
uses: cloudflare/wrangler-action@v3
81
+
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65
71
82
with:
72
83
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
73
84
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+5 -2 .github/workflows/test.yml
···
7
7
pre-job:
8
8
continue-on-error: true
9
9
runs-on: ubuntu-latest
10
+
permissions: {}
10
11
outputs:
11
12
should_skip: ${{ steps.skip_check.outputs.should_skip }}
12
13
steps:
13
14
- id: skip_check
14
-
uses: fkirc/skip-duplicate-actions@v5
15
+
uses: fkirc/skip-duplicate-actions@04a1aebece824b56e6ad6a401d015479cd1c50b3
15
16
with:
16
17
concurrent_skipping: "same_content_newer"
17
18
cancel_others: "true"
···
22
23
contents: read
23
24
if: needs.pre-job.outputs.should_skip != 'true'
24
25
steps:
25
-
- uses: actions/checkout@v6
26
+
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
27
+
with:
28
+
persist-credentials: false
26
29
- uses: ./.github/actions/setup-nix
27
30
- name: Build Tests
28
31
run: nix build .#cargo-tests -L -vv
+4 .gitignore
+62 -39 CHANGELOG.md
···
7
7
8
8
## [Unreleased] - yyyy-mm-dd
9
9
10
+
## [v1.1.1] - 2025-01-05
11
+
12
+
### Fixed
13
+
14
+
- Fix a bug where wire was attempting to SSH to the local machine when `buildOnTarget` &
15
+
`allowLocalDeployment` were true.
16
+
17
+
## [v1.1.0] - 2025-12-31
18
+
19
+
### Added
20
+
21
+
- Add a `--substitute-on-destination` argument.
22
+
- Add the `meta.nodeSpecialArgs` meta option.
23
+
- Add `wire build`, a new command to build nodes offline.
24
+
It is distinct from `wire apply build`, as it will not ping
25
+
or push the result, making it useful for CI.
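A minimal sketch of how `wire build` might be used from CI, based on the `--on` argument, the `-P`/`--partition` flag, and the stdout behaviour introduced in this change (the tag name `@servers` is purely illustrative):

```sh
# CI job 1 of 2: build the first half of the nodes tagged @servers
wire build --on @servers -P 1/2

# CI job 2 of 2: build the remaining half
wire build --on @servers -P 2/2

# Built store paths are printed to stdout, so they can be captured for later use
wire build --on @servers > store-paths.txt
```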
26
+
27
+
### Changed
28
+
29
+
- Build store paths will be output to stdout.
30
+
31
+
### Fixed
32
+
33
+
- Fix invalidated caches not actually returning `None`.
34
+
10
35
## [v1.0.0] - 2025-12-17
11
36
12
37
### Added
13
38
14
-
- SIGINT signal handling
39
+
- SIGINT signal handling.
15
40
16
41
### Changed
17
42
18
-
- Fix a bug related to key filtering
19
-
- Invalidate caches that reference garbage collected paths
43
+
- Invalidate caches that reference garbage collected paths.
44
+
45
+
### Fixed
46
+
47
+
- Fix key filtering logic.
20
48
21
49
## [v1.0.0-beta.0] - 2025-12-02
22
50
23
51
### Added
24
52
25
-
- `meta.nodeNixpkgs` was implemented.
26
-
- Caching of hive evaluation for flakes.
53
+
- Implement `meta.nodeNixpkgs`.
54
+
- Add caching of hive evaluation for flakes.
27
55
28
56
### Changed
29
57
30
-
- Tests are now ran on 25.11.
58
+
- Run tests against 25.11.
31
59
32
60
## [v1.0.0-alpha.1] - 2025-11-24
33
61
34
62
### Added
35
63
36
-
- `--handle-unreachable` arg was added. You can use `--handle-unreachable ignore` to
64
+
- Add `--handle-unreachable`. You can use `--handle-unreachable ignore` to
37
65
ignore unreachable nodes in the status of the deployment.
38
-
- A basic progress bar
66
+
- Add a basic progress bar.
39
67
40
68
### Changed
41
69
42
-
- Reverted "Wire will now attempt to use SSH ControlMaster by default."
43
-
- `show` subcommand looks nicer now.
44
-
- `build` step will always build remotely when the node is going to be applied
45
-
locally.
70
+
- Revert "Wire will now attempt to use SSH ControlMaster by default."
71
+
- Change the `show` subcommand to look nicer.
72
+
- Change the `build` step to always build remotely when the node is
73
+
going to be applied locally.
46
74
47
75
## [v1.0.0-alpha.0] - 2025-10-22
48
76
49
77
### Added
50
78
51
-
- `--ssh-accept-host` was added.
52
-
- `--on -` will now read additional apply targets from stdin.
53
-
- `{key.name}-key.{path,service}` systemd units where added.
54
-
- `--path` now supports flakerefs (`github:foo/bar`, `git+file:///...`,
55
-
`https://.../main.tar.gz`, etc).
56
-
- `--flake` is now an alias for `--path`.
57
-
- Wire will now attempt to use SSH ControlMaster by default.
79
+
- Add `--ssh-accept-host` argument.
80
+
- Add `--on -` syntax to the `--on` argument.
81
+
Passing `-` will now read additional apply targets from stdin.
82
+
- Add `{key.name}-key.{path,service}` systemd units.
83
+
- Add the `--flake` argument as an alias for `--path`.
58
84
- A terminal bell will be output if a sudo / ssh prompt is ever printed.
85
+
- Add a real tutorial and separate many how-to guides.
86
+
The tutorial leads the user through creating and deploying a wire Hive.
87
+
- Add `config.nixpkgs.flake.source` by default if `meta.nixpkgs` ends
88
+
with `-source` at priority 1000 (default).
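For the `--on -` entry above, a hedged sketch of piping apply targets in over stdin (the `jq` filter is illustrative; `wire inspect names --json` and `--on -` come from the CLI in this change):

```sh
# List every node name in the hive, then apply the default `switch` goal to all of them.
# Passing `-` to --on reads additional targets from stdin and implies --non-interactive.
wire inspect names --json | jq -r '.[]' | wire apply switch --on -
```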
59
89
60
90
### Fixed
61
91
62
-
- Fix bug where `--non-interactive` was inversed
63
-
- `./result` links where being created. they will not be created anymore
64
-
- Logging from interactive commands (absence of `--non-interactive`) was
65
-
improved.
66
-
- Passing `sources.nixpkgs` directly from npins to `meta.nixpkgs` has
67
-
been fixed.
92
+
- Fix a bug where `--non-interactive` was inverted.
93
+
- Fix a bug where `./result` links were being created.
94
+
- Fix passing `sources.nixpkgs` directly from npins to `meta.nixpkgs`.
95
+
- Fix nodes that will be applied locally running the `push` and `cleanup`
96
+
steps.
68
97
69
98
### Changed
70
99
71
-
- Logs with level `tracing_level::TRACE` are compiled out of release builds
72
-
- Data integrity of keys have been greatly improved
100
+
- Improve logging from interactive commands (absence of `--non-interactive`).
101
+
- Changed `--path` argument to support flakerefs (`github:foo/bar`,
102
+
`git+file:///...`, `https://.../main.tar.gz`, etc).
103
+
- Changed SSH arguments to use ControlMaster by default.
104
+
- Compile out logs with level `tracing_level::TRACE` in release builds.
105
+
- Improve data integrity of keys.
73
106
- Unknown SSH keys will be immediately rejected unless `--ssh-accept-host` is passed.
74
-
- Logging was improved.
75
-
- `config.nixpkgs.flake.source` is now set by default if `meta.nixpkgs` ends
76
-
with `-source` at priority 1000 (default).
77
-
- Evaluation has been sped up by doing it in parallel with other steps until
78
-
the .drv is required
79
-
- A node which is going to be applied locally will now never `push` or
80
-
`cleanup`.
81
-
82
-
### Documented
83
-
84
-
- Added a real tutorial, and separated many how-to guides.
85
-
The tutorial leads the user through creating and deploying a wire Hive.
107
+
- Changed evaluation to run in parallel with other steps until
108
+
the .drv is required.
86
109
87
110
## [0.5.0] - 2025-09-18
88
111
+64 -67 Cargo.lock
···
277
277
278
278
[[package]]
279
279
name = "clap_complete"
280
-
version = "4.5.61"
280
+
version = "4.5.62"
281
281
source = "registry+https://github.com/rust-lang/crates.io-index"
282
-
checksum = "39615915e2ece2550c0149addac32fb5bd312c657f43845bb9088cb9c8a7c992"
282
+
checksum = "004eef6b14ce34759aa7de4aea3217e368f463f46a3ed3764ca4b5a4404003b4"
283
283
dependencies = [
284
284
"clap",
285
285
"clap_lex",
···
606
606
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
607
607
dependencies = [
608
608
"libc",
609
-
"windows-sys 0.60.2",
609
+
"windows-sys 0.52.0",
610
610
]
611
611
612
612
[[package]]
···
1120
1120
]
1121
1121
1122
1122
[[package]]
1123
-
name = "key_agent"
1124
-
version = "1.0.0"
1125
-
dependencies = [
1126
-
"anyhow",
1127
-
"base64",
1128
-
"futures-util",
1129
-
"nix 0.30.1",
1130
-
"prost",
1131
-
"prost-build",
1132
-
"sha2",
1133
-
"tokio",
1134
-
"tokio-util",
1135
-
]
1136
-
1137
-
[[package]]
1138
1123
name = "lazy_static"
1139
1124
version = "1.5.0"
1140
1125
source = "registry+https://github.com/rust-lang/crates.io-index"
···
1144
1129
]
1145
1130
1146
1131
[[package]]
1147
-
name = "lib"
1148
-
version = "1.0.0"
1149
-
dependencies = [
1150
-
"aho-corasick",
1151
-
"anyhow",
1152
-
"base64",
1153
-
"derive_more",
1154
-
"enum_dispatch",
1155
-
"futures",
1156
-
"gethostname",
1157
-
"gjson",
1158
-
"im",
1159
-
"itertools",
1160
-
"key_agent",
1161
-
"miette",
1162
-
"nix 0.30.1",
1163
-
"nix-compat",
1164
-
"num_enum",
1165
-
"owo-colors",
1166
-
"portable-pty",
1167
-
"proc-macro2",
1168
-
"prost",
1169
-
"rand 0.9.2",
1170
-
"serde",
1171
-
"serde_json",
1172
-
"sha2",
1173
-
"sqlx",
1174
-
"strip-ansi-escapes",
1175
-
"syn 2.0.111",
1176
-
"tempdir",
1177
-
"termion",
1178
-
"thiserror 2.0.17",
1179
-
"tokio",
1180
-
"tokio-util",
1181
-
"tracing",
1182
-
"zstd",
1183
-
]
1184
-
1185
-
[[package]]
1186
1132
name = "libc"
1187
1133
version = "0.2.175"
1188
1134
source = "registry+https://github.com/rust-lang/crates.io-index"
···
1955
1901
"errno",
1956
1902
"libc",
1957
1903
"linux-raw-sys",
1958
-
"windows-sys 0.60.2",
1904
+
"windows-sys 0.52.0",
1959
1905
]
1960
1906
1961
1907
[[package]]
···
2725
2671
2726
2672
[[package]]
2727
2673
name = "tracing"
2728
-
version = "0.1.43"
2674
+
version = "0.1.44"
2729
2675
source = "registry+https://github.com/rust-lang/crates.io-index"
2730
-
checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647"
2676
+
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
2731
2677
dependencies = [
2732
2678
"log",
2733
2679
"pin-project-lite",
···
2748
2694
2749
2695
[[package]]
2750
2696
name = "tracing-core"
2751
-
version = "0.1.35"
2697
+
version = "0.1.36"
2752
2698
source = "registry+https://github.com/rust-lang/crates.io-index"
2753
-
checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c"
2699
+
checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
2754
2700
dependencies = [
2755
2701
"once_cell",
2756
2702
"valuable",
···
3211
3157
3212
3158
[[package]]
3213
3159
name = "wire"
3214
-
version = "1.0.0"
3160
+
version = "1.1.1"
3215
3161
dependencies = [
3216
3162
"clap",
3217
3163
"clap-markdown",
···
3221
3167
"dhat",
3222
3168
"enum-display-derive",
3223
3169
"futures",
3224
-
"im",
3225
3170
"itertools",
3226
-
"lib",
3227
3171
"miette",
3228
-
"nix-compat",
3229
3172
"owo-colors",
3230
-
"serde",
3231
3173
"serde_json",
3232
3174
"signal-hook",
3233
3175
"signal-hook-tokio",
···
3236
3178
"tracing",
3237
3179
"tracing-log",
3238
3180
"tracing-subscriber",
3181
+
"wire-core",
3182
+
]
3183
+
3184
+
[[package]]
3185
+
name = "wire-core"
3186
+
version = "1.1.1"
3187
+
dependencies = [
3188
+
"aho-corasick",
3189
+
"anyhow",
3190
+
"base64",
3191
+
"derive_more",
3192
+
"enum_dispatch",
3193
+
"futures",
3194
+
"gethostname",
3195
+
"gjson",
3196
+
"im",
3197
+
"itertools",
3198
+
"miette",
3199
+
"nix 0.30.1",
3200
+
"nix-compat",
3201
+
"num_enum",
3202
+
"owo-colors",
3203
+
"portable-pty",
3204
+
"proc-macro2",
3205
+
"prost",
3206
+
"rand 0.9.2",
3207
+
"serde",
3208
+
"serde_json",
3209
+
"sha2",
3210
+
"sqlx",
3211
+
"strip-ansi-escapes",
3212
+
"syn 2.0.111",
3213
+
"tempdir",
3214
+
"termion",
3215
+
"thiserror 2.0.17",
3216
+
"tokio",
3217
+
"tokio-util",
3218
+
"tracing",
3219
+
"wire-key-agent",
3220
+
"zstd",
3221
+
]
3222
+
3223
+
[[package]]
3224
+
name = "wire-key-agent"
3225
+
version = "1.1.1"
3226
+
dependencies = [
3227
+
"anyhow",
3228
+
"base64",
3229
+
"futures-util",
3230
+
"nix 0.30.1",
3231
+
"prost",
3232
+
"prost-build",
3233
+
"sha2",
3234
+
"tokio",
3235
+
"tokio-util",
3239
3236
]
3240
3237
3241
3238
[[package]]
+2 -2 Cargo.toml
···
1
1
[workspace]
2
-
members = ["wire/key_agent", "wire/lib", "wire/cli"]
2
+
members = ["crates/key_agent", "crates/core", "crates/cli"]
3
3
resolver = "2"
4
4
package.edition = "2024"
5
-
package.version = "1.0.0"
5
+
package.version = "1.1.1"
6
6
7
7
[workspace.metadata.crane]
8
8
name = "wire"
+3 -39 README.md
···
1
-

2
-

3
-

1
+

2
+

3
+

4
4
5
5
wire is a tool to deploy NixOS systems. Its usage is inspired by colmena; however, it is not a fork.
6
6
7
7
Read the [The Tutorial](https://wire.althaea.zone/tutorial/overview.html), [Guides](https://wire.althaea.zone/guides/installation.html), or continue reading this readme for development information.
8
8
9
-
## Tree Layout
10
-
11
-
```
12
-
wire
13
-
├── wire
14
-
│   ├── lib
15
-
│   │   └── Rust library containing business logic, consumed by `wire`
16
-
│   ├── cli
17
-
│   │   └── Rust binary, using `lib`
18
-
│   └── key_agent
19
-
│       └── Rust binary ran on a target node. receives key file bytes and metadata w/ protobuf over SSH stdin
20
-
├── doc
21
-
│   └── a [vitepress](https://vitepress.dev/) site
22
-
├── runtime
23
-
│   └── Nix files used during runtime to evaluate nodes
24
-
├── bench
25
-
│   └── A little tool to benchmark wire against a large hive
26
-
└── tests
27
-
    └── Directories used during cargo & NixOS VM testing
28
-
```
29
-
30
9
## Development
31
10
32
11
Please use `nix develop` for access to the development environment and to ensure
33
12
your changes are run against the defined git hooks. For simplicity, you may wish
34
13
to use [direnv](https://github.com/direnv/direnv).
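For example, a minimal direnv setup (assuming nix-direnv or a recent direnv with flake support; the `.envrc` contents are the usual idiom, not something this repository ships):

```sh
# one-off: enter the development shell with the git hooks and tooling available
nix develop

# or let direnv load it automatically whenever you cd into the repo
echo "use flake" > .envrc && direnv allow
```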
35
-
36
-
### Testing
37
-
38
-
#### dhat profiling
39
-
40
-
```sh
41
-
$ just build-dhat
42
-
```
43
-
44
-
#### Testing
45
-
46
-
```sh
47
-
$ cargo test
48
-
$ nix flake check
49
-
```
+32 crates/cli/Cargo.toml
···
1
+
[package]
2
+
name = "wire"
3
+
version.workspace = true
4
+
edition.workspace = true
5
+
6
+
[lints]
7
+
workspace = true
8
+
9
+
[features]
10
+
dhat-heap = []
11
+
12
+
[dependencies]
13
+
clap = { workspace = true }
14
+
clap-verbosity-flag = { workspace = true }
15
+
tokio = { workspace = true }
16
+
tracing = { workspace = true }
17
+
tracing-log = { workspace = true }
18
+
tracing-subscriber = { workspace = true }
19
+
wire-core = { path = "../core" }
20
+
serde_json = { workspace = true }
21
+
miette = { workspace = true }
22
+
thiserror = { workspace = true }
23
+
enum-display-derive = "0.1.1"
24
+
futures = "0.3.31"
25
+
clap-num = "1.2.0"
26
+
clap-markdown = "0.1.5"
27
+
itertools = "0.14.0"
28
+
dhat = "0.3.2"
29
+
clap_complete = { version = "4.5.60", features = ["unstable-dynamic"] }
30
+
owo-colors = { workspace = true }
31
+
signal-hook-tokio = { version = "0.3.1", features = ["futures-v0_3"] }
32
+
signal-hook = "0.3.18"
+98 crates/cli/default.nix
···
1
+
{ getSystem, inputs, ... }:
2
+
{
3
+
perSystem =
4
+
{
5
+
pkgs,
6
+
lib,
7
+
self',
8
+
buildRustProgram,
9
+
system,
10
+
...
11
+
}:
12
+
let
13
+
cleanSystem = system: lib.replaceStrings [ "-" ] [ "_" ] system;
14
+
agents = lib.strings.concatMapStrings (
15
+
system: "--set WIRE_KEY_AGENT_${cleanSystem system} ${(getSystem system).packages.agent} "
16
+
) (import inputs.linux-systems);
17
+
in
18
+
{
19
+
packages = {
20
+
default = self'.packages.wire;
21
+
wire-unwrapped = buildRustProgram {
22
+
name = "wire";
23
+
pname = "wire";
24
+
cargoExtraArgs = "-p wire";
25
+
doCheck = true;
26
+
nativeBuildInputs = [
27
+
pkgs.installShellFiles
28
+
pkgs.sqlx-cli
29
+
];
30
+
preBuild = ''
31
+
export DATABASE_URL=sqlite:./db.sqlite3
32
+
sqlx database create
33
+
sqlx migrate run --source ./crates/core/src/cache/migrations/
34
+
'';
35
+
postInstall = ''
36
+
installShellCompletion --cmd wire \
37
+
--bash <(COMPLETE=bash $out/bin/wire) \
38
+
--fish <(COMPLETE=fish $out/bin/wire) \
39
+
--zsh <(COMPLETE=zsh $out/bin/wire)
40
+
'';
41
+
};
42
+
43
+
wire-unwrapped-dev = self'.packages.wire-unwrapped.overrideAttrs {
44
+
CARGO_PROFILE = "dev";
45
+
};
46
+
47
+
wire-unwrapped-perf = buildRustProgram {
48
+
name = "wire";
49
+
pname = "wire";
50
+
CARGO_PROFILE = "profiling";
51
+
cargoExtraArgs = "-p wire";
52
+
};
53
+
54
+
wire = pkgs.symlinkJoin {
55
+
name = "wire";
56
+
paths = [ self'.packages.wire-unwrapped ];
57
+
nativeBuildInputs = [
58
+
pkgs.makeWrapper
59
+
];
60
+
postBuild = ''
61
+
wrapProgram $out/bin/wire ${agents}
62
+
'';
63
+
meta.mainProgram = "wire";
64
+
};
65
+
66
+
wire-small = pkgs.symlinkJoin {
67
+
name = "wire";
68
+
paths = [ self'.packages.wire-unwrapped ];
69
+
nativeBuildInputs = [
70
+
pkgs.makeWrapper
71
+
];
72
+
postBuild = ''
73
+
wrapProgram $out/bin/wire --set WIRE_KEY_AGENT_${cleanSystem system} ${self'.packages.agent}
74
+
'';
75
+
meta.mainProgram = "wire";
76
+
};
77
+
78
+
wire-dev = self'.packages.wire.overrideAttrs {
79
+
paths = [ self'.packages.wire-unwrapped-dev ];
80
+
};
81
+
82
+
wire-small-dev = self'.packages.wire-small.overrideAttrs {
83
+
paths = [ self'.packages.wire-unwrapped-dev ];
84
+
};
85
+
86
+
wire-small-perf = self'.packages.wire-small.overrideAttrs {
87
+
paths = [ self'.packages.wire-unwrapped-perf ];
88
+
};
89
+
90
+
wire-diagnostics-md = self'.packages.wire-unwrapped.overrideAttrs {
91
+
DIAGNOSTICS_MD_OUTPUT = "/build/source";
92
+
installPhase = ''
93
+
mv /build/source/DIAGNOSTICS.md $out
94
+
'';
95
+
};
96
+
};
97
+
};
98
+
}
+341 crates/cli/src/apply.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use futures::{FutureExt, StreamExt};
5
+
use itertools::{Either, Itertools};
6
+
use miette::{Diagnostic, IntoDiagnostic, Result};
7
+
use std::any::Any;
8
+
use std::collections::HashSet;
9
+
use std::io::{Read, stderr};
10
+
use std::sync::Arc;
11
+
use std::sync::atomic::AtomicBool;
12
+
use thiserror::Error;
13
+
use tracing::{error, info};
14
+
use wire_core::hive::node::{Context, GoalExecutor, Name, Node, Objective, StepState};
15
+
use wire_core::hive::{Hive, HiveLocation};
16
+
use wire_core::status::STATUS;
17
+
use wire_core::{SubCommandModifiers, errors::HiveLibError};
18
+
19
+
use crate::cli::{ApplyTarget, CommonVerbArgs, Partitions};
20
+
21
+
#[derive(Debug, Error, Diagnostic)]
22
+
#[error("node {} failed to apply", .0)]
23
+
struct NodeError(
24
+
Name,
25
+
#[source]
26
+
#[diagnostic_source]
27
+
HiveLibError,
28
+
);
29
+
30
+
#[derive(Debug, Error, Diagnostic)]
31
+
#[error("{} node(s) failed to apply.", .0.len())]
32
+
struct NodeErrors(#[related] Vec<NodeError>);
33
+
34
+
// returns the tags and node names read from stdin
35
+
fn read_apply_targets_from_stdin() -> Result<(Vec<String>, Vec<Name>)> {
36
+
let mut buf = String::new();
37
+
let mut stdin = std::io::stdin().lock();
38
+
stdin.read_to_string(&mut buf).into_diagnostic()?;
39
+
40
+
Ok(buf
41
+
.split_whitespace()
42
+
.map(|x| ApplyTarget::from(x.to_string()))
43
+
.fold((Vec::new(), Vec::new()), |(mut tags, mut names), target| {
44
+
match target {
45
+
ApplyTarget::Node(name) => names.push(name),
46
+
ApplyTarget::Tag(tag) => tags.push(tag),
47
+
ApplyTarget::Stdin => {}
48
+
}
49
+
(tags, names)
50
+
}))
51
+
}
52
+
53
+
fn resolve_targets(
54
+
on: &[ApplyTarget],
55
+
modifiers: &mut SubCommandModifiers,
56
+
) -> (HashSet<String>, HashSet<Name>) {
57
+
on.iter().fold(
58
+
(HashSet::new(), HashSet::new()),
59
+
|(mut tags, mut names), target| {
60
+
match target {
61
+
ApplyTarget::Tag(tag) => {
62
+
tags.insert(tag.clone());
63
+
}
64
+
ApplyTarget::Node(name) => {
65
+
names.insert(name.clone());
66
+
}
67
+
ApplyTarget::Stdin => {
68
+
// implies non_interactive
69
+
modifiers.non_interactive = true;
70
+
71
+
let (found_tags, found_names) = read_apply_targets_from_stdin().unwrap();
72
+
names.extend(found_names);
73
+
tags.extend(found_tags);
74
+
}
75
+
}
76
+
(tags, names)
77
+
},
78
+
)
79
+
}
80
+
81
+
fn partition_arr<T>(arr: Vec<T>, partition: &Partitions) -> Vec<T>
82
+
where
83
+
T: Any + Clone,
84
+
{
85
+
if arr.is_empty() {
86
+
return arr;
87
+
}
88
+
89
+
let items_per_chunk = arr.len().div_ceil(partition.maximum);
90
+
91
+
arr.chunks(items_per_chunk)
92
+
.nth(partition.current - 1)
93
+
.unwrap_or(&[])
94
+
.to_vec()
95
+
}
96
+
97
+
pub async fn apply<F>(
98
+
hive: &mut Hive,
99
+
should_shutdown: Arc<AtomicBool>,
100
+
location: HiveLocation,
101
+
args: CommonVerbArgs,
102
+
partition: Partitions,
103
+
make_objective: F,
104
+
mut modifiers: SubCommandModifiers,
105
+
) -> Result<()>
106
+
where
107
+
F: Fn(&Name, &Node) -> Objective,
108
+
{
109
+
let location = Arc::new(location);
110
+
111
+
let (tags, names) = resolve_targets(&args.on, &mut modifiers);
112
+
113
+
let selected_names: Vec<_> = hive
114
+
.nodes
115
+
.iter()
116
+
.filter(|(name, node)| {
117
+
args.on.is_empty()
118
+
|| names.contains(name)
119
+
|| node.tags.iter().any(|tag| tags.contains(tag))
120
+
})
121
+
.sorted_by_key(|(name, _)| *name)
122
+
.map(|(name, _)| name.clone())
123
+
.collect();
124
+
125
+
let num_selected = selected_names.len();
126
+
127
+
let partitioned_names = partition_arr(selected_names, &partition);
128
+
129
+
if num_selected != partitioned_names.len() {
130
+
info!(
131
+
"Partitioning reduced selected number of nodes from {num_selected} to {}",
132
+
partitioned_names.len()
133
+
);
134
+
}
135
+
136
+
STATUS
137
+
.lock()
138
+
.add_many(&partitioned_names.iter().collect::<Vec<_>>());
139
+
140
+
let mut set = hive
141
+
.nodes
142
+
.iter_mut()
143
+
.filter(|(name, _)| partitioned_names.contains(name))
144
+
.map(|(name, node)| {
145
+
info!("Resolved {:?} to include {}", args.on, name);
146
+
147
+
let objective = make_objective(name, node);
148
+
149
+
let context = Context {
150
+
node,
151
+
name,
152
+
objective,
153
+
state: StepState::default(),
154
+
hive_location: location.clone(),
155
+
modifiers,
156
+
should_quit: should_shutdown.clone(),
157
+
};
158
+
159
+
GoalExecutor::new(context)
160
+
.execute()
161
+
.map(move |result| (name, result))
162
+
})
163
+
.peekable();
164
+
165
+
if set.peek().is_none() {
166
+
error!("There are no nodes selected for deployment");
167
+
}
168
+
169
+
let futures = futures::stream::iter(set).buffer_unordered(args.parallel);
170
+
let result = futures.collect::<Vec<_>>().await;
171
+
let (successful, errors): (Vec<_>, Vec<_>) =
172
+
result
173
+
.into_iter()
174
+
.partition_map(|(name, result)| match result {
175
+
Ok(..) => Either::Left(name),
176
+
Err(err) => Either::Right((name, err)),
177
+
});
178
+
179
+
if !successful.is_empty() {
180
+
info!(
181
+
"Successfully applied goal to {} node(s): {:?}",
182
+
successful.len(),
183
+
successful
184
+
);
185
+
}
186
+
187
+
if !errors.is_empty() {
188
+
// clear the status bar if we are about to print error messages
189
+
STATUS.lock().clear(&mut stderr());
190
+
191
+
return Err(NodeErrors(
192
+
errors
193
+
.into_iter()
194
+
.map(|(name, error)| NodeError(name.clone(), error))
195
+
.collect(),
196
+
)
197
+
.into());
198
+
}
199
+
200
+
Ok(())
201
+
}
202
+
203
+
#[cfg(test)]
204
+
mod tests {
205
+
use super::*;
206
+
207
+
#[test]
208
+
#[allow(clippy::too_many_lines)]
209
+
fn test_partitioning() {
210
+
let arr = (1..=10).collect::<Vec<_>>();
211
+
assert_eq!(arr, partition_arr(arr.clone(), &Partitions::default()));
212
+
213
+
assert_eq!(
214
+
vec![1, 2, 3, 4, 5],
215
+
partition_arr(
216
+
arr.clone(),
217
+
&Partitions {
218
+
current: 1,
219
+
maximum: 2
220
+
}
221
+
)
222
+
);
223
+
assert_eq!(
224
+
vec![6, 7, 8, 9, 10],
225
+
partition_arr(
226
+
arr,
227
+
&Partitions {
228
+
current: 2,
229
+
maximum: 2
230
+
}
231
+
)
232
+
);
233
+
234
+
// test odd number
235
+
let arr = (1..10).collect::<Vec<_>>();
236
+
assert_eq!(
237
+
arr.clone(),
238
+
partition_arr(arr.clone(), &Partitions::default())
239
+
);
240
+
241
+
assert_eq!(
242
+
vec![1, 2, 3, 4, 5],
243
+
partition_arr(
244
+
arr.clone(),
245
+
&Partitions {
246
+
current: 1,
247
+
maximum: 2
248
+
}
249
+
)
250
+
);
251
+
assert_eq!(
252
+
vec![6, 7, 8, 9],
253
+
partition_arr(
254
+
arr.clone(),
255
+
&Partitions {
256
+
current: 2,
257
+
maximum: 2
258
+
}
259
+
)
260
+
);
261
+
262
+
// test large number of partitions
263
+
let arr = (1..=10).collect::<Vec<_>>();
264
+
assert_eq!(
265
+
arr.clone(),
266
+
partition_arr(arr.clone(), &Partitions::default())
267
+
);
268
+
269
+
for i in 1..=10 {
270
+
assert_eq!(
271
+
vec![i],
272
+
partition_arr(
273
+
arr.clone(),
274
+
&Partitions {
275
+
current: i,
276
+
maximum: 10
277
+
}
278
+
)
279
+
);
280
+
281
+
assert_eq!(
282
+
vec![i],
283
+
partition_arr(
284
+
arr.clone(),
285
+
&Partitions {
286
+
current: i,
287
+
maximum: 15
288
+
}
289
+
)
290
+
);
291
+
}
292
+
293
+
// stretching thin with higher partitions will start to leave higher ones empty
294
+
assert_eq!(
295
+
Vec::<usize>::new(),
296
+
partition_arr(
297
+
arr,
298
+
&Partitions {
299
+
current: 11,
300
+
maximum: 15
301
+
}
302
+
)
303
+
);
304
+
305
+
// test the above holds for a lot of numbers
306
+
for i in 1..1000 {
307
+
let arr: Vec<usize> = (0..i).collect();
308
+
let total = arr.len();
309
+
310
+
assert_eq!(
311
+
arr.clone(),
312
+
partition_arr(arr.clone(), &Partitions::default()),
313
+
);
314
+
315
+
let buckets = 2;
316
+
let chunk_size = total.div_ceil(buckets);
317
+
let split_index = std::cmp::min(chunk_size, total);
318
+
319
+
assert_eq!(
320
+
&arr.clone()[..split_index],
321
+
partition_arr(
322
+
arr.clone(),
323
+
&Partitions {
324
+
current: 1,
325
+
maximum: 2
326
+
}
327
+
),
328
+
);
329
+
assert_eq!(
330
+
&arr.clone()[split_index..],
331
+
partition_arr(
332
+
arr.clone(),
333
+
&Partitions {
334
+
current: 2,
335
+
maximum: 2
336
+
}
337
+
),
338
+
);
339
+
}
340
+
}
341
+
}
+398 crates/cli/src/cli.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use clap::builder::PossibleValue;
5
+
use clap::{Args, Parser, Subcommand, ValueEnum};
6
+
use clap::{ValueHint, crate_version};
7
+
use clap_complete::CompletionCandidate;
8
+
use clap_complete::engine::ArgValueCompleter;
9
+
use clap_num::number_range;
10
+
use clap_verbosity_flag::InfoLevel;
11
+
use tokio::runtime::Handle;
12
+
use wire_core::SubCommandModifiers;
13
+
use wire_core::commands::common::get_hive_node_names;
14
+
use wire_core::hive::node::{Goal as HiveGoal, HandleUnreachable, Name, SwitchToConfigurationGoal};
15
+
use wire_core::hive::{Hive, get_hive_location};
16
+
17
+
use std::io::IsTerminal;
18
+
use std::{
19
+
fmt::{self, Display, Formatter},
20
+
sync::Arc,
21
+
};
22
+
23
+
#[allow(clippy::struct_excessive_bools)]
24
+
#[derive(Parser)]
25
+
#[command(
26
+
name = "wire",
27
+
bin_name = "wire",
28
+
about = "a tool to deploy nixos systems",
29
+
version = format!("{}\nDebug: Hive::SCHEMA_VERSION {}", crate_version!(), Hive::SCHEMA_VERSION)
30
+
)]
31
+
pub struct Cli {
32
+
#[command(subcommand)]
33
+
pub command: Commands,
34
+
35
+
#[command(flatten)]
36
+
pub verbose: clap_verbosity_flag::Verbosity<InfoLevel>,
37
+
38
+
/// Path or flake reference
39
+
#[arg(long, global = true, default_value = std::env::current_dir().unwrap().into_os_string(), visible_alias("flake"))]
40
+
pub path: String,
41
+
42
+
/// Hide progress bars.
43
+
///
44
+
/// Defaults to true if stdin does not refer to a tty (unix pipelines, in CI).
45
+
#[arg(long, global = true, default_value_t = !std::io::stdin().is_terminal())]
46
+
pub no_progress: bool,
47
+
48
+
/// Never accept user input.
49
+
///
50
+
/// Defaults to true if stdin does not refer to a tty (unix pipelines, in CI).
51
+
#[arg(long, global = true, default_value_t = !std::io::stdin().is_terminal())]
52
+
pub non_interactive: bool,
53
+
54
+
/// Show trace logs
55
+
#[arg(long, global = true, default_value_t = false)]
56
+
pub show_trace: bool,
57
+
58
+
#[cfg(debug_assertions)]
59
+
#[arg(long, hide = true, global = true)]
60
+
pub markdown_help: bool,
61
+
}
62
+
63
+
#[derive(Clone, Debug)]
64
+
pub enum ApplyTarget {
65
+
Node(Name),
66
+
Tag(String),
67
+
Stdin,
68
+
}
69
+
70
+
impl From<String> for ApplyTarget {
71
+
fn from(value: String) -> Self {
72
+
if value == "-" {
73
+
return ApplyTarget::Stdin;
74
+
}
75
+
76
+
if let Some(stripped) = value.strip_prefix("@") {
77
+
ApplyTarget::Tag(stripped.to_string())
78
+
} else {
79
+
ApplyTarget::Node(Name(Arc::from(value.as_str())))
80
+
}
81
+
}
82
+
}
83
+
84
+
impl Display for ApplyTarget {
85
+
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
86
+
match self {
87
+
ApplyTarget::Node(name) => name.fmt(f),
88
+
ApplyTarget::Tag(tag) => write!(f, "@{tag}"),
89
+
ApplyTarget::Stdin => write!(f, "#stdin"),
90
+
}
91
+
}
92
+
}
93
+
94
+
fn more_than_zero(s: &str) -> Result<usize, String> {
95
+
number_range(s, 1, usize::MAX)
96
+
}
97
+
98
+
fn parse_partitions(s: &str) -> Result<Partitions, String> {
99
+
let parts: [&str; 2] = s
100
+
.split('/')
101
+
.collect::<Vec<_>>()
102
+
.try_into()
103
+
.map_err(|_| "partition must contain exactly one '/'")?;
104
+
105
+
let (current, maximum) =
106
+
std::array::from_fn(|i| parts[i].parse::<usize>().map_err(|x| x.to_string())).into();
107
+
let (current, maximum) = (current?, maximum?);
108
+
109
+
if current > maximum {
110
+
return Err("current is more than total".to_string());
111
+
}
112
+
113
+
if current == 0 || maximum == 0 {
114
+
return Err("partition segments cannot be 0.".to_string());
115
+
}
116
+
117
+
Ok(Partitions { current, maximum })
118
+
}
119
+
120
+
#[derive(Clone)]
121
+
pub enum HandleUnreachableArg {
122
+
Ignore,
123
+
FailNode,
124
+
}
125
+
126
+
impl Display for HandleUnreachableArg {
127
+
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
128
+
match self {
129
+
Self::Ignore => write!(f, "ignore"),
130
+
Self::FailNode => write!(f, "fail-node"),
131
+
}
132
+
}
133
+
}
134
+
135
+
impl clap::ValueEnum for HandleUnreachableArg {
136
+
fn value_variants<'a>() -> &'a [Self] {
137
+
&[Self::Ignore, Self::FailNode]
138
+
}
139
+
140
+
fn to_possible_value(&self) -> Option<clap::builder::PossibleValue> {
141
+
match self {
142
+
Self::Ignore => Some(PossibleValue::new("ignore")),
143
+
Self::FailNode => Some(PossibleValue::new("fail-node")),
144
+
}
145
+
}
146
+
}
147
+
148
+
impl From<HandleUnreachableArg> for HandleUnreachable {
149
+
fn from(value: HandleUnreachableArg) -> Self {
150
+
match value {
151
+
HandleUnreachableArg::Ignore => Self::Ignore,
152
+
HandleUnreachableArg::FailNode => Self::FailNode,
153
+
}
154
+
}
155
+
}
156
+
157
+
#[derive(Args)]
158
+
pub struct CommonVerbArgs {
159
+
/// List of literal node names, a literal `-`, or `@` prefixed tags.
160
+
///
161
+
/// `-` will read additional values from stdin, separated by whitespace.
162
+
/// Any `-` implies `--non-interactive`.
163
+
#[arg(short, long, value_name = "NODE | @TAG | `-`", num_args = 1.., add = ArgValueCompleter::new(node_names_completer), value_hint = ValueHint::Unknown)]
164
+
pub on: Vec<ApplyTarget>,
165
+
166
+
#[arg(short, long, default_value_t = 10, value_parser=more_than_zero)]
167
+
pub parallel: usize,
168
+
}
169
+
170
+
#[allow(clippy::struct_excessive_bools)]
171
+
#[derive(Args)]
172
+
pub struct ApplyArgs {
173
+
#[command(flatten)]
174
+
pub common: CommonVerbArgs,
175
+
176
+
#[arg(value_enum, default_value_t)]
177
+
pub goal: Goal,
178
+
179
+
/// Skip key uploads. noop when [GOAL] = Keys
180
+
#[arg(short, long, default_value_t = false)]
181
+
pub no_keys: bool,
182
+
183
+
/// Overrides deployment.buildOnTarget.
184
+
#[arg(short, long, value_name = "NODE")]
185
+
pub always_build_local: Vec<String>,
186
+
187
+
/// Reboot the nodes after activation
188
+
#[arg(short, long, default_value_t = false)]
189
+
pub reboot: bool,
190
+
191
+
/// Enable `--substitute-on-destination` in Nix subcommands.
192
+
#[arg(short, long, default_value_t = true)]
193
+
pub substitute_on_destination: bool,
194
+
195
+
/// How to handle an unreachable node in the ping step.
196
+
///
197
+
/// This only affects the ping step.
198
+
/// wire will still fail the node if it becomes unreachable after activation
199
+
#[arg(long, default_value_t = HandleUnreachableArg::FailNode)]
200
+
pub handle_unreachable: HandleUnreachableArg,
201
+
202
+
/// Unconditionally accept SSH host keys [!!]
203
+
///
204
+
/// Sets `StrictHostKeyChecking` to `no`.
205
+
/// Vulnerable to man-in-the-middle attacks, use with caution.
206
+
#[arg(long, default_value_t = false)]
207
+
pub ssh_accept_host: bool,
208
+
}
209
+
210
+
#[derive(Clone, Debug)]
211
+
pub struct Partitions {
212
+
pub current: usize,
213
+
pub maximum: usize,
214
+
}
215
+
216
+
impl Default for Partitions {
217
+
fn default() -> Self {
218
+
Self {
219
+
current: 1,
220
+
maximum: 1,
221
+
}
222
+
}
223
+
}
224
+
225
+
#[derive(Args)]
226
+
pub struct BuildArgs {
227
+
#[command(flatten)]
228
+
pub common: CommonVerbArgs,
229
+
230
+
/// Partition builds into buckets.
231
+
///
232
+
/// In the format of `current/total`, where 1 <= current <= total.
233
+
#[arg(short = 'P', default_value="1/1", long, value_parser=parse_partitions)]
234
+
pub partition: Option<Partitions>,
235
+
}
236
+
237
+
#[derive(Subcommand)]
238
+
pub enum Commands {
239
+
/// Deploy nodes
240
+
Apply(ApplyArgs),
241
+
/// Build nodes offline
242
+
///
243
+
/// This is distinct from `wire apply build`, as it will not ping or push
244
+
/// the result, making it useful for CI.
245
+
///
246
+
/// Additionally, you may partition the build jobs into buckets.
247
+
Build(BuildArgs),
248
+
/// Inspect hive
249
+
#[clap(visible_alias = "show")]
250
+
Inspect {
251
+
#[arg(value_enum, default_value_t)]
252
+
selection: Inspection,
253
+
254
+
/// Return in JSON format
255
+
#[arg(short, long, default_value_t = false)]
256
+
json: bool,
257
+
},
258
+
}
259
+
260
+
#[derive(Clone, Debug, Default, ValueEnum, Display)]
261
+
pub enum Inspection {
262
+
/// Output all data wire has on the entire hive
263
+
#[default]
264
+
Full,
265
+
/// Only output a list of node names
266
+
Names,
267
+
}
268
+
269
+
#[derive(Clone, Debug, Default, ValueEnum, Display)]
270
+
pub enum Goal {
271
+
/// Make the configuration the boot default and activate now
272
+
#[default]
273
+
Switch,
274
+
/// Build the configuration & push the results
275
+
Build,
276
+
/// Copy the system derivation to the remote hosts
277
+
Push,
278
+
/// Push deployment keys to the remote hosts
279
+
Keys,
280
+
/// Activate the system profile on next boot
281
+
Boot,
282
+
/// Activate the configuration, but don't make it the boot default
283
+
Test,
284
+
/// Show what would be done if this configuration were activated.
285
+
DryActivate,
286
+
}
287
+
288
+
impl TryFrom<Goal> for HiveGoal {
289
+
type Error = miette::Error;
290
+
291
+
fn try_from(value: Goal) -> Result<Self, Self::Error> {
292
+
match value {
293
+
Goal::Build => Ok(HiveGoal::Build),
294
+
Goal::Push => Ok(HiveGoal::Push),
295
+
Goal::Boot => Ok(HiveGoal::SwitchToConfiguration(
296
+
SwitchToConfigurationGoal::Boot,
297
+
)),
298
+
Goal::Switch => Ok(HiveGoal::SwitchToConfiguration(
299
+
SwitchToConfigurationGoal::Switch,
300
+
)),
301
+
Goal::Test => Ok(HiveGoal::SwitchToConfiguration(
302
+
SwitchToConfigurationGoal::Test,
303
+
)),
304
+
Goal::DryActivate => Ok(HiveGoal::SwitchToConfiguration(
305
+
SwitchToConfigurationGoal::DryActivate,
306
+
)),
307
+
Goal::Keys => Ok(HiveGoal::Keys),
308
+
}
309
+
}
310
+
}
311
+
312
+
pub trait ToSubCommandModifiers {
313
+
fn to_subcommand_modifiers(&self) -> SubCommandModifiers;
314
+
}
315
+
316
+
impl ToSubCommandModifiers for Cli {
317
+
fn to_subcommand_modifiers(&self) -> SubCommandModifiers {
318
+
SubCommandModifiers {
319
+
show_trace: self.show_trace,
320
+
non_interactive: self.non_interactive,
321
+
ssh_accept_host: match &self.command {
322
+
Commands::Apply(args) if args.ssh_accept_host => {
323
+
wire_core::StrictHostKeyChecking::No
324
+
}
325
+
_ => wire_core::StrictHostKeyChecking::default(),
326
+
},
327
+
}
328
+
}
329
+
}
330
+
331
+
fn node_names_completer(current: &std::ffi::OsStr) -> Vec<CompletionCandidate> {
332
+
tokio::task::block_in_place(|| {
333
+
let handle = Handle::current();
334
+
let modifiers = SubCommandModifiers::default();
335
+
let mut completions = vec![];
336
+
337
+
if current.is_empty() || current == "-" {
338
+
completions.push(
339
+
CompletionCandidate::new("-").help(Some("Read stdin as --on arguments".into())),
340
+
);
341
+
}
342
+
343
+
let Ok(current_dir) = std::env::current_dir() else {
344
+
return completions;
345
+
};
346
+
347
+
let Ok(hive_location) = handle.block_on(get_hive_location(
348
+
current_dir.display().to_string(),
349
+
modifiers,
350
+
)) else {
351
+
return completions;
352
+
};
353
+
354
+
let Some(current) = current.to_str() else {
355
+
return completions;
356
+
};
357
+
358
+
if current.starts_with('@') {
359
+
return vec![];
360
+
}
361
+
362
+
if let Ok(names) =
363
+
handle.block_on(async { get_hive_node_names(&hive_location, modifiers).await })
364
+
{
365
+
for name in names {
366
+
if name.starts_with(current) {
367
+
completions.push(CompletionCandidate::new(name));
368
+
}
369
+
}
370
+
}
371
+
372
+
completions
373
+
})
374
+
}
375
+
376
+
#[cfg(test)]
377
+
mod tests {
378
+
use std::assert_matches::assert_matches;
379
+
380
+
use crate::cli::{Partitions, parse_partitions};
381
+
382
+
#[test]
383
+
fn test_partition_parsing() {
384
+
assert_matches!(parse_partitions(""), Err(..));
385
+
assert_matches!(parse_partitions("/"), Err(..));
386
+
assert_matches!(parse_partitions(" / "), Err(..));
387
+
assert_matches!(parse_partitions("abc/"), Err(..));
388
+
assert_matches!(parse_partitions("abc"), Err(..));
389
+
assert_matches!(parse_partitions("1/1"), Ok(Partitions {
390
+
current,
391
+
maximum
392
+
}) if current == 1 && maximum == 1);
393
+
assert_matches!(parse_partitions("0/1"), Err(..));
394
+
assert_matches!(parse_partitions("-11/1"), Err(..));
395
+
assert_matches!(parse_partitions("100/99"), Err(..));
396
+
assert_matches!(parse_partitions("5/10"), Ok(Partitions { current, maximum }) if current == 5 && maximum == 10);
397
+
}
398
+
}
+173 crates/cli/src/main.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#![deny(clippy::pedantic)]
5
+
#![feature(sync_nonpoison)]
6
+
#![feature(nonpoison_mutex)]
7
+
#![feature(assert_matches)]
8
+
9
+
use std::process::Command;
10
+
use std::sync::Arc;
11
+
use std::sync::atomic::AtomicBool;
12
+
13
+
use crate::cli::Cli;
14
+
use crate::cli::Partitions;
15
+
use crate::cli::ToSubCommandModifiers;
16
+
use crate::sigint::handle_signals;
17
+
use crate::tracing_setup::setup_logging;
18
+
use clap::CommandFactory;
19
+
use clap::Parser;
20
+
use clap_complete::CompleteEnv;
21
+
use miette::IntoDiagnostic;
22
+
use miette::Result;
23
+
use signal_hook::consts::SIGINT;
24
+
use signal_hook_tokio::Signals;
25
+
use tracing::error;
26
+
use tracing::warn;
27
+
use wire_core::cache::InspectionCache;
28
+
use wire_core::commands::common::get_hive_node_names;
29
+
use wire_core::hive::Hive;
30
+
use wire_core::hive::get_hive_location;
31
+
use wire_core::hive::node::ApplyObjective;
32
+
use wire_core::hive::node::Objective;
33
+
use wire_core::hive::node::should_apply_locally;
34
+
35
+
#[macro_use]
36
+
extern crate enum_display_derive;
37
+
38
+
mod apply;
39
+
mod cli;
40
+
mod sigint;
41
+
mod tracing_setup;
42
+
43
+
#[cfg(feature = "dhat-heap")]
44
+
#[global_allocator]
45
+
static ALLOC: dhat::Alloc = dhat::Alloc;
46
+
47
+
#[tokio::main]
48
+
async fn main() -> Result<()> {
49
+
#[cfg(feature = "dhat-heap")]
50
+
let _profiler = dhat::Profiler::new_heap();
51
+
CompleteEnv::with_factory(Cli::command).complete();
52
+
53
+
let args = Cli::parse();
54
+
55
+
let modifiers = args.to_subcommand_modifiers();
56
+
// disable progress when running inspect mode.
57
+
setup_logging(
58
+
&args.verbose,
59
+
!matches!(args.command, cli::Commands::Inspect { .. }) && !&args.no_progress,
60
+
);
61
+
62
+
#[cfg(debug_assertions)]
63
+
if args.markdown_help {
64
+
clap_markdown::print_help_markdown::<Cli>();
65
+
return Ok(());
66
+
}
67
+
68
+
if !check_nix_available() {
69
+
miette::bail!("Nix is not available on this system.");
70
+
}
71
+
72
+
let signals = Signals::new([SIGINT]).into_diagnostic()?;
73
+
let signals_handle = signals.handle();
74
+
let should_shutdown = Arc::new(AtomicBool::new(false));
75
+
let signals_task = tokio::spawn(handle_signals(signals, should_shutdown.clone()));
76
+
77
+
let location = get_hive_location(args.path, modifiers).await?;
78
+
let cache = InspectionCache::new().await;
79
+
80
+
match args.command {
81
+
cli::Commands::Apply(apply_args) => {
82
+
let mut hive = Hive::new_from_path(&location, cache.clone(), modifiers).await?;
83
+
let goal: wire_core::hive::node::Goal = apply_args.goal.clone().try_into().unwrap();
84
+
85
+
// Respect user's --always-build-local arg
86
+
hive.force_always_local(apply_args.always_build_local)?;
87
+
88
+
apply::apply(
89
+
&mut hive,
90
+
should_shutdown,
91
+
location,
92
+
apply_args.common,
93
+
Partitions::default(),
94
+
|name, node| {
95
+
Objective::Apply(ApplyObjective {
96
+
goal,
97
+
no_keys: apply_args.no_keys,
98
+
reboot: apply_args.reboot,
99
+
substitute_on_destination: apply_args.substitute_on_destination,
100
+
should_apply_locally: should_apply_locally(
101
+
node.allow_local_deployment,
102
+
&name.0,
103
+
),
104
+
handle_unreachable: apply_args.handle_unreachable.clone().into(),
105
+
})
106
+
},
107
+
modifiers,
108
+
)
109
+
.await?;
110
+
}
111
+
cli::Commands::Build(build_args) => {
112
+
let mut hive = Hive::new_from_path(&location, cache.clone(), modifiers).await?;
113
+
114
+
apply::apply(
115
+
&mut hive,
116
+
should_shutdown,
117
+
location,
118
+
build_args.common,
119
+
build_args.partition.unwrap_or_default(),
120
+
|_name, _node| Objective::BuildLocally,
121
+
modifiers,
122
+
)
123
+
.await?;
124
+
}
125
+
cli::Commands::Inspect { json, selection } => println!("{}", {
126
+
match selection {
127
+
cli::Inspection::Full => {
128
+
let hive = Hive::new_from_path(&location, cache.clone(), modifiers).await?;
129
+
if json {
130
+
serde_json::to_string(&hive).into_diagnostic()?
131
+
} else {
132
+
warn!("use --json to output something scripting suitable");
133
+
format!("{hive}")
134
+
}
135
+
}
136
+
cli::Inspection::Names => {
137
+
serde_json::to_string(&get_hive_node_names(&location, modifiers).await?)
138
+
.into_diagnostic()?
139
+
}
140
+
}
141
+
}),
142
+
}
143
+
144
+
if let Some(cache) = cache {
145
+
cache.gc().await.into_diagnostic()?;
146
+
}
147
+
148
+
signals_handle.close();
149
+
signals_task.await.into_diagnostic()?;
150
+
151
+
Ok(())
152
+
}
153
+
154
+
fn check_nix_available() -> bool {
155
+
match Command::new("nix")
156
+
.stdout(std::process::Stdio::null())
157
+
.stderr(std::process::Stdio::null())
158
+
.spawn()
159
+
{
160
+
Ok(_) => true,
161
+
Err(e) => {
162
+
if let std::io::ErrorKind::NotFound = e.kind() {
163
+
false
164
+
} else {
165
+
error!(
166
+
"Something weird happened checking for nix availability, {}",
167
+
e
168
+
);
169
+
false
170
+
}
171
+
}
172
+
}
173
+
}
+21 crates/cli/src/sigint.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::sync::{Arc, atomic::AtomicBool};
5
+
6
+
use signal_hook::consts::SIGINT;
7
+
use signal_hook_tokio::Signals;
8
+
9
+
use futures::stream::StreamExt;
10
+
use tracing::info;
11
+
12
+
pub(crate) async fn handle_signals(mut signals: Signals, should_shutdown: Arc<AtomicBool>) {
13
+
while let Some(signal) = signals.next().await {
14
+
if let SIGINT = signal
15
+
&& !should_shutdown.load(std::sync::atomic::Ordering::Relaxed)
16
+
{
17
+
info!("Received SIGINT, attempting to shut down executor tasks.");
18
+
should_shutdown.store(true, std::sync::atomic::Ordering::Relaxed);
19
+
}
20
+
}
21
+
}
+284 crates/cli/src/tracing_setup.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::{
5
+
collections::VecDeque,
6
+
io::{self, Stderr, Write, stderr},
7
+
time::Duration,
8
+
};
9
+
10
+
use clap_verbosity_flag::{LogLevel, Verbosity};
11
+
use owo_colors::{OwoColorize, Stream, Style};
12
+
use tracing::{Level, Subscriber};
13
+
use tracing_log::AsTrace;
14
+
use tracing_subscriber::{
15
+
Layer,
16
+
field::{RecordFields, VisitFmt},
17
+
fmt::{
18
+
FormatEvent, FormatFields, FormattedFields,
19
+
format::{self, DefaultFields, DefaultVisitor, Format, Full},
20
+
},
21
+
layer::{Context, SubscriberExt},
22
+
registry::LookupSpan,
23
+
util::SubscriberInitExt,
24
+
};
25
+
use wire_core::{STDIN_CLOBBER_LOCK, status::STATUS};
26
+
27
+
/// The non-clobbering writer ensures that log lines are held while interactive
28
+
/// prompts are shown to the user. If logs were shown, they would "clobber" the
29
+
/// sudo / ssh prompt.
30
+
///
31
+
/// Additionally, the `STDIN_CLOBBER_LOCK` is used to ensure that no two
32
+
/// interactive prompts are shown at the same time.
33
+
struct NonClobberingWriter {
34
+
queue: VecDeque<Vec<u8>>,
35
+
stderr: Stderr,
36
+
}
37
+
38
+
impl NonClobberingWriter {
39
+
fn new() -> Self {
40
+
NonClobberingWriter {
41
+
queue: VecDeque::with_capacity(100),
42
+
stderr: stderr(),
43
+
}
44
+
}
45
+
46
+
/// expects the caller to write the status line
47
+
fn dump_previous(&mut self) -> Result<(), io::Error> {
48
+
STATUS.lock().clear(&mut self.stderr);
49
+
50
+
for buf in self.queue.iter().rev() {
51
+
self.stderr.write(buf).map(|_| ())?;
52
+
}
53
+
54
+
Ok(())
55
+
}
56
+
}
57
+
58
+
impl Write for NonClobberingWriter {
59
+
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
60
+
if let 1.. = STDIN_CLOBBER_LOCK.available_permits() {
61
+
self.dump_previous().map(|()| 0)?;
62
+
63
+
STATUS.lock().write_above_status(buf, &mut self.stderr)
64
+
} else {
65
+
self.queue.push_front(buf.to_vec());
66
+
67
+
Ok(buf.len())
68
+
}
69
+
}
70
+
71
+
fn flush(&mut self) -> std::io::Result<()> {
72
+
self.stderr.flush()
73
+
}
74
+
}
75
+
76
+
/// Handles event formatting, which falls back to the default formatter
77
+
/// passed.
78
+
struct WireEventFormat(Format<Full, ()>);
79
+
/// Formats the node's name with `WireFieldVisitor`
80
+
struct WireFieldFormat;
81
+
struct WireFieldVisitor<'a>(DefaultVisitor<'a>);
82
+
/// `WireLayer` injects the formatted `WireFieldFormat` output as an extension on the span
83
+
struct WireLayer;
84
+
85
+
impl<'a> WireFieldVisitor<'a> {
86
+
fn new(writer: format::Writer<'a>, is_empty: bool) -> Self {
87
+
Self(DefaultVisitor::new(writer, is_empty))
88
+
}
89
+
}
90
+
91
+
impl<'writer> FormatFields<'writer> for WireFieldFormat {
92
+
fn format_fields<R: RecordFields>(
93
+
&self,
94
+
writer: format::Writer<'writer>,
95
+
fields: R,
96
+
) -> std::fmt::Result {
97
+
let mut v = WireFieldVisitor::new(writer, true);
98
+
fields.record(&mut v);
99
+
Ok(())
100
+
}
101
+
}
102
+
103
+
impl tracing::field::Visit for WireFieldVisitor<'_> {
104
+
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
105
+
if field.name() == "node" {
106
+
let _ = write!(
107
+
self.0.writer(),
108
+
"{:?}",
109
+
value.if_supports_color(Stream::Stderr, |text| text.bold())
110
+
);
111
+
}
112
+
}
113
+
}
114
+
115
+
const fn get_style(level: Level) -> Style {
116
+
let mut style = Style::new();
117
+
118
+
style = match level {
119
+
Level::TRACE => style.purple(),
120
+
Level::DEBUG => style.blue(),
121
+
Level::INFO => style.green(),
122
+
Level::WARN => style.yellow(),
123
+
Level::ERROR => style.red(),
124
+
};
125
+
126
+
style
127
+
}
128
+
129
+
const fn fmt_level(level: Level) -> &'static str {
130
+
match level {
131
+
Level::TRACE => "TRACE",
132
+
Level::DEBUG => "DEBUG",
133
+
Level::INFO => " INFO",
134
+
Level::WARN => " WARN",
135
+
Level::ERROR => "ERROR",
136
+
}
137
+
}
138
+
139
+
impl<S, N> FormatEvent<S, N> for WireEventFormat
140
+
where
141
+
S: Subscriber + for<'a> LookupSpan<'a>,
142
+
N: for<'a> FormatFields<'a> + 'static,
143
+
{
144
+
fn format_event(
145
+
&self,
146
+
ctx: &tracing_subscriber::fmt::FmtContext<'_, S, N>,
147
+
mut writer: tracing_subscriber::fmt::format::Writer<'_>,
148
+
event: &tracing::Event<'_>,
149
+
) -> std::fmt::Result {
150
+
let metadata = event.metadata();
151
+
152
+
// skip events without an "event_scope"
153
+
let Some(scope) = ctx.event_scope() else {
154
+
return self.0.format_event(ctx, writer, event);
155
+
};
156
+
157
+
// skip spans without a parent
158
+
let Some(parent) = scope.last() else {
159
+
return self.0.format_event(ctx, writer, event);
160
+
};
161
+
162
+
// skip spans that don't refer to the goal step executor
163
+
if parent.name() != "execute" {
164
+
return self.0.format_event(ctx, writer, event);
165
+
}
166
+
167
+
// skip spans that don't refer to a specific node being executed
168
+
if parent.fields().field("node").is_none() {
169
+
return self.0.format_event(ctx, writer, event);
170
+
}
171
+
172
+
let style = get_style(*metadata.level());
173
+
174
+
// write the log level with colour
175
+
write!(
176
+
writer,
177
+
"{} ",
178
+
fmt_level(*metadata.level()).if_supports_color(Stream::Stderr, |x| { x.style(style) })
179
+
)?;
180
+
181
+
// extract the formatted node name into a string
182
+
let parent_ext = parent.extensions();
183
+
let node_name = &parent_ext
184
+
.get::<FormattedFields<WireFieldFormat>>()
185
+
.unwrap();
186
+
187
+
write!(writer, "{node_name}")?;
188
+
189
+
// write the step name
190
+
if let Some(step) = ctx.event_scope().unwrap().from_root().nth(1) {
191
+
write!(writer, " {}", step.name().italic())?;
192
+
}
193
+
194
+
write!(writer, " | ")?;
195
+
196
+
// write the default fields, including the actual message and other data
197
+
let mut fields = FormattedFields::<DefaultFields>::new(String::new());
198
+
199
+
ctx.format_fields(fields.as_writer(), event)?;
200
+
201
+
write!(writer, "{fields}")?;
202
+
writeln!(writer)?;
203
+
204
+
Ok(())
205
+
}
206
+
}
207
+
208
+
impl<S> Layer<S> for WireLayer
209
+
where
210
+
S: Subscriber + for<'a> LookupSpan<'a>,
211
+
{
212
+
fn on_new_span(
213
+
&self,
214
+
attrs: &tracing::span::Attributes<'_>,
215
+
id: &tracing::span::Id,
216
+
ctx: Context<'_, S>,
217
+
) {
218
+
let span = ctx.span(id).unwrap();
219
+
220
+
if span.extensions().get::<WireFieldFormat>().is_some() {
221
+
return;
222
+
}
223
+
224
+
let mut fields = FormattedFields::<WireFieldFormat>::new(String::new());
225
+
if WireFieldFormat
226
+
.format_fields(fields.as_writer(), attrs)
227
+
.is_ok()
228
+
{
229
+
span.extensions_mut().insert(fields);
230
+
}
231
+
}
232
+
}
233
+
234
+
async fn status_tick_worker() {
235
+
let mut interval = tokio::time::interval(Duration::from_secs(1));
236
+
let mut stderr = stderr();
237
+
238
+
loop {
239
+
interval.tick().await;
240
+
241
+
if STDIN_CLOBBER_LOCK.available_permits() < 1 {
242
+
continue;
243
+
}
244
+
245
+
let mut status = STATUS.lock();
246
+
247
+
status.clear(&mut stderr);
248
+
status.write_status(&mut stderr);
249
+
}
250
+
}
251
+
252
+
/// Set up logging for the application
253
+
/// Uses `WireEventFormat` and `WireLayer` if -v was never passed
254
+
pub fn setup_logging<L: LogLevel>(verbosity: &Verbosity<L>, show_progress: bool) {
255
+
let filter = verbosity.log_level_filter().as_trace();
256
+
let registry = tracing_subscriber::registry();
257
+
258
+
STATUS.lock().show_progress(show_progress);
259
+
260
+
// spawn worker to tick the status bar
261
+
if show_progress {
262
+
tokio::spawn(status_tick_worker());
263
+
}
264
+
265
+
if verbosity.is_present() {
266
+
let layer = tracing_subscriber::fmt::layer()
267
+
.without_time()
268
+
.with_target(false)
269
+
.with_writer(NonClobberingWriter::new)
270
+
.with_filter(filter);
271
+
272
+
registry.with(layer).init();
273
+
return;
274
+
}
275
+
276
+
let event_formatter = WireEventFormat(format::format().without_time().with_target(false));
277
+
278
+
let layer = tracing_subscriber::fmt::layer()
279
+
.event_format(event_formatter)
280
+
.with_writer(NonClobberingWriter::new)
281
+
.with_filter(filter);
282
+
283
+
registry.with(layer).with(WireLayer).init();
284
+
}
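
The queue-and-flush behaviour of NonClobberingWriter is easy to lose in the tracing plumbing above, so here is a minimal, self-contained sketch of the same idea. It uses only the standard library; a plain boolean stands in for the semaphore-backed STDIN_CLOBBER_LOCK and the status-line handling is omitted, so treat it as an illustration of the buffering pattern rather than the real implementation.

use std::collections::VecDeque;
use std::io::{self, Stderr, Write, stderr};

/// Simplified stand-in for NonClobberingWriter: `prompt_active` plays the
/// role of the STDIN_CLOBBER_LOCK permits.
struct BufferingWriter {
    prompt_active: bool,
    queue: VecDeque<Vec<u8>>,
    stderr: Stderr,
}

impl BufferingWriter {
    fn new() -> Self {
        Self {
            prompt_active: false,
            queue: VecDeque::new(),
            stderr: stderr(),
        }
    }

    /// Flush everything that was held back while a prompt was on screen,
    /// oldest line first.
    fn flush_backlog(&mut self) -> io::Result<()> {
        while let Some(buf) = self.queue.pop_back() {
            self.stderr.write_all(&buf)?;
        }
        Ok(())
    }
}

impl Write for BufferingWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if self.prompt_active {
            // Hold the line so it cannot clobber the prompt.
            self.queue.push_front(buf.to_vec());
        } else {
            self.flush_backlog()?;
            self.stderr.write_all(buf)?;
        }
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.stderr.flush()
    }
}

fn main() -> io::Result<()> {
    let mut writer = BufferingWriter::new();
    writer.prompt_active = true;
    writer.write_all(b"held while the sudo prompt is up\n")?;
    writer.prompt_active = false;
    writer.write_all(b"backlog flushed first, then this line\n")?;
    Ok(())
}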
+51
crates/core/Cargo.toml
···
1
+
[package]
2
+
name = "wire-core"
3
+
version.workspace = true
4
+
edition.workspace = true
5
+
6
+
[lints]
7
+
workspace = true
8
+
9
+
[features]
10
+
no_web_tests = []
11
+
12
+
[dependencies]
13
+
tokio = { workspace = true }
14
+
serde = { workspace = true }
15
+
serde_json = { workspace = true }
16
+
tracing = { workspace = true }
17
+
im = { workspace = true }
18
+
thiserror = "2.0.17"
19
+
derive_more = { version = "2.0.1", features = ["display"] }
20
+
wire-key-agent = { path = "../key_agent" }
21
+
futures = "0.3.31"
22
+
prost = { workspace = true }
23
+
gethostname = "1.1.0"
24
+
nix.workspace = true
25
+
miette = { workspace = true }
26
+
rand = "0.9.2"
27
+
tokio-util = { workspace = true }
28
+
portable-pty = "0.9.0"
29
+
anyhow.workspace = true
30
+
itertools = "0.14.0"
31
+
enum_dispatch = "0.3.13"
32
+
sha2 = { workspace = true }
33
+
base64 = { workspace = true }
34
+
nix-compat = { workspace = true }
35
+
strip-ansi-escapes = "0.2.1"
36
+
aho-corasick = "1.1.4"
37
+
num_enum = "0.7.5"
38
+
gjson = "0.8.1"
39
+
owo-colors = { workspace = true }
40
+
termion = "4.0.6"
41
+
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
42
+
zstd = "0.13.3"
43
+
44
+
[dev-dependencies]
45
+
tempdir = "0.3"
46
+
47
+
[build-dependencies]
48
+
miette = { workspace = true }
49
+
syn = "2.0.109"
50
+
proc-macro2 = "1.0.103"
51
+
itertools = "0.14.0"
+206
crates/core/build.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use miette::{Context, IntoDiagnostic as _, Result, miette};
5
+
use std::fmt::Write;
6
+
use std::{
7
+
env,
8
+
fmt::{self, Display, Formatter},
9
+
fs::{self},
10
+
path::Path,
11
+
};
12
+
13
+
use itertools::Itertools;
14
+
use proc_macro2::TokenTree;
15
+
use syn::{Expr, Item, ItemEnum, Lit, Meta, MetaList, MetaNameValue, parse_file};
16
+
17
+
macro_rules! p {
18
+
($($tokens: tt)*) => {
19
+
println!("cargo::warning={}", format!($($tokens)*))
20
+
}
21
+
}
22
+
23
+
#[derive(Debug)]
24
+
struct DerivedError {
25
+
code: Option<String>,
26
+
help: Option<String>,
27
+
message: Option<String>,
28
+
doc_string: String,
29
+
}
30
+
31
+
impl Display for DerivedError {
32
+
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
33
+
write!(
34
+
f,
35
+
"## `{code}` {{#{code}}}
36
+
37
+
{doc}
38
+
{message}
39
+
{help}",
40
+
doc = self.doc_string,
41
+
code = self.code.as_ref().unwrap(),
42
+
help = match &self.help {
43
+
Some(help) => format!(
44
+
"
45
+
::: tip HELP
46
+
{help}
47
+
:::"
48
+
),
49
+
None => String::new(),
50
+
},
51
+
message = match &self.message {
52
+
Some(message) => format!(
53
+
"
54
+
```txt [message]
55
+
{message}
56
+
```"
57
+
),
58
+
None => String::new(),
59
+
}
60
+
)
61
+
}
62
+
}
63
+
64
+
impl DerivedError {
65
+
fn get_error(&mut self, list: &MetaList) -> Result<(), miette::Error> {
66
+
if list.path.segments.last().unwrap().ident != "error" {
67
+
return Err(miette!("Not an error"));
68
+
}
69
+
70
+
self.message = Some(
71
+
list.tokens
72
+
.clone()
73
+
.into_iter()
74
+
.filter(|tok| matches!(tok, TokenTree::Literal(tok) if tok.to_string().starts_with('"')))
75
+
.map(|tok| tok.to_string())
76
+
.join(""),
77
+
);
78
+
79
+
Err(miette!("No error msg found"))
80
+
}
81
+
82
+
fn update_diagnostic(&mut self, list: &MetaList) -> Result<(), miette::Error> {
83
+
if list.path.segments.last().unwrap().ident != "diagnostic" {
84
+
return Err(miette!("Not a diagnostic"));
85
+
}
86
+
87
+
let vec: Vec<_> = list.tokens.clone().into_iter().collect();
88
+
89
+
// Find `diagnostic(code(x::y::z))`
90
+
let code: Option<String> = if let Some((_, TokenTree::Group(group))) =
91
+
vec.iter().tuple_windows().find(|(ident, group)| {
92
+
matches!(ident, TokenTree::Ident(ident) if ident == "code")
93
+
&& matches!(group, TokenTree::Group(..))
94
+
}) {
95
+
Some(group.stream().to_string().replace(' ', ""))
96
+
} else {
97
+
None
98
+
};
99
+
100
+
// Find `diagnostic(help("hi"))`
101
+
let help: Option<String> = if let Some((_, TokenTree::Group(group))) =
102
+
vec.iter().tuple_windows().find(|(ident, group)| {
103
+
matches!(ident, TokenTree::Ident(ident) if ident == "help")
104
+
&& matches!(group, TokenTree::Group(..))
105
+
}) {
106
+
Some(group.stream().to_string())
107
+
} else {
108
+
None
109
+
};
110
+
111
+
if let Some(code) = code {
112
+
self.code = Some(code);
113
+
self.help = help;
114
+
return Ok(());
115
+
}
116
+
117
+
Err(miette!("Had no code."))
118
+
}
119
+
120
+
fn update_from_list(&mut self, list: &MetaList) {
121
+
let _ = self.get_error(list);
122
+
let _ = self.update_diagnostic(list);
123
+
}
124
+
125
+
fn update_from_namevalue(&mut self, list: MetaNameValue) -> Result<(), miette::Error> {
126
+
if list.path.segments.last().unwrap().ident != "doc" {
127
+
return Err(miette!("Not a doc string"));
128
+
}
129
+
130
+
if let Expr::Lit(lit) = list.value
131
+
&& let Lit::Str(str) = lit.lit
132
+
{
133
+
let _ = write!(self.doc_string, "{}\n\n", &str.value()[1..]);
134
+
}
135
+
136
+
Ok(())
137
+
}
138
+
}
139
+
140
+
fn main() -> Result<()> {
141
+
println!("cargo:rerun-if-changed=src/errors.rs");
142
+
143
+
let manifest_dir = env::var("CARGO_MANIFEST_DIR").into_diagnostic()?;
144
+
let Ok(md_out_dir) = env::var("DIAGNOSTICS_MD_OUTPUT") else {
145
+
return Ok(());
146
+
};
147
+
148
+
let src_path = Path::new(&manifest_dir).join("src/errors.rs");
149
+
let src = fs::read_to_string(&src_path)
150
+
.into_diagnostic()
151
+
.wrap_err("reading errors.rs")?;
152
+
153
+
let syntax_tree = parse_file(&src)
154
+
.into_diagnostic()
155
+
.wrap_err("parsing errors.rs")?;
156
+
let mut entries: Vec<DerivedError> = Vec::new();
157
+
158
+
for item in &syntax_tree.items {
159
+
if let Item::Enum(ItemEnum { variants, .. }) = item {
160
+
for variant in variants {
161
+
let mut entry = DerivedError {
162
+
code: None,
163
+
help: None,
164
+
message: None,
165
+
doc_string: String::new(),
166
+
};
167
+
168
+
for attribute in variant.attrs.clone() {
169
+
match attribute.meta {
170
+
Meta::List(list) => {
171
+
entry.update_from_list(&list);
172
+
}
173
+
Meta::NameValue(nv) => {
174
+
let _ = entry.update_from_namevalue(nv);
175
+
}
176
+
Meta::Path(_) => {}
177
+
}
178
+
}
179
+
180
+
if entry.code.is_some() {
181
+
entries.push(entry);
182
+
}
183
+
}
184
+
}
185
+
}
186
+
187
+
fs::create_dir_all(Path::new(&md_out_dir))
188
+
.into_diagnostic()
189
+
.wrap_err("creating target directory")?;
190
+
fs::write(
191
+
Path::new(&md_out_dir).join("DIAGNOSTICS.md"),
192
+
entries
193
+
.iter()
194
+
.map(std::string::ToString::to_string)
195
+
.join("\n\n"),
196
+
)
197
+
.into_diagnostic()
198
+
.wrap_err("writing DIAGNOSTICS.md")?;
199
+
200
+
p!(
201
+
"wrote to {:?}",
202
+
Path::new(&md_out_dir).join("DIAGNOSTICS.md")
203
+
);
204
+
205
+
Ok(())
206
+
}
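
For context on what build.rs above is parsing: it walks the enums in src/errors.rs and turns each variant's doc comment, #[error(...)] message and #[diagnostic(code(...), help(...))] attributes into a section of DIAGNOSTICS.md. A hypothetical variant of the shape it expects (the name, message and help text here are illustrative, not taken from errors.rs) would look like:

use miette::Diagnostic;
use thiserror::Error;

#[derive(Debug, Error, Diagnostic)]
pub enum ExampleError {
    /// Raised when the target host cannot be resolved.
    #[error("could not resolve host {0}")]
    #[diagnostic(
        code(wire::example::unresolved_host),
        help("check the `target.host` value for this node")
    )]
    UnresolvedHost(String),
}

With DIAGNOSTICS_MD_OUTPUT set, a variant like this would come out as a "## `wire::example::unresolved_host`" heading followed by the doc string, the message in a txt block, and the help text in a tip block.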
+13
crates/core/src/cache/migrations/20251124234730_init.sql
···
1
+
create table hive_inspection (
2
+
id integer primary key autoincrement,
3
+
json_value text not null unique
4
+
) strict;
5
+
6
+
create table cached_inspection (
7
+
store_path text,
8
+
hash text,
9
+
10
+
inspection_id integer references hive_inspection(id) not null,
11
+
12
+
primary key (store_path, hash)
13
+
) strict;
+16
crates/core/src/cache/migrations/20251126222409_blobs.sql
···
1
+
create table inspection_blobs (
2
+
id integer primary key autoincrement,
3
+
json_value blob not null unique,
4
+
schema_version integer not null
5
+
) strict;
6
+
7
+
create table inspection_cache (
8
+
store_path text,
9
+
hash text,
10
+
blob_id integer references inspection_blobs (id) not null,
11
+
primary key (store_path, hash)
12
+
) strict;
13
+
14
+
drop table cached_inspection;
15
+
16
+
drop table hive_inspection;
+237
crates/core/src/cache/mod.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::{
5
+
env,
6
+
path::{Path, PathBuf},
7
+
};
8
+
9
+
use sqlx::{
10
+
Pool, Sqlite,
11
+
migrate::Migrator,
12
+
sqlite::{SqliteConnectOptions, SqlitePoolOptions},
13
+
};
14
+
use tokio::fs::create_dir_all;
15
+
use tracing::{debug, error, trace};
16
+
17
+
use crate::hive::{FlakePrefetch, Hive};
18
+
19
+
#[derive(Clone)]
20
+
pub struct InspectionCache {
21
+
pool: Pool<Sqlite>,
22
+
}
23
+
24
+
static MIGRATOR: Migrator = sqlx::migrate!("src/cache/migrations");
25
+
26
+
async fn get_cache_directory() -> Option<PathBuf> {
27
+
let home = PathBuf::from(
28
+
env::var("HOME")
29
+
.inspect_err(|_| error!("HOME env var not found"))
30
+
.ok()?,
31
+
);
32
+
33
+
trace!(home = ?home);
34
+
35
+
let cache_home = env::var("XDG_CACHE_HOME")
36
+
.inspect_err(|_| debug!("XDG_CACHE_HOME not found"))
37
+
.ok()
38
+
.map(PathBuf::from)
39
+
.unwrap_or(home.join(".cache"));
40
+
41
+
let cache_directory = cache_home.join("wire");
42
+
43
+
trace!(cache_directory = ?cache_directory);
44
+
45
+
let _ = create_dir_all(&cache_directory).await;
46
+
47
+
Some(cache_directory)
48
+
}
49
+
50
+
impl InspectionCache {
51
+
pub async fn new() -> Option<Self> {
52
+
let cache_path = get_cache_directory().await?.join("inspect.db");
53
+
debug!(cache_path = ?cache_path);
54
+
55
+
let pool = SqlitePoolOptions::new()
56
+
.max_connections(1)
57
+
.connect_with(
58
+
SqliteConnectOptions::new()
59
+
.filename(cache_path)
60
+
.create_if_missing(true),
61
+
)
62
+
.await
63
+
.inspect_err(|x| error!("failed to open cache db: {x}"))
64
+
.ok()?;
65
+
66
+
MIGRATOR
67
+
.run(&pool)
68
+
.await
69
+
.inspect_err(|err| error!("failed to run cache migrations: {err:?}"))
70
+
.ok()?;
71
+
72
+
Some(Self { pool })
73
+
}
74
+
75
+
fn cache_invalid(store_path: &String) -> bool {
76
+
let path = Path::new(store_path);
77
+
78
+
// possible TOCTOU
79
+
!path.exists()
80
+
}
81
+
82
+
pub async fn get_hive(&self, prefetch: &FlakePrefetch) -> Option<Hive> {
83
+
struct Query {
84
+
json_value: Vec<u8>,
85
+
store_path: String,
86
+
}
87
+
88
+
let cached_blob = sqlx::query_as!(
89
+
Query,
90
+
"
91
+
select
92
+
inspection_blobs.json_value,
93
+
inspection_cache.store_path
94
+
from
95
+
inspection_blobs
96
+
join inspection_cache on inspection_cache.blob_id = inspection_blobs.id
97
+
where
98
+
inspection_cache.store_path = $1
99
+
and inspection_cache.hash = $2
100
+
and inspection_blobs.schema_version = $3
101
+
limit
102
+
1
103
+
",
104
+
prefetch.store_path,
105
+
prefetch.hash,
106
+
Hive::SCHEMA_VERSION
107
+
)
108
+
.fetch_optional(&self.pool)
109
+
.await
110
+
.inspect_err(|x| error!("failed to fetch cached hive: {x}"))
111
+
.ok()??;
112
+
113
+
// the cached path may have been garbage collected, discard it
114
+
// it is quite hard to replicate this bug but it's occurred to me
115
+
// at least once
116
+
if Self::cache_invalid(&cached_blob.store_path) {
117
+
trace!("discarding cache that does not exist in the nix store");
118
+
return None;
119
+
}
120
+
121
+
trace!(
122
+
"read {} bytes of zstd data from cache",
123
+
cached_blob.json_value.len()
124
+
);
125
+
126
+
let json_string = zstd::decode_all(cached_blob.json_value.as_slice())
127
+
.inspect_err(|err| error!("failed to decode cached zstd data: {err}"))
128
+
.ok()?;
129
+
130
+
trace!(
131
+
"inflated {} > {} in decoding",
132
+
cached_blob.json_value.len(),
133
+
json_string.len()
134
+
);
135
+
136
+
serde_json::from_slice(&json_string)
137
+
.inspect_err(|err| {
138
+
error!("could not use cached evaluation: {err}");
139
+
})
140
+
.ok()
141
+
}
142
+
143
+
pub async fn store_hive(&self, prefetch: &FlakePrefetch, json_value: &String) {
144
+
let Ok(json_value) = zstd::encode_all(json_value.as_bytes(), 0)
145
+
.inspect_err(|err| error!("failed to encode data w/ zstd: {err}"))
146
+
else {
147
+
return;
148
+
};
149
+
150
+
let hive_inspection = sqlx::query_scalar!(
151
+
"
152
+
insert into inspection_blobs (json_value, schema_version)
153
+
values ($1, $2)
154
+
on conflict(json_value)
155
+
do update set json_value = excluded.json_value
156
+
returning inspection_blobs.id
157
+
",
158
+
json_value,
159
+
Hive::SCHEMA_VERSION
160
+
)
161
+
.fetch_one(&self.pool)
162
+
.await
163
+
.inspect_err(|x| error!("could not insert hive_inspection: {x}"));
164
+
165
+
let Ok(blob_id) = hive_inspection else {
166
+
return;
167
+
};
168
+
169
+
let cached_inspection = sqlx::query!(
170
+
"
171
+
insert into
172
+
inspection_cache (store_path, hash, blob_id)
173
+
values
174
+
($1, $2, $3)
175
+
",
176
+
prefetch.store_path,
177
+
prefetch.hash,
178
+
blob_id
179
+
)
180
+
.execute(&self.pool)
181
+
.await;
182
+
183
+
if let Err(err) = cached_inspection {
184
+
error!("could not insert cached_inspection: {err}");
185
+
}
186
+
}
187
+
188
+
pub async fn gc(&self) -> Result<(), sqlx::Error> {
189
+
// keep newest 30 AND
190
+
// delete caches that refer to a blob w/ wrong schema
191
+
sqlx::query!(
192
+
"delete from inspection_cache
193
+
where
194
+
blob_id in (
195
+
select
196
+
id
197
+
from
198
+
inspection_blobs
199
+
where
200
+
schema_version != $1
201
+
)
202
+
or ROWID in (
203
+
select
204
+
ROWID
205
+
from
206
+
inspection_cache
207
+
order by
208
+
ROWID desc
209
+
limit
210
+
-1
211
+
offset
212
+
30
213
+
)",
214
+
Hive::SCHEMA_VERSION
215
+
)
216
+
.execute(&self.pool)
217
+
.await?;
218
+
219
+
// delete orphaned blobs
220
+
sqlx::query!(
221
+
"delete from inspection_blobs
222
+
where
223
+
not exists (
224
+
select
225
+
1
226
+
from
227
+
inspection_cache
228
+
where
229
+
inspection_cache.blob_id = inspection_blobs.id
230
+
)"
231
+
)
232
+
.execute(&self.pool)
233
+
.await?;
234
+
235
+
Ok(())
236
+
}
237
+
}
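
Taken together, InspectionCache is meant to be consulted before an expensive evaluation and populated after one. A rough in-crate sketch of the intended call order, assuming the FlakePrefetch carries the prefetched store_path and hash; evaluate_hive_json is a placeholder stub standing in for the real evaluation call, not a function introduced by this PR.

use crate::cache::InspectionCache;
use crate::hive::{FlakePrefetch, Hive};

/// Placeholder for the real `nix eval` invocation; not part of this PR.
async fn evaluate_hive_json(_prefetch: &FlakePrefetch) -> Option<String> {
    unimplemented!("stand-in for the real evaluation call")
}

async fn inspect_with_cache(prefetch: &FlakePrefetch) -> Option<Hive> {
    // Cache setup is best-effort: any failure simply disables caching.
    let cache = InspectionCache::new().await?;

    // Fast path: a blob keyed by (store_path, hash) with a matching schema_version.
    if let Some(hive) = cache.get_hive(prefetch).await {
        return Some(hive);
    }

    // Slow path: evaluate, then store the raw JSON for next time.
    let json: String = evaluate_hive_json(prefetch).await?;
    cache.store_hive(prefetch, &json).await;

    // Opportunistically trim old entries and blobs with a stale schema_version.
    let _ = cache.gc().await;

    serde_json::from_str(&json).ok()
}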
+74
crates/core/src/commands/builder.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt;
5
+
6
+
pub(crate) struct CommandStringBuilder {
7
+
command: String,
8
+
}
9
+
10
+
impl CommandStringBuilder {
11
+
pub(crate) fn nix() -> Self {
12
+
Self {
13
+
command: "nix".to_string(),
14
+
}
15
+
}
16
+
17
+
pub(crate) fn new<S: AsRef<str>>(s: S) -> Self {
18
+
Self {
19
+
command: s.as_ref().trim().to_string(),
20
+
}
21
+
}
22
+
23
+
pub(crate) fn arg<S: AsRef<str>>(&mut self, argument: S) {
24
+
let argument = argument.as_ref().trim();
25
+
self.command.push(' ');
26
+
self.command.push_str(argument);
27
+
}
28
+
29
+
pub(crate) fn opt_arg<S: AsRef<str>>(&mut self, opt: bool, argument: S) {
30
+
if !opt {
31
+
return;
32
+
}
33
+
34
+
self.arg(argument);
35
+
}
36
+
37
+
pub(crate) fn args<S: AsRef<str>>(&mut self, arguments: &[S]) {
38
+
for arg in arguments {
39
+
self.arg(arg);
40
+
}
41
+
}
42
+
}
43
+
44
+
impl fmt::Display for CommandStringBuilder {
45
+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
46
+
write!(f, "{}", self.command)
47
+
}
48
+
}
49
+
50
+
impl AsRef<str> for CommandStringBuilder {
51
+
fn as_ref(&self) -> &str {
52
+
&self.command
53
+
}
54
+
}
55
+
56
+
#[cfg(test)]
57
+
mod tests {
58
+
use crate::commands::builder::CommandStringBuilder;
59
+
60
+
#[test]
61
+
fn command_builder() {
62
+
let mut builder = CommandStringBuilder::new("a");
63
+
builder.arg(" b ");
64
+
builder.args(&[" c ", "d", "e"]);
65
+
builder.opt_arg(false, "f");
66
+
builder.opt_arg(true, "g");
67
+
68
+
assert_eq!(
69
+
builder.to_string(),
70
+
std::convert::AsRef::<str>::as_ref(&builder)
71
+
);
72
+
assert_eq!(builder.to_string(), "a b c d e g");
73
+
}
74
+
}
+177
crates/core/src/commands/common.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::collections::HashMap;
5
+
6
+
use tracing::instrument;
7
+
8
+
use crate::{
9
+
EvalGoal, SubCommandModifiers,
10
+
commands::{
11
+
CommandArguments, Either, WireCommandChip, builder::CommandStringBuilder, run_command,
12
+
run_command_with_env,
13
+
},
14
+
errors::{CommandError, HiveInitialisationError, HiveLibError},
15
+
hive::{
16
+
HiveLocation,
17
+
node::{Context, Objective, Push},
18
+
},
19
+
};
20
+
21
+
fn get_common_copy_path_help(error: &CommandError) -> Option<String> {
22
+
if let CommandError::CommandFailed { logs, .. } = error
23
+
&& (logs.contains("error: unexpected end-of-file"))
24
+
{
25
+
Some("wire requires the deploying user or wire binary cache is trusted on the remote server. if you're attempting to make that change, skip keys with --no-keys. please read https://wire.althaea.zone/guides/keys for more information".to_string())
26
+
} else {
27
+
None
28
+
}
29
+
}
30
+
31
+
pub async fn push(context: &Context<'_>, push: Push<'_>) -> Result<(), HiveLibError> {
32
+
let mut command_string = CommandStringBuilder::nix();
33
+
34
+
command_string.args(&["--extra-experimental-features", "nix-command", "copy"]);
35
+
if let Objective::Apply(apply_objective) = context.objective {
36
+
command_string.opt_arg(
37
+
apply_objective.substitute_on_destination,
38
+
"--substitute-on-destination",
39
+
);
40
+
}
41
+
command_string.arg("--to");
42
+
command_string.args(&[
43
+
format!(
44
+
"ssh://{user}@{host}",
45
+
user = context.node.target.user,
46
+
host = context.node.target.get_preferred_host()?,
47
+
),
48
+
match push {
49
+
Push::Derivation(drv) => format!("{drv} --derivation"),
50
+
Push::Path(path) => path.clone(),
51
+
},
52
+
]);
53
+
54
+
let child = run_command_with_env(
55
+
&CommandArguments::new(command_string, context.modifiers)
56
+
.mode(crate::commands::ChildOutputMode::Nix),
57
+
HashMap::from([(
58
+
"NIX_SSHOPTS".into(),
59
+
context
60
+
.node
61
+
.target
62
+
.create_ssh_opts(context.modifiers, false)?,
63
+
)]),
64
+
)
65
+
.await?;
66
+
67
+
let status = child.wait_till_success().await;
68
+
69
+
let help = if let Err(ref error) = status {
70
+
get_common_copy_path_help(error).map(Box::new)
71
+
} else {
72
+
None
73
+
};
74
+
75
+
status.map_err(|error| HiveLibError::NixCopyError {
76
+
name: context.name.clone(),
77
+
path: push.to_string(),
78
+
error: Box::new(error),
79
+
help,
80
+
})?;
81
+
82
+
Ok(())
83
+
}
84
+
85
+
fn get_common_command_help(error: &CommandError) -> Option<String> {
86
+
if let CommandError::CommandFailed { logs, .. } = error
87
+
// marshmallow: you're using this repo as a hive, you idiot
88
+
&& (logs.contains("attribute 'inspect' missing")
89
+
// using a flake that does not provide `wire`
90
+
|| logs.contains("does not provide attribute 'packages.x86_64-linux.wire'")
91
+
// using a file called `hive.nix` that is not actually a hive
92
+
|| logs.contains("attribute 'inspect' in selection path"))
93
+
{
94
+
Some("Double check this `--path` or `--flake` is a wire hive. You may be pointing to the wrong directory.".to_string())
95
+
} else {
96
+
None
97
+
}
98
+
}
99
+
100
+
pub async fn get_hive_node_names(
101
+
location: &HiveLocation,
102
+
modifiers: SubCommandModifiers,
103
+
) -> Result<Vec<String>, HiveLibError> {
104
+
let output = evaluate_hive_attribute(location, &EvalGoal::Names, modifiers).await?;
105
+
serde_json::from_str(&output).map_err(|err| {
106
+
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParseEvaluateError(err))
107
+
})
108
+
}
109
+
110
+
/// Evaluates the hive in flakeref with regards to the given goal,
111
+
/// and returns stdout.
112
+
#[instrument(ret(level = tracing::Level::TRACE), skip_all)]
113
+
pub async fn evaluate_hive_attribute(
114
+
location: &HiveLocation,
115
+
goal: &EvalGoal<'_>,
116
+
modifiers: SubCommandModifiers,
117
+
) -> Result<String, HiveLibError> {
118
+
let attribute = match location {
119
+
HiveLocation::Flake { uri, .. } => {
120
+
format!(
121
+
"{uri}#wire --apply \"hive: {}\"",
122
+
match goal {
123
+
EvalGoal::Inspect => "hive.inspect".to_string(),
124
+
EvalGoal::Names => "hive.names".to_string(),
125
+
EvalGoal::GetTopLevel(node) => format!("hive.topLevels.{node}"),
126
+
}
127
+
)
128
+
}
129
+
HiveLocation::HiveNix(path) => {
130
+
format!(
131
+
"--file {} {}",
132
+
&path.to_string_lossy(),
133
+
match goal {
134
+
EvalGoal::Inspect => "inspect".to_string(),
135
+
EvalGoal::Names => "names".to_string(),
136
+
EvalGoal::GetTopLevel(node) => format!("topLevels.{node}"),
137
+
}
138
+
)
139
+
}
140
+
};
141
+
142
+
let mut command_string = CommandStringBuilder::nix();
143
+
command_string.args(&[
144
+
"--extra-experimental-features",
145
+
"nix-command",
146
+
"--extra-experimental-features",
147
+
"flakes",
148
+
"eval",
149
+
"--json",
150
+
]);
151
+
command_string.opt_arg(modifiers.show_trace, "--show-trace");
152
+
command_string.arg(&attribute);
153
+
154
+
let child = run_command(
155
+
&CommandArguments::new(command_string, modifiers)
156
+
.mode(crate::commands::ChildOutputMode::Nix),
157
+
)
158
+
.await?;
159
+
160
+
let status = child.wait_till_success().await;
161
+
162
+
let help = if let Err(ref error) = status {
163
+
get_common_command_help(error).map(Box::new)
164
+
} else {
165
+
None
166
+
};
167
+
168
+
status
169
+
.map_err(|source| HiveLibError::NixEvalError {
170
+
attribute,
171
+
source,
172
+
help,
173
+
})
174
+
.map(|x| match x {
175
+
Either::Left((_, stdout)) | Either::Right((_, stdout)) => stdout,
176
+
})
177
+
}
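
The two HiveLocation arms in evaluate_hive_attribute produce quite different attribute strings, which is easy to miss in the format! calls above. Below is a small, self-contained test sketch (the uri and path values are made up) showing the shapes produced for EvalGoal::Inspect.

#[cfg(test)]
mod eval_attribute_shape {
    #[test]
    fn flake_vs_file_attribute_shape() {
        // Flake-based hive: evaluate `wire` from the flake and apply the goal.
        let flake_attr = format!(
            "{uri}#wire --apply \"hive: {goal}\"",
            uri = "github:example/hive",
            goal = "hive.inspect"
        );
        // Plain hive.nix: point nix at the file and select the attribute directly.
        let file_attr = format!("--file {path} {goal}", path = "/etc/hive.nix", goal = "inspect");

        assert_eq!(
            flake_attr,
            "github:example/hive#wire --apply \"hive: hive.inspect\""
        );
        assert_eq!(file_attr, "--file /etc/hive.nix inspect");
    }
}

Both variants are then appended to the same `nix ... eval --json` prefix built by CommandStringBuilder.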
+240
crates/core/src/commands/mod.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use crate::commands::pty::{InteractiveChildChip, interactive_command_with_env};
5
+
use std::{collections::HashMap, str::from_utf8, sync::LazyLock};
6
+
7
+
use aho_corasick::AhoCorasick;
8
+
use gjson::Value;
9
+
use itertools::Itertools;
10
+
use nix_compat::log::{AT_NIX_PREFIX, VerbosityLevel};
11
+
use num_enum::TryFromPrimitive;
12
+
use tracing::{debug, error, info, trace, warn};
13
+
14
+
use crate::{
15
+
SubCommandModifiers,
16
+
commands::noninteractive::{NonInteractiveChildChip, non_interactive_command_with_env},
17
+
errors::{CommandError, HiveLibError},
18
+
hive::node::{Node, Target},
19
+
};
20
+
21
+
pub(crate) mod builder;
22
+
pub mod common;
23
+
pub(crate) mod noninteractive;
24
+
pub(crate) mod pty;
25
+
26
+
#[derive(Copy, Clone, Debug)]
27
+
pub(crate) enum ChildOutputMode {
28
+
Nix,
29
+
Generic,
30
+
Interactive,
31
+
}
32
+
33
+
#[derive(Debug)]
34
+
pub enum Either<L, R> {
35
+
Left(L),
36
+
Right(R),
37
+
}
38
+
39
+
#[derive(Debug)]
40
+
pub(crate) struct CommandArguments<'t, S: AsRef<str>> {
41
+
modifiers: SubCommandModifiers,
42
+
target: Option<&'t Target>,
43
+
output_mode: ChildOutputMode,
44
+
command_string: S,
45
+
keep_stdin_open: bool,
46
+
privilege_escalation_command: Option<String>,
47
+
log_stdout: bool,
48
+
}
49
+
50
+
static AHO_CORASICK: LazyLock<AhoCorasick> = LazyLock::new(|| {
51
+
AhoCorasick::builder()
52
+
.ascii_case_insensitive(false)
53
+
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
54
+
.build([AT_NIX_PREFIX])
55
+
.unwrap()
56
+
});
57
+
58
+
impl<'a, S: AsRef<str>> CommandArguments<'a, S> {
59
+
pub(crate) const fn new(command_string: S, modifiers: SubCommandModifiers) -> Self {
60
+
Self {
61
+
command_string,
62
+
keep_stdin_open: false,
63
+
privilege_escalation_command: None,
64
+
log_stdout: false,
65
+
target: None,
66
+
output_mode: ChildOutputMode::Generic,
67
+
modifiers,
68
+
}
69
+
}
70
+
71
+
pub(crate) const fn execute_on_remote(mut self, target: Option<&'a Target>) -> Self {
72
+
self.target = target;
73
+
self
74
+
}
75
+
76
+
pub(crate) const fn mode(mut self, mode: ChildOutputMode) -> Self {
77
+
self.output_mode = mode;
78
+
self
79
+
}
80
+
81
+
pub(crate) const fn keep_stdin_open(mut self) -> Self {
82
+
self.keep_stdin_open = true;
83
+
self
84
+
}
85
+
86
+
pub(crate) fn elevated(mut self, node: &Node) -> Self {
87
+
self.privilege_escalation_command =
88
+
Some(node.privilege_escalation_command.iter().join(" "));
89
+
self
90
+
}
91
+
92
+
pub(crate) const fn is_elevated(&self) -> bool {
93
+
self.privilege_escalation_command.is_some()
94
+
}
95
+
96
+
pub(crate) const fn log_stdout(mut self) -> Self {
97
+
self.log_stdout = true;
98
+
self
99
+
}
100
+
}
101
+
102
+
pub(crate) async fn run_command<S: AsRef<str>>(
103
+
arguments: &CommandArguments<'_, S>,
104
+
) -> Result<Either<InteractiveChildChip, NonInteractiveChildChip>, HiveLibError> {
105
+
run_command_with_env(arguments, HashMap::new()).await
106
+
}
107
+
108
+
pub(crate) async fn run_command_with_env<S: AsRef<str>>(
109
+
arguments: &CommandArguments<'_, S>,
110
+
envs: HashMap<String, String>,
111
+
) -> Result<Either<InteractiveChildChip, NonInteractiveChildChip>, HiveLibError> {
112
+
// use the non-interactive command runner when forced
113
+
// ... or when there is no reason for interactivity (local and unprivileged)
114
+
if arguments.modifiers.non_interactive
115
+
|| (arguments.target.is_none() && !arguments.is_elevated())
116
+
{
117
+
return Ok(Either::Right(non_interactive_command_with_env(
118
+
arguments, envs,
119
+
)?));
120
+
}
121
+
122
+
Ok(Either::Left(
123
+
interactive_command_with_env(arguments, envs).await?,
124
+
))
125
+
}
126
+
127
+
pub(crate) trait WireCommandChip {
128
+
type ExitStatus;
129
+
130
+
async fn wait_till_success(self) -> Result<Self::ExitStatus, CommandError>;
131
+
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError>;
132
+
}
133
+
134
+
type ExitStatus = Either<(portable_pty::ExitStatus, String), (std::process::ExitStatus, String)>;
135
+
136
+
impl WireCommandChip for Either<InteractiveChildChip, NonInteractiveChildChip> {
137
+
type ExitStatus = ExitStatus;
138
+
139
+
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
140
+
match self {
141
+
Self::Left(left) => left.write_stdin(data).await,
142
+
Self::Right(right) => right.write_stdin(data).await,
143
+
}
144
+
}
145
+
146
+
async fn wait_till_success(self) -> Result<Self::ExitStatus, CommandError> {
147
+
match self {
148
+
Self::Left(left) => left.wait_till_success().await.map(Either::Left),
149
+
Self::Right(right) => right.wait_till_success().await.map(Either::Right),
150
+
}
151
+
}
152
+
}
153
+
154
+
fn trace_gjson_str<'a>(log: &'a Value<'a>, msg: &'a str) -> Option<String> {
155
+
if msg.is_empty() {
156
+
return None;
157
+
}
158
+
159
+
let level = log.get("level");
160
+
161
+
if !level.exists() {
162
+
return None;
163
+
}
164
+
165
+
let level = match VerbosityLevel::try_from_primitive(level.u64()) {
166
+
Ok(level) => level,
167
+
Err(err) => {
168
+
error!("nix log `level` did not match to a VerbosityLevel: {err:?}");
169
+
return None;
170
+
}
171
+
};
172
+
173
+
let msg = strip_ansi_escapes::strip_str(msg);
174
+
175
+
match level {
176
+
VerbosityLevel::Info => info!("{msg}"),
177
+
VerbosityLevel::Warn | VerbosityLevel::Notice => warn!("{msg}"),
178
+
VerbosityLevel::Error => error!("{msg}"),
179
+
VerbosityLevel::Debug => debug!("{msg}"),
180
+
VerbosityLevel::Vomit | VerbosityLevel::Talkative | VerbosityLevel::Chatty => {
181
+
trace!("{msg}");
182
+
}
183
+
}
184
+
185
+
if matches!(
186
+
level,
187
+
VerbosityLevel::Error | VerbosityLevel::Warn | VerbosityLevel::Notice
188
+
) {
189
+
return Some(msg);
190
+
}
191
+
192
+
None
193
+
}
194
+
195
+
impl ChildOutputMode {
196
+
/// this function is by far the biggest hotspot in the whole tree
197
+
/// Returns a string if this log is notable enough to be stored as an error message
198
+
fn trace_slice(self, line: &mut [u8]) -> Option<String> {
199
+
let slice = match self {
200
+
Self::Generic | Self::Interactive => {
201
+
let string = String::from_utf8_lossy(line);
202
+
let stripped = strip_ansi_escapes::strip_str(&string);
203
+
warn!("{stripped}");
204
+
return Some(string.to_string());
205
+
}
206
+
Self::Nix => {
207
+
let position = AHO_CORASICK.find(&line).map(|x| &mut line[x.end()..]);
208
+
209
+
if let Some(json_buf) = position {
210
+
json_buf
211
+
} else {
212
+
// usually happens when ssh is outputting something
213
+
warn!("{}", String::from_utf8_lossy(line));
214
+
return None;
215
+
}
216
+
}
217
+
};
218
+
219
+
let Ok(str) = from_utf8(slice) else {
220
+
error!("nix log was not valid utf8!");
221
+
return None;
222
+
};
223
+
224
+
let log = gjson::parse(str);
225
+
226
+
let text = log.get("text");
227
+
228
+
if text.exists() {
229
+
return trace_gjson_str(&log, text.str());
230
+
}
231
+
232
+
let text = log.get("msg");
233
+
234
+
if text.exists() {
235
+
return trace_gjson_str(&log, text.str());
236
+
}
237
+
238
+
None
239
+
}
240
+
}
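
For readers unfamiliar with the internal-json log format that trace_slice consumes: each structured line nix emits starts with the literal `@nix ` prefix followed by a JSON object whose `level` is a numeric verbosity and whose text lives in either `text` or `msg`. The sketch below shows the same strip-and-dispatch idea using serde_json so it stays self-contained; the sample line and the level numbers (0 = error through 7 = vomit) are written from memory of nix's Verbosity enum, so check them against nix_compat::log::VerbosityLevel before relying on them.

use serde_json::Value;

/// Hypothetical helper mirroring the ChildOutputMode::Nix path: strip the
/// "@nix " prefix and route the message by numeric verbosity.
fn route_nix_log(line: &str) -> Option<(&'static str, String)> {
    let json = line.strip_prefix("@nix ")?;
    let value: Value = serde_json::from_str(json).ok()?;

    // nix puts the human-readable text in either `text` or `msg`.
    let msg = value
        .get("text")
        .or_else(|| value.get("msg"))?
        .as_str()?
        .to_string();

    let target = match value.get("level")?.as_u64()? {
        0 => "error",
        1 | 2 => "warn",
        3 => "info",
        6 => "debug",
        _ => "trace",
    };

    Some((target, msg))
}

fn main() {
    let line = r#"@nix {"action":"msg","level":1,"msg":"evaluation warning"}"#;
    assert_eq!(
        route_nix_log(line),
        Some(("warn", "evaluation warning".to_string()))
    );
}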
+199
crates/core/src/commands/noninteractive.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::{
5
+
collections::{HashMap, VecDeque},
6
+
process::ExitStatus,
7
+
sync::Arc,
8
+
};
9
+
10
+
use crate::{
11
+
SubCommandModifiers,
12
+
commands::{ChildOutputMode, CommandArguments, WireCommandChip},
13
+
errors::{CommandError, HiveLibError},
14
+
hive::node::Target,
15
+
};
16
+
use itertools::Itertools;
17
+
use tokio::{
18
+
io::{AsyncWriteExt, BufReader},
19
+
process::{Child, ChildStdin, Command},
20
+
sync::Mutex,
21
+
task::JoinSet,
22
+
};
23
+
use tracing::{Instrument, debug, instrument, trace};
24
+
25
+
pub(crate) struct NonInteractiveChildChip {
26
+
error_collection: Arc<Mutex<VecDeque<String>>>,
27
+
stdout_collection: Arc<Mutex<VecDeque<String>>>,
28
+
child: Child,
29
+
joinset: JoinSet<()>,
30
+
original_command: String,
31
+
stdin: ChildStdin,
32
+
}
33
+
34
+
#[instrument(skip_all, name = "run", fields(elevated = %arguments.is_elevated()))]
35
+
pub(crate) fn non_interactive_command_with_env<S: AsRef<str>>(
36
+
arguments: &CommandArguments<S>,
37
+
envs: HashMap<String, String>,
38
+
) -> Result<NonInteractiveChildChip, HiveLibError> {
39
+
let mut command = if let Some(target) = arguments.target {
40
+
create_sync_ssh_command(target, arguments.modifiers)?
41
+
} else {
42
+
let mut command = Command::new("sh");
43
+
44
+
command.arg("-c");
45
+
46
+
command
47
+
};
48
+
49
+
let command_string = format!(
50
+
"{command_string}{extra}",
51
+
command_string = arguments.command_string.as_ref(),
52
+
extra = match arguments.output_mode {
53
+
ChildOutputMode::Generic | ChildOutputMode::Interactive => "",
54
+
ChildOutputMode::Nix => " --log-format internal-json",
55
+
}
56
+
);
57
+
58
+
let command_string = if let Some(escalation_command) = &arguments.privilege_escalation_command {
59
+
format!("{escalation_command} sh -c '{command_string}'")
60
+
} else {
61
+
command_string
62
+
};
63
+
64
+
debug!("{command_string}");
65
+
66
+
command.arg(&command_string);
67
+
command.stdin(std::process::Stdio::piped());
68
+
command.stderr(std::process::Stdio::piped());
69
+
command.stdout(std::process::Stdio::piped());
70
+
command.kill_on_drop(true);
71
+
// command.env_clear();
72
+
command.envs(envs);
73
+
74
+
let mut child = command.spawn().unwrap();
75
+
let error_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
76
+
let stdout_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
77
+
let stdin = child.stdin.take().unwrap();
78
+
79
+
let stdout_handle = child
80
+
.stdout
81
+
.take()
82
+
.ok_or(HiveLibError::CommandError(CommandError::NoHandle))?;
83
+
let stderr_handle = child
84
+
.stderr
85
+
.take()
86
+
.ok_or(HiveLibError::CommandError(CommandError::NoHandle))?;
87
+
88
+
let mut joinset = JoinSet::new();
89
+
let output_mode = Arc::new(arguments.output_mode);
90
+
91
+
joinset.spawn(
92
+
handle_io(
93
+
stderr_handle,
94
+
output_mode.clone(),
95
+
error_collection.clone(),
96
+
true,
97
+
true,
98
+
)
99
+
.in_current_span(),
100
+
);
101
+
joinset.spawn(
102
+
handle_io(
103
+
stdout_handle,
104
+
output_mode.clone(),
105
+
stdout_collection.clone(),
106
+
false,
107
+
arguments.log_stdout,
108
+
)
109
+
.in_current_span(),
110
+
);
111
+
112
+
Ok(NonInteractiveChildChip {
113
+
error_collection,
114
+
stdout_collection,
115
+
child,
116
+
joinset,
117
+
original_command: arguments.command_string.as_ref().to_string(),
118
+
stdin,
119
+
})
120
+
}
121
+
122
+
impl WireCommandChip for NonInteractiveChildChip {
123
+
type ExitStatus = (ExitStatus, String);
124
+
125
+
async fn wait_till_success(mut self) -> Result<Self::ExitStatus, CommandError> {
126
+
let status = self.child.wait().await.unwrap();
127
+
let _ = self.joinset.join_all().await;
128
+
129
+
if !status.success() {
130
+
let logs = self.error_collection.lock().await.iter().rev().join("\n");
131
+
132
+
return Err(CommandError::CommandFailed {
133
+
command_ran: self.original_command,
134
+
logs,
135
+
code: match status.code() {
136
+
Some(code) => format!("code {code}"),
137
+
None => "no exit code".to_string(),
138
+
},
139
+
reason: "known-status",
140
+
});
141
+
}
142
+
143
+
let stdout = self.stdout_collection.lock().await.iter().rev().join("\n");
144
+
145
+
Ok((status, stdout))
146
+
}
147
+
148
+
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
149
+
trace!("Writing {} bytes", data.len());
150
+
self.stdin.write_all(&data).await.unwrap();
151
+
Ok(())
152
+
}
153
+
}
154
+
155
+
#[instrument(skip_all, name = "log")]
156
+
pub async fn handle_io<R>(
157
+
reader: R,
158
+
output_mode: Arc<ChildOutputMode>,
159
+
collection: Arc<Mutex<VecDeque<String>>>,
160
+
is_error: bool,
161
+
should_log: bool,
162
+
) where
163
+
R: tokio::io::AsyncRead + Unpin,
164
+
{
165
+
let mut io_reader = tokio::io::AsyncBufReadExt::lines(BufReader::new(reader));
166
+
167
+
while let Some(line) = io_reader.next_line().await.unwrap() {
168
+
let mut line = line.into_bytes();
169
+
170
+
let log = if should_log {
171
+
Some(output_mode.trace_slice(&mut line))
172
+
} else {
173
+
None
174
+
};
175
+
176
+
if !is_error {
177
+
let mut queue = collection.lock().await;
178
+
queue.push_front(String::from_utf8_lossy(&line).to_string());
179
+
} else if let Some(error_msg) = log.flatten() {
180
+
let mut queue = collection.lock().await;
181
+
queue.push_front(error_msg);
182
+
// keep at most the 20 most recent messages, drop the rest.
183
+
queue.truncate(20);
184
+
}
185
+
}
186
+
187
+
debug!("io_handler: goodbye!");
188
+
}
189
+
190
+
fn create_sync_ssh_command(
191
+
target: &Target,
192
+
modifiers: SubCommandModifiers,
193
+
) -> Result<Command, HiveLibError> {
194
+
let mut command = Command::new("ssh");
195
+
command.args(target.create_ssh_args(modifiers, true, false)?);
196
+
command.arg(target.get_preferred_host()?.to_string());
197
+
198
+
Ok(command)
199
+
}
+102
crates/core/src/commands/pty/input.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::os::fd::{AsFd, OwnedFd};
5
+
6
+
use nix::{
7
+
poll::{PollFd, PollFlags, PollTimeout, poll},
8
+
unistd::read,
9
+
};
10
+
use tracing::{Span, debug, error, instrument, trace};
11
+
12
+
use crate::{
13
+
commands::pty::{MasterWriter, THREAD_BEGAN_SIGNAL, THREAD_QUIT_SIGNAL},
14
+
errors::CommandError,
15
+
};
16
+
17
+
/// Exits on any data written to `cancel_pipe_r`
18
+
/// A pipe is used to cancel the function.
19
+
#[instrument(skip_all, level = "trace", parent = span)]
20
+
pub(super) fn watch_stdin_from_user(
21
+
cancel_pipe_r: &OwnedFd,
22
+
mut master_writer: MasterWriter,
23
+
write_pipe_r: &OwnedFd,
24
+
span: Span,
25
+
) -> Result<(), CommandError> {
26
+
const WRITER_POSITION: usize = 0;
27
+
const SIGNAL_POSITION: usize = 1;
28
+
const USER_POSITION: usize = 2;
29
+
30
+
let mut buffer = [0u8; 1024];
31
+
let stdin = std::io::stdin();
32
+
let mut cancel_pipe_buf = [0u8; 1];
33
+
34
+
let user_stdin_fd = stdin.as_fd();
35
+
let cancel_pipe_r_fd = cancel_pipe_r.as_fd();
36
+
37
+
let mut all_fds = vec![
38
+
PollFd::new(write_pipe_r.as_fd(), PollFlags::POLLIN),
39
+
PollFd::new(cancel_pipe_r.as_fd(), PollFlags::POLLIN),
40
+
PollFd::new(user_stdin_fd, PollFlags::POLLIN),
41
+
];
42
+
43
+
loop {
44
+
match poll(&mut all_fds, PollTimeout::NONE) {
45
+
Ok(0) => {} // timeout, impossible
46
+
Ok(_) => {
47
+
// The user stdin pipe can be removed
48
+
if all_fds.get(USER_POSITION).is_some()
49
+
&& let Some(events) = all_fds[USER_POSITION].revents()
50
+
&& events.contains(PollFlags::POLLIN)
51
+
{
52
+
trace!("Got stdin from user...");
53
+
let n = read(user_stdin_fd, &mut buffer).map_err(CommandError::PosixPipe)?;
54
+
master_writer
55
+
.write_all(&buffer[..n])
56
+
.map_err(CommandError::WritingMasterStdout)?;
57
+
master_writer
58
+
.flush()
59
+
.map_err(CommandError::WritingMasterStdout)?;
60
+
}
61
+
62
+
if let Some(events) = all_fds[WRITER_POSITION].revents()
63
+
&& events.contains(PollFlags::POLLIN)
64
+
{
65
+
trace!("Got stdin from writer...");
66
+
let n = read(write_pipe_r, &mut buffer).map_err(CommandError::PosixPipe)?;
67
+
master_writer
68
+
.write_all(&buffer[..n])
69
+
.map_err(CommandError::WritingMasterStdout)?;
70
+
master_writer
71
+
.flush()
72
+
.map_err(CommandError::WritingMasterStdout)?;
73
+
}
74
+
75
+
if let Some(events) = all_fds[SIGNAL_POSITION].revents()
76
+
&& events.contains(PollFlags::POLLIN)
77
+
{
78
+
let n = read(cancel_pipe_r_fd, &mut cancel_pipe_buf)
79
+
.map_err(CommandError::PosixPipe)?;
80
+
let message = &cancel_pipe_buf[..n];
81
+
82
+
trace!("Got byte from signal pipe: {message:?}");
83
+
84
+
if message == THREAD_QUIT_SIGNAL {
85
+
return Ok(());
86
+
}
87
+
88
+
if message == THREAD_BEGAN_SIGNAL {
89
+
all_fds.remove(USER_POSITION);
90
+
}
91
+
}
92
+
}
93
+
Err(e) => {
94
+
error!("Poll error: {e}");
95
+
break;
96
+
}
97
+
}
98
+
}
99
+
100
+
debug!("stdin_thread: goodbye");
101
+
Ok(())
102
+
}
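
The cancel pipe used above is the classic self-pipe trick: a dedicated pipe whose only purpose is to wake a blocking poll from another thread. Here is a stripped-down sketch of the same mechanism with the same nix crate APIs, keeping only a cancel pipe and user stdin and dropping wire's begin/quit signalling protocol.

use std::os::fd::AsFd;
use std::thread;
use std::time::Duration;

use nix::poll::{PollFd, PollFlags, PollTimeout, poll};
use nix::unistd::{pipe, read, write};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // One end stays with the poller; the other is written to from elsewhere
    // to wake it up, exactly how cancel_pipe_r / cancel_stdin_pipe_w are used.
    let (cancel_r, cancel_w) = pipe()?;

    thread::spawn(move || {
        thread::sleep(Duration::from_millis(100));
        // Any byte written here unblocks the poll below.
        let _ = write(&cancel_w, b"q");
    });

    let stdin = std::io::stdin();
    let mut fds = [
        PollFd::new(cancel_r.as_fd(), PollFlags::POLLIN),
        PollFd::new(stdin.as_fd(), PollFlags::POLLIN),
    ];

    loop {
        poll(&mut fds, PollTimeout::NONE)?;

        if let Some(events) = fds[0].revents()
            && events.contains(PollFlags::POLLIN)
        {
            let mut buf = [0u8; 1];
            read(cancel_r.as_fd(), &mut buf)?;
            println!("cancelled, shutting the poller down");
            return Ok(());
        }

        if let Some(events) = fds[1].revents()
            && events.contains(PollFlags::POLLIN)
        {
            let mut buf = [0u8; 1024];
            let n = read(stdin.as_fd(), &mut buf)?;
            println!("forwarded {n} bytes of user input");
        }
    }
}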
+63
crates/core/src/commands/pty/logbuffer.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
/// Split into its own struct so it can be tested more easily
5
+
pub(crate) struct LogBuffer {
6
+
buffer: Vec<u8>,
7
+
}
8
+
9
+
impl LogBuffer {
10
+
pub const fn new() -> Self {
11
+
Self { buffer: Vec::new() }
12
+
}
13
+
14
+
pub fn process_slice(&mut self, slice: &[u8]) {
15
+
self.buffer.extend_from_slice(slice);
16
+
}
17
+
18
+
pub fn next_line(&mut self) -> Option<Vec<u8>> {
19
+
let line_end = self.buffer.iter().position(|x| *x == b'\n')?;
20
+
21
+
let drained = self.buffer.drain(..line_end).collect();
22
+
self.buffer.remove(0);
23
+
Some(drained)
24
+
}
25
+
26
+
#[cfg(test)]
27
+
fn take_lines(&mut self) -> Vec<Vec<u8>> {
28
+
let mut lines = vec![];
29
+
30
+
while let Some(line) = self.next_line() {
31
+
lines.push(line);
32
+
}
33
+
34
+
lines
35
+
}
36
+
}
37
+
38
+
#[cfg(test)]
39
+
mod tests {
40
+
use super::*;
41
+
42
+
#[test]
43
+
fn test_split_line_processing() {
44
+
let mut log_buffer = LogBuffer::new();
45
+
46
+
log_buffer.process_slice(b"Writing key KeySpec { destination: \"/et");
47
+
log_buffer.process_slice(b"c/keys/buildbot.aws.key\", user: \"buildbot\", group: \"buildbot-worker\", permissions: 384, length: 32, last: false, crc: 1370815231 }, 32 bytes of data");
48
+
log_buffer.process_slice(b"\n");
49
+
log_buffer.process_slice(b"xxx");
50
+
log_buffer.process_slice(b"xx_WIRE");
51
+
log_buffer.process_slice(b"_QUIT\n");
52
+
let lines = log_buffer.take_lines();
53
+
assert_eq!(lines.len(), 2);
54
+
assert_eq!(
55
+
String::from_utf8_lossy(lines.first().unwrap()),
56
+
"Writing key KeySpec { destination: \"/etc/keys/buildbot.aws.key\", user: \"buildbot\", group: \"buildbot-worker\", permissions: 384, length: 32, last: false, crc: 1370815231 }, 32 bytes of data"
57
+
);
58
+
assert_eq!(lines.get(1), Some(&"xxxxx_WIRE_QUIT".as_bytes().to_vec()));
59
+
60
+
// taking leaves none
61
+
assert_eq!(log_buffer.take_lines().len(), 0);
62
+
}
63
+
}
+566
crates/core/src/commands/pty/mod.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use crate::commands::pty::output::{WatchStdoutArguments, handle_pty_stdout};
5
+
use crate::status::STATUS;
6
+
use aho_corasick::PatternID;
7
+
use itertools::Itertools;
8
+
use nix::sys::termios::{LocalFlags, SetArg, Termios, tcgetattr, tcsetattr};
9
+
use nix::unistd::pipe;
10
+
use nix::unistd::write as posix_write;
11
+
use portable_pty::{CommandBuilder, NativePtySystem, PtyPair, PtySize};
12
+
use rand::distr::Alphabetic;
13
+
use std::collections::VecDeque;
14
+
use std::io::stderr;
15
+
use std::sync::{LazyLock, Mutex};
16
+
use std::{
17
+
io::{Read, Write},
18
+
os::fd::{AsFd, OwnedFd},
19
+
sync::Arc,
20
+
};
21
+
use tokio::sync::{oneshot, watch};
22
+
use tracing::instrument;
23
+
use tracing::{Span, debug, trace};
24
+
25
+
use crate::commands::CommandArguments;
26
+
use crate::commands::pty::input::watch_stdin_from_user;
27
+
use crate::errors::CommandError;
28
+
use crate::{SubCommandModifiers, acquire_stdin_lock};
29
+
use crate::{
30
+
commands::{ChildOutputMode, WireCommandChip},
31
+
errors::HiveLibError,
32
+
hive::node::Target,
33
+
};
34
+
35
+
mod input;
36
+
mod logbuffer;
37
+
mod output;
38
+
39
+
type MasterWriter = Box<dyn Write + Send>;
40
+
type MasterReader = Box<dyn Read + Send>;
41
+
42
+
/// the underlying command began
43
+
const THREAD_BEGAN_SIGNAL: &[u8; 1] = b"b";
44
+
const THREAD_QUIT_SIGNAL: &[u8; 1] = b"q";
45
+
46
+
type Child = Box<dyn portable_pty::Child + Send + Sync>;
47
+
48
+
pub(crate) struct InteractiveChildChip {
49
+
child: Child,
50
+
51
+
cancel_stdin_pipe_w: OwnedFd,
52
+
write_stdin_pipe_w: OwnedFd,
53
+
54
+
stderr_collection: Arc<Mutex<VecDeque<String>>>,
55
+
stdout_collection: Arc<Mutex<VecDeque<String>>>,
56
+
57
+
original_command: String,
58
+
59
+
status_receiver: watch::Receiver<Status>,
60
+
stdout_handle: tokio::task::JoinHandle<Result<(), CommandError>>,
61
+
}
62
+
63
+
/// Sets the terminal options needed while user interaction is performed on the terminal
64
+
/// and restores the original attributes when dropped
65
+
struct StdinTermiosAttrGuard(Termios);
66
+
67
+
#[derive(Debug)]
68
+
enum Status {
69
+
Running,
70
+
Done { success: bool },
71
+
}
72
+
73
+
#[derive(Debug)]
74
+
enum SearchFindings {
75
+
None,
76
+
Started,
77
+
Terminate,
78
+
}
79
+
80
+
static STARTED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(0));
81
+
static SUCCEEDED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(1));
82
+
static FAILED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(2));
83
+
84
+
/// redirects stdout through a subshell that prefixes each line with "#$line". stdout is far less common than stderr.
85
+
const IO_SUBS: &str = "1> >(while IFS= read -r line; do echo \"#$line\"; done)";
86
+
87
+
fn create_ending_segment<S: AsRef<str>>(
88
+
arguments: &CommandArguments<'_, S>,
89
+
needles: &Needles,
90
+
) -> String {
91
+
let Needles {
92
+
succeed,
93
+
fail,
94
+
start,
95
+
} = needles;
96
+
97
+
format!(
98
+
"echo -e '{succeed}' || echo '{failed}'",
99
+
succeed = if matches!(arguments.output_mode, ChildOutputMode::Interactive) {
100
+
format!(
101
+
"{start}\\n{succeed}",
102
+
start = String::from_utf8_lossy(start),
103
+
succeed = String::from_utf8_lossy(succeed)
104
+
)
105
+
} else {
106
+
String::from_utf8_lossy(succeed).to_string()
107
+
},
108
+
failed = String::from_utf8_lossy(fail)
109
+
)
110
+
}
111
+
112
+
fn create_starting_segment<S: AsRef<str>>(
113
+
arguments: &CommandArguments<'_, S>,
114
+
start_needle: &Arc<Vec<u8>>,
115
+
) -> String {
116
+
if matches!(arguments.output_mode, ChildOutputMode::Interactive) {
117
+
String::new()
118
+
} else {
119
+
format!(
120
+
"echo '{start}' && ",
121
+
start = String::from_utf8_lossy(start_needle)
122
+
)
123
+
}
124
+
}
125
+
126
+
#[instrument(skip_all, name = "run-int", fields(elevated = %arguments.is_elevated(), mode = ?arguments.output_mode))]
127
+
pub(crate) async fn interactive_command_with_env<S: AsRef<str>>(
128
+
arguments: &CommandArguments<'_, S>,
129
+
envs: std::collections::HashMap<String, String>,
130
+
) -> Result<InteractiveChildChip, HiveLibError> {
131
+
print_authenticate_warning(arguments)?;
132
+
133
+
let needles = create_needles();
134
+
let pty_system = NativePtySystem::default();
135
+
let pty_pair = portable_pty::PtySystem::openpty(&pty_system, PtySize::default()).unwrap();
136
+
setup_master(&pty_pair)?;
137
+
138
+
let command_string = &format!(
139
+
"{starting}{command} {flags} {IO_SUBS} && {ending}",
140
+
command = arguments.command_string.as_ref(),
141
+
flags = match arguments.output_mode {
142
+
ChildOutputMode::Nix => "--log-format internal-json",
143
+
ChildOutputMode::Generic | ChildOutputMode::Interactive => "",
144
+
},
145
+
starting = create_starting_segment(arguments, &needles.start),
146
+
ending = create_ending_segment(arguments, &needles)
147
+
);
148
+
149
+
debug!("{command_string}");
150
+
151
+
let mut command = build_command(arguments, command_string)?;
152
+
153
+
// give command all env vars
154
+
for (key, value) in envs {
155
+
command.env(key, value);
156
+
}
157
+
158
+
let clobber_guard = acquire_stdin_lock().await;
159
+
let _guard = StdinTermiosAttrGuard::new().map_err(HiveLibError::CommandError)?;
160
+
let child = pty_pair
161
+
.slave
162
+
.spawn_command(command)
163
+
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
164
+
165
+
// Release any handles owned by the slave: we don't need it now
166
+
// that we've spawned the child.
167
+
drop(pty_pair.slave);
168
+
169
+
let reader = pty_pair
170
+
.master
171
+
.try_clone_reader()
172
+
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
173
+
let master_writer = pty_pair
174
+
.master
175
+
.take_writer()
176
+
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
177
+
178
+
let stderr_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
179
+
let stdout_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
180
+
let (began_tx, began_rx) = oneshot::channel::<()>();
181
+
let (status_sender, status_receiver) = watch::channel(Status::Running);
182
+
183
+
let stdout_handle = {
184
+
let arguments = WatchStdoutArguments {
185
+
began_tx,
186
+
reader,
187
+
needles,
188
+
output_mode: arguments.output_mode,
189
+
stderr_collection: stderr_collection.clone(),
190
+
stdout_collection: stdout_collection.clone(),
191
+
span: Span::current(),
192
+
log_stdout: arguments.log_stdout,
193
+
status_sender,
194
+
};
195
+
196
+
tokio::task::spawn_blocking(move || handle_pty_stdout(arguments))
197
+
};
198
+
199
+
let (write_stdin_pipe_r, write_stdin_pipe_w) =
200
+
pipe().map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
201
+
let (cancel_stdin_pipe_r, cancel_stdin_pipe_w) =
202
+
pipe().map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
203
+
204
+
tokio::task::spawn_blocking(move || {
205
+
watch_stdin_from_user(
206
+
&cancel_stdin_pipe_r,
207
+
master_writer,
208
+
&write_stdin_pipe_r,
209
+
Span::current(),
210
+
)
211
+
});
212
+
213
+
debug!("Setup threads");
214
+
215
+
let () = began_rx
216
+
.await
217
+
.map_err(|x| HiveLibError::CommandError(CommandError::OneshotRecvError(x)))?;
218
+
219
+
drop(clobber_guard);
220
+
221
+
if arguments.keep_stdin_open {
222
+
trace!("Sending THREAD_BEGAN_SIGNAL");
223
+
224
+
posix_write(&cancel_stdin_pipe_w, THREAD_BEGAN_SIGNAL)
225
+
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
226
+
} else {
227
+
trace!("Sending THREAD_QUIT_SIGNAL");
228
+
229
+
posix_write(&cancel_stdin_pipe_w, THREAD_QUIT_SIGNAL)
230
+
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
231
+
}
232
+
233
+
Ok(InteractiveChildChip {
234
+
child,
235
+
cancel_stdin_pipe_w,
236
+
write_stdin_pipe_w,
237
+
stderr_collection,
238
+
stdout_collection,
239
+
original_command: arguments.command_string.as_ref().to_string(),
240
+
status_receiver,
241
+
stdout_handle,
242
+
})
243
+
}
244
+
245
+
fn print_authenticate_warning<S: AsRef<str>>(
246
+
arguments: &CommandArguments<S>,
247
+
) -> Result<(), HiveLibError> {
248
+
if !arguments.is_elevated() {
249
+
return Ok(());
250
+
}
251
+
252
+
let _ = STATUS.lock().write_above_status(
253
+
&format!(
254
+
"{} | Authenticate for \"sudo {}\":\n",
255
+
arguments
256
+
.target
257
+
.map_or(Ok("localhost (!)".to_string()), |target| Ok(format!(
258
+
"{}@{}:{}",
259
+
target.user,
260
+
target.get_preferred_host()?,
261
+
target.port
262
+
)))?,
263
+
arguments.command_string.as_ref()
264
+
)
265
+
.into_bytes(),
266
+
&mut stderr(),
267
+
);
268
+
269
+
Ok(())
270
+
}
271
+
272
+
struct Needles {
273
+
succeed: Arc<Vec<u8>>,
274
+
fail: Arc<Vec<u8>>,
275
+
start: Arc<Vec<u8>>,
276
+
}
277
+
278
+
fn create_needles() -> Needles {
279
+
let tmp_prefix = rand::distr::SampleString::sample_string(&Alphabetic, &mut rand::rng(), 5);
280
+
281
+
Needles {
282
+
succeed: Arc::new(format!("{tmp_prefix}_W_Q").as_bytes().to_vec()),
283
+
fail: Arc::new(format!("{tmp_prefix}_W_F").as_bytes().to_vec()),
284
+
start: Arc::new(format!("{tmp_prefix}_W_S").as_bytes().to_vec()),
285
+
}
286
+
}
287
+
288
+
fn setup_master(pty_pair: &PtyPair) -> Result<(), HiveLibError> {
289
+
if let Some(fd) = pty_pair.master.as_raw_fd() {
290
+
// convert raw fd to a BorrowedFd
291
+
// safe as `fd` is dropped well before `pty_pair.master`
292
+
let fd = unsafe { std::os::unix::io::BorrowedFd::borrow_raw(fd) };
293
+
let mut termios =
294
+
tcgetattr(fd).map_err(|x| HiveLibError::CommandError(CommandError::TermAttrs(x)))?;
295
+
296
+
termios.local_flags &= !LocalFlags::ECHO;
297
+
// Key agent does not work well without canonical mode
298
+
termios.local_flags &= !LocalFlags::ICANON;
299
+
// Actually quit
300
+
termios.local_flags &= !LocalFlags::ISIG;
301
+
302
+
tcsetattr(fd, SetArg::TCSANOW, &termios)
303
+
.map_err(|x| HiveLibError::CommandError(CommandError::TermAttrs(x)))?;
304
+
}
305
+
306
+
Ok(())
307
+
}
308
+
309
+
fn build_command<S: AsRef<str>>(
310
+
arguments: &CommandArguments<'_, S>,
311
+
command_string: &String,
312
+
) -> Result<CommandBuilder, HiveLibError> {
313
+
let mut command = if let Some(target) = arguments.target {
314
+
let mut command = create_int_ssh_command(target, arguments.modifiers)?;
315
+
316
+
// force ssh to use our pseudo terminal
317
+
command.arg("-tt");
318
+
319
+
command
320
+
} else {
321
+
let mut command = portable_pty::CommandBuilder::new("sh");
322
+
323
+
command.arg("-c");
324
+
325
+
command
326
+
};
327
+
328
+
if arguments.is_elevated() {
329
+
command.arg(format!("sudo -u root -- sh -c '{command_string}'"));
330
+
} else {
331
+
command.arg(command_string);
332
+
}
333
+
334
+
Ok(command)
335
+
}
336
+
337
+
impl WireCommandChip for InteractiveChildChip {
338
+
type ExitStatus = (portable_pty::ExitStatus, String);
339
+
340
+
#[instrument(skip_all)]
341
+
async fn wait_till_success(mut self) -> Result<Self::ExitStatus, CommandError> {
342
+
drop(self.write_stdin_pipe_w);
343
+
344
+
let exit_status = tokio::task::spawn_blocking(move || self.child.wait())
345
+
.await
346
+
.map_err(CommandError::JoinError)?
347
+
.map_err(CommandError::WaitForStatus)?;
348
+
349
+
debug!("exit_status: {exit_status:?}");
350
+
351
+
self.stdout_handle
352
+
.await
353
+
.map_err(|_| CommandError::ThreadPanic)??;
354
+
355
+
let status = self
356
+
.status_receiver
357
+
.wait_for(|value| matches!(value, Status::Done { .. }))
358
+
.await
359
+
.unwrap();
360
+
361
+
let _ = posix_write(&self.cancel_stdin_pipe_w, THREAD_QUIT_SIGNAL);
362
+
363
+
if let Status::Done { success: true } = *status {
364
+
let logs = self
365
+
.stdout_collection
366
+
.lock()
367
+
.unwrap()
368
+
.iter()
369
+
.rev()
370
+
.map(|x| x.trim())
371
+
.join("\n");
372
+
373
+
return Ok((exit_status, logs));
374
+
}
375
+
376
+
debug!("child did not succeed");
377
+
378
+
let logs = self
379
+
.stderr_collection
380
+
.lock()
381
+
.unwrap()
382
+
.iter()
383
+
.rev()
384
+
.join("\n");
385
+
386
+
Err(CommandError::CommandFailed {
387
+
command_ran: self.original_command,
388
+
logs,
389
+
code: format!("code {}", exit_status.exit_code()),
390
+
reason: match *status {
391
+
Status::Done { .. } => "marked-unsuccessful",
392
+
Status::Running => "child-crashed-before-succeeding",
393
+
},
394
+
})
395
+
}
396
+
397
+
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
398
+
trace!("Writing {} bytes to stdin", data.len());
399
+
400
+
posix_write(&self.write_stdin_pipe_w, &data)
401
+
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
402
+
403
+
Ok(())
404
+
}
405
+
}
406
+
407
+
impl StdinTermiosAttrGuard {
408
+
fn new() -> Result<Self, CommandError> {
409
+
let stdin = std::io::stdin();
410
+
let stdin_fd = stdin.as_fd();
411
+
412
+
let mut termios = tcgetattr(stdin_fd).map_err(CommandError::TermAttrs)?;
413
+
let original_termios = termios.clone();
414
+
415
+
termios.local_flags &= !(LocalFlags::ECHO | LocalFlags::ICANON);
416
+
tcsetattr(stdin_fd, SetArg::TCSANOW, &termios).map_err(CommandError::TermAttrs)?;
417
+
418
+
Ok(StdinTermiosAttrGuard(original_termios))
419
+
}
420
+
}
421
+
422
+
impl Drop for StdinTermiosAttrGuard {
423
+
fn drop(&mut self) {
424
+
let stdin = std::io::stdin();
425
+
let stdin_fd = stdin.as_fd();
426
+
427
+
let _ = tcsetattr(stdin_fd, SetArg::TCSANOW, &self.0);
428
+
}
429
+
}
430
+
431
+
fn create_int_ssh_command(
432
+
target: &Target,
433
+
modifiers: SubCommandModifiers,
434
+
) -> Result<portable_pty::CommandBuilder, HiveLibError> {
435
+
let mut command = portable_pty::CommandBuilder::new("ssh");
436
+
command.args(target.create_ssh_args(modifiers, false, false)?);
437
+
command.arg(target.get_preferred_host()?.to_string());
438
+
Ok(command)
439
+
}
440
+
441
+
#[cfg(test)]
442
+
mod tests {
443
+
use aho_corasick::AhoCorasick;
444
+
use tokio::sync::oneshot::error::TryRecvError;
445
+
446
+
use crate::commands::pty::output::handle_rawmode_data;
447
+
448
+
use super::*;
449
+
use std::assert_matches::assert_matches;
450
+
451
+
#[test]
452
+
fn test_rawmode_data() {
453
+
let aho_corasick = AhoCorasick::builder()
454
+
.ascii_case_insensitive(false)
455
+
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
456
+
.build(["START_NEEDLE", "SUCCEEDED_NEEDLE", "FAILED_NEEDLE"])
457
+
.unwrap();
458
+
let mut stderr = vec![];
459
+
let (began_tx, mut began_rx) = oneshot::channel::<()>();
460
+
let mut began_tx = Some(began_tx);
461
+
let (status_sender, _) = watch::channel(Status::Running);
462
+
463
+
// each "Bla" is 4 bytes.
464
+
let buffer = "bla bla bla START_NEEDLE bla bla bla".as_bytes();
465
+
let mut raw_mode_buffer = vec![];
466
+
467
+
// handle 1 "bla"
468
+
assert_matches!(
469
+
handle_rawmode_data(
470
+
&mut stderr,
471
+
buffer,
472
+
4,
473
+
&mut raw_mode_buffer,
474
+
&aho_corasick,
475
+
&status_sender,
476
+
&mut began_tx
477
+
),
478
+
Ok(SearchFindings::None)
479
+
);
480
+
assert_matches!(began_rx.try_recv(), Err(TryRecvError::Empty));
481
+
assert!(began_tx.is_some());
482
+
assert_eq!(raw_mode_buffer, b"bla ");
483
+
assert_matches!(*status_sender.borrow(), Status::Running);
484
+
485
+
let buffer = &buffer[4..];
486
+
487
+
// handle 2 "bla"'s and half a "START_NEEDLE"
488
+
let n = 4 + 4 + 6;
489
+
assert_matches!(
490
+
handle_rawmode_data(
491
+
&mut stderr,
492
+
buffer,
493
+
n,
494
+
&mut raw_mode_buffer,
495
+
&aho_corasick,
496
+
&status_sender,
497
+
&mut began_tx
498
+
),
499
+
Ok(SearchFindings::None)
500
+
);
501
+
assert_matches!(began_rx.try_recv(), Err(TryRecvError::Empty));
502
+
assert!(began_tx.is_some());
503
+
assert_matches!(*status_sender.borrow(), Status::Running);
504
+
assert_eq!(raw_mode_buffer, b"bla bla bla START_");
505
+
506
+
let buffer = &buffer[n..];
507
+
508
+
// handle rest of the data
509
+
let n = buffer.len();
510
+
assert_matches!(
511
+
handle_rawmode_data(
512
+
&mut stderr,
513
+
buffer,
514
+
n,
515
+
&mut raw_mode_buffer,
516
+
&aho_corasick,
517
+
&status_sender,
518
+
&mut began_tx
519
+
),
520
+
Ok(SearchFindings::Started)
521
+
);
522
+
assert_matches!(began_rx.try_recv(), Ok(()));
523
+
assert_matches!(began_tx, None);
524
+
assert_eq!(raw_mode_buffer, b"bla bla bla START_NEEDLE bla bla bla");
525
+
assert_matches!(*status_sender.borrow(), Status::Running);
526
+
527
+
// test failed needle
528
+
let buffer = "bla FAILED_NEEDLE bla".as_bytes();
529
+
let mut raw_mode_buffer = vec![];
530
+
531
+
let n = buffer.len();
532
+
assert_matches!(
533
+
handle_rawmode_data(
534
+
&mut stderr,
535
+
buffer,
536
+
n,
537
+
&mut raw_mode_buffer,
538
+
&aho_corasick,
539
+
&status_sender,
540
+
&mut began_tx
541
+
),
542
+
Ok(SearchFindings::Terminate)
543
+
);
544
+
assert_matches!(*status_sender.borrow(), Status::Done { success: false });
545
+
546
+
// test succeed needle
547
+
let buffer = "bla SUCCEEDED_NEEDLE bla".as_bytes();
548
+
let mut raw_mode_buffer = vec![];
549
+
let (status_sender, _) = watch::channel(Status::Running);
550
+
551
+
let n = buffer.len();
552
+
assert_matches!(
553
+
handle_rawmode_data(
554
+
&mut stderr,
555
+
buffer,
556
+
n,
557
+
&mut raw_mode_buffer,
558
+
&aho_corasick,
559
+
&status_sender,
560
+
&mut began_tx
561
+
),
562
+
Ok(SearchFindings::Terminate)
563
+
);
564
+
assert_matches!(*status_sender.borrow(), Status::Done { success: true });
565
+
}
566
+
}
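The tests above drive the needle scanning with synthetic markers. As a point of reference, below is a minimal, self-contained sketch of the marker protocol those needles imply: a wrapped shell command echoes a start marker before running and a failure marker if it exits non-zero. The wrapper shape and the `_W_OK` success suffix are illustrative assumptions; only the `_W_S` (start) and `_W_F` (fail) suffixes appear in the code above.

```rust
/// Sketch only: shows how a command could be wrapped so the PTY reader can
/// recognise start and failure markers. Not the crate's actual wrapper.
fn wrap_with_markers(tmp_prefix: &str, command: &str) -> String {
    format!(
        // `_W_OK` is a hypothetical success marker used purely for illustration.
        "echo {p}_W_S; if {command}; then echo {p}_W_OK; else echo {p}_W_F; fi",
        p = tmp_prefix
    )
}

fn main() {
    // Hypothetical prefix; the real prefix is generated elsewhere in the crate.
    println!("{}", wrap_with_markers("wire_tmp123", "true"));
}
```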
+264
crates/core/src/commands/pty/output.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use crate::{
5
+
commands::{
6
+
ChildOutputMode,
7
+
pty::{
8
+
FAILED_PATTERN, Needles, STARTED_PATTERN, SUCCEEDED_PATTERN, SearchFindings, Status,
9
+
logbuffer::LogBuffer,
10
+
},
11
+
},
12
+
errors::CommandError,
13
+
};
14
+
use aho_corasick::AhoCorasick;
15
+
use std::{
16
+
collections::VecDeque,
17
+
io::Write,
18
+
sync::{Arc, Mutex},
19
+
};
20
+
use tokio::sync::{oneshot, watch};
21
+
use tracing::{Span, debug, instrument};
22
+
23
+
pub(super) struct WatchStdoutArguments {
24
+
pub began_tx: oneshot::Sender<()>,
25
+
pub reader: super::MasterReader,
26
+
pub needles: Needles,
27
+
pub output_mode: ChildOutputMode,
28
+
pub stderr_collection: Arc<Mutex<VecDeque<String>>>,
29
+
pub stdout_collection: Arc<Mutex<VecDeque<String>>>,
30
+
pub status_sender: watch::Sender<Status>,
31
+
pub span: Span,
32
+
pub log_stdout: bool,
33
+
}
34
+
35
+
/// Handles data from the PTY, and logs or prompts the user depending on the state
36
+
/// of the command.
37
+
///
38
+
/// Emits a message on the `began_tx` when the command is considered started.
39
+
///
40
+
/// Records stderr and stdout when it is considered notable (all stdout, last few stderr messages)
41
+
#[instrument(skip_all, name = "log", parent = arguments.span)]
42
+
pub(super) fn handle_pty_stdout(arguments: WatchStdoutArguments) -> Result<(), CommandError> {
43
+
let WatchStdoutArguments {
44
+
began_tx,
45
+
mut reader,
46
+
needles,
47
+
output_mode,
48
+
stdout_collection,
49
+
stderr_collection,
50
+
status_sender,
51
+
log_stdout,
52
+
..
53
+
} = arguments;
54
+
55
+
let aho_corasick = AhoCorasick::builder()
56
+
.ascii_case_insensitive(false)
57
+
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
58
+
.build([
59
+
needles.start.as_ref(),
60
+
needles.succeed.as_ref(),
61
+
needles.fail.as_ref(),
62
+
])
63
+
.unwrap();
64
+
65
+
let mut buffer = [0u8; 1024];
66
+
let mut stderr = std::io::stderr();
67
+
let mut began = false;
68
+
let mut log_buffer = LogBuffer::new();
69
+
let mut raw_mode_buffer = Vec::new();
70
+
let mut belled = false;
71
+
let mut began_tx = Some(began_tx);
72
+
73
+
'outer: loop {
74
+
match reader.read(&mut buffer) {
75
+
Ok(0) => break 'outer,
76
+
Ok(n) => {
77
+
// this block is responsible for outputting the "raw" data,
78
+
// mostly sudo prompts.
79
+
if !began {
80
+
let findings = handle_rawmode_data(
81
+
&mut stderr,
82
+
&buffer,
83
+
n,
84
+
&mut raw_mode_buffer,
85
+
&aho_corasick,
86
+
&status_sender,
87
+
&mut began_tx,
88
+
)?;
89
+
90
+
match findings {
91
+
SearchFindings::Terminate => break 'outer,
92
+
SearchFindings::Started => {
93
+
began = true;
94
+
continue;
95
+
}
96
+
SearchFindings::None => {}
97
+
}
98
+
99
+
if belled {
100
+
continue;
101
+
}
102
+
103
+
stderr
104
+
.write(b"\x07") // bell
105
+
.map_err(CommandError::WritingClientStderr)?;
106
+
stderr.flush().map_err(CommandError::WritingClientStderr)?;
107
+
108
+
belled = true;
109
+
110
+
continue;
111
+
}
112
+
113
+
log_buffer.process_slice(&buffer[..n]);
114
+
115
+
while let Some(mut line) = log_buffer.next_line() {
116
+
let findings =
117
+
search_string(&aho_corasick, &line, &status_sender, &mut began_tx);
118
+
119
+
match findings {
120
+
SearchFindings::Terminate => break 'outer,
121
+
SearchFindings::Started => {
122
+
began = true;
123
+
continue;
124
+
}
125
+
SearchFindings::None => {}
126
+
}
127
+
128
+
handle_normal_data(
129
+
&stderr_collection,
130
+
&stdout_collection,
131
+
&mut line,
132
+
log_stdout,
133
+
output_mode,
134
+
);
135
+
}
136
+
}
137
+
Err(e) => {
138
+
eprintln!("Error reading from PTY: {e}");
139
+
break;
140
+
}
141
+
}
142
+
}
143
+
144
+
began_tx.map(|began_tx| began_tx.send(()));
145
+
146
+
// failsafe if there were errors or the reader stopped
147
+
if matches!(*status_sender.borrow(), Status::Running) {
148
+
status_sender.send_replace(Status::Done { success: false });
149
+
}
150
+
151
+
debug!("stdout: goodbye");
152
+
153
+
Ok(())
154
+
}
155
+
156
+
/// Handles raw data received before the command has started, forwarding it to stderr so prompts (e.g. sudo's) remain visible
157
+
pub(super) fn handle_rawmode_data<W: std::io::Write>(
158
+
stderr: &mut W,
159
+
buffer: &[u8],
160
+
n: usize,
161
+
raw_mode_buffer: &mut Vec<u8>,
162
+
aho_corasick: &AhoCorasick,
163
+
status_sender: &watch::Sender<Status>,
164
+
began_tx: &mut Option<oneshot::Sender<()>>,
165
+
) -> Result<SearchFindings, CommandError> {
166
+
raw_mode_buffer.extend_from_slice(&buffer[..n]);
167
+
168
+
let findings = search_string(aho_corasick, raw_mode_buffer, status_sender, began_tx);
169
+
170
+
if matches!(
171
+
findings,
172
+
SearchFindings::Started | SearchFindings::Terminate
173
+
) {
174
+
return Ok(findings);
175
+
}
176
+
177
+
stderr
178
+
.write_all(&buffer[..n])
179
+
.map_err(CommandError::WritingClientStderr)?;
180
+
181
+
stderr.flush().map_err(CommandError::WritingClientStderr)?;
182
+
183
+
Ok(findings)
184
+
}
185
+
186
+
/// handles data when the command is considered "started", logs and records errors as appropriate
187
+
fn handle_normal_data(
188
+
stderr_collection: &Arc<Mutex<VecDeque<String>>>,
189
+
stdout_collection: &Arc<Mutex<VecDeque<String>>>,
190
+
line: &mut [u8],
191
+
log_stdout: bool,
192
+
output_mode: ChildOutputMode,
193
+
) {
194
+
if line.starts_with(b"#") {
195
+
let stripped = &mut line[1..];
196
+
197
+
if log_stdout {
198
+
output_mode.trace_slice(stripped);
199
+
}
200
+
201
+
let mut queue = stdout_collection.lock().unwrap();
202
+
queue.push_front(String::from_utf8_lossy(stripped).to_string());
203
+
return;
204
+
}
205
+
206
+
let log = output_mode.trace_slice(line);
207
+
208
+
if let Some(error_msg) = log {
209
+
let mut queue = stderr_collection.lock().unwrap();
210
+
211
+
// push the message to the front and keep at most 20 messages, dropping the rest.
212
+
queue.push_front(error_msg);
213
+
queue.truncate(20);
214
+
}
215
+
}
216
+
217
+
/// Searches the haystack for the start/success/failure needles and reports the resulting state
218
+
fn search_string(
219
+
aho_corasick: &AhoCorasick,
220
+
haystack: &[u8],
221
+
status_sender: &watch::Sender<Status>,
222
+
began_tx: &mut Option<oneshot::Sender<()>>,
223
+
) -> SearchFindings {
224
+
let searched = aho_corasick
225
+
.find_iter(haystack)
226
+
.map(|x| x.pattern())
227
+
.collect::<Vec<_>>();
228
+
229
+
let started = if searched.contains(&STARTED_PATTERN) {
230
+
debug!("start needle was found, switching mode...");
231
+
if let Some(began_tx) = began_tx.take() {
232
+
let _ = began_tx.send(());
233
+
}
234
+
true
235
+
} else {
236
+
false
237
+
};
238
+
239
+
let succeeded = if searched.contains(&SUCCEEDED_PATTERN) {
240
+
debug!("succeed needle was found, marking child as succeeding.");
241
+
status_sender.send_replace(Status::Done { success: true });
242
+
true
243
+
} else {
244
+
false
245
+
};
246
+
247
+
let failed = if searched.contains(&FAILED_PATTERN) {
248
+
debug!("failed needle was found, elevated child did not succeed.");
249
+
status_sender.send_replace(Status::Done { success: false });
250
+
true
251
+
} else {
252
+
false
253
+
};
254
+
255
+
if succeeded || failed {
256
+
return SearchFindings::Terminate;
257
+
}
258
+
259
+
if started {
260
+
return SearchFindings::Started;
261
+
}
262
+
263
+
SearchFindings::None
264
+
}
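The scan above relies on `aho_corasick` to find all three needles in a single pass over each chunk the PTY hands us, buffering partial chunks so a needle split across reads is still found. A standalone sketch of that scan, using placeholder needles rather than wire's generated markers:

```rust
// Standalone sketch of the multi-needle scan; needle strings are placeholders.
use aho_corasick::AhoCorasick;

fn main() {
    let needles = ["START", "SUCCEEDED", "FAILED"];
    let ac = AhoCorasick::builder()
        .match_kind(aho_corasick::MatchKind::LeftmostFirst)
        .build(needles)
        .unwrap();

    // A chunk may hold prompt noise, a full needle, or only part of one.
    let haystack = "password for user: START build output SUCCEEDED";

    for m in ac.find_iter(haystack) {
        // Pattern indices follow build order, mirroring STARTED_PATTERN,
        // SUCCEEDED_PATTERN and FAILED_PATTERN above.
        println!("{} at {}..{}", needles[m.pattern().as_usize()], m.start(), m.end());
    }
}
```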
+376
crates/core/src/errors.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#![allow(unused_assignments)]
5
+
6
+
use std::{num::ParseIntError, path::PathBuf, process::ExitStatus, sync::mpsc::RecvError};
7
+
8
+
use miette::{Diagnostic, SourceSpan};
9
+
use nix_compat::flakeref::{FlakeRef, FlakeRefError};
10
+
use thiserror::Error;
11
+
use tokio::task::JoinError;
12
+
13
+
use crate::hive::node::{Name, SwitchToConfigurationGoal};
14
+
15
+
#[cfg(debug_assertions)]
16
+
const DOCS_URL: &str = "http://localhost:5173/reference/errors.html";
17
+
#[cfg(not(debug_assertions))]
18
+
const DOCS_URL: &str = "https://wire.althaea.zone/reference/errors.html";
19
+
20
+
#[derive(Debug, Diagnostic, Error)]
21
+
pub enum KeyError {
22
+
#[diagnostic(
23
+
code(wire::key::File),
24
+
url("{DOCS_URL}#{}", self.code().unwrap())
25
+
)]
26
+
#[error("error reading file")]
27
+
File(#[source] std::io::Error),
28
+
29
+
#[diagnostic(
30
+
code(wire::key::SpawningCommand),
31
+
help("Ensure wire has the correct $PATH for this command"),
32
+
url("{DOCS_URL}#{}", self.code().unwrap())
33
+
)]
34
+
#[error("error spawning key command")]
35
+
CommandSpawnError {
36
+
#[source]
37
+
error: std::io::Error,
38
+
39
+
#[source_code]
40
+
command: String,
41
+
42
+
#[label(primary, "Program ran")]
43
+
command_span: Option<SourceSpan>,
44
+
},
45
+
46
+
#[diagnostic(
47
+
code(wire::key::Resolving),
48
+
url("{DOCS_URL}#{}", self.code().unwrap())
49
+
)]
50
+
#[error("Error resolving key command child process")]
51
+
CommandResolveError {
52
+
#[source]
53
+
error: std::io::Error,
54
+
55
+
#[source_code]
56
+
command: String,
57
+
},
58
+
59
+
#[diagnostic(
60
+
code(wire::key::CommandExit),
61
+
url("{DOCS_URL}#{}", self.code().unwrap())
62
+
)]
63
+
#[error("key command failed with status {}: {}", .0,.1)]
64
+
CommandError(ExitStatus, String),
65
+
66
+
#[diagnostic(
67
+
code(wire::key::Empty),
68
+
url("{DOCS_URL}#{}", self.code().unwrap())
69
+
)]
70
+
#[error("Command list empty")]
71
+
Empty,
72
+
73
+
#[diagnostic(
74
+
code(wire::key::ParseKeyPermissions),
75
+
help("Refer to the documentation for the format of key file permissions."),
76
+
url("{DOCS_URL}#{}", self.code().unwrap())
77
+
)]
78
+
#[error("Failed to parse key permissions")]
79
+
ParseKeyPermissions(#[source] ParseIntError),
80
+
}
81
+
82
+
#[derive(Debug, Diagnostic, Error)]
83
+
pub enum ActivationError {
84
+
#[diagnostic(
85
+
code(wire::activation::SwitchToConfiguration),
86
+
url("{DOCS_URL}#{}", self.code().unwrap())
87
+
)]
88
+
#[error("failed to run switch-to-configuration {0} on node {1}")]
89
+
SwitchToConfigurationError(SwitchToConfigurationGoal, Name, #[source] CommandError),
90
+
}
91
+
92
+
#[derive(Debug, Diagnostic, Error)]
93
+
pub enum NetworkError {
94
+
#[diagnostic(
95
+
code(wire::network::HostUnreachable),
96
+
help(
97
+
"If you failed due to a fault in DNS, note that a node can have multiple targets defined."
98
+
),
99
+
url("{DOCS_URL}#{}", self.code().unwrap())
100
+
)]
101
+
#[error("Cannot reach host {host}")]
102
+
HostUnreachable {
103
+
host: String,
104
+
#[source]
105
+
source: CommandError,
106
+
},
107
+
108
+
#[diagnostic(
109
+
code(wire::network::HostUnreachableAfterReboot),
110
+
url("{DOCS_URL}#{}", self.code().unwrap())
111
+
)]
112
+
#[error("Failed to get regain connection to {0} after activation.")]
113
+
HostUnreachableAfterReboot(String),
114
+
115
+
#[diagnostic(
116
+
code(wire::network::HostsExhausted),
117
+
url("{DOCS_URL}#{}", self.code().unwrap())
118
+
)]
119
+
#[error("Ran out of contactable hosts")]
120
+
HostsExhausted,
121
+
}
122
+
123
+
#[derive(Debug, Diagnostic, Error)]
124
+
pub enum HiveInitialisationError {
125
+
#[diagnostic(
126
+
code(wire::hive_init::NoHiveFound),
127
+
help(
128
+
"Double check the path is correct. You can adjust the hive path with `--path` when the hive lies outside of the CWD."
129
+
),
130
+
url("{DOCS_URL}#{}", self.code().unwrap())
131
+
)]
132
+
#[error("No hive could be found in {}", .0.display())]
133
+
NoHiveFound(PathBuf),
134
+
135
+
#[diagnostic(
136
+
code(wire::hive_init::Parse),
137
+
help("If you cannot resolve this problem, please create an issue."),
138
+
url("{DOCS_URL}#{}", self.code().unwrap())
139
+
)]
140
+
#[error("Failed to parse internal wire json.")]
141
+
ParseEvaluateError(#[source] serde_json::Error),
142
+
143
+
#[diagnostic(
144
+
code(wire::hive_init::ParsePrefetch),
145
+
help("please create an issue."),
146
+
url("{DOCS_URL}#{}", self.code().unwrap())
147
+
)]
148
+
#[error("Failed to parse `nix flake prefetch --json`.")]
149
+
ParsePrefetchError(#[source] serde_json::Error),
150
+
151
+
#[diagnostic(
152
+
code(wire::hive_init::NodeDoesNotExist),
153
+
help("Please create an issue!"),
154
+
url("{DOCS_URL}#{}", self.code().unwrap())
155
+
)]
156
+
#[error("node {0} not exist in hive")]
157
+
NodeDoesNotExist(String),
158
+
}
159
+
160
+
#[derive(Debug, Diagnostic, Error)]
161
+
pub enum HiveLocationError {
162
+
#[diagnostic(
163
+
code(wire::hive_location::MalformedPath),
164
+
url("{DOCS_URL}#{}", self.code().unwrap())
165
+
)]
166
+
#[error("Path was malformed: {}", .0.display())]
167
+
MalformedPath(PathBuf),
168
+
169
+
#[diagnostic(
170
+
code(wire::hive_location::Malformed),
171
+
url("{DOCS_URL}#{}", self.code().unwrap())
172
+
)]
173
+
#[error("--path was malformed")]
174
+
Malformed(#[source] FlakeRefError),
175
+
176
+
#[diagnostic(
177
+
code(wire::hive_location::TypeUnsupported),
178
+
url("{DOCS_URL}#{}", self.code().unwrap())
179
+
)]
180
+
#[error("The flakref had an unsupported type: {:#?}", .0)]
181
+
TypeUnsupported(Box<FlakeRef>),
182
+
}
183
+
184
+
#[derive(Debug, Diagnostic, Error)]
185
+
pub enum CommandError {
186
+
#[diagnostic(
187
+
code(wire::command::TermAttrs),
188
+
url("{DOCS_URL}#{}", self.code().unwrap())
189
+
)]
190
+
#[error("Failed to set PTY attrs")]
191
+
TermAttrs(#[source] nix::errno::Errno),
192
+
193
+
#[diagnostic(
194
+
code(wire::command::PosixPipe),
195
+
url("{DOCS_URL}#{}", self.code().unwrap())
196
+
)]
197
+
#[error("There was an error in regards to a pipe")]
198
+
PosixPipe(#[source] nix::errno::Errno),
199
+
200
+
/// Error wrapped around `portable_pty`'s anyhow
201
+
/// errors
202
+
#[diagnostic(
203
+
code(wire::command::PortablePty),
204
+
url("{DOCS_URL}#{}", self.code().unwrap())
205
+
)]
206
+
#[error("There was an error from the portable_pty crate")]
207
+
PortablePty(#[source] anyhow::Error),
208
+
209
+
#[diagnostic(
210
+
code(wire::command::Joining),
211
+
url("{DOCS_URL}#{}", self.code().unwrap())
212
+
)]
213
+
#[error("Failed to join on some tokio task")]
214
+
JoinError(#[source] JoinError),
215
+
216
+
#[diagnostic(
217
+
code(wire::command::WaitForStatus),
218
+
url("{DOCS_URL}#{}", self.code().unwrap())
219
+
)]
220
+
#[error("Failed to wait for the child's status")]
221
+
WaitForStatus(#[source] std::io::Error),
222
+
223
+
#[diagnostic(
224
+
code(wire::detached::NoHandle),
225
+
help("This should never happen, please create an issue!"),
226
+
url("{DOCS_URL}#{}", self.code().unwrap())
227
+
)]
228
+
#[error("There was no handle to child io")]
229
+
NoHandle,
230
+
231
+
#[diagnostic(
232
+
code(wire::command::WritingClientStderr),
233
+
url("{DOCS_URL}#{}", self.code().unwrap())
234
+
)]
235
+
#[error("Failed to write to client stderr.")]
236
+
WritingClientStderr(#[source] std::io::Error),
237
+
238
+
#[diagnostic(
239
+
code(wire::command::WritingMasterStdin),
240
+
url("{DOCS_URL}#{}", self.code().unwrap())
241
+
)]
242
+
#[error("Failed to write to PTY master stdout.")]
243
+
WritingMasterStdout(#[source] std::io::Error),
244
+
245
+
#[diagnostic(
246
+
code(wire::command::Recv),
247
+
url("{DOCS_URL}#{}", self.code().unwrap()),
248
+
help("please create an issue!"),
249
+
)]
250
+
#[error("Failed to receive a message from the begin channel")]
251
+
RecvError(#[source] RecvError),
252
+
253
+
#[diagnostic(
254
+
code(wire::command::ThreadPanic),
255
+
url("{DOCS_URL}#{}", self.code().unwrap()),
256
+
help("please create an issue!"),
257
+
)]
258
+
#[error("Thread panicked")]
259
+
ThreadPanic,
260
+
261
+
#[diagnostic(
262
+
code(wire::command::CommandFailed),
263
+
url("{DOCS_URL}#{}", self.code().unwrap()),
264
+
help("`nix` commands are filtered, run with -vvv to view all"),
265
+
)]
266
+
#[error("{command_ran} failed ({reason}) with {code} (last 20 lines):\n{logs}")]
267
+
CommandFailed {
268
+
command_ran: String,
269
+
logs: String,
270
+
code: String,
271
+
reason: &'static str,
272
+
},
273
+
274
+
#[diagnostic(
275
+
code(wire::command::RuntimeDirectory),
276
+
url("{DOCS_URL}#{}", self.code().unwrap())
277
+
)]
278
+
#[error("error creating $XDG_RUNTIME_DIR/wire")]
279
+
RuntimeDirectory(#[source] std::io::Error),
280
+
281
+
#[diagnostic(
282
+
code(wire::command::RuntimeDirectoryMissing),
283
+
url("{DOCS_URL}#{}", self.code().unwrap())
284
+
)]
285
+
#[error("$XDG_RUNTIME_DIR could not be used.")]
286
+
RuntimeDirectoryMissing(#[source] std::env::VarError),
287
+
288
+
#[diagnostic(
289
+
code(wire::command::OneshotRecvError),
290
+
url("{DOCS_URL}#{}", self.code().unwrap())
291
+
)]
292
+
#[error("Error waiting for begin message")]
293
+
OneshotRecvError(#[source] tokio::sync::oneshot::error::RecvError),
294
+
}
295
+
296
+
#[derive(Debug, Diagnostic, Error)]
297
+
pub enum HiveLibError {
298
+
#[error(transparent)]
299
+
#[diagnostic(transparent)]
300
+
HiveInitialisationError(HiveInitialisationError),
301
+
302
+
#[error(transparent)]
303
+
#[diagnostic(transparent)]
304
+
NetworkError(NetworkError),
305
+
306
+
#[error(transparent)]
307
+
#[diagnostic(transparent)]
308
+
ActivationError(ActivationError),
309
+
310
+
#[error(transparent)]
311
+
#[diagnostic(transparent)]
312
+
CommandError(CommandError),
313
+
314
+
#[error(transparent)]
315
+
#[diagnostic(transparent)]
316
+
HiveLocationError(HiveLocationError),
317
+
318
+
#[error("Failed to apply key {}", .0)]
319
+
KeyError(
320
+
String,
321
+
#[source]
322
+
#[diagnostic_source]
323
+
KeyError,
324
+
),
325
+
326
+
#[diagnostic(
327
+
code(wire::BuildNode),
328
+
url("{DOCS_URL}#{}", self.code().unwrap())
329
+
)]
330
+
#[error("failed to build node {name}")]
331
+
NixBuildError {
332
+
name: Name,
333
+
#[source]
334
+
source: CommandError,
335
+
},
336
+
337
+
#[diagnostic(
338
+
code(wire::CopyPath),
339
+
url("{DOCS_URL}#{}", self.code().unwrap())
340
+
)]
341
+
#[error("failed to copy path {path} to node {name}")]
342
+
NixCopyError {
343
+
name: Name,
344
+
path: String,
345
+
#[source]
346
+
error: Box<CommandError>,
347
+
#[help]
348
+
help: Option<Box<String>>,
349
+
},
350
+
351
+
#[diagnostic(code(wire::Evaluate))]
352
+
#[error("failed to evaluate `{attribute}` from the context of a hive.")]
353
+
NixEvalError {
354
+
attribute: String,
355
+
356
+
#[source]
357
+
source: CommandError,
358
+
359
+
#[help]
360
+
help: Option<Box<String>>,
361
+
},
362
+
363
+
#[diagnostic(
364
+
code(wire::Encoding),
365
+
url("{DOCS_URL}#{}", self.code().unwrap())
366
+
)]
367
+
#[error("error encoding length delimited data")]
368
+
Encoding(#[source] std::io::Error),
369
+
370
+
#[diagnostic(
371
+
code(wire::SIGINT),
372
+
url("{DOCS_URL}#{}", self.code().unwrap())
373
+
)]
374
+
#[error("SIGINT received, shut down")]
375
+
Sigint,
376
+
}
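Each variant above pairs a `thiserror` message with a `miette` diagnostic code and derives a documentation URL from that code. A minimal sketch of the same pairing, with placeholder names and a placeholder URL rather than wire's:

```rust
// Sketch of the thiserror + miette pairing; names and URL are placeholders.
use miette::Diagnostic;
use thiserror::Error;

const DOCS_URL: &str = "https://example.invalid/errors.html";

#[derive(Debug, Diagnostic, Error)]
enum DemoError {
    #[diagnostic(
        code(demo::ReadFile),
        help("check that the file exists and is readable"),
        url("{DOCS_URL}#{}", self.code().unwrap())
    )]
    #[error("error reading file")]
    File(#[source] std::io::Error),
}

fn main() {
    let err = DemoError::File(std::io::Error::new(std::io::ErrorKind::NotFound, "missing"));
    // miette::Report renders the message together with the code, help and url.
    println!("{:?}", miette::Report::new(err));
}
```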
+480
crates/core/src/hive/mod.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use itertools::Itertools;
5
+
use nix_compat::flakeref::FlakeRef;
6
+
use node::{Name, Node};
7
+
use owo_colors::{OwoColorize, Stream};
8
+
use serde::de::Error;
9
+
use serde::{Deserialize, Deserializer, Serialize};
10
+
use std::collections::HashMap;
11
+
use std::collections::hash_map::OccupiedEntry;
12
+
use std::ffi::OsStr;
13
+
use std::fmt::Display;
14
+
use std::fs;
15
+
use std::path::PathBuf;
16
+
use std::str::FromStr;
17
+
use std::sync::Arc;
18
+
use tracing::{debug, info, instrument};
19
+
20
+
use crate::cache::InspectionCache;
21
+
use crate::commands::builder::CommandStringBuilder;
22
+
use crate::commands::common::evaluate_hive_attribute;
23
+
use crate::commands::{CommandArguments, Either, WireCommandChip, run_command};
24
+
use crate::errors::{HiveInitialisationError, HiveLocationError};
25
+
use crate::{EvalGoal, HiveLibError, SubCommandModifiers};
26
+
pub mod node;
27
+
pub mod steps;
28
+
29
+
#[derive(Serialize, Deserialize, Debug, PartialEq)]
30
+
#[serde(deny_unknown_fields)]
31
+
pub struct Hive {
32
+
pub nodes: HashMap<Name, Node>,
33
+
34
+
#[serde(deserialize_with = "check_schema_version", rename = "_schema")]
35
+
pub schema: u32,
36
+
}
37
+
38
+
pub enum Action<'a> {
39
+
Inspect,
40
+
EvaluateNode(OccupiedEntry<'a, String, Node>),
41
+
}
42
+
43
+
fn check_schema_version<'de, D: Deserializer<'de>>(d: D) -> Result<u32, D::Error> {
44
+
let version = u32::deserialize(d)?;
45
+
if version != Hive::SCHEMA_VERSION {
46
+
return Err(D::Error::custom(
47
+
"Version mismatch for Hive. Please ensure the binary and your wire input match!",
48
+
));
49
+
}
50
+
Ok(version)
51
+
}
52
+
53
+
impl Hive {
54
+
pub const SCHEMA_VERSION: u32 = 1;
55
+
56
+
#[instrument(skip_all, name = "eval_hive")]
57
+
pub async fn new_from_path(
58
+
location: &HiveLocation,
59
+
cache: Option<InspectionCache>,
60
+
modifiers: SubCommandModifiers,
61
+
) -> Result<Hive, HiveLibError> {
62
+
info!("evaluating hive {location:?}");
63
+
64
+
if let Some(ref cache) = cache
65
+
&& let HiveLocation::Flake { prefetch, .. } = location
66
+
&& let Some(hive) = cache.get_hive(prefetch).await
67
+
{
68
+
return Ok(hive);
69
+
}
70
+
71
+
let output = evaluate_hive_attribute(location, &EvalGoal::Inspect, modifiers).await?;
72
+
73
+
let hive: Hive = serde_json::from_str(&output).map_err(|err| {
74
+
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParseEvaluateError(err))
75
+
})?;
76
+
77
+
if let Some(cache) = cache
78
+
&& let HiveLocation::Flake { prefetch, .. } = location
79
+
{
80
+
cache.store_hive(prefetch, &output).await;
81
+
}
82
+
83
+
Ok(hive)
84
+
}
85
+
86
+
/// # Errors
87
+
///
88
+
/// Returns an error if a node in nodes does not exist in the hive.
89
+
pub fn force_always_local(&mut self, nodes: Vec<String>) -> Result<(), HiveLibError> {
90
+
for node in nodes {
91
+
info!("Forcing a local build for {node}");
92
+
93
+
self.nodes
94
+
.get_mut(&Name(Arc::from(node.clone())))
95
+
.ok_or(HiveLibError::HiveInitialisationError(
96
+
HiveInitialisationError::NodeDoesNotExist(node.clone()),
97
+
))?
98
+
.build_remotely = false;
99
+
}
100
+
101
+
Ok(())
102
+
}
103
+
}
104
+
105
+
impl Display for Hive {
106
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
107
+
for (name, node) in &self.nodes {
108
+
writeln!(
109
+
f,
110
+
"Node {} {}:\n",
111
+
name.bold(),
112
+
format!("({})", node.host_platform)
113
+
.italic()
114
+
.if_supports_color(Stream::Stdout, |x| x.dimmed()),
115
+
)?;
116
+
117
+
if !node.tags.is_empty() {
118
+
write!(f, " > {}", "Tags:".bold())?;
119
+
writeln!(f, " {:?}", node.tags)?;
120
+
}
121
+
122
+
write!(f, " > {}", "Connection:".bold())?;
123
+
writeln!(f, " {{{}}}", node.target)?;
124
+
125
+
write!(
126
+
f,
127
+
" > {} {}{}",
128
+
"Build remotely".bold(),
129
+
"`deployment.buildOnTarget`"
130
+
.if_supports_color(Stream::Stdout, |x| x.dimmed())
131
+
.italic(),
132
+
":".bold()
133
+
)?;
134
+
writeln!(f, " {}", node.build_remotely)?;
135
+
136
+
write!(
137
+
f,
138
+
" > {} {}{}",
139
+
"Local apply allowed".bold(),
140
+
"`deployment.allowLocalDeployment`"
141
+
.if_supports_color(Stream::Stdout, |x| x.dimmed())
142
+
.italic(),
143
+
":".bold()
144
+
)?;
145
+
writeln!(f, " {}", node.allow_local_deployment)?;
146
+
147
+
if !node.keys.is_empty() {
148
+
write!(f, " > {}", "Keys:".bold())?;
149
+
writeln!(f, " {} key(s)", node.keys.len())?;
150
+
151
+
for key in &node.keys {
152
+
writeln!(f, " > {key}")?;
153
+
}
154
+
}
155
+
156
+
writeln!(f)?;
157
+
}
158
+
159
+
let total_keys = self
160
+
.nodes
161
+
.values()
162
+
.flat_map(|node| node.keys.iter())
163
+
.collect::<Vec<_>>();
164
+
let distinct_keys = self
165
+
.nodes
166
+
.values()
167
+
.flat_map(|node| node.keys.iter())
168
+
.unique()
169
+
.collect::<Vec<_>>()
170
+
.len();
171
+
172
+
write!(f, "{}", "Summary:".bold())?;
173
+
writeln!(
174
+
f,
175
+
" {} total node(s), totalling {} keys ({distinct_keys} distinct).",
176
+
self.nodes.len(),
177
+
total_keys.len()
178
+
)?;
179
+
writeln!(
180
+
f,
181
+
"{}",
182
+
"Note: Listed connections are tried from Left to Right".italic(),
183
+
)?;
184
+
185
+
Ok(())
186
+
}
187
+
}
188
+
189
+
#[derive(Debug, PartialEq, Eq, Deserialize)]
190
+
pub struct FlakePrefetch {
191
+
pub(crate) hash: String,
192
+
#[serde(rename = "storePath")]
193
+
pub(crate) store_path: String,
194
+
}
195
+
196
+
#[derive(Debug, PartialEq, Eq)]
197
+
pub enum HiveLocation {
198
+
HiveNix(PathBuf),
199
+
Flake {
200
+
uri: String,
201
+
prefetch: FlakePrefetch,
202
+
},
203
+
}
204
+
205
+
impl HiveLocation {
206
+
async fn get_flake(
207
+
uri: String,
208
+
modifiers: SubCommandModifiers,
209
+
) -> Result<HiveLocation, HiveLibError> {
210
+
let mut command_string = CommandStringBuilder::nix();
211
+
command_string.args(&[
212
+
"flake",
213
+
"prefetch",
214
+
"--extra-experimental-features",
215
+
"nix-command",
216
+
"--extra-experimental-features",
217
+
"flakes",
218
+
"--json",
219
+
]);
220
+
command_string.arg(&uri);
221
+
222
+
let command = run_command(
223
+
&CommandArguments::new(command_string, modifiers)
224
+
.mode(crate::commands::ChildOutputMode::Generic),
225
+
)
226
+
.await?;
227
+
228
+
let result = command
229
+
.wait_till_success()
230
+
.await
231
+
.map_err(HiveLibError::CommandError)?;
232
+
233
+
debug!(hash_json = ?result);
234
+
235
+
let prefetch = serde_json::from_str(&match result {
236
+
Either::Left((.., output)) | Either::Right((.., output)) => output,
237
+
})
238
+
.map_err(|x| {
239
+
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParsePrefetchError(x))
240
+
})?;
241
+
242
+
debug!(prefetch = ?prefetch);
243
+
244
+
Ok(HiveLocation::Flake { uri, prefetch })
245
+
}
246
+
}
247
+
248
+
pub async fn get_hive_location(
249
+
path: String,
250
+
modifiers: SubCommandModifiers,
251
+
) -> Result<HiveLocation, HiveLibError> {
252
+
let flakeref = FlakeRef::from_str(&path);
253
+
254
+
let path_to_location = async |path: PathBuf| {
255
+
Ok(match path.file_name().and_then(OsStr::to_str) {
256
+
Some("hive.nix") => HiveLocation::HiveNix(path.clone()),
257
+
Some(_) => {
258
+
if fs::metadata(path.join("flake.nix")).is_ok() {
259
+
HiveLocation::get_flake(path.display().to_string(), modifiers).await?
260
+
} else {
261
+
HiveLocation::HiveNix(path.join("hive.nix"))
262
+
}
263
+
}
264
+
None => {
265
+
return Err(HiveLibError::HiveLocationError(
266
+
HiveLocationError::MalformedPath(path.clone()),
267
+
));
268
+
}
269
+
})
270
+
};
271
+
272
+
match flakeref {
273
+
Err(nix_compat::flakeref::FlakeRefError::UrlParseError(_err)) => {
274
+
let path = PathBuf::from(path);
275
+
Ok(path_to_location(path).await?)
276
+
}
277
+
Ok(FlakeRef::Path { path, .. }) => Ok(path_to_location(path).await?),
278
+
Ok(
279
+
FlakeRef::Git { .. }
280
+
| FlakeRef::GitHub { .. }
281
+
| FlakeRef::GitLab { .. }
282
+
| FlakeRef::Tarball { .. }
283
+
| FlakeRef::Mercurial { .. }
284
+
| FlakeRef::SourceHut { .. },
285
+
) => Ok(HiveLocation::get_flake(path, modifiers).await?),
286
+
Err(err) => Err(HiveLibError::HiveLocationError(
287
+
HiveLocationError::Malformed(err),
288
+
)),
289
+
Ok(flakeref) => Err(HiveLibError::HiveLocationError(
290
+
HiveLocationError::TypeUnsupported(Box::new(flakeref)),
291
+
)),
292
+
}
293
+
}
294
+
295
+
#[cfg(test)]
296
+
mod tests {
297
+
use im::vector;
298
+
299
+
use crate::{
300
+
errors::CommandError,
301
+
get_test_path,
302
+
hive::steps::keys::{Key, Source, UploadKeyAt},
303
+
location,
304
+
test_support::make_flake_sandbox,
305
+
};
306
+
307
+
use super::*;
308
+
use std::{assert_matches::assert_matches, env};
309
+
310
+
// flake should always come before hive.nix
311
+
#[tokio::test]
312
+
async fn test_hive_dot_nix_priority() {
313
+
let location = location!(get_test_path!());
314
+
315
+
assert_matches!(location, HiveLocation::Flake { .. });
316
+
}
317
+
318
+
#[tokio::test]
319
+
#[cfg_attr(feature = "no_web_tests", ignore)]
320
+
async fn test_hive_file() {
321
+
let location = location!(get_test_path!());
322
+
323
+
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
324
+
.await
325
+
.unwrap();
326
+
327
+
let node = Node {
328
+
target: node::Target::from_host("192.168.122.96"),
329
+
..Default::default()
330
+
};
331
+
332
+
let mut nodes = HashMap::new();
333
+
nodes.insert(Name("node-a".into()), node);
334
+
335
+
assert_eq!(
336
+
hive,
337
+
Hive {
338
+
nodes,
339
+
schema: Hive::SCHEMA_VERSION
340
+
}
341
+
);
342
+
}
343
+
344
+
#[tokio::test]
345
+
#[cfg_attr(feature = "no_web_tests", ignore)]
346
+
async fn non_trivial_hive() {
347
+
let location = location!(get_test_path!());
348
+
349
+
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
350
+
.await
351
+
.unwrap();
352
+
353
+
let node = Node {
354
+
target: node::Target::from_host("name"),
355
+
keys: vector![Key {
356
+
name: "different-than-a".into(),
357
+
dest_dir: "/run/keys/".into(),
358
+
path: "/run/keys/different-than-a".into(),
359
+
group: "root".into(),
360
+
user: "root".into(),
361
+
permissions: "0600".into(),
362
+
source: Source::String("hi".into()),
363
+
upload_at: UploadKeyAt::PreActivation,
364
+
environment: im::HashMap::new()
365
+
}],
366
+
build_remotely: true,
367
+
..Default::default()
368
+
};
369
+
370
+
let mut nodes = HashMap::new();
371
+
nodes.insert(Name("node-a".into()), node);
372
+
373
+
assert_eq!(
374
+
hive,
375
+
Hive {
376
+
nodes,
377
+
schema: Hive::SCHEMA_VERSION
378
+
}
379
+
);
380
+
}
381
+
382
+
#[tokio::test]
383
+
#[cfg_attr(feature = "no_web_tests", ignore)]
384
+
async fn flake_hive() {
385
+
let tmp_dir = make_flake_sandbox(&get_test_path!()).unwrap();
386
+
387
+
let location = get_hive_location(
388
+
tmp_dir.path().display().to_string(),
389
+
SubCommandModifiers::default(),
390
+
)
391
+
.await
392
+
.unwrap();
393
+
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
394
+
.await
395
+
.unwrap();
396
+
397
+
let mut nodes = HashMap::new();
398
+
399
+
// a merged node
400
+
nodes.insert(Name("node-a".into()), Node::from_host("node-a"));
401
+
// a non-merged node
402
+
nodes.insert(Name("node-b".into()), Node::from_host("node-b"));
403
+
404
+
assert_eq!(
405
+
hive,
406
+
Hive {
407
+
nodes,
408
+
schema: Hive::SCHEMA_VERSION
409
+
}
410
+
);
411
+
412
+
tmp_dir.close().unwrap();
413
+
}
414
+
415
+
#[tokio::test]
416
+
async fn no_nixpkgs() {
417
+
let location = location!(get_test_path!());
418
+
419
+
assert_matches!(
420
+
Hive::new_from_path(&location, None, SubCommandModifiers::default()).await,
421
+
Err(HiveLibError::NixEvalError {
422
+
source: CommandError::CommandFailed {
423
+
logs,
424
+
..
425
+
},
426
+
..
427
+
})
428
+
if logs.contains("makeHive called without meta.nixpkgs specified")
429
+
);
430
+
}
431
+
432
+
#[tokio::test]
433
+
async fn _keys_should_fail() {
434
+
let location = location!(get_test_path!());
435
+
436
+
assert_matches!(
437
+
Hive::new_from_path(&location, None, SubCommandModifiers::default()).await,
438
+
Err(HiveLibError::NixEvalError {
439
+
source: CommandError::CommandFailed {
440
+
logs,
441
+
..
442
+
},
443
+
..
444
+
})
445
+
if logs.contains("The option `deployment._keys' is read-only, but it's set multiple times.")
446
+
);
447
+
}
448
+
449
+
#[tokio::test]
450
+
async fn test_force_always_local() {
451
+
let mut location: PathBuf = env::var("WIRE_TEST_DIR").unwrap().into();
452
+
location.push("non_trivial_hive");
453
+
let location = location!(location);
454
+
455
+
let mut hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
456
+
.await
457
+
.unwrap();
458
+
459
+
assert_matches!(
460
+
hive.force_always_local(vec!["non-existent".to_string()]),
461
+
Err(HiveLibError::HiveInitialisationError(
462
+
HiveInitialisationError::NodeDoesNotExist(node)
463
+
)) if node == "non-existent"
464
+
);
465
+
466
+
for node in hive.nodes.values() {
467
+
assert!(node.build_remotely);
468
+
}
469
+
470
+
assert_matches!(hive.force_always_local(vec!["node-a".to_string()]), Ok(()));
471
+
472
+
assert!(
473
+
!hive
474
+
.nodes
475
+
.get(&Name("node-a".into()))
476
+
.unwrap()
477
+
.build_remotely
478
+
);
479
+
}
480
+
}
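The `_schema` guard above rejects a hive whose schema version does not match the binary before any node data is deserialized. A self-contained sketch of that `deserialize_with` pattern, assuming `serde` and `serde_json` are available and using illustrative names:

```rust
// Sketch of a schema-version guard via `deserialize_with`; names are illustrative.
use serde::{Deserialize, Deserializer, de::Error};

const EXPECTED_SCHEMA: u32 = 1;

fn require_expected_schema<'de, D: Deserializer<'de>>(d: D) -> Result<u32, D::Error> {
    let version = u32::deserialize(d)?;
    if version != EXPECTED_SCHEMA {
        return Err(D::Error::custom(format!(
            "expected schema {EXPECTED_SCHEMA}, got {version}"
        )));
    }
    Ok(version)
}

#[derive(Deserialize, Debug)]
struct Manifest {
    #[serde(deserialize_with = "require_expected_schema", rename = "_schema")]
    schema: u32,
}

fn main() {
    let ok: Manifest = serde_json::from_str(r#"{"_schema": 1}"#).unwrap();
    println!("accepted: {ok:?}");

    let err = serde_json::from_str::<Manifest>(r#"{"_schema": 2}"#).unwrap_err();
    println!("rejected: {err}");
}
```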
+939
crates/core/src/hive/node.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#![allow(clippy::missing_errors_doc)]
5
+
use enum_dispatch::enum_dispatch;
6
+
use gethostname::gethostname;
7
+
use serde::{Deserialize, Serialize};
8
+
use std::assert_matches::debug_assert_matches;
9
+
use std::fmt::Display;
10
+
use std::sync::Arc;
11
+
use std::sync::atomic::AtomicBool;
12
+
use tokio::sync::oneshot;
13
+
use tracing::{Instrument, Level, Span, debug, error, event, instrument, trace};
14
+
15
+
use crate::commands::builder::CommandStringBuilder;
16
+
use crate::commands::common::evaluate_hive_attribute;
17
+
use crate::commands::{CommandArguments, WireCommandChip, run_command};
18
+
use crate::errors::NetworkError;
19
+
use crate::hive::HiveLocation;
20
+
use crate::hive::steps::build::Build;
21
+
use crate::hive::steps::cleanup::CleanUp;
22
+
use crate::hive::steps::evaluate::Evaluate;
23
+
use crate::hive::steps::keys::{Key, Keys, PushKeyAgent, UploadKeyAt};
24
+
use crate::hive::steps::ping::Ping;
25
+
use crate::hive::steps::push::{PushBuildOutput, PushEvaluatedOutput};
26
+
use crate::status::STATUS;
27
+
use crate::{EvalGoal, StrictHostKeyChecking, SubCommandModifiers};
28
+
29
+
use super::HiveLibError;
30
+
use super::steps::activate::SwitchToConfiguration;
31
+
32
+
#[derive(
33
+
Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq, PartialOrd, Ord, derive_more::Display,
34
+
)]
35
+
pub struct Name(pub Arc<str>);
36
+
37
+
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
38
+
pub struct Target {
39
+
pub hosts: Vec<Arc<str>>,
40
+
pub user: Arc<str>,
41
+
pub port: u32,
42
+
43
+
#[serde(skip)]
44
+
current_host: usize,
45
+
}
46
+
47
+
impl Target {
48
+
#[instrument(ret(level = tracing::Level::DEBUG), skip_all)]
49
+
pub fn create_ssh_opts(
50
+
&self,
51
+
modifiers: SubCommandModifiers,
52
+
master: bool,
53
+
) -> Result<String, HiveLibError> {
54
+
self.create_ssh_args(modifiers, false, master)
55
+
.map(|x| x.join(" "))
56
+
}
57
+
58
+
#[instrument(ret(level = tracing::Level::DEBUG))]
59
+
pub fn create_ssh_args(
60
+
&self,
61
+
modifiers: SubCommandModifiers,
62
+
non_interactive_forced: bool,
63
+
master: bool,
64
+
) -> Result<Vec<String>, HiveLibError> {
65
+
let mut vector = vec![
66
+
"-l".to_string(),
67
+
self.user.to_string(),
68
+
"-p".to_string(),
69
+
self.port.to_string(),
70
+
];
71
+
let mut options = vec![
72
+
format!(
73
+
"StrictHostKeyChecking={}",
74
+
match modifiers.ssh_accept_host {
75
+
StrictHostKeyChecking::AcceptNew => "accept-new",
76
+
StrictHostKeyChecking::No => "no",
77
+
}
78
+
)
79
+
.to_string(),
80
+
];
81
+
82
+
options.extend(["PasswordAuthentication=no".to_string()]);
83
+
options.extend(["KbdInteractiveAuthentication=no".to_string()]);
84
+
85
+
vector.push("-o".to_string());
86
+
vector.extend(options.into_iter().intersperse("-o".to_string()));
87
+
88
+
Ok(vector)
89
+
}
90
+
}
91
+
92
+
#[cfg(test)]
93
+
impl Default for Target {
94
+
fn default() -> Self {
95
+
Target {
96
+
hosts: vec!["NAME".into()],
97
+
user: "root".into(),
98
+
port: 22,
99
+
current_host: 0,
100
+
}
101
+
}
102
+
}
103
+
104
+
#[cfg(test)]
105
+
impl<'a> Context<'a> {
106
+
fn create_test_context(
107
+
hive_location: HiveLocation,
108
+
name: &'a Name,
109
+
node: &'a mut Node,
110
+
) -> Self {
111
+
Context {
112
+
name,
113
+
node,
114
+
hive_location: Arc::new(hive_location),
115
+
modifiers: SubCommandModifiers::default(),
116
+
objective: Objective::Apply(ApplyObjective {
117
+
goal: Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch),
118
+
no_keys: false,
119
+
reboot: false,
120
+
should_apply_locally: false,
121
+
substitute_on_destination: false,
122
+
handle_unreachable: HandleUnreachable::default(),
123
+
}),
124
+
state: StepState::default(),
125
+
should_quit: Arc::new(AtomicBool::new(false)),
126
+
}
127
+
}
128
+
}
129
+
130
+
impl Target {
131
+
pub fn get_preferred_host(&self) -> Result<&Arc<str>, HiveLibError> {
132
+
self.hosts
133
+
.get(self.current_host)
134
+
.ok_or(HiveLibError::NetworkError(NetworkError::HostsExhausted))
135
+
}
136
+
137
+
pub const fn host_failed(&mut self) {
138
+
self.current_host += 1;
139
+
}
140
+
141
+
#[cfg(test)]
142
+
#[must_use]
143
+
pub fn from_host(host: &str) -> Self {
144
+
Target {
145
+
hosts: vec![host.into()],
146
+
..Default::default()
147
+
}
148
+
}
149
+
}
150
+
151
+
impl Display for Target {
152
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
153
+
let hosts = itertools::Itertools::join(
154
+
&mut self
155
+
.hosts
156
+
.iter()
157
+
.map(|host| format!("{}@{host}:{}", self.user, self.port)),
158
+
", ",
159
+
);
160
+
161
+
write!(f, "{hosts}")
162
+
}
163
+
}
164
+
165
+
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
166
+
pub struct Node {
167
+
#[serde(rename = "target")]
168
+
pub target: Target,
169
+
170
+
#[serde(rename = "buildOnTarget")]
171
+
pub build_remotely: bool,
172
+
173
+
#[serde(rename = "allowLocalDeployment")]
174
+
pub allow_local_deployment: bool,
175
+
176
+
#[serde(default)]
177
+
pub tags: im::HashSet<String>,
178
+
179
+
#[serde(rename(deserialize = "_keys", serialize = "keys"))]
180
+
pub keys: im::Vector<Key>,
181
+
182
+
#[serde(rename(deserialize = "_hostPlatform", serialize = "host_platform"))]
183
+
pub host_platform: Arc<str>,
184
+
185
+
#[serde(rename(
186
+
deserialize = "privilegeEscalationCommand",
187
+
serialize = "privilege_escalation_command"
188
+
))]
189
+
pub privilege_escalation_command: im::Vector<Arc<str>>,
190
+
}
191
+
192
+
#[cfg(test)]
193
+
impl Default for Node {
194
+
fn default() -> Self {
195
+
Node {
196
+
target: Target::default(),
197
+
keys: im::Vector::new(),
198
+
tags: im::HashSet::new(),
199
+
privilege_escalation_command: vec!["sudo".into(), "--".into()].into(),
200
+
allow_local_deployment: true,
201
+
build_remotely: false,
202
+
host_platform: "x86_64-linux".into(),
203
+
}
204
+
}
205
+
}
206
+
207
+
impl Node {
208
+
#[cfg(test)]
209
+
#[must_use]
210
+
pub fn from_host(host: &str) -> Self {
211
+
Node {
212
+
target: Target::from_host(host),
213
+
..Default::default()
214
+
}
215
+
}
216
+
217
+
/// Tests the connection to a node
218
+
pub async fn ping(&self, modifiers: SubCommandModifiers) -> Result<(), HiveLibError> {
219
+
let host = self.target.get_preferred_host()?;
220
+
221
+
let mut command_string = CommandStringBuilder::new("ssh");
222
+
command_string.arg(format!("{}@{host}", self.target.user));
223
+
command_string.arg(self.target.create_ssh_opts(modifiers, true)?);
224
+
command_string.arg("exit");
225
+
226
+
let output = run_command(
227
+
&CommandArguments::new(command_string, modifiers)
228
+
.log_stdout()
229
+
.mode(crate::commands::ChildOutputMode::Interactive),
230
+
)
231
+
.await?;
232
+
233
+
output.wait_till_success().await.map_err(|source| {
234
+
HiveLibError::NetworkError(NetworkError::HostUnreachable {
235
+
host: host.to_string(),
236
+
source,
237
+
})
238
+
})?;
239
+
240
+
Ok(())
241
+
}
242
+
}
243
+
244
+
#[must_use]
245
+
pub fn should_apply_locally(allow_local_deployment: bool, name: &str) -> bool {
246
+
*name == *gethostname() && allow_local_deployment
247
+
}
248
+
249
+
#[derive(derive_more::Display)]
250
+
pub enum Push<'a> {
251
+
Derivation(&'a Derivation),
252
+
Path(&'a String),
253
+
}
254
+
255
+
#[derive(Deserialize, Clone, Debug)]
256
+
pub struct Derivation(String);
257
+
258
+
impl Display for Derivation {
259
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
260
+
self.0.fmt(f).and_then(|()| write!(f, "^*"))
261
+
}
262
+
}
263
+
264
+
#[derive(derive_more::Display, Debug, Clone, Copy)]
265
+
pub enum SwitchToConfigurationGoal {
266
+
Switch,
267
+
Boot,
268
+
Test,
269
+
DryActivate,
270
+
}
271
+
272
+
#[derive(derive_more::Display, Clone, Copy)]
273
+
pub enum Goal {
274
+
SwitchToConfiguration(SwitchToConfigurationGoal),
275
+
Build,
276
+
Push,
277
+
Keys,
278
+
}
279
+
280
+
// TODO: Get rid of this allow and resolve it
281
+
#[allow(clippy::struct_excessive_bools)]
282
+
#[derive(Clone, Copy)]
283
+
pub struct ApplyObjective {
284
+
pub goal: Goal,
285
+
pub no_keys: bool,
286
+
pub reboot: bool,
287
+
pub should_apply_locally: bool,
288
+
pub substitute_on_destination: bool,
289
+
pub handle_unreachable: HandleUnreachable,
290
+
}
291
+
292
+
#[derive(Clone, Copy)]
293
+
pub enum Objective {
294
+
Apply(ApplyObjective),
295
+
BuildLocally,
296
+
}
297
+
298
+
#[enum_dispatch]
299
+
pub(crate) trait ExecuteStep: Send + Sync + Display + std::fmt::Debug {
300
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError>;
301
+
302
+
fn should_execute(&self, context: &Context) -> bool;
303
+
}
304
+
305
+
// may include other options such as FailAll in the future
306
+
#[non_exhaustive]
307
+
#[derive(Clone, Copy, Default)]
308
+
pub enum HandleUnreachable {
309
+
Ignore,
310
+
#[default]
311
+
FailNode,
312
+
}
313
+
314
+
#[derive(Default)]
315
+
pub struct StepState {
316
+
pub evaluation: Option<Derivation>,
317
+
pub evaluation_rx: Option<oneshot::Receiver<Result<Derivation, HiveLibError>>>,
318
+
pub build: Option<String>,
319
+
pub key_agent_directory: Option<String>,
320
+
}
321
+
322
+
pub struct Context<'a> {
323
+
pub name: &'a Name,
324
+
pub node: &'a mut Node,
325
+
pub hive_location: Arc<HiveLocation>,
326
+
pub modifiers: SubCommandModifiers,
327
+
pub state: StepState,
328
+
pub should_quit: Arc<AtomicBool>,
329
+
pub objective: Objective,
330
+
}
331
+
332
+
#[enum_dispatch(ExecuteStep)]
333
+
#[derive(Debug, PartialEq)]
334
+
enum Step {
335
+
Ping,
336
+
PushKeyAgent,
337
+
Keys,
338
+
Evaluate,
339
+
PushEvaluatedOutput,
340
+
Build,
341
+
PushBuildOutput,
342
+
SwitchToConfiguration,
343
+
CleanUp,
344
+
}
345
+
346
+
impl Display for Step {
347
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
348
+
match self {
349
+
Self::Ping(step) => step.fmt(f),
350
+
Self::PushKeyAgent(step) => step.fmt(f),
351
+
Self::Keys(step) => step.fmt(f),
352
+
Self::Evaluate(step) => step.fmt(f),
353
+
Self::PushEvaluatedOutput(step) => step.fmt(f),
354
+
Self::Build(step) => step.fmt(f),
355
+
Self::PushBuildOutput(step) => step.fmt(f),
356
+
Self::SwitchToConfiguration(step) => step.fmt(f),
357
+
Self::CleanUp(step) => step.fmt(f),
358
+
}
359
+
}
360
+
}
361
+
362
+
pub struct GoalExecutor<'a> {
363
+
steps: Vec<Step>,
364
+
context: Context<'a>,
365
+
}
366
+
367
+
/// returns Err if the application should shut down.
368
+
fn app_shutdown_guard(context: &Context) -> Result<(), HiveLibError> {
369
+
if context
370
+
.should_quit
371
+
.load(std::sync::atomic::Ordering::Relaxed)
372
+
{
373
+
return Err(HiveLibError::Sigint);
374
+
}
375
+
376
+
Ok(())
377
+
}
378
+
379
+
impl<'a> GoalExecutor<'a> {
380
+
#[must_use]
381
+
pub fn new(context: Context<'a>) -> Self {
382
+
Self {
383
+
steps: vec![
384
+
Step::Ping(Ping),
385
+
Step::PushKeyAgent(PushKeyAgent),
386
+
Step::Keys(Keys {
387
+
filter: UploadKeyAt::NoFilter,
388
+
}),
389
+
Step::Keys(Keys {
390
+
filter: UploadKeyAt::PreActivation,
391
+
}),
392
+
Step::Evaluate(super::steps::evaluate::Evaluate),
393
+
Step::PushEvaluatedOutput(super::steps::push::PushEvaluatedOutput),
394
+
Step::Build(super::steps::build::Build),
395
+
Step::PushBuildOutput(super::steps::push::PushBuildOutput),
396
+
Step::SwitchToConfiguration(SwitchToConfiguration),
397
+
Step::Keys(Keys {
398
+
filter: UploadKeyAt::PostActivation,
399
+
}),
400
+
],
401
+
context,
402
+
}
403
+
}
404
+
405
+
#[instrument(skip_all, name = "eval")]
406
+
async fn evaluate_task(
407
+
tx: oneshot::Sender<Result<Derivation, HiveLibError>>,
408
+
hive_location: Arc<HiveLocation>,
409
+
name: Name,
410
+
modifiers: SubCommandModifiers,
411
+
) {
412
+
let output =
413
+
evaluate_hive_attribute(&hive_location, &EvalGoal::GetTopLevel(&name), modifiers)
414
+
.await
415
+
.map(|output| {
416
+
serde_json::from_str::<Derivation>(&output).expect("failed to parse derivation")
417
+
});
418
+
419
+
debug!(output = ?output, done = true);
420
+
421
+
let _ = tx.send(output);
422
+
}
423
+
424
+
#[instrument(skip_all, fields(node = %self.context.name))]
425
+
pub async fn execute(mut self) -> Result<(), HiveLibError> {
426
+
app_shutdown_guard(&self.context)?;
427
+
428
+
let (tx, rx) = oneshot::channel();
429
+
self.context.state.evaluation_rx = Some(rx);
430
+
431
+
// The name of this span should never be changed without updating
432
+
// `wire/cli/tracing_setup.rs`
433
+
debug_assert_matches!(Span::current().metadata().unwrap().name(), "execute");
434
+
// This span should always have a `node` field, which is set by the same file
435
+
debug_assert!(
436
+
Span::current()
437
+
.metadata()
438
+
.unwrap()
439
+
.fields()
440
+
.field("node")
441
+
.is_some()
442
+
);
443
+
444
+
let spawn_evaluator = match self.context.objective {
445
+
Objective::Apply(apply_objective) => !matches!(apply_objective.goal, Goal::Keys),
446
+
Objective::BuildLocally => true,
447
+
};
448
+
449
+
if spawn_evaluator {
450
+
tokio::spawn(
451
+
GoalExecutor::evaluate_task(
452
+
tx,
453
+
self.context.hive_location.clone(),
454
+
self.context.name.clone(),
455
+
self.context.modifiers,
456
+
)
457
+
.in_current_span(),
458
+
);
459
+
}
460
+
461
+
let steps = self
462
+
.steps
463
+
.iter()
464
+
.filter(|step| step.should_execute(&self.context))
465
+
.inspect(|step| {
466
+
trace!("Will execute step `{step}` for {}", self.context.name);
467
+
})
468
+
.collect::<Vec<_>>();
469
+
let length = steps.len();
470
+
471
+
for (position, step) in steps.iter().enumerate() {
472
+
app_shutdown_guard(&self.context)?;
473
+
474
+
event!(
475
+
Level::INFO,
476
+
step = step.to_string(),
477
+
progress = format!("{}/{length}", position + 1)
478
+
);
479
+
480
+
STATUS
481
+
.lock()
482
+
.set_node_step(self.context.name, step.to_string());
483
+
484
+
if let Err(err) = step.execute(&mut self.context).await.inspect_err(|_| {
485
+
error!("Failed to execute `{step}`");
486
+
}) {
487
+
// discard error from cleanup
488
+
let _ = CleanUp.execute(&mut self.context).await;
489
+
490
+
if let Objective::Apply(apply_objective) = self.context.objective
491
+
&& matches!(step, Step::Ping(..))
492
+
&& matches!(
493
+
apply_objective.handle_unreachable,
494
+
HandleUnreachable::Ignore,
495
+
)
496
+
{
497
+
return Ok(());
498
+
}
499
+
500
+
STATUS.lock().mark_node_failed(self.context.name);
501
+
502
+
return Err(err);
503
+
}
504
+
}
505
+
506
+
STATUS.lock().mark_node_succeeded(self.context.name);
507
+
508
+
Ok(())
509
+
}
510
+
}
511
+
512
+
#[cfg(test)]
513
+
mod tests {
514
+
use rand::distr::Alphabetic;
515
+
516
+
use super::*;
517
+
use crate::{
518
+
function_name, get_test_path,
519
+
hive::{Hive, get_hive_location},
520
+
location,
521
+
};
522
+
use std::{assert_matches::assert_matches, path::PathBuf};
523
+
use std::{collections::HashMap, env};
524
+
525
+
fn get_steps(goal_executor: GoalExecutor) -> std::vec::Vec<Step> {
526
+
goal_executor
527
+
.steps
528
+
.into_iter()
529
+
.filter(|step| step.should_execute(&goal_executor.context))
530
+
.collect::<Vec<_>>()
531
+
}
532
+
533
+
#[tokio::test]
534
+
#[cfg_attr(feature = "no_web_tests", ignore)]
535
+
async fn default_values_match() {
536
+
let mut path = get_test_path!();
537
+
538
+
let location =
539
+
get_hive_location(path.display().to_string(), SubCommandModifiers::default())
540
+
.await
541
+
.unwrap();
542
+
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
543
+
.await
544
+
.unwrap();
545
+
546
+
let node = Node::default();
547
+
548
+
let mut nodes = HashMap::new();
549
+
nodes.insert(Name("NAME".into()), node);
550
+
551
+
path.push("hive.nix");
552
+
553
+
assert_eq!(
554
+
hive,
555
+
Hive {
556
+
nodes,
557
+
schema: Hive::SCHEMA_VERSION
558
+
}
559
+
);
560
+
}
561
+
562
+
#[tokio::test]
563
+
async fn order_build_locally() {
564
+
let location = location!(get_test_path!());
565
+
let mut node = Node {
566
+
build_remotely: false,
567
+
..Default::default()
568
+
};
569
+
let name = &Name(function_name!().into());
570
+
let executor = GoalExecutor::new(Context::create_test_context(location, name, &mut node));
571
+
let steps = get_steps(executor);
572
+
573
+
assert_eq!(
574
+
steps,
575
+
vec![
576
+
Ping.into(),
577
+
PushKeyAgent.into(),
578
+
Keys {
579
+
filter: UploadKeyAt::PreActivation
580
+
}
581
+
.into(),
582
+
crate::hive::steps::evaluate::Evaluate.into(),
583
+
crate::hive::steps::build::Build.into(),
584
+
crate::hive::steps::push::PushBuildOutput.into(),
585
+
SwitchToConfiguration.into(),
586
+
Keys {
587
+
filter: UploadKeyAt::PostActivation
588
+
}
589
+
.into(),
590
+
]
591
+
);
592
+
}
593
+
594
+
#[tokio::test]
595
+
async fn order_keys_only() {
596
+
let location = location!(get_test_path!());
597
+
let mut node = Node::default();
598
+
let name = &Name(function_name!().into());
599
+
let mut context = Context::create_test_context(location, name, &mut node);
600
+
601
+
let Objective::Apply(ref mut apply_objective) = context.objective else {
602
+
unreachable!()
603
+
};
604
+
605
+
apply_objective.goal = Goal::Keys;
606
+
607
+
let executor = GoalExecutor::new(context);
608
+
let steps = get_steps(executor);
609
+
610
+
assert_eq!(
611
+
steps,
612
+
vec![
613
+
Ping.into(),
614
+
PushKeyAgent.into(),
615
+
Keys {
616
+
filter: UploadKeyAt::NoFilter
617
+
}
618
+
.into(),
619
+
]
620
+
);
621
+
}
622
+
623
+
#[tokio::test]
624
+
async fn order_build() {
625
+
let location = location!(get_test_path!());
626
+
let mut node = Node::default();
627
+
let name = &Name(function_name!().into());
628
+
let mut context = Context::create_test_context(location, name, &mut node);
629
+
630
+
let Objective::Apply(ref mut apply_objective) = context.objective else {
631
+
unreachable!()
632
+
};
633
+
apply_objective.goal = Goal::Build;
634
+
635
+
let executor = GoalExecutor::new(context);
636
+
let steps = get_steps(executor);
637
+
638
+
assert_eq!(
639
+
steps,
640
+
vec![
641
+
Ping.into(),
642
+
crate::hive::steps::evaluate::Evaluate.into(),
643
+
crate::hive::steps::build::Build.into(),
644
+
crate::hive::steps::push::PushBuildOutput.into(),
645
+
]
646
+
);
647
+
}
648
+
649
+
#[tokio::test]
650
+
async fn order_push_only() {
651
+
let location = location!(get_test_path!());
652
+
let mut node = Node::default();
653
+
let name = &Name(function_name!().into());
654
+
let mut context = Context::create_test_context(location, name, &mut node);
655
+
656
+
let Objective::Apply(ref mut apply_objective) = context.objective else {
657
+
unreachable!()
658
+
};
659
+
apply_objective.goal = Goal::Push;
660
+
661
+
let executor = GoalExecutor::new(context);
662
+
let steps = get_steps(executor);
663
+
664
+
assert_eq!(
665
+
steps,
666
+
vec![
667
+
Ping.into(),
668
+
crate::hive::steps::evaluate::Evaluate.into(),
669
+
crate::hive::steps::push::PushEvaluatedOutput.into(),
670
+
]
671
+
);
672
+
}
673
+
674
+
#[tokio::test]
675
+
async fn order_remote_build() {
676
+
let location = location!(get_test_path!());
677
+
let mut node = Node {
678
+
build_remotely: true,
679
+
..Default::default()
680
+
};
681
+
682
+
let name = &Name(function_name!().into());
683
+
let executor = GoalExecutor::new(Context::create_test_context(location, name, &mut node));
684
+
let steps = get_steps(executor);
685
+
686
+
assert_eq!(
687
+
steps,
688
+
vec![
689
+
Ping.into(),
690
+
PushKeyAgent.into(),
691
+
Keys {
692
+
filter: UploadKeyAt::PreActivation
693
+
}
694
+
.into(),
695
+
crate::hive::steps::evaluate::Evaluate.into(),
696
+
crate::hive::steps::push::PushEvaluatedOutput.into(),
697
+
crate::hive::steps::build::Build.into(),
698
+
SwitchToConfiguration.into(),
699
+
Keys {
700
+
filter: UploadKeyAt::PostActivation
701
+
}
702
+
.into(),
703
+
]
704
+
);
705
+
}
706
+
707
+
#[tokio::test]
708
+
async fn order_nokeys() {
709
+
let location = location!(get_test_path!());
710
+
let mut node = Node::default();
711
+
712
+
let name = &Name(function_name!().into());
713
+
let mut context = Context::create_test_context(location, name, &mut node);
714
+
715
+
let Objective::Apply(ref mut apply_objective) = context.objective else {
716
+
unreachable!()
717
+
};
718
+
apply_objective.no_keys = true;
719
+
720
+
let executor = GoalExecutor::new(context);
721
+
let steps = get_steps(executor);
722
+
723
+
assert_eq!(
724
+
steps,
725
+
vec![
726
+
Ping.into(),
727
+
crate::hive::steps::evaluate::Evaluate.into(),
728
+
crate::hive::steps::build::Build.into(),
729
+
crate::hive::steps::push::PushBuildOutput.into(),
730
+
SwitchToConfiguration.into(),
731
+
]
732
+
);
733
+
}
734
+
735
+
#[tokio::test]
736
+
async fn order_should_apply_locally() {
737
+
let location = location!(get_test_path!());
738
+
let mut node = Node::default();
739
+
740
+
let name = &Name(function_name!().into());
741
+
let mut context = Context::create_test_context(location, name, &mut node);
742
+
743
+
let Objective::Apply(ref mut apply_objective) = context.objective else {
744
+
unreachable!()
745
+
};
746
+
apply_objective.no_keys = true;
747
+
apply_objective.should_apply_locally = true;
748
+
749
+
let executor = GoalExecutor::new(context);
750
+
let steps = get_steps(executor);
751
+
752
+
assert_eq!(
753
+
steps,
754
+
vec![
755
+
crate::hive::steps::evaluate::Evaluate.into(),
756
+
crate::hive::steps::build::Build.into(),
757
+
SwitchToConfiguration.into(),
758
+
]
759
+
);
760
+
}
761
+
762
+
#[tokio::test]
763
+
async fn order_build_only() {
764
+
let location = location!(get_test_path!());
765
+
let mut node = Node::default();
766
+
767
+
let name = &Name(function_name!().into());
768
+
let mut context = Context::create_test_context(location, name, &mut node);
769
+
770
+
context.objective = Objective::BuildLocally;
771
+
772
+
let executor = GoalExecutor::new(context);
773
+
let steps = get_steps(executor);
774
+
775
+
assert_eq!(
776
+
steps,
777
+
vec![
778
+
crate::hive::steps::evaluate::Evaluate.into(),
779
+
crate::hive::steps::build::Build.into()
780
+
]
781
+
);
782
+
}
783
+
784
+
#[test]
785
+
fn target_fails_increments() {
786
+
let mut target = Target::from_host("localhost");
787
+
788
+
assert_eq!(target.current_host, 0);
789
+
790
+
for i in 0..100 {
791
+
target.host_failed();
792
+
assert_eq!(target.current_host, i + 1);
793
+
}
794
+
}
795
+
796
+
#[test]
797
+
fn get_preferred_host_fails() {
798
+
let mut target = Target {
799
+
hosts: vec![
800
+
"un.reachable.1".into(),
801
+
"un.reachable.2".into(),
802
+
"un.reachable.3".into(),
803
+
"un.reachable.4".into(),
804
+
"un.reachable.5".into(),
805
+
],
806
+
..Default::default()
807
+
};
808
+
809
+
assert_ne!(
810
+
target.get_preferred_host().unwrap().to_string(),
811
+
"un.reachable.5"
812
+
);
813
+
814
+
for i in 1..=5 {
815
+
assert_eq!(
816
+
target.get_preferred_host().unwrap().to_string(),
817
+
format!("un.reachable.{i}")
818
+
);
819
+
target.host_failed();
820
+
}
821
+
822
+
for _ in 0..5 {
823
+
assert_matches!(
824
+
target.get_preferred_host(),
825
+
Err(HiveLibError::NetworkError(NetworkError::HostsExhausted))
826
+
);
827
+
}
828
+
}
829
+
830
+
#[test]
831
+
fn test_ssh_opts() {
832
+
let target = Target::from_host("hello-world");
833
+
let subcommand_modifiers = SubCommandModifiers {
834
+
non_interactive: false,
835
+
..Default::default()
836
+
};
837
+
let tmp = format!(
838
+
"/tmp/{}",
839
+
rand::distr::SampleString::sample_string(&Alphabetic, &mut rand::rng(), 10)
840
+
);
841
+
842
+
std::fs::create_dir(&tmp).unwrap();
843
+
844
+
unsafe { env::set_var("XDG_RUNTIME_DIR", &tmp) }
845
+
846
+
let args = [
847
+
"-l".to_string(),
848
+
target.user.to_string(),
849
+
"-p".to_string(),
850
+
target.port.to_string(),
851
+
"-o".to_string(),
852
+
"StrictHostKeyChecking=accept-new".to_string(),
853
+
"-o".to_string(),
854
+
"PasswordAuthentication=no".to_string(),
855
+
"-o".to_string(),
856
+
"KbdInteractiveAuthentication=no".to_string(),
857
+
];
858
+
859
+
assert_eq!(
860
+
target
861
+
.create_ssh_args(subcommand_modifiers, false, false)
862
+
.unwrap(),
863
+
args
864
+
);
865
+
assert_eq!(
866
+
target.create_ssh_opts(subcommand_modifiers, false).unwrap(),
867
+
args.join(" ")
868
+
);
869
+
870
+
assert_eq!(
871
+
target
872
+
.create_ssh_args(subcommand_modifiers, false, true)
873
+
.unwrap(),
874
+
[
875
+
"-l".to_string(),
876
+
target.user.to_string(),
877
+
"-p".to_string(),
878
+
target.port.to_string(),
879
+
"-o".to_string(),
880
+
"StrictHostKeyChecking=accept-new".to_string(),
881
+
"-o".to_string(),
882
+
"PasswordAuthentication=no".to_string(),
883
+
"-o".to_string(),
884
+
"KbdInteractiveAuthentication=no".to_string(),
885
+
]
886
+
);
887
+
888
+
assert_eq!(
889
+
target
890
+
.create_ssh_args(subcommand_modifiers, true, true)
891
+
.unwrap(),
892
+
[
893
+
"-l".to_string(),
894
+
target.user.to_string(),
895
+
"-p".to_string(),
896
+
target.port.to_string(),
897
+
"-o".to_string(),
898
+
"StrictHostKeyChecking=accept-new".to_string(),
899
+
"-o".to_string(),
900
+
"PasswordAuthentication=no".to_string(),
901
+
"-o".to_string(),
902
+
"KbdInteractiveAuthentication=no".to_string(),
903
+
]
904
+
);
905
+
906
+
// forced non-interactive is the same as --non-interactive
907
+
assert_eq!(
908
+
target
909
+
.create_ssh_args(subcommand_modifiers, true, false)
910
+
.unwrap(),
911
+
target
912
+
.create_ssh_args(
913
+
SubCommandModifiers {
914
+
non_interactive: true,
915
+
..Default::default()
916
+
},
917
+
false,
918
+
false
919
+
)
920
+
.unwrap()
921
+
);
922
+
}
923
+
924
+
#[tokio::test]
925
+
async fn context_quits_sigint() {
926
+
let location = location!(get_test_path!());
927
+
let mut node = Node::default();
928
+
929
+
let name = &Name(function_name!().into());
930
+
let context = Context::create_test_context(location, name, &mut node);
931
+
context
932
+
.should_quit
933
+
.store(true, std::sync::atomic::Ordering::Relaxed);
934
+
let executor = GoalExecutor::new(context);
935
+
let status = executor.execute().await;
936
+
937
+
assert_matches!(status, Err(HiveLibError::Sigint));
938
+
}
939
+
}
+219
crates/core/src/hive/steps/activate.rs
+219
crates/core/src/hive/steps/activate.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use tracing::{error, info, instrument, warn};
7
+
8
+
use crate::{
9
+
HiveLibError,
10
+
commands::{CommandArguments, WireCommandChip, builder::CommandStringBuilder, run_command},
11
+
errors::{ActivationError, NetworkError},
12
+
hive::node::{Context, ExecuteStep, Goal, Objective, SwitchToConfigurationGoal},
13
+
};
14
+
15
+
#[derive(Debug, PartialEq)]
16
+
pub struct SwitchToConfiguration;
17
+
18
+
impl Display for SwitchToConfiguration {
19
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
20
+
write!(f, "switch-to-configuration")
21
+
}
22
+
}
23
+
24
+
async fn wait_for_ping(ctx: &Context<'_>) -> Result<(), HiveLibError> {
25
+
let host = ctx.node.target.get_preferred_host()?;
26
+
let mut result = ctx.node.ping(ctx.modifiers).await;
27
+
28
+
for num in 0..2 {
29
+
warn!("Trying to ping {host} (attempt {}/3)", num + 1);
30
+
31
+
result = ctx.node.ping(ctx.modifiers).await;
32
+
33
+
if result.is_ok() {
34
+
info!("Regained connection to {} via {host}", ctx.name);
35
+
36
+
break;
37
+
}
38
+
}
39
+
40
+
result
41
+
}
42
+
43
+
async fn set_profile(
44
+
goal: SwitchToConfigurationGoal,
45
+
built_path: &String,
46
+
ctx: &Context<'_>,
47
+
) -> Result<(), HiveLibError> {
48
+
info!("Setting profiles in anticipation for switch-to-configuration {goal}");
49
+
50
+
let mut command_string = CommandStringBuilder::new("nix-env");
51
+
command_string.args(&["-p", "/nix/var/nix/profiles/system", "--set"]);
52
+
command_string.arg(built_path);
53
+
54
+
let Objective::Apply(apply_objective) = ctx.objective else {
55
+
unreachable!()
56
+
};
57
+
58
+
let child = run_command(
59
+
&CommandArguments::new(command_string, ctx.modifiers)
60
+
.mode(crate::commands::ChildOutputMode::Nix)
61
+
.execute_on_remote(if apply_objective.should_apply_locally {
62
+
None
63
+
} else {
64
+
Some(&ctx.node.target)
65
+
})
66
+
.elevated(ctx.node),
67
+
)
68
+
.await?;
69
+
70
+
let _ = child
71
+
.wait_till_success()
72
+
.await
73
+
.map_err(HiveLibError::CommandError)?;
74
+
75
+
info!("Set system profile");
76
+
77
+
Ok(())
78
+
}
79
+
80
+
impl ExecuteStep for SwitchToConfiguration {
81
+
fn should_execute(&self, ctx: &Context) -> bool {
82
+
let Objective::Apply(apply_objective) = ctx.objective else {
83
+
return false;
84
+
};
85
+
86
+
matches!(apply_objective.goal, Goal::SwitchToConfiguration(..))
87
+
}
88
+
89
+
#[allow(clippy::too_many_lines)]
90
+
#[instrument(skip_all, name = "activate")]
91
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
92
+
let built_path = ctx.state.build.as_ref().unwrap();
93
+
94
+
let Objective::Apply(apply_objective) = ctx.objective else {
95
+
unreachable!()
96
+
};
97
+
98
+
let Goal::SwitchToConfiguration(goal) = &apply_objective.goal else {
99
+
unreachable!("Cannot reach as guarded by should_execute")
100
+
};
101
+
102
+
if matches!(
103
+
goal,
104
+
// switch profile if switch or boot
105
+
// https://github.com/NixOS/nixpkgs/blob/a2c92aa34735a04010671e3378e2aa2d109b2a72/pkgs/by-name/ni/nixos-rebuild-ng/src/nixos_rebuild/services.py#L224
106
+
SwitchToConfigurationGoal::Switch | SwitchToConfigurationGoal::Boot
107
+
) {
108
+
set_profile(*goal, built_path, ctx).await?;
109
+
}
110
+
111
+
info!("Running switch-to-configuration {goal}");
112
+
113
+
let mut command_string =
114
+
CommandStringBuilder::new(format!("{built_path}/bin/switch-to-configuration"));
115
+
command_string.arg(match goal {
116
+
SwitchToConfigurationGoal::Switch => "switch",
117
+
SwitchToConfigurationGoal::Boot => "boot",
118
+
SwitchToConfigurationGoal::Test => "test",
119
+
SwitchToConfigurationGoal::DryActivate => "dry-activate",
120
+
});
121
+
122
+
let child = run_command(
123
+
&CommandArguments::new(command_string, ctx.modifiers)
124
+
.execute_on_remote(if apply_objective.should_apply_locally {
125
+
None
126
+
} else {
127
+
Some(&ctx.node.target)
128
+
})
129
+
.elevated(ctx.node)
130
+
.log_stdout(),
131
+
)
132
+
.await?;
133
+
134
+
let result = child.wait_till_success().await;
135
+
136
+
match result {
137
+
Ok(_) => {
138
+
if !apply_objective.reboot {
139
+
return Ok(());
140
+
}
141
+
142
+
if apply_objective.should_apply_locally {
143
+
error!("Refusing to reboot local machine!");
144
+
145
+
return Ok(());
146
+
}
147
+
148
+
warn!("Rebooting {name}!", name = ctx.name);
149
+
150
+
let reboot = run_command(
151
+
&CommandArguments::new("reboot now", ctx.modifiers)
152
+
.log_stdout()
153
+
.execute_on_remote(Some(&ctx.node.target))
154
+
.elevated(ctx.node),
155
+
)
156
+
.await?;
157
+
158
+
// consume result, impossible to know if the machine failed to reboot or we
159
+
// simply disconnected
160
+
let _ = reboot
161
+
.wait_till_success()
162
+
.await
163
+
.map_err(HiveLibError::CommandError)?;
164
+
165
+
info!("Rebooted {name}, waiting to reconnect...", name = ctx.name);
166
+
167
+
if wait_for_ping(ctx).await.is_ok() {
168
+
return Ok(());
169
+
}
170
+
171
+
error!(
172
+
"Failed to get regain connection to {name} via {host} after reboot.",
173
+
name = ctx.name,
174
+
host = ctx.node.target.get_preferred_host()?
175
+
);
176
+
177
+
return Err(HiveLibError::NetworkError(
178
+
NetworkError::HostUnreachableAfterReboot(
179
+
ctx.node.target.get_preferred_host()?.to_string(),
180
+
),
181
+
));
182
+
}
183
+
Err(error) => {
184
+
warn!(
185
+
"Activation command for {name} exited unsuccessfully.",
186
+
name = ctx.name
187
+
);
188
+
189
+
// Bail if the command couldn't have broken the system
190
+
// and don't try to regain connection to localhost
191
+
if matches!(goal, SwitchToConfigurationGoal::DryActivate)
192
+
|| apply_objective.should_apply_locally
193
+
{
194
+
return Err(HiveLibError::ActivationError(
195
+
ActivationError::SwitchToConfigurationError(*goal, ctx.name.clone(), error),
196
+
));
197
+
}
198
+
199
+
if wait_for_ping(ctx).await.is_ok() {
200
+
return Err(HiveLibError::ActivationError(
201
+
ActivationError::SwitchToConfigurationError(*goal, ctx.name.clone(), error),
202
+
));
203
+
}
204
+
205
+
error!(
206
+
"Failed to get regain connection to {name} via {host} after {goal} activation.",
207
+
name = ctx.name,
208
+
host = ctx.node.target.get_preferred_host()?
209
+
);
210
+
211
+
return Err(HiveLibError::NetworkError(
212
+
NetworkError::HostUnreachableAfterReboot(
213
+
ctx.node.target.get_preferred_host()?.to_string(),
214
+
),
215
+
));
216
+
}
217
+
}
218
+
}
219
+
}
+89
crates/core/src/hive/steps/build.rs
+89
crates/core/src/hive/steps/build.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use tracing::{info, instrument};
7
+
8
+
use crate::{
9
+
HiveLibError,
10
+
commands::{
11
+
CommandArguments, Either, WireCommandChip, builder::CommandStringBuilder,
12
+
run_command_with_env,
13
+
},
14
+
hive::node::{Context, ExecuteStep, Goal, Objective},
15
+
};
16
+
17
+
#[derive(Debug, PartialEq)]
18
+
pub struct Build;
19
+
20
+
impl Display for Build {
21
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
22
+
write!(f, "Build the node")
23
+
}
24
+
}
25
+
26
+
impl ExecuteStep for Build {
27
+
fn should_execute(&self, ctx: &Context) -> bool {
28
+
match ctx.objective {
29
+
Objective::Apply(apply_objective) => {
30
+
!matches!(apply_objective.goal, Goal::Keys | Goal::Push)
31
+
}
32
+
Objective::BuildLocally => true,
33
+
}
34
+
}
35
+
36
+
#[instrument(skip_all, name = "build")]
37
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
38
+
let top_level = ctx.state.evaluation.as_ref().unwrap();
39
+
40
+
let mut command_string = CommandStringBuilder::nix();
41
+
command_string.args(&[
42
+
"--extra-experimental-features",
43
+
"nix-command",
44
+
"build",
45
+
"--print-build-logs",
46
+
"--no-link",
47
+
"--print-out-paths",
48
+
]);
49
+
command_string.arg(top_level.to_string());
50
+
51
+
let status = run_command_with_env(
52
+
&CommandArguments::new(command_string, ctx.modifiers)
53
+
// build remotely if asked for AND we aren't applying locally
54
+
.execute_on_remote(
55
+
if ctx.node.build_remotely
56
+
&& let Objective::Apply(apply_objective) = ctx.objective
57
+
&& !apply_objective.should_apply_locally
58
+
{
59
+
Some(&ctx.node.target)
60
+
} else {
61
+
None
62
+
},
63
+
)
64
+
.mode(crate::commands::ChildOutputMode::Nix)
65
+
.log_stdout(),
66
+
std::collections::HashMap::new(),
67
+
)
68
+
.await?
69
+
.wait_till_success()
70
+
.await
71
+
.map_err(|source| HiveLibError::NixBuildError {
72
+
name: ctx.name.clone(),
73
+
source,
74
+
})?;
75
+
76
+
let stdout = match status {
77
+
Either::Left((_, stdout)) | Either::Right((_, stdout)) => stdout,
78
+
};
79
+
80
+
info!("Built output: {stdout:?}");
81
+
82
+
// print built path to stdout
83
+
println!("{stdout}");
84
+
85
+
ctx.state.build = Some(stdout);
86
+
87
+
Ok(())
88
+
}
89
+
}
+28
crates/core/src/hive/steps/cleanup.rs
+28
crates/core/src/hive/steps/cleanup.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use crate::{
7
+
errors::HiveLibError,
8
+
hive::node::{Context, ExecuteStep},
9
+
};
10
+
11
+
#[derive(PartialEq, Debug)]
12
+
pub(crate) struct CleanUp;
13
+
14
+
impl Display for CleanUp {
15
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
16
+
write!(f, "Clean up")
17
+
}
18
+
}
19
+
20
+
impl ExecuteStep for CleanUp {
21
+
fn should_execute(&self, _ctx: &Context) -> bool {
22
+
false
23
+
}
24
+
25
+
async fn execute(&self, _ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
26
+
Ok(())
27
+
}
28
+
}
+38
crates/core/src/hive/steps/evaluate.rs
+38
crates/core/src/hive/steps/evaluate.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use tracing::instrument;
7
+
8
+
use crate::{
9
+
HiveLibError,
10
+
hive::node::{Context, ExecuteStep, Goal, Objective},
11
+
};
12
+
13
+
#[derive(Debug, PartialEq)]
14
+
pub struct Evaluate;
15
+
16
+
impl Display for Evaluate {
17
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
18
+
write!(f, "Evaluate the node")
19
+
}
20
+
}
21
+
22
+
impl ExecuteStep for Evaluate {
23
+
fn should_execute(&self, ctx: &Context) -> bool {
24
+
match ctx.objective {
25
+
Objective::Apply(apply_objective) => !matches!(apply_objective.goal, Goal::Keys),
26
+
Objective::BuildLocally => true,
27
+
}
28
+
}
29
+
30
+
#[instrument(skip_all, name = "eval")]
31
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
32
+
let rx = ctx.state.evaluation_rx.take().unwrap();
33
+
34
+
ctx.state.evaluation = Some(rx.await.unwrap()?);
35
+
36
+
Ok(())
37
+
}
38
+
}
+441
crates/core/src/hive/steps/keys.rs
+441
crates/core/src/hive/steps/keys.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use base64::Engine;
5
+
use base64::prelude::BASE64_STANDARD;
6
+
use futures::future::join_all;
7
+
use im::Vector;
8
+
use itertools::{Itertools, Position};
9
+
use owo_colors::OwoColorize;
10
+
use prost::Message;
11
+
use prost::bytes::BytesMut;
12
+
use serde::{Deserialize, Serialize};
13
+
use sha2::{Digest, Sha256};
14
+
use std::env;
15
+
use std::fmt::Display;
16
+
use std::io::Cursor;
17
+
use std::iter::Peekable;
18
+
use std::path::PathBuf;
19
+
use std::pin::Pin;
20
+
use std::process::Stdio;
21
+
use std::str::from_utf8;
22
+
use std::vec::IntoIter;
23
+
use tokio::io::AsyncReadExt as _;
24
+
use tokio::process::Command;
25
+
use tokio::{fs::File, io::AsyncRead};
26
+
use tokio_util::codec::LengthDelimitedCodec;
27
+
use tracing::{debug, instrument};
28
+
29
+
use crate::HiveLibError;
30
+
use crate::commands::builder::CommandStringBuilder;
31
+
use crate::commands::common::push;
32
+
use crate::commands::{CommandArguments, WireCommandChip, run_command};
33
+
use crate::errors::KeyError;
34
+
use crate::hive::node::{Context, ExecuteStep, Goal, Objective, Push, SwitchToConfigurationGoal};
35
+
36
+
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
37
+
#[serde(tag = "t", content = "c")]
38
+
pub enum Source {
39
+
String(String),
40
+
Path(PathBuf),
41
+
Command(Vec<String>),
42
+
}
43
+
44
+
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
45
+
pub enum UploadKeyAt {
46
+
#[serde(rename = "pre-activation")]
47
+
PreActivation,
48
+
#[serde(rename = "post-activation")]
49
+
PostActivation,
50
+
#[serde(skip)]
51
+
NoFilter,
52
+
}
53
+
54
+
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
55
+
pub struct Key {
56
+
pub name: String,
57
+
#[serde(rename = "destDir")]
58
+
pub dest_dir: String,
59
+
pub path: PathBuf,
60
+
pub group: String,
61
+
pub user: String,
62
+
pub permissions: String,
63
+
pub source: Source,
64
+
#[serde(rename = "uploadAt")]
65
+
pub upload_at: UploadKeyAt,
66
+
#[serde(default)]
67
+
pub environment: im::HashMap<String, String>,
68
+
}
69
+
70
+
impl Display for Key {
71
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
72
+
write!(
73
+
f,
74
+
"{} {} {}:{} {}",
75
+
match self.source {
76
+
Source::String(_) => "Literal",
77
+
Source::Path(_) => "Path",
78
+
Source::Command(_) => "Command",
79
+
}
80
+
.if_supports_color(owo_colors::Stream::Stdout, |x| x.dimmed()),
81
+
[self.dest_dir.clone(), self.name.clone()]
82
+
.iter()
83
+
.collect::<PathBuf>()
84
+
.display(),
85
+
self.user,
86
+
self.group,
87
+
self.permissions,
88
+
)
89
+
}
90
+
}
91
+
92
+
#[cfg(test)]
93
+
impl Default for Key {
94
+
fn default() -> Self {
95
+
use im::HashMap;
96
+
97
+
Self {
98
+
name: "key".into(),
99
+
dest_dir: "/somewhere/".into(),
100
+
path: "key".into(),
101
+
group: "root".into(),
102
+
user: "root".into(),
103
+
permissions: "0600".into(),
104
+
source: Source::String("test key".into()),
105
+
upload_at: UploadKeyAt::PreActivation,
106
+
environment: HashMap::new(),
107
+
}
108
+
}
109
+
}
110
+
111
+
fn get_u32_permission(key: &Key) -> Result<u32, KeyError> {
112
+
u32::from_str_radix(&key.permissions, 8).map_err(KeyError::ParseKeyPermissions)
113
+
}
114
+
115
+
async fn create_reader(key: &'_ Key) -> Result<Pin<Box<dyn AsyncRead + Send + '_>>, KeyError> {
116
+
match &key.source {
117
+
Source::Path(path) => Ok(Box::pin(File::open(path).await.map_err(KeyError::File)?)),
118
+
Source::String(string) => Ok(Box::pin(Cursor::new(string))),
119
+
Source::Command(args) => {
120
+
let output = Command::new(args.first().ok_or(KeyError::Empty)?)
121
+
.args(&args[1..])
122
+
.stdin(Stdio::null())
123
+
.stdout(Stdio::piped())
124
+
.stderr(Stdio::piped())
125
+
.envs(key.environment.clone())
126
+
.spawn()
127
+
.map_err(|err| KeyError::CommandSpawnError {
128
+
error: err,
129
+
command: args.join(" "),
130
+
command_span: Some((0..args.first().unwrap().len()).into()),
131
+
})?
132
+
.wait_with_output()
133
+
.await
134
+
.map_err(|err| KeyError::CommandResolveError {
135
+
error: err,
136
+
command: args.join(" "),
137
+
})?;
138
+
139
+
if output.status.success() {
140
+
return Ok(Box::pin(Cursor::new(output.stdout)));
141
+
}
142
+
143
+
Err(KeyError::CommandError(
144
+
output.status,
145
+
from_utf8(&output.stderr).unwrap().to_string(),
146
+
))
147
+
}
148
+
}
149
+
}
150
+
151
+
async fn process_key(key: &Key) -> Result<(wire_key_agent::keys::KeySpec, Vec<u8>), KeyError> {
152
+
let mut reader = create_reader(key).await?;
153
+
154
+
let mut buf = Vec::new();
155
+
156
+
reader
157
+
.read_to_end(&mut buf)
158
+
.await
159
+
.expect("failed to read into buffer");
160
+
161
+
let destination: PathBuf = [key.dest_dir.clone(), key.name.clone()].iter().collect();
162
+
163
+
debug!("Staging push to {}", destination.clone().display());
164
+
165
+
Ok((
166
+
wire_key_agent::keys::KeySpec {
167
+
length: buf
168
+
.len()
169
+
.try_into()
170
+
.expect("Failed to convert usize buf length to i32"),
171
+
user: key.user.clone(),
172
+
group: key.group.clone(),
173
+
permissions: get_u32_permission(key)?,
174
+
destination: destination.into_os_string().into_string().unwrap(),
175
+
digest: Sha256::digest(&buf).to_vec(),
176
+
last: false,
177
+
},
178
+
buf,
179
+
))
180
+
}
181
+
182
+
#[derive(Debug, PartialEq)]
183
+
pub struct Keys {
184
+
pub filter: UploadKeyAt,
185
+
}
186
+
#[derive(Debug, PartialEq)]
187
+
pub struct PushKeyAgent;
188
+
189
+
impl Display for Keys {
190
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
191
+
write!(f, "Upload key @ {:?}", self.filter)
192
+
}
193
+
}
194
+
195
+
impl Display for PushKeyAgent {
196
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
197
+
write!(f, "Push the key agent")
198
+
}
199
+
}
200
+
201
+
pub struct SimpleLengthDelimWriter<F> {
202
+
codec: LengthDelimitedCodec,
203
+
write_fn: F,
204
+
}
205
+
206
+
impl<F> SimpleLengthDelimWriter<F>
207
+
where
208
+
F: AsyncFnMut(Vec<u8>) -> Result<(), HiveLibError>,
209
+
{
210
+
fn new(write_fn: F) -> Self {
211
+
Self {
212
+
codec: LengthDelimitedCodec::new(),
213
+
write_fn,
214
+
}
215
+
}
216
+
217
+
async fn send(&mut self, data: prost::bytes::Bytes) -> Result<(), HiveLibError> {
218
+
let mut buffer = BytesMut::new();
219
+
tokio_util::codec::Encoder::encode(&mut self.codec, data, &mut buffer)
220
+
.map_err(HiveLibError::Encoding)?;
221
+
222
+
(self.write_fn)(buffer.to_vec()).await?;
223
+
Ok(())
224
+
}
225
+
}
226
+
227
+
impl ExecuteStep for Keys {
228
+
fn should_execute(&self, ctx: &Context) -> bool {
229
+
let Objective::Apply(apply_objective) = ctx.objective else {
230
+
return false;
231
+
};
232
+
233
+
if apply_objective.no_keys {
234
+
return false;
235
+
}
236
+
237
+
// should execute if no filter, and the goal is keys.
238
+
// otherwise, only execute if the goal is switch and the filter is pre- or post-activation
239
+
matches!(
240
+
(&self.filter, &apply_objective.goal),
241
+
(UploadKeyAt::NoFilter, Goal::Keys)
242
+
| (
243
+
UploadKeyAt::PreActivation | UploadKeyAt::PostActivation,
244
+
Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch)
245
+
)
246
+
)
247
+
}
248
+
249
+
#[instrument(skip_all, name = "keys")]
250
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
251
+
let agent_directory = ctx.state.key_agent_directory.as_ref().unwrap();
252
+
253
+
let mut keys = self.select_keys(&ctx.node.keys).await?;
254
+
255
+
if keys.peek().is_none() {
256
+
debug!("Had no keys to push, ending KeyStep early.");
257
+
return Ok(());
258
+
}
259
+
260
+
let command_string =
261
+
CommandStringBuilder::new(format!("{agent_directory}/bin/wire-key-agent"));
262
+
263
+
let Objective::Apply(apply_objective) = ctx.objective else {
264
+
unreachable!()
265
+
};
266
+
267
+
let mut child = run_command(
268
+
&CommandArguments::new(command_string, ctx.modifiers)
269
+
.execute_on_remote(if apply_objective.should_apply_locally {
270
+
None
271
+
} else {
272
+
Some(&ctx.node.target)
273
+
})
274
+
.elevated(ctx.node)
275
+
.keep_stdin_open()
276
+
.log_stdout(),
277
+
)
278
+
.await?;
279
+
280
+
let mut writer = SimpleLengthDelimWriter::new(async |data| child.write_stdin(data).await);
281
+
282
+
for (position, (mut spec, buf)) in keys.with_position() {
283
+
if matches!(position, Position::Last | Position::Only) {
284
+
spec.last = true;
285
+
}
286
+
287
+
debug!("Writing spec & buf for {:?}", spec);
288
+
289
+
writer
290
+
.send(BASE64_STANDARD.encode(spec.encode_to_vec()).into())
291
+
.await?;
292
+
writer.send(BASE64_STANDARD.encode(buf).into()).await?;
293
+
}
294
+
295
+
let status = child
296
+
.wait_till_success()
297
+
.await
298
+
.map_err(HiveLibError::CommandError)?;
299
+
300
+
debug!("status: {status:?}");
301
+
302
+
Ok(())
303
+
}
304
+
}
305
+
306
+
impl Keys {
307
+
async fn select_keys(
308
+
&self,
309
+
keys: &Vector<Key>,
310
+
) -> Result<Peekable<IntoIter<(wire_key_agent::keys::KeySpec, std::vec::Vec<u8>)>>, HiveLibError>
311
+
{
312
+
let futures = keys
313
+
.iter()
314
+
.filter(|key| self.filter == UploadKeyAt::NoFilter || (key.upload_at == self.filter))
315
+
.map(|key| async move {
316
+
process_key(key)
317
+
.await
318
+
.map_err(|err| HiveLibError::KeyError(key.name.clone(), err))
319
+
});
320
+
321
+
Ok(join_all(futures)
322
+
.await
323
+
.into_iter()
324
+
.collect::<Result<Vec<_>, HiveLibError>>()?
325
+
.into_iter()
326
+
.peekable())
327
+
}
328
+
}
329
+
330
+
impl ExecuteStep for PushKeyAgent {
331
+
fn should_execute(&self, ctx: &Context) -> bool {
332
+
let Objective::Apply(apply_objective) = ctx.objective else {
333
+
return false;
334
+
};
335
+
336
+
if apply_objective.no_keys {
337
+
return false;
338
+
}
339
+
340
+
matches!(
341
+
&apply_objective.goal,
342
+
Goal::Keys | Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch)
343
+
)
344
+
}
345
+
346
+
#[instrument(skip_all, name = "push_agent")]
347
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
348
+
let arg_name = format!(
349
+
"WIRE_KEY_AGENT_{platform}",
350
+
platform = ctx.node.host_platform.replace('-', "_")
351
+
);
352
+
353
+
let agent_directory = match env::var_os(&arg_name) {
354
+
Some(agent) => agent.into_string().unwrap(),
355
+
None => panic!(
356
+
"{arg_name} environment variable not set! \n
357
+
wire was not built with the ability to deploy keys to this platform. \n
358
+
Please create an issue: https://github.com/forallsys/wire/issues/new?template=bug_report.md"
359
+
),
360
+
};
361
+
362
+
let Objective::Apply(apply_objective) = ctx.objective else {
363
+
unreachable!()
364
+
};
365
+
366
+
if !apply_objective.should_apply_locally {
367
+
push(ctx, Push::Path(&agent_directory)).await?;
368
+
}
369
+
370
+
ctx.state.key_agent_directory = Some(agent_directory);
371
+
372
+
Ok(())
373
+
}
374
+
}
375
+
376
+
#[cfg(test)]
377
+
mod tests {
378
+
use im::Vector;
379
+
380
+
use crate::hive::steps::keys::{Key, Keys, UploadKeyAt, process_key};
381
+
382
+
fn new_key(upload_at: &UploadKeyAt) -> Key {
383
+
Key {
384
+
upload_at: upload_at.clone(),
385
+
source: super::Source::String(match upload_at {
386
+
UploadKeyAt::PreActivation => "pre".into(),
387
+
UploadKeyAt::PostActivation => "post".into(),
388
+
UploadKeyAt::NoFilter => "none".into(),
389
+
}),
390
+
..Default::default()
391
+
}
392
+
}
393
+
394
+
#[tokio::test]
395
+
async fn key_filtering() {
396
+
let keys = Vector::from(vec![
397
+
new_key(&UploadKeyAt::PreActivation),
398
+
new_key(&UploadKeyAt::PostActivation),
399
+
new_key(&UploadKeyAt::PreActivation),
400
+
new_key(&UploadKeyAt::PostActivation),
401
+
]);
402
+
403
+
for (_, buf) in (Keys {
404
+
filter: crate::hive::steps::keys::UploadKeyAt::PreActivation,
405
+
})
406
+
.select_keys(&keys)
407
+
.await
408
+
.unwrap()
409
+
{
410
+
assert_eq!(String::from_utf8_lossy(&buf), "pre");
411
+
}
412
+
413
+
for (_, buf) in (Keys {
414
+
filter: crate::hive::steps::keys::UploadKeyAt::PostActivation,
415
+
})
416
+
.select_keys(&keys)
417
+
.await
418
+
.unwrap()
419
+
{
420
+
assert_eq!(String::from_utf8_lossy(&buf), "post");
421
+
}
422
+
423
+
// test that NoFilter processes all keys.
424
+
let processed_all =
425
+
futures::future::join_all(keys.iter().map(async |x| process_key(x).await))
426
+
.await
427
+
.iter()
428
+
.flatten()
429
+
.cloned()
430
+
.collect::<Vec<_>>();
431
+
let no_filter = (Keys {
432
+
filter: crate::hive::steps::keys::UploadKeyAt::NoFilter,
433
+
})
434
+
.select_keys(&keys)
435
+
.await
436
+
.unwrap()
437
+
.collect::<Vec<_>>();
438
+
439
+
assert_eq!(processed_all, no_filter);
440
+
}
441
+
}
+10
crates/core/src/hive/steps/mod.rs
+10
crates/core/src/hive/steps/mod.rs
+58
crates/core/src/hive/steps/ping.rs
+58
crates/core/src/hive/steps/ping.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use tracing::{Level, event, instrument};
7
+
8
+
use crate::{
9
+
HiveLibError,
10
+
hive::node::{Context, ExecuteStep, Objective},
11
+
};
12
+
13
+
#[derive(Debug, PartialEq)]
14
+
pub struct Ping;
15
+
16
+
impl Display for Ping {
17
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
18
+
write!(f, "Ping node")
19
+
}
20
+
}
21
+
22
+
impl ExecuteStep for Ping {
23
+
fn should_execute(&self, ctx: &Context) -> bool {
24
+
let Objective::Apply(apply_objective) = ctx.objective else {
25
+
return false;
26
+
};
27
+
28
+
!apply_objective.should_apply_locally
29
+
}
30
+
31
+
#[instrument(skip_all, name = "ping")]
32
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
33
+
loop {
34
+
event!(
35
+
Level::INFO,
36
+
status = "attempting",
37
+
host = ctx.node.target.get_preferred_host()?.to_string()
38
+
);
39
+
40
+
if ctx.node.ping(ctx.modifiers).await.is_ok() {
41
+
event!(
42
+
Level::INFO,
43
+
status = "success",
44
+
host = ctx.node.target.get_preferred_host()?.to_string()
45
+
);
46
+
return Ok(());
47
+
}
48
+
49
+
// ? will take us out if we run out of hosts
50
+
event!(
51
+
Level::WARN,
52
+
status = "failed to ping",
53
+
host = ctx.node.target.get_preferred_host()?.to_string()
54
+
);
55
+
ctx.node.target.host_failed();
56
+
}
57
+
}
58
+
}
+84
crates/core/src/hive/steps/push.rs
+84
crates/core/src/hive/steps/push.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::fmt::Display;
5
+
6
+
use tracing::instrument;
7
+
8
+
use crate::{
9
+
HiveLibError,
10
+
commands::common::push,
11
+
hive::node::{Context, ExecuteStep, Goal, Objective},
12
+
};
13
+
14
+
#[derive(Debug, PartialEq)]
15
+
pub struct PushEvaluatedOutput;
16
+
#[derive(Debug, PartialEq)]
17
+
pub struct PushBuildOutput;
18
+
19
+
impl Display for PushEvaluatedOutput {
20
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
21
+
write!(f, "Push the evaluated output")
22
+
}
23
+
}
24
+
25
+
impl Display for PushBuildOutput {
26
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
27
+
write!(f, "Push the build output")
28
+
}
29
+
}
30
+
31
+
impl ExecuteStep for PushEvaluatedOutput {
32
+
fn should_execute(&self, ctx: &Context) -> bool {
33
+
let Objective::Apply(apply_objective) = ctx.objective else {
34
+
return false;
35
+
};
36
+
37
+
!matches!(apply_objective.goal, Goal::Keys)
38
+
&& !apply_objective.should_apply_locally
39
+
&& (ctx.node.build_remotely | matches!(apply_objective.goal, Goal::Push))
40
+
}
41
+
42
+
#[instrument(skip_all, name = "push_eval")]
43
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
44
+
let top_level = ctx.state.evaluation.as_ref().unwrap();
45
+
46
+
push(ctx, crate::hive::node::Push::Derivation(top_level)).await?;
47
+
48
+
Ok(())
49
+
}
50
+
}
51
+
52
+
impl ExecuteStep for PushBuildOutput {
53
+
fn should_execute(&self, ctx: &Context) -> bool {
54
+
let Objective::Apply(apply_objective) = ctx.objective else {
55
+
return false;
56
+
};
57
+
58
+
if matches!(apply_objective.goal, Goal::Keys | Goal::Push) {
59
+
// skip if we are not building
60
+
return false;
61
+
}
62
+
63
+
if ctx.node.build_remotely {
64
+
// skip if we are building remotely
65
+
return false;
66
+
}
67
+
68
+
if apply_objective.should_apply_locally {
69
+
// skip step if we are applying locally
70
+
return false;
71
+
}
72
+
73
+
true
74
+
}
75
+
76
+
#[instrument(skip_all, name = "push_build")]
77
+
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
78
+
let built_path = ctx.state.build.as_ref().unwrap();
79
+
80
+
push(ctx, crate::hive::node::Push::Path(built_path)).await?;
81
+
82
+
Ok(())
83
+
}
84
+
}
+71
crates/core/src/lib.rs
+71
crates/core/src/lib.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#![feature(assert_matches)]
5
+
#![feature(iter_intersperse)]
6
+
#![feature(sync_nonpoison)]
7
+
#![feature(nonpoison_mutex)]
8
+
9
+
use std::{
10
+
io::{IsTerminal, stderr},
11
+
sync::LazyLock,
12
+
};
13
+
14
+
use tokio::sync::{AcquireError, Semaphore, SemaphorePermit};
15
+
16
+
use crate::{errors::HiveLibError, hive::node::Name, status::STATUS};
17
+
18
+
pub mod cache;
19
+
pub mod commands;
20
+
pub mod hive;
21
+
pub mod status;
22
+
23
+
#[cfg(test)]
24
+
mod test_macros;
25
+
26
+
#[cfg(test)]
27
+
mod test_support;
28
+
29
+
pub mod errors;
30
+
31
+
#[derive(Clone, Debug, Copy, Default)]
32
+
pub enum StrictHostKeyChecking {
33
+
/// do not accept new hosts. dangerous!
34
+
No,
35
+
36
+
/// accept-new, default
37
+
#[default]
38
+
AcceptNew,
39
+
}
40
+
41
+
#[derive(Debug, Clone, Copy)]
42
+
pub struct SubCommandModifiers {
43
+
pub show_trace: bool,
44
+
pub non_interactive: bool,
45
+
pub ssh_accept_host: StrictHostKeyChecking,
46
+
}
47
+
48
+
impl Default for SubCommandModifiers {
49
+
fn default() -> Self {
50
+
SubCommandModifiers {
51
+
show_trace: false,
52
+
non_interactive: !std::io::stdin().is_terminal(),
53
+
ssh_accept_host: StrictHostKeyChecking::default(),
54
+
}
55
+
}
56
+
}
57
+
58
+
pub enum EvalGoal<'a> {
59
+
Inspect,
60
+
Names,
61
+
GetTopLevel(&'a Name),
62
+
}
63
+
64
+
pub static STDIN_CLOBBER_LOCK: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(1));
65
+
66
+
pub async fn acquire_stdin_lock<'a>() -> Result<SemaphorePermit<'a>, AcquireError> {
67
+
let result = STDIN_CLOBBER_LOCK.acquire().await?;
68
+
STATUS.lock().wipe_out(&mut stderr());
69
+
70
+
Ok(result)
71
+
}
+173
crates/core/src/status.rs
+173
crates/core/src/status.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use owo_colors::OwoColorize;
5
+
use std::{fmt::Write, time::Instant};
6
+
use termion::{clear, cursor};
7
+
8
+
use crate::{STDIN_CLOBBER_LOCK, hive::node::Name};
9
+
10
+
use std::{
11
+
collections::HashMap,
12
+
sync::{LazyLock, nonpoison::Mutex},
13
+
};
14
+
15
+
#[derive(Default)]
16
+
pub enum NodeStatus {
17
+
#[default]
18
+
Pending,
19
+
Running(String),
20
+
Succeeded,
21
+
Failed,
22
+
}
23
+
24
+
pub struct Status {
25
+
statuses: HashMap<String, NodeStatus>,
26
+
began: Instant,
27
+
show_progress: bool,
28
+
}
29
+
30
+
/// global status used for the progress bar in the cli crate
31
+
pub static STATUS: LazyLock<Mutex<Status>> = LazyLock::new(|| Mutex::new(Status::new()));
32
+
33
+
impl Status {
34
+
fn new() -> Self {
35
+
Self {
36
+
statuses: HashMap::default(),
37
+
began: Instant::now(),
38
+
show_progress: false,
39
+
}
40
+
}
41
+
42
+
pub const fn show_progress(&mut self, show_progress: bool) {
43
+
self.show_progress = show_progress;
44
+
}
45
+
46
+
pub fn add_many(&mut self, names: &[&Name]) {
47
+
self.statuses.extend(
48
+
names
49
+
.iter()
50
+
.map(|name| (name.0.to_string(), NodeStatus::Pending)),
51
+
);
52
+
}
53
+
54
+
pub fn set_node_step(&mut self, node: &Name, step: String) {
55
+
self.statuses
56
+
.insert(node.0.to_string(), NodeStatus::Running(step));
57
+
}
58
+
59
+
pub fn mark_node_failed(&mut self, node: &Name) {
60
+
self.statuses.insert(node.0.to_string(), NodeStatus::Failed);
61
+
}
62
+
63
+
pub fn mark_node_succeeded(&mut self, node: &Name) {
64
+
self.statuses
65
+
.insert(node.0.to_string(), NodeStatus::Succeeded);
66
+
}
67
+
68
+
#[must_use]
69
+
fn num_finished(&self) -> usize {
70
+
self.statuses
71
+
.iter()
72
+
.filter(|(_, status)| matches!(status, NodeStatus::Succeeded | NodeStatus::Failed))
73
+
.count()
74
+
}
75
+
76
+
#[must_use]
77
+
fn num_running(&self) -> usize {
78
+
self.statuses
79
+
.iter()
80
+
.filter(|(_, status)| matches!(status, NodeStatus::Running(..)))
81
+
.count()
82
+
}
83
+
84
+
#[must_use]
85
+
fn num_failed(&self) -> usize {
86
+
self.statuses
87
+
.iter()
88
+
.filter(|(_, status)| matches!(status, NodeStatus::Failed))
89
+
.count()
90
+
}
91
+
92
+
#[must_use]
93
+
pub fn get_msg(&self) -> String {
94
+
if self.statuses.is_empty() {
95
+
return String::new();
96
+
}
97
+
98
+
let mut msg = format!("[{} / {}", self.num_finished(), self.statuses.len(),);
99
+
100
+
let num_failed = self.num_failed();
101
+
let num_running = self.num_running();
102
+
103
+
let failed = if num_failed >= 1 {
104
+
Some(format!("{} Failed", num_failed.red()))
105
+
} else {
106
+
None
107
+
};
108
+
109
+
let running = if num_running >= 1 {
110
+
Some(format!("{} Deploying", num_running.blue()))
111
+
} else {
112
+
None
113
+
};
114
+
115
+
let _ = match (failed, running) {
116
+
(None, None) => write!(&mut msg, ""),
117
+
(Some(message), None) | (None, Some(message)) => write!(&mut msg, " ({message})"),
118
+
(Some(failed), Some(running)) => write!(&mut msg, " ({failed}, {running})"),
119
+
};
120
+
121
+
let _ = write!(&mut msg, "]");
122
+
123
+
let _ = write!(&mut msg, " {}s", self.began.elapsed().as_secs());
124
+
125
+
msg
126
+
}
127
+
128
+
pub fn clear<T: std::io::Write>(&self, writer: &mut T) {
129
+
if !self.show_progress {
130
+
return;
131
+
}
132
+
133
+
let _ = write!(writer, "{}", cursor::Save);
134
+
// let _ = write!(writer, "{}", cursor::Down(1));
135
+
let _ = write!(writer, "{}", cursor::Left(999));
136
+
let _ = write!(writer, "{}", clear::CurrentLine);
137
+
}
138
+
139
+
/// used when there is an interactive prompt
140
+
pub fn wipe_out<T: std::io::Write>(&self, writer: &mut T) {
141
+
if !self.show_progress {
142
+
return;
143
+
}
144
+
145
+
let _ = write!(writer, "{}", cursor::Save);
146
+
let _ = write!(writer, "{}", cursor::Left(999));
147
+
let _ = write!(writer, "{}", clear::CurrentLine);
148
+
let _ = writer.flush();
149
+
}
150
+
151
+
pub fn write_status<T: std::io::Write>(&mut self, writer: &mut T) {
152
+
if self.show_progress {
153
+
let _ = write!(writer, "{}", self.get_msg());
154
+
}
155
+
}
156
+
157
+
pub fn write_above_status<T: std::io::Write>(
158
+
&mut self,
159
+
buf: &[u8],
160
+
writer: &mut T,
161
+
) -> std::io::Result<usize> {
162
+
if STDIN_CLOBBER_LOCK.available_permits() != 1 {
163
+
// skip
164
+
return Ok(0);
165
+
}
166
+
167
+
self.clear(writer);
168
+
let written = writer.write(buf)?;
169
+
self.write_status(writer);
170
+
171
+
Ok(written)
172
+
}
173
+
}
+43
crates/core/src/test_macros.rs
+43
crates/core/src/test_macros.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#[macro_export]
5
+
macro_rules! function_name {
6
+
() => {{
7
+
fn f() {}
8
+
fn type_name_of<T>(_: T) -> &'static str {
9
+
std::any::type_name::<T>()
10
+
}
11
+
let name = type_name_of(f);
12
+
// closure for async functions
13
+
&name[..name.len() - 3]
14
+
}};
15
+
}
16
+
17
+
#[macro_export]
18
+
macro_rules! get_test_path {
19
+
() => {{
20
+
let mut path: PathBuf = env::var("WIRE_TEST_DIR").unwrap().into();
21
+
let full_name = $crate::function_name!();
22
+
let function_name = full_name
23
+
.trim_end_matches("::{{closure}}")
24
+
.split("::")
25
+
.last()
26
+
.unwrap();
27
+
path.push(function_name);
28
+
29
+
path
30
+
}};
31
+
}
32
+
33
+
#[macro_export]
34
+
macro_rules! location {
35
+
($path:expr) => {{
36
+
$crate::hive::get_hive_location(
37
+
$path.display().to_string(),
38
+
$crate::SubCommandModifiers::default(),
39
+
)
40
+
.await
41
+
.unwrap()
42
+
}};
43
+
}
+67
crates/core/src/test_support.rs
+67
crates/core/src/test_support.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
use std::{
5
+
fs::{self, create_dir},
6
+
io,
7
+
path::Path,
8
+
process::Command,
9
+
};
10
+
11
+
use tempdir::TempDir;
12
+
13
+
pub fn make_flake_sandbox(path: &Path) -> Result<TempDir, io::Error> {
14
+
let tmp_dir = TempDir::new("wire-test")?;
15
+
16
+
Command::new("git")
17
+
.args(["init", "-b", "tmp"])
18
+
.current_dir(tmp_dir.path())
19
+
.status()?;
20
+
21
+
for entry in fs::read_dir(path)? {
22
+
let entry = entry?;
23
+
24
+
fs::copy(entry.path(), tmp_dir.as_ref().join(entry.file_name()))?;
25
+
}
26
+
27
+
let root = path.parent().unwrap().parent().unwrap().parent().unwrap();
28
+
29
+
create_dir(tmp_dir.as_ref().join("module/"))?;
30
+
31
+
fs::copy(
32
+
root.join(Path::new("runtime/evaluate.nix")),
33
+
tmp_dir.as_ref().join("evaluate.nix"),
34
+
)?;
35
+
fs::copy(
36
+
root.join(Path::new("runtime/module/config.nix")),
37
+
tmp_dir.as_ref().join("module/config.nix"),
38
+
)?;
39
+
fs::copy(
40
+
root.join(Path::new("runtime/module/options.nix")),
41
+
tmp_dir.as_ref().join("module/options.nix"),
42
+
)?;
43
+
fs::copy(
44
+
root.join(Path::new("runtime/module/default.nix")),
45
+
tmp_dir.as_ref().join("module/default.nix"),
46
+
)?;
47
+
fs::copy(
48
+
root.join(Path::new("runtime/makeHive.nix")),
49
+
tmp_dir.as_ref().join("makeHive.nix"),
50
+
)?;
51
+
fs::copy(
52
+
root.join(Path::new("flake.lock")),
53
+
tmp_dir.as_ref().join("flake.lock"),
54
+
)?;
55
+
56
+
Command::new("git")
57
+
.args(["add", "-A"])
58
+
.current_dir(tmp_dir.path())
59
+
.status()?;
60
+
61
+
Command::new("nix")
62
+
.args(["flake", "lock"])
63
+
.current_dir(tmp_dir.path())
64
+
.status()?;
65
+
66
+
Ok(tmp_dir)
67
+
}
+20
crates/key_agent/Cargo.toml
+20
crates/key_agent/Cargo.toml
···
1
+
[package]
2
+
name = "wire-key-agent"
3
+
edition.workspace = true
4
+
version.workspace = true
5
+
6
+
[dependencies]
7
+
tokio = { workspace = true }
8
+
tokio-util = { workspace = true }
9
+
anyhow = { workspace = true }
10
+
prost = { workspace = true }
11
+
nix = { workspace = true }
12
+
futures-util = { workspace = true }
13
+
sha2 = { workspace = true }
14
+
base64 = { workspace = true }
15
+
16
+
[build-dependencies]
17
+
prost-build = "0.14"
18
+
19
+
[lints]
20
+
workspace = true
+8
crates/key_agent/build.rs
+8
crates/key_agent/build.rs
+17
crates/key_agent/default.nix
+17
crates/key_agent/default.nix
+17
crates/key_agent/src/keys.proto
+17
crates/key_agent/src/keys.proto
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
syntax = "proto3";
5
+
6
+
package key_agent.keys;
7
+
8
+
message KeySpec {
9
+
string destination = 1;
10
+
string user = 2;
11
+
string group = 3;
12
+
uint32 permissions = 4;
13
+
uint32 length = 5;
14
+
bool last = 6;
15
+
/// Sha256 digest
16
+
bytes digest = 7;
17
+
}
+6
crates/key_agent/src/lib.rs
+6
crates/key_agent/src/lib.rs
+94
crates/key_agent/src/main.rs
+94
crates/key_agent/src/main.rs
···
1
+
// SPDX-License-Identifier: AGPL-3.0-or-later
2
+
// Copyright 2024-2025 wire Contributors
3
+
4
+
#![deny(clippy::pedantic)]
5
+
use base64::Engine;
6
+
use base64::prelude::BASE64_STANDARD;
7
+
use futures_util::stream::StreamExt;
8
+
use nix::unistd::{Group, User};
9
+
use prost::Message;
10
+
use prost::bytes::Bytes;
11
+
use sha2::{Digest, Sha256};
12
+
use std::os::unix::fs::PermissionsExt;
13
+
use std::os::unix::fs::chown;
14
+
use std::path::{Path, PathBuf};
15
+
use tokio::fs::File;
16
+
use tokio::io::AsyncWriteExt;
17
+
use tokio_util::codec::{FramedRead, LengthDelimitedCodec};
18
+
use wire_key_agent::keys::KeySpec;
19
+
20
+
fn create_path(key_path: &Path) -> Result<(), anyhow::Error> {
21
+
let prefix = key_path.parent().unwrap();
22
+
std::fs::create_dir_all(prefix)?;
23
+
24
+
Ok(())
25
+
}
26
+
27
+
fn pretty_keyspec(spec: &KeySpec) -> String {
28
+
format!(
29
+
"{} {}:{} {}",
30
+
spec.destination, spec.user, spec.group, spec.permissions
31
+
)
32
+
}
33
+
34
+
#[tokio::main]
35
+
async fn main() -> Result<(), anyhow::Error> {
36
+
let stdin = tokio::io::stdin();
37
+
38
+
let mut framed = FramedRead::new(stdin, LengthDelimitedCodec::new());
39
+
40
+
while let Some(spec_bytes) = framed.next().await {
41
+
let spec_bytes = Bytes::from(BASE64_STANDARD.decode(spec_bytes?)?);
42
+
let spec = KeySpec::decode(spec_bytes)?;
43
+
44
+
let key_bytes = BASE64_STANDARD.decode(
45
+
framed
46
+
.next()
47
+
.await
48
+
.expect("expected key_bytes to come after spec_bytes")?,
49
+
)?;
50
+
51
+
let digest = Sha256::digest(&key_bytes).to_vec();
52
+
53
+
println!(
54
+
"Writing {}, {:?} bytes of data",
55
+
pretty_keyspec(&spec),
56
+
key_bytes.len()
57
+
);
58
+
59
+
if digest != spec.digest {
60
+
return Err(anyhow::anyhow!(
61
+
"digest of {spec:?} did not match {digest:?}! Please create an issue!"
62
+
));
63
+
}
64
+
65
+
let path = PathBuf::from(&spec.destination);
66
+
create_path(&path)?;
67
+
68
+
let mut file = File::create(path).await?;
69
+
let mut permissions = file.metadata().await?.permissions();
70
+
71
+
permissions.set_mode(spec.permissions);
72
+
file.set_permissions(permissions).await?;
73
+
74
+
let user = User::from_name(&spec.user)?;
75
+
let group = Group::from_name(&spec.group)?;
76
+
77
+
chown(
78
+
spec.destination,
79
+
// Default uid/gid to 0. This is then wrapped in an Option again for
80
+
// the function.
81
+
Some(user.map_or(0, |user| user.uid.into())),
82
+
Some(group.map_or(0, |group| group.gid.into())),
83
+
)?;
84
+
85
+
file.write_all(&key_bytes).await?;
86
+
87
+
// last key, goodbye
88
+
if spec.last {
89
+
break;
90
+
}
91
+
}
92
+
93
+
Ok(())
94
+
}
+7
-6
doc/.vitepress/config.ts
+7
-6
doc/.vitepress/config.ts
···
20
20
21
21
footer: {
22
22
message:
23
-
'Released under the <a href="https://github.com/mrshmllow/wire/blob/trunk/COPYING">AGPL-3.0 License</a>.',
23
+
'Released under the <a href="https://github.com/forallsys/wire/blob/trunk/COPYING">AGPL-3.0 License</a>.',
24
24
copyright: "Copyright 2024-2025 wire Contributors",
25
25
},
26
26
···
31
31
{ text: "Guides", link: "/guides/installation" },
32
32
{ text: "Reference", link: "/reference/cli" },
33
33
{
34
-
text: MODE === "stable" ? pkg.version : "Unstable",
34
+
text: MODE === "stable" ? pkg.version : `Unstable (${pkg.version})`,
35
35
items: [
36
36
MODE === "unstable"
37
37
? {
38
-
text: `View ${pkg.version}`,
38
+
text: `View Stable`,
39
39
link: "https://wire.althaea.zone",
40
40
}
41
41
: {
···
44
44
},
45
45
{
46
46
text: "Changelog",
47
-
link: "https://github.com/mrshmllow/wire/blob/trunk/CHANGELOG.md",
47
+
link: "https://github.com/forallsys/wire/blob/trunk/CHANGELOG.md",
48
48
},
49
49
{
50
50
text: "CI Server",
···
119
119
},
120
120
{ text: "Apply your Config", link: "/guides/apply" },
121
121
{ text: "Target Nodes", link: "/guides/targeting" },
122
+
{ text: "Build in CI", link: "/guides/build-in-ci" },
122
123
{
123
124
text: "Features",
124
125
items: [
···
149
150
},
150
151
151
152
editLink: {
152
-
pattern: "https://github.com/mrshmllow/wire/edit/trunk/doc/:path",
153
+
pattern: "https://github.com/forallsys/wire/edit/trunk/doc/:path",
153
154
text: "Edit this page on GitHub",
154
155
},
155
156
156
157
socialLinks: [
157
-
{ icon: "github", link: "https://github.com/mrshmllow/wire" },
158
+
{ icon: "github", link: "https://github.com/forallsys/wire" },
158
159
],
159
160
},
160
161
markdown: {
+1
-34
doc/.vitepress/theme/index.ts
+1
-34
doc/.vitepress/theme/index.ts
···
1
1
import DefaultTheme from "vitepress/theme";
2
2
import "virtual:group-icons.css";
3
-
import giscusTalk from "vitepress-plugin-comment-with-giscus";
4
-
import { EnhanceAppContext, useData, useRoute } from "vitepress";
5
-
import { toRefs } from "vue";
3
+
import { EnhanceAppContext } from "vitepress";
6
4
import "./style.css";
7
5
8
6
export default {
9
7
...DefaultTheme,
10
8
enhanceApp(ctx: EnhanceAppContext) {
11
9
DefaultTheme.enhanceApp(ctx);
12
-
},
13
-
setup() {
14
-
const { frontmatter } = toRefs(useData());
15
-
const route = useRoute();
16
-
17
-
giscusTalk(
18
-
{
19
-
repo: "mrshmllow/wire",
20
-
repoId: "R_kgDOMQQbzw",
21
-
category: "giscus", // default: `General`
22
-
categoryId: "DIC_kwDOMQQbz84Co4vv",
23
-
mapping: "pathname",
24
-
inputPosition: "top",
25
-
lang: "en",
26
-
// i18n setting (Note: This configuration will override the default language set by lang)
27
-
// Configured as an object with key-value pairs inside:
28
-
// [your i18n configuration name]: [corresponds to the language pack name in Giscus]
29
-
locales: {
30
-
"en-US": "en",
31
-
},
32
-
homePageShowComment: false,
33
-
lightTheme: "light",
34
-
darkTheme: "transparent_dark",
35
-
},
36
-
{
37
-
frontmatter,
38
-
route,
39
-
},
40
-
// Default to false for all pages
41
-
false,
42
-
);
43
10
},
44
11
};
+36
doc/guides/build-in-ci.md
+36
doc/guides/build-in-ci.md
···
1
+
---
2
+
comment: true
3
+
title: Build in CI
4
+
---
5
+
6
+
# Build in CI
7
+
8
+
## The `wire build` command <Badge type="tip" text="^1.1.0" />
9
+
10
+
`wire build` builds nodes locally. It is distinct from
11
+
`wire apply build`, as it will not ping or push the result,
12
+
making it useful for CI.
13
+
14
+
It accepts the same `--on` argument as `wire apply` does.
15
+
16
+
## Partitioning builds
17
+
18
+
`wire build` accepts a `--partition` option inspired by
19
+
[cargo-nextest](https://nexte.st/docs/ci-features/partitioning/), which splits
20
+
selected nodes into buckets to be built separately.
21
+
22
+
It accepts values in the format `--partition current/total`, where 1 ≤ current ≤ total.
23
+
24
+
For example, these two commands will build the entire hive in two invocations:
25
+
26
+
```sh
27
+
wire build --partition 1/2
28
+
29
+
# later, or at the same time:
30
+
31
+
wire build --partition 2/2
32
+
```
33
+
34
+
## Example: Build in GitHub Actions
35
+
36
+
<<< @/snippets/guides/example-action.yml [.github/workflows/build.yml]
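The example-action.yml snippet is referenced above but not reproduced in this diff. As a rough, hypothetical sketch of the idea (the install step, action versions, and the `nix run github:forallsys/wire` invocation are assumptions, not the shipped snippet), a two-bucket partition matrix might look like:

```yaml
# Hypothetical sketch only; see doc/snippets/guides/example-action.yml for the real example.
name: Build hive
on: [push]
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        partition: [1, 2] # one job per --partition bucket
    steps:
      - uses: actions/checkout@v4
      - uses: cachix/install-nix-action@v31
      - name: Build partition ${{ matrix.partition }} of 2
        run: nix run github:forallsys/wire -- build --partition ${{ matrix.partition }}/2
```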
+4
-4
doc/guides/installation.md
+4
-4
doc/guides/installation.md
···
19
19
20
20
## Binary Cache
21
21
22
-
You should enable the [garnix binary cache](https://garnix.io/docs/caching)
23
-
_before_ continuing otherwise you will be compiling from source:
22
+
You should enable the [garnix binary cache](https://garnix.io/docs/caching) _before_
23
+
continuing otherwise you will be compiling from source:
24
24
25
25
::: code-group
26
26
<<< @/snippets/tutorial/cache.conf [nix.conf]
···
45
45
you'd like, really.
46
46
47
47
```sh
48
-
$ npins add github mrshmllow wire --branch stable
48
+
$ npins add github forallsys wire --branch stable
49
49
```
50
50
51
51
Alternatively, you can use a tag instead:
52
52
53
53
```sh
54
-
$ npins add github mrshmllow wire --at v1.0.0
54
+
$ npins add github forallsys wire --at v1.1.1
55
55
```
56
56
57
57
Then, use this pinned version of wire for both your `hive.nix` and `shell.nix`:
+1
-1
doc/guides/migrate.md
+1
-1
doc/guides/migrate.md
···
41
41
inputs = {
42
42
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
43
43
colmena.url = "github:zhaofengli/colmena"; # [!code --]
44
-
wire.url = "github:mrshmllow/wire/stable"; # [!code ++]
44
+
wire.url = "github:forallsys/wire/stable"; # [!code ++]
45
45
};
46
46
outputs =
47
47
{ nixpkgs, colmena, ... }:
+4
doc/guides/writing-a-hive.md
+4
doc/guides/writing-a-hive.md
···
23
23
# type: attrset
24
24
specialArgs = { };
25
25
26
+
# `meta.nodeSpecialArgs` lets you override `meta.specialArgs` per-node.
27
+
# type: attrset of attrset
28
+
nodeSpecialArgs: = { };
29
+
26
30
# `meta.nodeNixpkgs` lets you override nixpkgs per-node.
27
31
# type: attrset of "A path or an instance of nixpkgs."
28
32
nodeNixpkgs: = { };
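As a minimal, hypothetical sketch of the `meta.nodeSpecialArgs` override described above (the node name `web01` and the `deploymentTier` argument are made up for illustration):

```nix
{
  meta = {
    # passed to every node
    specialArgs = { deploymentTier = "prod"; };
    # per-node overrides, keyed by node name
    nodeSpecialArgs = {
      web01 = { deploymentTier = "staging"; };
    };
  };
}
```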
+1
-1
doc/index.md
+1
-1
doc/index.md
+1
-2
doc/package.json
+1
-2
doc/package.json
···
1
1
{
2
2
"name": "wire-docs",
3
-
"version": "1.0.0",
3
+
"version": "1.1.1",
4
4
"type": "module",
5
5
"devDependencies": {
6
6
"vitepress": "^1.6.4",
···
18
18
},
19
19
"dependencies": {
20
20
"markdown-it-footnote": "^4.0.0",
21
-
"vitepress-plugin-comment-with-giscus": "^1.1.15",
22
21
"vitepress-plugin-group-icons": "^1.6.5"
23
22
}
24
23
}
+1
-1
doc/package.nix
+1
-1
doc/package.nix
···
52
52
pnpmDeps = pnpm.fetchDeps {
53
53
inherit (finalAttrs) pname version src;
54
54
fetcherVersion = 1;
55
-
hash = "sha256-oh1r2YizJtHjEgWaHYEBeD4w0ts0oUGK98z4T/kj4d8=";
55
+
hash = "sha256-ydgb5NCFsYaDbmLjBqu91MqKj/I3TKpNLjOvyP+aY8o=";
56
56
};
57
57
patchPhase = ''
58
58
cat ${optionsDoc} >> ./reference/module.md
+68
-134
doc/pnpm-lock.yaml
+68
-134
doc/pnpm-lock.yaml
···
11
11
markdown-it-footnote:
12
12
specifier: ^4.0.0
13
13
version: 4.0.0
14
-
vitepress-plugin-comment-with-giscus:
15
-
specifier: ^1.1.15
16
-
version: 1.1.15(vue@3.5.25)
17
14
vitepress-plugin-group-icons:
18
15
specifier: ^1.6.5
19
16
version: 1.6.5(vite@5.4.21)
···
23
20
version: 1.6.4(@algolia/client-search@5.46.0)(postcss@8.5.6)(search-insights@2.17.3)
24
21
vue:
25
22
specifier: ^3.5.25
26
-
version: 3.5.25
23
+
version: 3.5.26
27
24
28
25
packages:
29
26
···
284
281
cpu: [x64]
285
282
os: [win32]
286
283
287
-
'@giscus/vue@2.4.0':
288
-
resolution: {integrity: sha512-QOxKHgsMT91myyQagP2v20YYAei1ByZuc3qcaYxbHx4AwOeyVrybDIuRFwG9YDv6OraC86jYnU4Ixd37ddC/0A==}
289
-
peerDependencies:
290
-
vue: '>=3.2.0'
291
-
292
284
'@iconify-json/logos@1.2.10':
293
285
resolution: {integrity: sha512-qxaXKJ6fu8jzTMPQdHtNxlfx6tBQ0jXRbHZIYy5Ilh8Lx9US9FsAdzZWUR8MXV8PnWTKGDFO4ZZee9VwerCyMA==}
294
286
···
306
298
307
299
'@jridgewell/sourcemap-codec@1.5.5':
308
300
resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}
309
-
310
-
'@lit-labs/ssr-dom-shim@1.4.0':
311
-
resolution: {integrity: sha512-ficsEARKnmmW5njugNYKipTm4SFnbik7CXtoencDZzmzo/dQ+2Q0bgkzJuoJP20Aj0F+izzJjOqsnkd6F/o1bw==}
312
-
313
-
'@lit/reactive-element@2.1.1':
314
-
resolution: {integrity: sha512-N+dm5PAYdQ8e6UlywyyrgI2t++wFGXfHx+dSJ1oBrg6FAxUj40jId++EaRm80MKX5JnlH1sBsyZ5h0bcZKemCg==}
315
301
316
302
'@rollup/rollup-android-arm-eabi@4.53.5':
317
303
resolution: {integrity: sha512-iDGS/h7D8t7tvZ1t6+WPK04KD0MwzLZrG0se1hzBjSi5fyxlsiggoJHwh18PCFNn7tG43OWb6pdZ6Y+rMlmyNQ==}
···
465
451
'@types/mdurl@2.0.0':
466
452
resolution: {integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==}
467
453
468
-
'@types/trusted-types@2.0.7':
469
-
resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==}
470
-
471
454
'@types/unist@3.0.3':
472
455
resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==}
473
456
···
484
467
vite: ^5.0.0 || ^6.0.0
485
468
vue: ^3.2.25
486
469
487
-
'@vue/compiler-core@3.5.25':
488
-
resolution: {integrity: sha512-vay5/oQJdsNHmliWoZfHPoVZZRmnSWhug0BYT34njkYTPqClh3DNWLkZNJBVSjsNMrg0CCrBfoKkjZQPM/QVUw==}
470
+
'@vue/compiler-core@3.5.26':
471
+
resolution: {integrity: sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==}
489
472
490
-
'@vue/compiler-dom@3.5.25':
491
-
resolution: {integrity: sha512-4We0OAcMZsKgYoGlMjzYvaoErltdFI2/25wqanuTu+S4gismOTRTBPi4IASOjxWdzIwrYSjnqONfKvuqkXzE2Q==}
473
+
'@vue/compiler-dom@3.5.26':
474
+
resolution: {integrity: sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==}
492
475
493
-
'@vue/compiler-sfc@3.5.25':
494
-
resolution: {integrity: sha512-PUgKp2rn8fFsI++lF2sO7gwO2d9Yj57Utr5yEsDf3GNaQcowCLKL7sf+LvVFvtJDXUp/03+dC6f2+LCv5aK1ag==}
476
+
'@vue/compiler-sfc@3.5.26':
477
+
resolution: {integrity: sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==}
495
478
496
-
'@vue/compiler-ssr@3.5.25':
497
-
resolution: {integrity: sha512-ritPSKLBcParnsKYi+GNtbdbrIE1mtuFEJ4U1sWeuOMlIziK5GtOL85t5RhsNy4uWIXPgk+OUdpnXiTdzn8o3A==}
479
+
'@vue/compiler-ssr@3.5.26':
480
+
resolution: {integrity: sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==}
498
481
499
482
'@vue/devtools-api@7.7.9':
500
483
resolution: {integrity: sha512-kIE8wvwlcZ6TJTbNeU2HQNtaxLx3a84aotTITUuL/4bzfPxzajGBOoqjMhwZJ8L9qFYDU/lAYMEEm11dnZOD6g==}
···
505
488
'@vue/devtools-shared@7.7.9':
506
489
resolution: {integrity: sha512-iWAb0v2WYf0QWmxCGy0seZNDPdO3Sp5+u78ORnyeonS6MT4PC7VPrryX2BpMJrwlDeaZ6BD4vP4XKjK0SZqaeA==}
507
490
508
-
'@vue/reactivity@3.5.25':
509
-
resolution: {integrity: sha512-5xfAypCQepv4Jog1U4zn8cZIcbKKFka3AgWHEFQeK65OW+Ys4XybP6z2kKgws4YB43KGpqp5D/K3go2UPPunLA==}
491
+
'@vue/reactivity@3.5.26':
492
+
resolution: {integrity: sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==}
510
493
511
-
'@vue/runtime-core@3.5.25':
512
-
resolution: {integrity: sha512-Z751v203YWwYzy460bzsYQISDfPjHTl+6Zzwo/a3CsAf+0ccEjQ8c+0CdX1WsumRTHeywvyUFtW6KvNukT/smA==}
494
+
'@vue/runtime-core@3.5.26':
495
+
resolution: {integrity: sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==}
513
496
514
-
'@vue/runtime-dom@3.5.25':
515
-
resolution: {integrity: sha512-a4WrkYFbb19i9pjkz38zJBg8wa/rboNERq3+hRRb0dHiJh13c+6kAbgqCPfMaJ2gg4weWD3APZswASOfmKwamA==}
497
+
'@vue/runtime-dom@3.5.26':
498
+
resolution: {integrity: sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==}
516
499
517
-
'@vue/server-renderer@3.5.25':
518
-
resolution: {integrity: sha512-UJaXR54vMG61i8XNIzTSf2Q7MOqZHpp8+x3XLGtE3+fL+nQd+k7O5+X3D/uWrnQXOdMw5VPih+Uremcw+u1woQ==}
500
+
'@vue/server-renderer@3.5.26':
501
+
resolution: {integrity: sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==}
519
502
peerDependencies:
520
-
vue: 3.5.25
503
+
vue: 3.5.26
521
504
522
505
'@vue/shared@3.5.25':
523
506
resolution: {integrity: sha512-AbOPdQQnAnzs58H2FrrDxYj/TJfmeS2jdfEEhgiKINy+bnOANmVizIEgq1r+C5zsbs6l1CCQxtcj71rwNQ4jWg==}
507
+
508
+
'@vue/shared@3.5.26':
509
+
resolution: {integrity: sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==}
524
510
525
511
'@vueuse/core@12.8.2':
526
512
resolution: {integrity: sha512-HbvCmZdzAu3VGi/pWYm5Ut+Kd9mn1ZHnn4L5G8kOQTPs/IwIAmJoBrmYk2ckLArgMXZj0AW3n5CAejLUO+PhdQ==}
···
616
602
emoji-regex-xs@1.0.0:
617
603
resolution: {integrity: sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==}
618
604
619
-
entities@4.5.0:
620
-
resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==}
605
+
entities@7.0.0:
606
+
resolution: {integrity: sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==}
621
607
engines: {node: '>=0.12'}
622
608
623
609
esbuild@0.21.5:
···
636
622
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
637
623
os: [darwin]
638
624
639
-
giscus@1.6.0:
640
-
resolution: {integrity: sha512-Zrsi8r4t1LVW950keaWcsURuZUQwUaMKjvJgTCY125vkW6OiEBkatE7ScJDbpqKHdZwb///7FVC21SE3iFK3PQ==}
641
-
642
625
hast-util-to-html@9.0.5:
643
626
resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==}
644
627
···
654
637
is-what@5.5.0:
655
638
resolution: {integrity: sha512-oG7cgbmg5kLYae2N5IVd3jm2s+vldjxJzK1pcu9LfpGuQ93MQSzo0okvRna+7y5ifrD+20FE8FvjusyGaz14fw==}
656
639
engines: {node: '>=18'}
657
-
658
-
lit-element@4.2.1:
659
-
resolution: {integrity: sha512-WGAWRGzirAgyphK2urmYOV72tlvnxw7YfyLDgQ+OZnM9vQQBQnumQ7jUJe6unEzwGU3ahFOjuz1iz1jjrpCPuw==}
660
-
661
-
lit-html@3.3.1:
662
-
resolution: {integrity: sha512-S9hbyDu/vs1qNrithiNyeyv64c9yqiW9l+DBgI18fL+MTvOtWoFR0FWiyq1TxaYef5wNlpEmzlXoBlZEO+WjoA==}
663
-
664
-
lit@3.3.1:
665
-
resolution: {integrity: sha512-Ksr/8L3PTapbdXJCk+EJVB78jDodUMaP54gD24W186zGRARvwrsPfS60wae/SSCTCNZVPd1chXqio1qHQmu4NA==}
666
640
667
641
magic-string@0.30.21:
668
642
resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==}
···
839
813
terser:
840
814
optional: true
841
815
842
-
vitepress-plugin-comment-with-giscus@1.1.15:
843
-
resolution: {integrity: sha512-1DJjgN+7SYvn5ZkjuSXPmz7nlqfcrh4qCGGviiZghA2ELXnaO2m9WY7m+RisPSaqCn90xqe0JbO2T4NMq8iUBg==}
844
-
845
816
vitepress-plugin-group-icons@1.6.5:
846
817
resolution: {integrity: sha512-+pg4+GKDq2fLqKb1Sat5p1p4SuIZ5tEPxu8HjpwoeecZ/VaXKy6Bdf0wyjedjaTAyZQzXbvyavJegqAcQ+B0VA==}
847
818
peerDependencies:
···
862
833
postcss:
863
834
optional: true
864
835
865
-
vue@3.5.25:
866
-
resolution: {integrity: sha512-YLVdgv2K13WJ6n+kD5owehKtEXwdwXuj2TTyJMsO7pSeKw2bfRNZGjhB7YzrpbMYj5b5QsUebHpOqR3R3ziy/g==}
836
+
vue@3.5.26:
837
+
resolution: {integrity: sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==}
867
838
peerDependencies:
868
839
typescript: '*'
869
840
peerDependenciesMeta:
···
1098
1069
'@esbuild/win32-x64@0.21.5':
1099
1070
optional: true
1100
1071
1101
-
'@giscus/vue@2.4.0(vue@3.5.25)':
1102
-
dependencies:
1103
-
giscus: 1.6.0
1104
-
vue: 3.5.25
1105
-
1106
1072
'@iconify-json/logos@1.2.10':
1107
1073
dependencies:
1108
1074
'@iconify/types': 2.0.0
···
1124
1090
mlly: 1.8.0
1125
1091
1126
1092
'@jridgewell/sourcemap-codec@1.5.5': {}
1127
-
1128
-
'@lit-labs/ssr-dom-shim@1.4.0': {}
1129
-
1130
-
'@lit/reactive-element@2.1.1':
1131
-
dependencies:
1132
-
'@lit-labs/ssr-dom-shim': 1.4.0
1133
1093
1134
1094
'@rollup/rollup-android-arm-eabi@4.53.5':
1135
1095
optional: true
···
1256
1216
1257
1217
'@types/mdurl@2.0.0': {}
1258
1218
1259
-
'@types/trusted-types@2.0.7': {}
1260
-
1261
1219
'@types/unist@3.0.3': {}
1262
1220
1263
1221
'@types/web-bluetooth@0.0.21': {}
1264
1222
1265
1223
'@ungap/structured-clone@1.3.0': {}
1266
1224
1267
-
'@vitejs/plugin-vue@5.2.4(vite@5.4.21)(vue@3.5.25)':
1225
+
'@vitejs/plugin-vue@5.2.4(vite@5.4.21)(vue@3.5.26)':
1268
1226
dependencies:
1269
1227
vite: 5.4.21
1270
-
vue: 3.5.25
1228
+
vue: 3.5.26
1271
1229
1272
-
'@vue/compiler-core@3.5.25':
1230
+
'@vue/compiler-core@3.5.26':
1273
1231
dependencies:
1274
1232
'@babel/parser': 7.28.5
1275
-
'@vue/shared': 3.5.25
1276
-
entities: 4.5.0
1233
+
'@vue/shared': 3.5.26
1234
+
entities: 7.0.0
1277
1235
estree-walker: 2.0.2
1278
1236
source-map-js: 1.2.1
1279
1237
1280
-
'@vue/compiler-dom@3.5.25':
1238
+
'@vue/compiler-dom@3.5.26':
1281
1239
dependencies:
1282
-
'@vue/compiler-core': 3.5.25
1283
-
'@vue/shared': 3.5.25
1240
+
'@vue/compiler-core': 3.5.26
1241
+
'@vue/shared': 3.5.26
1284
1242
1285
-
'@vue/compiler-sfc@3.5.25':
1243
+
'@vue/compiler-sfc@3.5.26':
1286
1244
dependencies:
1287
1245
'@babel/parser': 7.28.5
1288
-
'@vue/compiler-core': 3.5.25
1289
-
'@vue/compiler-dom': 3.5.25
1290
-
'@vue/compiler-ssr': 3.5.25
1291
-
'@vue/shared': 3.5.25
1246
+
'@vue/compiler-core': 3.5.26
1247
+
'@vue/compiler-dom': 3.5.26
1248
+
'@vue/compiler-ssr': 3.5.26
1249
+
'@vue/shared': 3.5.26
1292
1250
estree-walker: 2.0.2
1293
1251
magic-string: 0.30.21
1294
1252
postcss: 8.5.6
1295
1253
source-map-js: 1.2.1
1296
1254
1297
-
'@vue/compiler-ssr@3.5.25':
1255
+
'@vue/compiler-ssr@3.5.26':
1298
1256
dependencies:
1299
-
'@vue/compiler-dom': 3.5.25
1300
-
'@vue/shared': 3.5.25
1257
+
'@vue/compiler-dom': 3.5.26
1258
+
'@vue/shared': 3.5.26
1301
1259
1302
1260
'@vue/devtools-api@7.7.9':
1303
1261
dependencies:
···
1317
1275
dependencies:
1318
1276
rfdc: 1.4.1
1319
1277
1320
-
'@vue/reactivity@3.5.25':
1278
+
'@vue/reactivity@3.5.26':
1321
1279
dependencies:
1322
-
'@vue/shared': 3.5.25
1280
+
'@vue/shared': 3.5.26
1323
1281
1324
-
'@vue/runtime-core@3.5.25':
1282
+
'@vue/runtime-core@3.5.26':
1325
1283
dependencies:
1326
-
'@vue/reactivity': 3.5.25
1327
-
'@vue/shared': 3.5.25
1284
+
'@vue/reactivity': 3.5.26
1285
+
'@vue/shared': 3.5.26
1328
1286
1329
-
'@vue/runtime-dom@3.5.25':
1287
+
'@vue/runtime-dom@3.5.26':
1330
1288
dependencies:
1331
-
'@vue/reactivity': 3.5.25
1332
-
'@vue/runtime-core': 3.5.25
1333
-
'@vue/shared': 3.5.25
1289
+
'@vue/reactivity': 3.5.26
1290
+
'@vue/runtime-core': 3.5.26
1291
+
'@vue/shared': 3.5.26
1334
1292
csstype: 3.2.3
1335
1293
1336
-
'@vue/server-renderer@3.5.25(vue@3.5.25)':
1294
+
'@vue/server-renderer@3.5.26(vue@3.5.26)':
1337
1295
dependencies:
1338
-
'@vue/compiler-ssr': 3.5.25
1339
-
'@vue/shared': 3.5.25
1340
-
vue: 3.5.25
1296
+
'@vue/compiler-ssr': 3.5.26
1297
+
'@vue/shared': 3.5.26
1298
+
vue: 3.5.26
1341
1299
1342
1300
'@vue/shared@3.5.25': {}
1301
+
1302
+
'@vue/shared@3.5.26': {}
1343
1303
1344
1304
'@vueuse/core@12.8.2':
1345
1305
dependencies:
1346
1306
'@types/web-bluetooth': 0.0.21
1347
1307
'@vueuse/metadata': 12.8.2
1348
1308
'@vueuse/shared': 12.8.2
1349
-
vue: 3.5.25
1309
+
vue: 3.5.26
1350
1310
transitivePeerDependencies:
1351
1311
- typescript
1352
1312
···
1354
1314
dependencies:
1355
1315
'@vueuse/core': 12.8.2
1356
1316
'@vueuse/shared': 12.8.2
1357
-
vue: 3.5.25
1317
+
vue: 3.5.26
1358
1318
optionalDependencies:
1359
1319
focus-trap: 7.6.6
1360
1320
transitivePeerDependencies:
···
1364
1324
1365
1325
'@vueuse/shared@12.8.2':
1366
1326
dependencies:
1367
-
vue: 3.5.25
1327
+
vue: 3.5.26
1368
1328
transitivePeerDependencies:
1369
1329
- typescript
1370
1330
···
1413
1373
1414
1374
emoji-regex-xs@1.0.0: {}
1415
1375
1416
-
entities@4.5.0: {}
1376
+
entities@7.0.0: {}
1417
1377
1418
1378
esbuild@0.21.5:
1419
1379
optionalDependencies:
···
1450
1410
fsevents@2.3.3:
1451
1411
optional: true
1452
1412
1453
-
giscus@1.6.0:
1454
-
dependencies:
1455
-
lit: 3.3.1
1456
-
1457
1413
hast-util-to-html@9.0.5:
1458
1414
dependencies:
1459
1415
'@types/hast': 3.0.4
···
1478
1434
1479
1435
is-what@5.5.0: {}
1480
1436
1481
-
lit-element@4.2.1:
1482
-
dependencies:
1483
-
'@lit-labs/ssr-dom-shim': 1.4.0
1484
-
'@lit/reactive-element': 2.1.1
1485
-
lit-html: 3.3.1
1486
-
1487
-
lit-html@3.3.1:
1488
-
dependencies:
1489
-
'@types/trusted-types': 2.0.7
1490
-
1491
-
lit@3.3.1:
1492
-
dependencies:
1493
-
'@lit/reactive-element': 2.1.1
1494
-
lit-element: 4.2.1
1495
-
lit-html: 3.3.1
1496
-
1497
1437
magic-string@0.30.21:
1498
1438
dependencies:
1499
1439
'@jridgewell/sourcemap-codec': 1.5.5
···
1691
1631
optionalDependencies:
1692
1632
fsevents: 2.3.3
1693
1633
1694
-
vitepress-plugin-comment-with-giscus@1.1.15(vue@3.5.25):
1695
-
dependencies:
1696
-
'@giscus/vue': 2.4.0(vue@3.5.25)
1697
-
transitivePeerDependencies:
1698
-
- vue
1699
-
1700
1634
vitepress-plugin-group-icons@1.6.5(vite@5.4.21):
1701
1635
dependencies:
1702
1636
'@iconify-json/logos': 1.2.10
···
1714
1648
'@shikijs/transformers': 2.5.0
1715
1649
'@shikijs/types': 2.5.0
1716
1650
'@types/markdown-it': 14.1.2
1717
-
'@vitejs/plugin-vue': 5.2.4(vite@5.4.21)(vue@3.5.25)
1651
+
'@vitejs/plugin-vue': 5.2.4(vite@5.4.21)(vue@3.5.26)
1718
1652
'@vue/devtools-api': 7.7.9
1719
1653
'@vue/shared': 3.5.25
1720
1654
'@vueuse/core': 12.8.2
···
1724
1658
minisearch: 7.2.0
1725
1659
shiki: 2.5.0
1726
1660
vite: 5.4.21
1727
-
vue: 3.5.25
1661
+
vue: 3.5.26
1728
1662
optionalDependencies:
1729
1663
postcss: 8.5.6
1730
1664
transitivePeerDependencies:
···
1754
1688
- typescript
1755
1689
- universal-cookie
1756
1690
1757
-
vue@3.5.25:
1691
+
vue@3.5.26:
1758
1692
dependencies:
1759
-
'@vue/compiler-dom': 3.5.25
1760
-
'@vue/compiler-sfc': 3.5.25
1761
-
'@vue/runtime-dom': 3.5.25
1762
-
'@vue/server-renderer': 3.5.25(vue@3.5.25)
1763
-
'@vue/shared': 3.5.25
1693
+
'@vue/compiler-dom': 3.5.26
1694
+
'@vue/compiler-sfc': 3.5.26
1695
+
'@vue/runtime-dom': 3.5.26
1696
+
'@vue/server-renderer': 3.5.26(vue@3.5.26)
1697
+
'@vue/shared': 3.5.26
1764
1698
1765
1699
zwitch@2.0.4: {}
+18
doc/reference/meta.md
···
61
61
}
62
62
```
63
63
64
+
## meta.nodeSpecialArgs
65
+
66
+
Extra `specialArgs` to override `meta.specialArgs` for each node.
67
+
68
+
_Type:_ attribute set of attribute set
69
+
70
+
_Default:_ `{ }`
71
+
72
+
_Example:_
73
+
74
+
```nix
75
+
{
76
+
meta.nodeSpecialArgs = {
77
+
extra-property = "some-value";
78
+
};
79
+
}
80
+
```
81
+
64
82
## meta.nodeNixpkgs
65
83
66
84
Per-node nixpkgs to override `meta.nixpkgs`.
+1
-1
doc/snippets/getting-started/configuration.nix
···
1
1
{system, ...}: let
2
2
wire = import (
3
3
# [!code ++]
4
-
builtins.fetchTarball "https://github.com/mrshmllow/wire/archive/refs/heads/trunk.tar.gz" # [!code ++]
4
+
builtins.fetchTarball "https://github.com/forallsys/wire/archive/refs/heads/trunk.tar.gz" # [!code ++]
5
5
); # [!code ++]
6
6
in {
7
7
environment.systemPackages = [
+1
-1
doc/snippets/getting-started/flake-merged.nix
+1
-1
doc/snippets/getting-started/flake.nix
+1
-1
doc/snippets/getting-started/nixos.flake.nix
+40
doc/snippets/guides/example-action.yml
···
1
+
name: Build
2
+
3
+
on:
4
+
push:
5
+
branches: [main]
6
+
7
+
jobs:
8
+
build-partitioned:
9
+
name: Build Partitioned
10
+
runs-on: ubuntu-latest
11
+
permissions: {}
12
+
strategy:
13
+
matrix:
14
+
# Break into 4 partitions
15
+
partition: [1, 2, 3, 4]
16
+
steps:
17
+
- uses: actions/checkout@v6
18
+
with:
19
+
persist-credentials: false
20
+
# This will likely be required if you have multiple architectures
21
+
# in your hive.
22
+
- name: Set up QEMU
23
+
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130
24
+
- uses: cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15
25
+
with:
26
+
nix_path: nixpkgs=channel:nixos-unstable
27
+
extra_nix_config: |
28
+
# Install binary cache as described in the install wire guide
29
+
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= cache.garnix.io:CTFPyKSLcx5RMJKfLo5EEPUObbA78b0YQ2DTCJXqr9g=
30
+
substituters = https://cache.nixos.org/ https://cache.garnix.io
31
+
32
+
# Again, include additional architectures if you have multiple
33
+
# architectures in your hive
34
+
extra-platforms = aarch64-linux i686-linux
35
+
# Uses wire from your shell (as described in the install wire guide).
36
+
- name: Build partition ${{ matrix.partition }}
37
+
run: |
38
+
nix develop -Lvc wire build \
39
+
--parallel 1 \
40
+
--partition ${{ matrix.partition }}/4
+2
-2
doc/snippets/guides/installation/flake.nix
···
1
1
{
2
2
inputs = {
3
3
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
4
-
wire.url = "github:mrshmllow/wire/stable";
4
+
wire.url = "github:forallsys/wire/stable";
5
5
6
6
# alternatively, you can use a tag instead:
7
-
# wire.url = "github:mrshmllow/wire/v1.0.0";
7
+
# wire.url = "github:forallsys/wire/v1.1.1";
8
8
9
9
systems.url = "github:nix-systems/default";
10
10
};
-4
doc/tutorial/overview.md
···
10
10
11
11
[^1]: A lot of your colmena module options will continue to work with wire, but wire has additional ergonomic changes you can take advantage of.
12
12
13
-
::: warning
14
-
wire is alpha software, please use at your own risk.
15
-
:::
16
-
17
13
---
18
14
19
15
In this tutorial we will create and deploy a wire Hive. Along the way we will
+4
-4
doc/tutorial/part-one/repo-setup.md
···
39
39
40
40
## Adding wire as a dependency
41
41
42
-
We can now need to tell `npins` to use `mrshmllow/wire` as a dependency.
42
+
We can now need to tell `npins` to use `forallsys/wire` as a dependency.
43
43
44
44
```sh
45
-
[nix-shell]$ npins add github mrshmllow wire --branch stable
45
+
[nix-shell]$ npins add github forallsys wire --branch stable
46
46
[INFO ] Adding 'wire' …
47
-
repository: https://github.com/mrshmllow/wire.git
47
+
repository: https://github.com/forallsys/wire.git
48
48
pre_releases: false
49
49
submodules: false
50
50
version: v0.4.0
···
68
68
frozen: false
69
69
70
70
wire: (git release tag)
71
-
repository: https://github.com/mrshmllow/wire.git
71
+
repository: https://github.com/forallsys/wire.git
72
72
pre_releases: false
73
73
submodules: false
74
74
version: v0.4.0
+3
-3
flake.nix
···
37
37
./nix/utils.nix # utility functions
38
38
./nix/shells.nix
39
39
./nix/tests.nix
40
-
./wire/cli
41
-
./wire/key_agent
40
+
./crates/cli
41
+
./crates/key_agent
42
42
./doc
43
43
./tests/nix
44
44
./runtime
···
86
86
# docs only
87
87
alejandra.enable = true;
88
88
rustfmt.enable = true;
89
-
just.enable = true;
90
89
prettier.enable = true;
91
90
protolint.enable = true;
92
91
taplo.enable = true;
92
+
ruff-format.enable = true;
93
93
};
94
94
settings.formatter = {
95
95
nixfmt.excludes = [ "doc/snippets/*.nix" ];
+14
-7
garnix.yaml
···
1
1
builds:
2
-
exclude: []
3
-
include:
4
-
- packages.x86_64-linux.docs
5
-
- packages.x86_64-linux.docs-unstable
6
-
- packages.*.wire
7
-
- packages.*.wire-small
8
-
branch: trunk
2
+
- exclude: []
3
+
include:
4
+
- packages.x86_64-linux.docs
5
+
- packages.x86_64-linux.docs-unstable
6
+
- packages.*.wire
7
+
- packages.*.wire-small
8
+
branch: trunk
9
+
- exclude: []
10
+
include:
11
+
- packages.x86_64-linux.docs
12
+
- packages.x86_64-linux.docs-unstable
13
+
- packages.*.wire
14
+
- packages.*.wire-small
15
+
branch: stable
-7
justfile
+20
nix/hooks.nix
···
4
4
toolchain,
5
5
config,
6
6
lib,
7
+
pkgs,
7
8
...
8
9
}:
9
10
{
···
15
16
enable = true;
16
17
settings.edit = true;
17
18
};
19
+
zizmor.enable = true;
18
20
clippy = {
19
21
enable = true;
20
22
settings.extraArgs = "--tests";
···
22
24
inherit (toolchain) cargo clippy;
23
25
};
24
26
};
27
+
ruff.enable = true;
25
28
cargo-check = {
26
29
enable = true;
27
30
package = toolchain.cargo;
···
30
33
enable = true;
31
34
name = "nix fmt";
32
35
entry = "${lib.getExe config.formatter} --no-cache";
36
+
};
37
+
ty = {
38
+
enable = true;
39
+
name = "ty check";
40
+
files = "\\.py$";
41
+
entry = lib.getExe (
42
+
pkgs.writeShellScriptBin "ty-check" ''
43
+
cd tests/nix
44
+
${lib.getExe pkgs.uv} run ty check
45
+
''
46
+
);
47
+
};
48
+
machete = {
49
+
enable = true;
50
+
name = "cargo-machete";
51
+
files = "\\.(rs|toml)$";
52
+
entry = lib.getExe pkgs.cargo-machete;
33
53
};
34
54
typos = {
35
55
enable = true;
+1
-1
nix/shells.nix
+1
-1
nix/utils.nix
+2
-1
runtime/evaluate.nix
+1
tests/nix/.python-version
···
1
+
3.13
+9
-1
tests/nix/default.nix
···
23
23
lazyAttrsOf
24
24
;
25
25
cfg = config.wire.testing;
26
+
27
+
stripTyping =
28
+
value:
29
+
let
30
+
split = builtins.split "(from typing import TYPE_CHECKING|# typing-end)" value;
31
+
in
32
+
(builtins.elemAt split 0) + (builtins.elemAt split 4);
26
33
in
27
34
{
28
35
imports = [
···
44
51
type = lines;
45
52
default = '''';
46
53
description = "test script for runNixOSTest";
54
+
apply = stripTyping;
47
55
};
48
56
testDir = mkOption {
49
57
default = "${self}/tests/nix/suite/${name}";
···
163
171
164
172
TEST_DIR="${injectedFlakeDir}/${path}"
165
173
166
-
${builtins.readFile ./tools.py}
174
+
${stripTyping (builtins.readFile ./tools/__init__.py)}
167
175
''
168
176
+ lib.concatStringsSep "\n" (mapAttrsToList (_: value: value._wire.testScript) value.nodes)
169
177
+ opts.testScript;
+18
tests/nix/pyproject.toml
···
1
+
[project]
2
+
name = "wire-vm-tests"
3
+
version = "0.0.0"
4
+
requires-python = ">=3.13"
5
+
dependencies = [
6
+
"colorama>=0.4.6",
7
+
"ipython>=9.8.0",
8
+
"junit-xml>=1.9",
9
+
"nixos-test-driver",
10
+
"ptpython>=3.0.32",
11
+
"remote-pdb>=2.1.0",
12
+
]
13
+
14
+
[tool.uv.sources]
15
+
nixos-test-driver = { git = "https://github.com/NixOS/nixpkgs", subdirectory = "nixos/lib/test-driver/src", branch = "nixos-25.11" }
16
+
17
+
[dependency-groups]
18
+
dev = ["ty>=0.0.4"]
+1
-82
tests/nix/suite/test_keys/default.nix
···
10
10
nodes.receiver = {
11
11
_wire.receiver = true;
12
12
};
13
-
testScript = ''
14
-
deployer_so = collect_store_objects(deployer)
15
-
receiver_so = collect_store_objects(receiver)
16
-
17
-
# build receiver with no keys
18
-
deployer.succeed(f"wire apply --no-progress --on receiver --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2")
19
-
20
-
receiver.wait_for_unit("sshd.service")
21
-
22
-
# --no-keys should never push a key
23
-
receiver.fail("test -f /run/keys/source_string_name")
24
-
deployer.fail("test -f /run/keys/source_string_name")
25
-
26
-
# key services are created
27
-
receiver.succeed("systemctl cat source_string_name-key.service")
28
-
29
-
_, is_failed = receiver.execute("systemctl is-failed source_string_name-key.service")
30
-
assert is_failed == "inactive\n", f"source_string_name-key.service must be inactive before key exists ({is_failed})"
31
-
32
-
def test_keys(target, target_object, non_interactive):
33
-
if non_interactive:
34
-
deployer.succeed(f"wire apply keys --on {target} --no-progress --path {TEST_DIR}/hive.nix --non-interactive --ssh-accept-host -vvv >&2")
35
-
else:
36
-
deployer.succeed(f"wire apply keys --on {target} --no-progress --path {TEST_DIR}/hive.nix --ssh-accept-host -vvv >&2")
37
-
38
-
keys = [
39
-
("/run/keys/source_string_name", "hello_world_source", "root root 600", "source_string_name"),
40
-
("/etc/keys/file", "hello_world_file", "root root 644", "file"),
41
-
("/home/owner/some/deep/path/command", "hello_world_command", "owner owner 644", "command"),
42
-
("/run/keys/environment", "string_from_environment", "root root 600", "environment"),
43
-
]
44
-
45
-
for path, value, permissions, name in keys:
46
-
# test existence & value
47
-
source_string = target_object.succeed(f"cat {path}")
48
-
assert value in source_string, f"{path} has correct contents ({target})"
49
-
50
-
stat = target_object.succeed(f"stat -c '%U %G %a' {path}").rstrip()
51
-
assert permissions == stat, f"{path} has correct permissions ({target})"
52
-
53
-
def perform_routine(target, target_object, non_interactive):
54
-
test_keys(target, target_object, non_interactive)
55
-
56
-
# only check systemd units on receiver since deployer apply's are one time only
57
-
if target == "receiver":
58
-
target_object.succeed("systemctl start source_string_name-key.path")
59
-
target_object.succeed("systemctl start command-key.path")
60
-
target_object.wait_for_unit("source_string_name-key.service")
61
-
target_object.wait_for_unit("command-key.service")
62
-
63
-
# Mess with the keys to make sure that every push refreshes the permissions
64
-
target_object.succeed("echo 'incorrect_value' > /run/keys/source_string")
65
-
target_object.succeed("chown 600 /etc/keys/file")
66
-
# Test having a key that doesn't exist mixed with keys that do
67
-
target_object.succeed("rm /home/owner/some/deep/path/command")
68
-
69
-
if target == "receiver":
70
-
_, is_failed = target_object.execute("systemctl is-active command-key.service")
71
-
assert is_failed == "failed\n", f"command-key.service is failed after deletion ({is_failed})"
72
-
73
-
# Test keys twice to ensure the operation is idempotent,
74
-
# especially around directory creation.
75
-
test_keys(target, target_object, non_interactive)
76
-
77
-
perform_routine("receiver", receiver, True)
78
-
perform_routine("deployer", deployer, True)
79
-
perform_routine("receiver", receiver, False)
80
-
perform_routine("deployer", deployer, False)
81
-
82
-
new_deployer_store_objects = collect_store_objects(deployer).difference(deployer_so)
83
-
new_receiver_store_objects = collect_store_objects(receiver).difference(receiver_so)
84
-
85
-
# no one should have any keys introduced by the operation
86
-
for node, objects in [
87
-
(deployer, new_deployer_store_objects),
88
-
(receiver, new_receiver_store_objects),
89
-
]:
90
-
assert_store_not_poisoned(node, "hello_world_source", objects)
91
-
assert_store_not_poisoned(node, "hello_world_file", objects)
92
-
assert_store_not_poisoned(node, "hello_world_command", objects)
93
-
assert_store_not_poisoned(node, "string_from_environment", objects)
94
-
'';
13
+
testScript = builtins.readFile ./script.py;
95
14
};
96
15
}
+123
tests/nix/suite/test_keys/script.py
···
1
+
# SPDX-License-Identifier: AGPL-3.0-or-later
2
+
# Copyright 2024-2025 wire Contributors
3
+
4
+
from typing import TYPE_CHECKING
5
+
6
+
if TYPE_CHECKING:
7
+
from test_driver.machine import Machine
8
+
from tools import collect_store_objects, assert_store_not_poisoned
9
+
10
+
deployer: Machine = None # type: ignore[invalid-assignment]
11
+
receiver: Machine = None # type: ignore[invalid-assignment]
12
+
TEST_DIR = ""
13
+
14
+
# typing-end
15
+
16
+
deployer_so = collect_store_objects(deployer)
17
+
receiver_so = collect_store_objects(receiver)
18
+
19
+
# build receiver with no keys
20
+
deployer.succeed(
21
+
f"wire apply --no-progress --on receiver --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2"
22
+
)
23
+
24
+
receiver.wait_for_unit("sshd.service")
25
+
26
+
# --no-keys should never push a key
27
+
receiver.fail("test -f /run/keys/source_string_name")
28
+
deployer.fail("test -f /run/keys/source_string_name")
29
+
30
+
# key services are created
31
+
receiver.succeed("systemctl cat source_string_name-key.service")
32
+
33
+
_, is_failed = receiver.execute("systemctl is-failed source_string_name-key.service")
34
+
assert is_failed == "inactive\n", (
35
+
f"source_string_name-key.service must be inactive before key exists ({is_failed})"
36
+
)
37
+
38
+
39
+
def test_keys(target, target_object, non_interactive):
40
+
if non_interactive:
41
+
deployer.succeed(
42
+
f"wire apply keys --on {target} --no-progress --path {TEST_DIR}/hive.nix --non-interactive --ssh-accept-host -vvv >&2"
43
+
)
44
+
else:
45
+
deployer.succeed(
46
+
f"wire apply keys --on {target} --no-progress --path {TEST_DIR}/hive.nix --ssh-accept-host -vvv >&2"
47
+
)
48
+
49
+
keys = [
50
+
(
51
+
"/run/keys/source_string_name",
52
+
"hello_world_source",
53
+
"root root 600",
54
+
"source_string_name",
55
+
),
56
+
("/etc/keys/file", "hello_world_file", "root root 644", "file"),
57
+
(
58
+
"/home/owner/some/deep/path/command",
59
+
"hello_world_command",
60
+
"owner owner 644",
61
+
"command",
62
+
),
63
+
(
64
+
"/run/keys/environment",
65
+
"string_from_environment",
66
+
"root root 600",
67
+
"environment",
68
+
),
69
+
]
70
+
71
+
for path, value, permissions, name in keys:
72
+
# test existence & value
73
+
source_string = target_object.succeed(f"cat {path}")
74
+
assert value in source_string, f"{path} has correct contents ({target})"
75
+
76
+
stat = target_object.succeed(f"stat -c '%U %G %a' {path}").rstrip()
77
+
assert permissions == stat, f"{path} has correct permissions ({target})"
78
+
79
+
80
+
def perform_routine(target, target_object, non_interactive):
81
+
test_keys(target, target_object, non_interactive)
82
+
83
+
# only check systemd units on receiver since deployer applies are one time only
84
+
if target == "receiver":
85
+
target_object.succeed("systemctl start source_string_name-key.path")
86
+
target_object.succeed("systemctl start command-key.path")
87
+
target_object.wait_for_unit("source_string_name-key.service")
88
+
target_object.wait_for_unit("command-key.service")
89
+
90
+
# Mess with the keys to make sure that every push refreshes the permissions
91
+
target_object.succeed("echo 'incorrect_value' > /run/keys/source_string")
92
+
target_object.succeed("chown 600 /etc/keys/file")
93
+
# Test having a key that doesn't exist mixed with keys that do
94
+
target_object.succeed("rm /home/owner/some/deep/path/command")
95
+
96
+
if target == "receiver":
97
+
_, is_failed = target_object.execute("systemctl is-active command-key.service")
98
+
assert is_failed == "failed\n", (
99
+
f"command-key.service is failed after deletion ({is_failed})"
100
+
)
101
+
102
+
# Test keys twice to ensure the operation is idempotent,
103
+
# especially around directory creation.
104
+
test_keys(target, target_object, non_interactive)
105
+
106
+
107
+
perform_routine("receiver", receiver, True)
108
+
perform_routine("deployer", deployer, True)
109
+
perform_routine("receiver", receiver, False)
110
+
perform_routine("deployer", deployer, False)
111
+
112
+
new_deployer_store_objects = collect_store_objects(deployer).difference(deployer_so)
113
+
new_receiver_store_objects = collect_store_objects(receiver).difference(receiver_so)
114
+
115
+
# no one should have any keys introduced by the operation
116
+
for node, objects in [
117
+
(deployer, new_deployer_store_objects),
118
+
(receiver, new_receiver_store_objects),
119
+
]:
120
+
assert_store_not_poisoned(node, "hello_world_source", objects)
121
+
assert_store_not_poisoned(node, "hello_world_file", objects)
122
+
assert_store_not_poisoned(node, "hello_world_command", objects)
123
+
assert_store_not_poisoned(node, "string_from_environment", objects)
+1
-4
tests/nix/suite/test_local_deploy/default.nix
···
7
7
_wire.deployer = true;
8
8
_wire.receiver = true;
9
9
};
10
-
testScript = ''
11
-
deployer.succeed(f"wire apply --on deployer --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2")
12
-
deployer.succeed("test -f /etc/a")
13
-
'';
10
+
testScript = builtins.readFile ./script.py;
14
11
};
15
12
}
+17
tests/nix/suite/test_local_deploy/script.py
···
1
+
# SPDX-License-Identifier: AGPL-3.0-or-later
2
+
# Copyright 2024-2025 wire Contributors
3
+
4
+
from typing import TYPE_CHECKING
5
+
6
+
if TYPE_CHECKING:
7
+
from test_driver.machine import Machine
8
+
9
+
deployer: Machine = None # type: ignore[invalid-assignment]
10
+
TEST_DIR = ""
11
+
12
+
# typing-end
13
+
14
+
deployer.succeed(
15
+
f"wire apply --on deployer --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2"
16
+
)
17
+
deployer.succeed("test -f /etc/a")
+1
-36
tests/nix/suite/test_remote_deploy/default.nix
···
9
9
nodes.receiver = {
10
10
_wire.receiver = true;
11
11
};
12
-
testScript = ''
13
-
with subtest("Test unreachable hosts"):
14
-
deployer.fail(f"wire apply --on receiver-unreachable --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2")
15
-
16
-
with subtest("Check basic apply: Interactive"):
17
-
deployer.succeed(f"wire apply --on receiver --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2")
18
-
19
-
identity = receiver.succeed("cat /etc/identity")
20
-
assert identity == "first", "Identity of first apply wasn't as expected"
21
-
22
-
with subtest("Check basic apply: NonInteractive"):
23
-
deployer.succeed(f"wire apply --on receiver-third --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host --non-interactive -vvv >&2")
24
-
25
-
identity = receiver.succeed("cat /etc/identity")
26
-
assert identity == "third", "Identity of non-interactive apply wasn't as expected"
27
-
28
-
with subtest("Check boot apply"):
29
-
first_system = receiver.succeed("readlink -f /run/current-system")
30
-
31
-
deployer.succeed(f"wire apply boot --on receiver-second --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2")
32
-
33
-
_first_system = receiver.succeed("readlink -f /run/current-system")
34
-
assert first_system == _first_system, "apply boot without --reboot changed /run/current-system"
35
-
36
-
# with subtest("Check /etc/identity after reboot"):
37
-
# receiver.reboot()
38
-
#
39
-
# identity = receiver.succeed("cat /etc/identity")
40
-
# assert identity == "second", "Identity didn't change after second apply"
41
-
42
-
# with subtest("Check --reboot"):
43
-
# deployer.succeed(f"wire apply boot --on receiver-third --no-progress --path {TEST_DIR}/hive.nix --reboot --no-keys -vvv >&2")
44
-
#
45
-
# identity = receiver.succeed("cat /etc/identity")
46
-
# assert identity == "third", "Identity didn't change after third apply"
47
-
'';
12
+
testScript = builtins.readFile ./script.py;
48
13
};
49
14
}
+25
-9
tests/nix/suite/test_remote_deploy/hive.nix
···
5
5
inherit (import ../utils.nix { testName = "test_keys-@IDENT@"; }) makeHive mkHiveNode;
6
6
in
7
7
makeHive {
8
-
meta.nixpkgs = import <nixpkgs> { localSystem = "x86_64-linux"; };
8
+
meta = {
9
+
nixpkgs = import <nixpkgs> { localSystem = "x86_64-linux"; };
10
+
11
+
specialArgs = {
12
+
message = "second";
13
+
};
14
+
15
+
nodeSpecialArgs = {
16
+
receiver-third.message = "third";
17
+
};
18
+
};
9
19
10
20
receiver = mkHiveNode { hostname = "receiver"; } {
11
21
environment.etc."identity".text = "first";
···
20
30
];
21
31
};
22
32
23
-
receiver-second = mkHiveNode { hostname = "receiver"; } {
24
-
environment.etc."identity".text = "second";
25
-
deployment.target.host = "receiver";
26
-
};
33
+
receiver-second = mkHiveNode { hostname = "receiver"; } (
34
+
{ message, ... }:
35
+
{
36
+
environment.etc."identity".text = message;
37
+
deployment.target.host = "receiver";
38
+
}
39
+
);
27
40
28
-
receiver-third = mkHiveNode { hostname = "receiver"; } {
29
-
environment.etc."identity".text = "third";
30
-
deployment.target.host = "receiver";
31
-
};
41
+
receiver-third = mkHiveNode { hostname = "receiver"; } (
42
+
{ message, ... }:
43
+
{
44
+
environment.etc."identity".text = message;
45
+
deployment.target.host = "receiver";
46
+
}
47
+
);
32
48
33
49
receiver-unreachable = mkHiveNode { hostname = "receiver"; } {
34
50
# test node pinging
+63
tests/nix/suite/test_remote_deploy/script.py
···
1
+
# SPDX-License-Identifier: AGPL-3.0-or-later
2
+
# Copyright 2024-2025 wire Contributors
3
+
4
+
from typing import TYPE_CHECKING
5
+
from typing import Callable, ContextManager
6
+
7
+
if TYPE_CHECKING:
8
+
from test_driver.machine import Machine
9
+
10
+
deployer: Machine = None # type: ignore[invalid-assignment]
11
+
receiver: Machine = None # type: ignore[invalid-assignment]
12
+
13
+
TEST_DIR = ""
14
+
15
+
# https://github.com/NixOS/nixpkgs/blob/d10d9933b1c206f9b2950e5e1d68268c5ed0a3c7/nixos/lib/test-script-prepend.py#L43
16
+
subtest: Callable[[str], ContextManager[None]] = None # type: ignore[invalid-assignment]
17
+
18
+
# typing-end
19
+
20
+
with subtest("Test unreachable hosts"):
21
+
deployer.fail(
22
+
f"wire apply --on receiver-unreachable --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2"
23
+
)
24
+
25
+
with subtest("Check basic apply: Interactive"):
26
+
deployer.succeed(
27
+
f"wire apply --on receiver --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2"
28
+
)
29
+
30
+
identity = receiver.succeed("cat /etc/identity")
31
+
assert identity == "first", "Identity of first apply wasn't as expected"
32
+
33
+
with subtest("Check basic apply: NonInteractive"):
34
+
deployer.succeed(
35
+
f"wire apply --on receiver-third --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host --non-interactive -vvv >&2"
36
+
)
37
+
38
+
identity = receiver.succeed("cat /etc/identity")
39
+
assert identity == "third", "Identity of non-interactive apply wasn't as expected"
40
+
41
+
with subtest("Check boot apply"):
42
+
first_system = receiver.succeed("readlink -f /run/current-system")
43
+
44
+
deployer.succeed(
45
+
f"wire apply boot --on receiver-second --no-progress --path {TEST_DIR}/hive.nix --no-keys --ssh-accept-host -vvv >&2"
46
+
)
47
+
48
+
_first_system = receiver.succeed("readlink -f /run/current-system")
49
+
assert first_system == _first_system, (
50
+
"apply boot without --reboot changed /run/current-system"
51
+
)
52
+
53
+
# with subtest("Check /etc/identity after reboot"):
54
+
# receiver.reboot()
55
+
#
56
+
# identity = receiver.succeed("cat /etc/identity")
57
+
# assert identity == "second", "Identity didn't change after second apply"
58
+
59
+
# with subtest("Check --reboot"):
60
+
# deployer.succeed(f"wire apply boot --on receiver-third --no-progress --path {TEST_DIR}/hive.nix --reboot --no-keys -vvv >&2")
61
+
#
62
+
# identity = receiver.succeed("cat /etc/identity")
63
+
# assert identity == "third", "Identity didn't change after third apply"
+1
-4
tests/nix/suite/test_stdin/default.nix
···
7
7
_wire.deployer = true;
8
8
_wire.receiver = true;
9
9
};
10
-
testScript = ''
11
-
deployer.succeed(f"echo @tag | wire apply --on deployer --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2")
12
-
deployer.succeed("test -f /etc/a")
13
-
'';
10
+
testScript = builtins.readFile ./script.py;
14
11
};
15
12
}
+17
tests/nix/suite/test_stdin/script.py
···
1
+
# SPDX-License-Identifier: AGPL-3.0-or-later
2
+
# Copyright 2024-2025 wire Contributors
3
+
4
+
from typing import TYPE_CHECKING
5
+
6
+
if TYPE_CHECKING:
7
+
from test_driver.machine import Machine
8
+
9
+
deployer: Machine = None # type: ignore[invalid-assignment]
10
+
TEST_DIR = ""
11
+
12
+
# typing-end
13
+
14
+
deployer.succeed(
15
+
f"echo @tag | wire apply --on deployer --no-progress --path {TEST_DIR}/hive.nix --no-keys -vvv >&2"
16
+
)
17
+
deployer.succeed("test -f /etc/a")
+20
tests/nix/tools/__init__.py
···
1
+
# SPDX-License-Identifier: AGPL-3.0-or-later
2
+
# Copyright 2024-2025 wire Contributors
3
+
4
+
from typing import TYPE_CHECKING
5
+
6
+
if TYPE_CHECKING:
7
+
from test_driver.machine import Machine
8
+
9
+
# typing-end
10
+
11
+
12
+
def collect_store_objects(machine: Machine) -> set[str]:
13
+
return set(machine.succeed("ls /nix/store").strip().split("\n"))
14
+
15
+
16
+
def assert_store_not_poisoned(machine: Machine, poison: str, objects: set[str]):
17
+
paths = list(map(lambda n: f"/nix/store/{n}", objects))
18
+
19
+
machine.succeed("which rg")
20
+
machine.fail(f"rg '{poison}' {' '.join(paths)}")
-13
tests/nix/tools.py
···
1
-
# SPDX-License-Identifier: AGPL-3.0-or-later
2
-
# Copyright 2024-2025 wire Contributors
3
-
4
-
5
-
def collect_store_objects(machine: Machine) -> set[str]:
6
-
return set(machine.succeed("ls /nix/store").strip().split("\n"))
7
-
8
-
9
-
def assert_store_not_poisoned(machine: Machine, poison: str, objects: set[str]):
10
-
paths = list(map(lambda n: f"/nix/store/{n}", objects))
11
-
12
-
machine.succeed("which rg")
13
-
machine.fail(f"rg '{poison}' {" ".join(paths)}")
+303
tests/nix/uv.lock
···
1
+
version = 1
2
+
revision = 3
3
+
requires-python = ">=3.13"
4
+
5
+
[[package]]
6
+
name = "appdirs"
7
+
version = "1.4.4"
8
+
source = { registry = "https://pypi.org/simple" }
9
+
sdist = { url = "https://files.pythonhosted.org/packages/d7/d8/05696357e0311f5b5c316d7b95f46c669dd9c15aaeecbb48c7d0aeb88c40/appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", size = 13470, upload-time = "2020-05-11T07:59:51.037Z" }
10
+
wheels = [
11
+
{ url = "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128", size = 9566, upload-time = "2020-05-11T07:59:49.499Z" },
12
+
]
13
+
14
+
[[package]]
15
+
name = "asttokens"
16
+
version = "3.0.1"
17
+
source = { registry = "https://pypi.org/simple" }
18
+
sdist = { url = "https://files.pythonhosted.org/packages/be/a5/8e3f9b6771b0b408517c82d97aed8f2036509bc247d46114925e32fe33f0/asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7", size = 62308, upload-time = "2025-11-15T16:43:48.578Z" }
19
+
wheels = [
20
+
{ url = "https://files.pythonhosted.org/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" },
21
+
]
22
+
23
+
[[package]]
24
+
name = "colorama"
25
+
version = "0.4.6"
26
+
source = { registry = "https://pypi.org/simple" }
27
+
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
28
+
wheels = [
29
+
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
30
+
]
31
+
32
+
[[package]]
33
+
name = "decorator"
34
+
version = "5.2.1"
35
+
source = { registry = "https://pypi.org/simple" }
36
+
sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" }
37
+
wheels = [
38
+
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
39
+
]
40
+
41
+
[[package]]
42
+
name = "executing"
43
+
version = "2.2.1"
44
+
source = { registry = "https://pypi.org/simple" }
45
+
sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" }
46
+
wheels = [
47
+
{ url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
48
+
]
49
+
50
+
[[package]]
51
+
name = "ipython"
52
+
version = "9.8.0"
53
+
source = { registry = "https://pypi.org/simple" }
54
+
dependencies = [
55
+
{ name = "colorama", marker = "sys_platform == 'win32'" },
56
+
{ name = "decorator" },
57
+
{ name = "ipython-pygments-lexers" },
58
+
{ name = "jedi" },
59
+
{ name = "matplotlib-inline" },
60
+
{ name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" },
61
+
{ name = "prompt-toolkit" },
62
+
{ name = "pygments" },
63
+
{ name = "stack-data" },
64
+
{ name = "traitlets" },
65
+
]
66
+
sdist = { url = "https://files.pythonhosted.org/packages/12/51/a703c030f4928646d390b4971af4938a1b10c9dfce694f0d99a0bb073cb2/ipython-9.8.0.tar.gz", hash = "sha256:8e4ce129a627eb9dd221c41b1d2cdaed4ef7c9da8c17c63f6f578fe231141f83", size = 4424940, upload-time = "2025-12-03T10:18:24.353Z" }
67
+
wheels = [
68
+
{ url = "https://files.pythonhosted.org/packages/f1/df/8ee1c5dd1e3308b5d5b2f2dfea323bb2f3827da8d654abb6642051199049/ipython-9.8.0-py3-none-any.whl", hash = "sha256:ebe6d1d58d7d988fbf23ff8ff6d8e1622cfdb194daf4b7b73b792c4ec3b85385", size = 621374, upload-time = "2025-12-03T10:18:22.335Z" },
69
+
]
70
+
71
+
[[package]]
72
+
name = "ipython-pygments-lexers"
73
+
version = "1.1.1"
74
+
source = { registry = "https://pypi.org/simple" }
75
+
dependencies = [
76
+
{ name = "pygments" },
77
+
]
78
+
sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" }
79
+
wheels = [
80
+
{ url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" },
81
+
]
82
+
83
+
[[package]]
84
+
name = "jedi"
85
+
version = "0.19.2"
86
+
source = { registry = "https://pypi.org/simple" }
87
+
dependencies = [
88
+
{ name = "parso" },
89
+
]
90
+
sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" }
91
+
wheels = [
92
+
{ url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" },
93
+
]
94
+
95
+
[[package]]
96
+
name = "junit-xml"
97
+
version = "1.9"
98
+
source = { registry = "https://pypi.org/simple" }
99
+
dependencies = [
100
+
{ name = "six" },
101
+
]
102
+
sdist = { url = "https://files.pythonhosted.org/packages/98/af/bc988c914dd1ea2bc7540ecc6a0265c2b6faccc6d9cdb82f20e2094a8229/junit-xml-1.9.tar.gz", hash = "sha256:de16a051990d4e25a3982b2dd9e89d671067548718866416faec14d9de56db9f", size = 7349, upload-time = "2023-01-24T18:42:00.836Z" }
103
+
wheels = [
104
+
{ url = "https://files.pythonhosted.org/packages/2a/93/2d896b5fd3d79b4cadd8882c06650e66d003f465c9d12c488d92853dff78/junit_xml-1.9-py2.py3-none-any.whl", hash = "sha256:ec5ca1a55aefdd76d28fcc0b135251d156c7106fa979686a4b48d62b761b4732", size = 7130, upload-time = "2020-02-22T20:41:37.661Z" },
105
+
]
106
+
107
+
[[package]]
108
+
name = "matplotlib-inline"
109
+
version = "0.2.1"
110
+
source = { registry = "https://pypi.org/simple" }
111
+
dependencies = [
112
+
{ name = "traitlets" },
113
+
]
114
+
sdist = { url = "https://files.pythonhosted.org/packages/c7/74/97e72a36efd4ae2bccb3463284300f8953f199b5ffbc04cbbb0ec78f74b1/matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe", size = 8110, upload-time = "2025-10-23T09:00:22.126Z" }
115
+
wheels = [
116
+
{ url = "https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" },
117
+
]
118
+
119
+
[[package]]
120
+
name = "nixos-test-driver"
121
+
version = "0.0.0"
122
+
source = { git = "https://github.com/NixOS/nixpkgs?subdirectory=nixos%2Flib%2Ftest-driver%2Fsrc&branch=nixos-25.11#c6f52ebd45e5925c188d1a20119978aa4ffd5ef6" }
123
+
124
+
[[package]]
125
+
name = "parso"
126
+
version = "0.8.5"
127
+
source = { registry = "https://pypi.org/simple" }
128
+
sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" }
129
+
wheels = [
130
+
{ url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" },
131
+
]
132
+
133
+
[[package]]
134
+
name = "pexpect"
135
+
version = "4.9.0"
136
+
source = { registry = "https://pypi.org/simple" }
137
+
dependencies = [
138
+
{ name = "ptyprocess" },
139
+
]
140
+
sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" }
141
+
wheels = [
142
+
{ url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" },
143
+
]
144
+
145
+
[[package]]
146
+
name = "prompt-toolkit"
147
+
version = "3.0.52"
148
+
source = { registry = "https://pypi.org/simple" }
149
+
dependencies = [
150
+
{ name = "wcwidth" },
151
+
]
152
+
sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" }
153
+
wheels = [
154
+
{ url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" },
155
+
]
156
+
157
+
[[package]]
158
+
name = "ptpython"
159
+
version = "3.0.32"
160
+
source = { registry = "https://pypi.org/simple" }
161
+
dependencies = [
162
+
{ name = "appdirs" },
163
+
{ name = "jedi" },
164
+
{ name = "prompt-toolkit" },
165
+
{ name = "pygments" },
166
+
]
167
+
sdist = { url = "https://files.pythonhosted.org/packages/b6/8c/7e904ceeb512b4530c7ca1d918d3565d694a1fa7df337cdfc36a16347d68/ptpython-3.0.32.tar.gz", hash = "sha256:11651778236de95c582b42737294e50a66ba4a21fa01c0090ea70815af478fe0", size = 74080, upload-time = "2025-11-20T21:20:48.27Z" }
168
+
wheels = [
169
+
{ url = "https://files.pythonhosted.org/packages/4c/ac/0e35e5d7afd47ab0e2c71293ed2ad18df91a2a4a008c0ff59c2f22def377/ptpython-3.0.32-py3-none-any.whl", hash = "sha256:16435d323e5fc0a685d5f4dc5bb4494fb68ac68736689cd1247e1eda9369b616", size = 68099, upload-time = "2025-11-20T21:20:46.634Z" },
170
+
]
171
+
172
+
[[package]]
173
+
name = "ptyprocess"
174
+
version = "0.7.0"
175
+
source = { registry = "https://pypi.org/simple" }
176
+
sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" }
177
+
wheels = [
178
+
{ url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" },
179
+
]
180
+
181
+
[[package]]
182
+
name = "pure-eval"
183
+
version = "0.2.3"
184
+
source = { registry = "https://pypi.org/simple" }
185
+
sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" }
186
+
wheels = [
187
+
{ url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" },
188
+
]
189
+
190
+
[[package]]
191
+
name = "pygments"
192
+
version = "2.19.2"
193
+
source = { registry = "https://pypi.org/simple" }
194
+
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
195
+
wheels = [
196
+
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
197
+
]
198
+
199
+
[[package]]
200
+
name = "remote-pdb"
201
+
version = "2.1.0"
202
+
source = { registry = "https://pypi.org/simple" }
203
+
sdist = { url = "https://files.pythonhosted.org/packages/e4/b5/4944cac06fd9fc4a2e168313ec220aa25ed96ce83947b63eea5b4045b22d/remote-pdb-2.1.0.tar.gz", hash = "sha256:2d70c6f41e0eabf0165e8f1be58f82aa7a605aaeab8f2aefeb9ce246431091c1", size = 22295, upload-time = "2020-07-24T13:31:32.985Z" }
204
+
wheels = [
205
+
{ url = "https://files.pythonhosted.org/packages/71/c5/d208c66344bb785d800adb61aef512290d3473052b9e7697890f0547aff2/remote_pdb-2.1.0-py2.py3-none-any.whl", hash = "sha256:94f73a92ac1248cf16189211011f97096bdada8a7baac8c79372663bbb57b5d0", size = 6304, upload-time = "2020-07-24T13:31:31.535Z" },
206
+
]
207
+
208
+
[[package]]
209
+
name = "six"
210
+
version = "1.17.0"
211
+
source = { registry = "https://pypi.org/simple" }
212
+
sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
213
+
wheels = [
214
+
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
215
+
]
216
+
217
+
[[package]]
218
+
name = "stack-data"
219
+
version = "0.6.3"
220
+
source = { registry = "https://pypi.org/simple" }
221
+
dependencies = [
222
+
{ name = "asttokens" },
223
+
{ name = "executing" },
224
+
{ name = "pure-eval" },
225
+
]
226
+
sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" }
227
+
wheels = [
228
+
{ url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" },
229
+
]
230
+
231
+
[[package]]
232
+
name = "traitlets"
233
+
version = "5.14.3"
234
+
source = { registry = "https://pypi.org/simple" }
235
+
sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" }
236
+
wheels = [
237
+
{ url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" },
238
+
]
239
+
240
+
[[package]]
241
+
name = "ty"
242
+
version = "0.0.4"
243
+
source = { registry = "https://pypi.org/simple" }
244
+
sdist = { url = "https://files.pythonhosted.org/packages/48/d9/97d5808e851f790e58f8a54efb5c7b9f404640baf9e295f424846040b316/ty-0.0.4.tar.gz", hash = "sha256:2ea47a0089d74730658ec4e988c8ef476a1e9bd92df3e56709c4003c2895ff3b", size = 4780289, upload-time = "2025-12-19T00:13:53.12Z" }
245
+
wheels = [
246
+
{ url = "https://files.pythonhosted.org/packages/b1/94/b32a962243cc8a16e8dc74cf1fe75e8bb013d0e13e71bb540e2c86214b61/ty-0.0.4-py3-none-linux_armv6l.whl", hash = "sha256:5225da65a8d1defeb21ee9d74298b1b97c6cbab36e235a310c1430d9079e4b6a", size = 9762399, upload-time = "2025-12-19T00:14:11.261Z" },
247
+
{ url = "https://files.pythonhosted.org/packages/d1/d2/7c76e0c22ddfc2fcd4a3458a65f87ce074070eb1c68c07ee475cc2b6ea68/ty-0.0.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f87770d7988f470b795a2043185082fa959dbe1979a11b4bfe20f1214d37bd6e", size = 9590410, upload-time = "2025-12-19T00:13:55.759Z" },
248
+
{ url = "https://files.pythonhosted.org/packages/a5/84/de4b1fc85669faca3622071d5a3f3ec7bfb239971f368c28fae461d3398a/ty-0.0.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ecf68b8ea48674a289d733b4786aecc259242a2d9a920b3ec8583db18c67496a", size = 9131113, upload-time = "2025-12-19T00:14:08.593Z" },
249
+
{ url = "https://files.pythonhosted.org/packages/a7/ff/b5bf385b6983be56a470856bbcbac1b7e816bcd765a7e9d39ab2399e387d/ty-0.0.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc396d76a57e527393cae4ee8faf23b93be3df9e93202f39925721a7a2bb7b8", size = 9599152, upload-time = "2025-12-19T00:13:40.484Z" },
250
+
{ url = "https://files.pythonhosted.org/packages/36/d6/9880ba106f2f20d13e6a5dca5d5ca44bfb3782936ee67ff635f89a2959c0/ty-0.0.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c893b968d2f9964a4d4db9992c9ba66b01f411b1f48dffcde08622e19cd6ab97", size = 9585368, upload-time = "2025-12-19T00:14:00.994Z" },
251
+
{ url = "https://files.pythonhosted.org/packages/3f/53/503cfc18bc4c7c4e02f89dd43debc41a6e343b41eb43df658dfb493a386d/ty-0.0.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:526c925b80d68a53c165044d2370fcfc0def1f119f7b7e483ee61d24da6fb891", size = 9998412, upload-time = "2025-12-19T00:14:18.653Z" },
252
+
{ url = "https://files.pythonhosted.org/packages/1d/bd/dd2d3e29834da5add2eda0ab5b433171ce9ce9a248c364d2e237f82073d7/ty-0.0.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:857f605a7fa366b6c6e6f38abc311d0606be513c2bee8977b5c8fd4bde1a82d5", size = 10853890, upload-time = "2025-12-19T00:13:50.891Z" },
253
+
{ url = "https://files.pythonhosted.org/packages/07/fe/28ba3be1672e6b8df46e43de66a02dc076ffba7853d391a5466421886225/ty-0.0.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4cc981aa3ebdac2c233421b1e58c80b0df6a8e6e6fa8b9e69fbdfd2f82768af", size = 10587263, upload-time = "2025-12-19T00:14:21.577Z" },
254
+
{ url = "https://files.pythonhosted.org/packages/26/9c/bb598772043f686afe5bc26cb386020709c1a0bcc164bc22ad9da2b4f55d/ty-0.0.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b03b2708b0bf67c76424a860f848aebaa4772c05529170c3761bfcaea93ec199", size = 10401204, upload-time = "2025-12-19T00:13:43.453Z" },
255
+
{ url = "https://files.pythonhosted.org/packages/ac/18/71765e9d63669bf09461c3fea84a7a63232ccb0e83b84676f07b987fc217/ty-0.0.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:469890e885544beb129c21e2f8f15321f0573d094aec13da68593c5f86389ff9", size = 10129713, upload-time = "2025-12-19T00:14:13.725Z" },
256
+
{ url = "https://files.pythonhosted.org/packages/c3/2d/c03eba570aa85e9c361de5ed36d60b9ab139e93ee91057f455ab4af48e54/ty-0.0.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:abfd928d09567e12068aeca875e920def3badf1978896f474aa4b85b552703c4", size = 9586203, upload-time = "2025-12-19T00:14:03.423Z" },
257
+
{ url = "https://files.pythonhosted.org/packages/61/f1/8c3c82a8df69bd4417c77be4f895d043db26dd47bfcc90b33dc109cd0096/ty-0.0.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:44b8e94f9d64df12eae4cf8031c5ca9a4c610b57092b26ad3d68d91bcc7af122", size = 9608230, upload-time = "2025-12-19T00:13:58.252Z" },
258
+
{ url = "https://files.pythonhosted.org/packages/51/0c/d8ba3a85c089c246ef6bd49d0f0b40bc0f9209bb819e8c02ccbea5cb4d57/ty-0.0.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9d6a439813e21a06769daf858105818c385d88018929d4a56970d4ddd5cd3df2", size = 9725125, upload-time = "2025-12-19T00:14:05.996Z" },
259
+
{ url = "https://files.pythonhosted.org/packages/4d/38/e30f64ad1e40905c766576ec70cffc69163591a5842ce14652672f6ab394/ty-0.0.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c3cfcf26cfe6c828e91d7a529cc2dda37bc3b51ba06909c9be07002a6584af52", size = 10237174, upload-time = "2025-12-19T00:14:23.858Z" },
260
+
{ url = "https://files.pythonhosted.org/packages/cb/d7/8d650aa0be8936dd3ed74e2b0655230e2904caa6077c30c16a089b523cff/ty-0.0.4-py3-none-win32.whl", hash = "sha256:58bbf70dd27af6b00dedbdebeec92d5993aa238664f96fa5c0064930f7a0d30b", size = 9188434, upload-time = "2025-12-19T00:13:45.875Z" },
261
+
{ url = "https://files.pythonhosted.org/packages/82/d7/9fc0c81cf0b0d281ac9c18bfbdb4d6bae2173503ba79e40b210ab41c2c8b/ty-0.0.4-py3-none-win_amd64.whl", hash = "sha256:7c2db0f96218f08c140bd9d3fcbb1b3c8c5c4f0c9b0a5624487f0a2bf4b76163", size = 10019313, upload-time = "2025-12-19T00:14:15.968Z" },
262
+
{ url = "https://files.pythonhosted.org/packages/5f/b8/3e3246738eed1cd695c5964a401f3b9c757d20ac21fdae06281af9f40ef6/ty-0.0.4-py3-none-win_arm64.whl", hash = "sha256:69f14fc98e4a847afa9f8c5d5234d008820dbc09c7dcdb3ac1ba16628f5132df", size = 9561857, upload-time = "2025-12-19T00:13:48.382Z" },
263
+
]
264
+
265
+
[[package]]
266
+
name = "wcwidth"
267
+
version = "0.2.14"
268
+
source = { registry = "https://pypi.org/simple" }
269
+
sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" }
270
+
wheels = [
271
+
{ url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
272
+
]
273
+
274
+
[[package]]
275
+
name = "wire-vm-tests"
276
+
version = "0.0.0"
277
+
source = { virtual = "." }
278
+
dependencies = [
279
+
{ name = "colorama" },
280
+
{ name = "ipython" },
281
+
{ name = "junit-xml" },
282
+
{ name = "nixos-test-driver" },
283
+
{ name = "ptpython" },
284
+
{ name = "remote-pdb" },
285
+
]
286
+
287
+
[package.dev-dependencies]
288
+
dev = [
289
+
{ name = "ty" },
290
+
]
291
+
292
+
[package.metadata]
293
+
requires-dist = [
294
+
{ name = "colorama", specifier = ">=0.4.6" },
295
+
{ name = "ipython", specifier = ">=9.8.0" },
296
+
{ name = "junit-xml", specifier = ">=1.9" },
297
+
{ name = "nixos-test-driver", git = "https://github.com/NixOS/nixpkgs?subdirectory=nixos%2Flib%2Ftest-driver%2Fsrc&branch=nixos-25.11" },
298
+
{ name = "ptpython", specifier = ">=3.0.32" },
299
+
{ name = "remote-pdb", specifier = ">=2.1.0" },
300
+
]
301
+
302
+
[package.metadata.requires-dev]
303
+
dev = [{ name = "ty", specifier = ">=0.0.4" }]
-35
wire/cli/Cargo.toml
···
1
-
[package]
2
-
name = "wire"
3
-
version.workspace = true
4
-
edition.workspace = true
5
-
6
-
[lints]
7
-
workspace = true
8
-
9
-
[features]
10
-
dhat-heap = []
11
-
12
-
[dependencies]
13
-
clap = { workspace = true }
14
-
clap-verbosity-flag = { workspace = true }
15
-
serde = { workspace = true }
16
-
tokio = { workspace = true }
17
-
tracing = { workspace = true }
18
-
tracing-log = { workspace = true }
19
-
tracing-subscriber = { workspace = true }
20
-
lib = { path = "../lib" }
21
-
serde_json = { workspace = true }
22
-
miette = { workspace = true }
23
-
thiserror = { workspace = true }
24
-
enum-display-derive = "0.1.1"
25
-
im = { workspace = true }
26
-
futures = "0.3.31"
27
-
clap-num = "1.2.0"
28
-
clap-markdown = "0.1.5"
29
-
itertools = "0.14.0"
30
-
dhat = "0.3.2"
31
-
clap_complete = { version = "4.5.60", features = ["unstable-dynamic"] }
32
-
nix-compat = { workspace = true }
33
-
owo-colors = { workspace = true }
34
-
signal-hook-tokio = { version = "0.3.1", features = ["futures-v0_3"] }
35
-
signal-hook = "0.3.18"
-98
wire/cli/default.nix
···
1
-
{ getSystem, inputs, ... }:
2
-
{
3
-
perSystem =
4
-
{
5
-
pkgs,
6
-
lib,
7
-
self',
8
-
buildRustProgram,
9
-
system,
10
-
...
11
-
}:
12
-
let
13
-
cleanSystem = system: lib.replaceStrings [ "-" ] [ "_" ] system;
14
-
agents = lib.strings.concatMapStrings (
15
-
system: "--set WIRE_KEY_AGENT_${cleanSystem system} ${(getSystem system).packages.agent} "
16
-
) (import inputs.linux-systems);
17
-
in
18
-
{
19
-
packages = {
20
-
default = self'.packages.wire;
21
-
wire-unwrapped = buildRustProgram {
22
-
name = "wire";
23
-
pname = "wire";
24
-
cargoExtraArgs = "-p wire";
25
-
doCheck = true;
26
-
nativeBuildInputs = [
27
-
pkgs.installShellFiles
28
-
pkgs.sqlx-cli
29
-
];
30
-
preBuild = ''
31
-
export DATABASE_URL=sqlite:./db.sqlite3
32
-
sqlx database create
33
-
sqlx migrate run --source ./wire/lib/src/cache/migrations/
34
-
'';
35
-
postInstall = ''
36
-
installShellCompletion --cmd wire \
37
-
--bash <(COMPLETE=bash $out/bin/wire) \
38
-
--fish <(COMPLETE=fish $out/bin/wire) \
39
-
--zsh <(COMPLETE=zsh $out/bin/wire)
40
-
'';
41
-
};
42
-
43
-
wire-unwrapped-dev = self'.packages.wire-unwrapped.overrideAttrs {
44
-
CARGO_PROFILE = "dev";
45
-
};
46
-
47
-
wire-unwrapped-perf = buildRustProgram {
48
-
name = "wire";
49
-
pname = "wire";
50
-
CARGO_PROFILE = "profiling";
51
-
cargoExtraArgs = "-p wire";
52
-
};
53
-
54
-
wire = pkgs.symlinkJoin {
55
-
name = "wire";
56
-
paths = [ self'.packages.wire-unwrapped ];
57
-
nativeBuildInputs = [
58
-
pkgs.makeWrapper
59
-
];
60
-
postBuild = ''
61
-
wrapProgram $out/bin/wire ${agents}
62
-
'';
63
-
meta.mainProgram = "wire";
64
-
};
65
-
66
-
wire-small = pkgs.symlinkJoin {
67
-
name = "wire";
68
-
paths = [ self'.packages.wire-unwrapped ];
69
-
nativeBuildInputs = [
70
-
pkgs.makeWrapper
71
-
];
72
-
postBuild = ''
73
-
wrapProgram $out/bin/wire --set WIRE_KEY_AGENT_${cleanSystem system} ${self'.packages.agent}
74
-
'';
75
-
meta.mainProgram = "wire";
76
-
};
77
-
78
-
wire-dev = self'.packages.wire.overrideAttrs {
79
-
paths = [ self'.packages.wire-unwrapped-dev ];
80
-
};
81
-
82
-
wire-small-dev = self'.packages.wire-small.overrideAttrs {
83
-
paths = [ self'.packages.wire-unwrapped-dev ];
84
-
};
85
-
86
-
wire-small-perf = self'.packages.wire-small.overrideAttrs {
87
-
paths = [ self'.packages.wire-unwrapped-perf ];
88
-
};
89
-
90
-
wire-diagnostics-md = self'.packages.wire-unwrapped.overrideAttrs {
91
-
DIAGNOSTICS_MD_OUTPUT = "/build/source";
92
-
installPhase = ''
93
-
mv /build/source/DIAGNOSTICS.md $out
94
-
'';
95
-
};
96
-
};
97
-
};
98
-
}
-174
wire/cli/src/apply.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use futures::{FutureExt, StreamExt};
5
-
use itertools::{Either, Itertools};
6
-
use lib::hive::node::{Context, GoalExecutor, Name, StepState, should_apply_locally};
7
-
use lib::hive::{Hive, HiveLocation};
8
-
use lib::status::STATUS;
9
-
use lib::{SubCommandModifiers, errors::HiveLibError};
10
-
use miette::{Diagnostic, IntoDiagnostic, Result};
11
-
use std::collections::HashSet;
12
-
use std::io::{Read, stderr};
13
-
use std::sync::Arc;
14
-
use std::sync::atomic::AtomicBool;
15
-
use thiserror::Error;
16
-
use tracing::{Span, error, info};
17
-
18
-
use crate::cli::{ApplyArgs, ApplyTarget};
19
-
20
-
#[derive(Debug, Error, Diagnostic)]
21
-
#[error("node {} failed to apply", .0)]
22
-
struct NodeError(
23
-
Name,
24
-
#[source]
25
-
#[diagnostic_source]
26
-
HiveLibError,
27
-
);
28
-
29
-
#[derive(Debug, Error, Diagnostic)]
30
-
#[error("{} node(s) failed to apply.", .0.len())]
31
-
struct NodeErrors(#[related] Vec<NodeError>);
32
-
33
-
// returns (tags, names)
34
-
fn read_apply_targets_from_stdin() -> Result<(Vec<String>, Vec<Name>)> {
35
-
let mut buf = String::new();
36
-
let mut stdin = std::io::stdin().lock();
37
-
stdin.read_to_string(&mut buf).into_diagnostic()?;
38
-
39
-
Ok(buf
40
-
.split_whitespace()
41
-
.map(|x| ApplyTarget::from(x.to_string()))
42
-
.fold((Vec::new(), Vec::new()), |(mut tags, mut names), target| {
43
-
match target {
44
-
ApplyTarget::Node(name) => names.push(name),
45
-
ApplyTarget::Tag(tag) => tags.push(tag),
46
-
ApplyTarget::Stdin => {}
47
-
}
48
-
(tags, names)
49
-
}))
50
-
}
51
-
52
-
// #[instrument(skip_all, fields(goal = %args.goal, on = %args.on.iter().join(", ")))]
53
-
pub async fn apply(
54
-
hive: &mut Hive,
55
-
should_shutdown: Arc<AtomicBool>,
56
-
location: HiveLocation,
57
-
args: ApplyArgs,
58
-
mut modifiers: SubCommandModifiers,
59
-
) -> Result<()> {
60
-
let header_span = Span::current();
61
-
let location = Arc::new(location);
62
-
63
-
// Respect user's --always-build-local arg
64
-
hive.force_always_local(args.always_build_local)?;
65
-
66
-
let header_span_enter = header_span.enter();
67
-
68
-
let (tags, names) = args.on.iter().fold(
69
-
(HashSet::new(), HashSet::new()),
70
-
|(mut tags, mut names), target| {
71
-
match target {
72
-
ApplyTarget::Tag(tag) => {
73
-
tags.insert(tag.clone());
74
-
}
75
-
ApplyTarget::Node(name) => {
76
-
names.insert(name.clone());
77
-
}
78
-
ApplyTarget::Stdin => {
79
-
// implies non_interactive
80
-
modifiers.non_interactive = true;
81
-
82
-
let (found_tags, found_names) = read_apply_targets_from_stdin().unwrap();
83
-
names.extend(found_names);
84
-
tags.extend(found_tags);
85
-
}
86
-
}
87
-
(tags, names)
88
-
},
89
-
);
90
-
91
-
let selected_nodes: Vec<_> = hive
92
-
.nodes
93
-
.iter_mut()
94
-
.filter(|(name, node)| {
95
-
args.on.is_empty()
96
-
|| names.contains(name)
97
-
|| node.tags.iter().any(|tag| tags.contains(tag))
98
-
})
99
-
.collect();
100
-
101
-
STATUS.lock().add_many(
102
-
&selected_nodes
103
-
.iter()
104
-
.map(|(name, _)| *name)
105
-
.collect::<Vec<_>>(),
106
-
);
107
-
108
-
let mut set = selected_nodes
109
-
.into_iter()
110
-
.map(|(name, node)| {
111
-
info!("Resolved {:?} to include {}", args.on, name);
112
-
113
-
let should_apply_locally = should_apply_locally(node.allow_local_deployment, &name.0);
114
-
115
-
let context = Context {
116
-
node,
117
-
name,
118
-
goal: args.goal.clone().try_into().unwrap(),
119
-
state: StepState::default(),
120
-
no_keys: args.no_keys,
121
-
hive_location: location.clone(),
122
-
modifiers,
123
-
reboot: args.reboot,
124
-
should_apply_locally,
125
-
handle_unreachable: args.handle_unreachable.clone().into(),
126
-
should_shutdown: should_shutdown.clone(),
127
-
};
128
-
129
-
GoalExecutor::new(context)
130
-
.execute()
131
-
.map(move |result| (name, result))
132
-
})
133
-
.peekable();
134
-
135
-
if set.peek().is_none() {
136
-
error!("There are no nodes selected for deployment");
137
-
}
138
-
139
-
let futures = futures::stream::iter(set).buffer_unordered(args.parallel);
140
-
let result = futures.collect::<Vec<_>>().await;
141
-
let (successful, errors): (Vec<_>, Vec<_>) =
142
-
result
143
-
.into_iter()
144
-
.partition_map(|(name, result)| match result {
145
-
Ok(..) => Either::Left(name),
146
-
Err(err) => Either::Right((name, err)),
147
-
});
148
-
149
-
if !successful.is_empty() {
150
-
info!(
151
-
"Successfully applied goal to {} node(s): {:?}",
152
-
successful.len(),
153
-
successful
154
-
);
155
-
}
156
-
157
-
std::mem::drop(header_span_enter);
158
-
std::mem::drop(header_span);
159
-
160
-
if !errors.is_empty() {
161
-
// clear the status bar if we are about to print error messages
162
-
STATUS.lock().clear(&mut stderr());
163
-
164
-
return Err(NodeErrors(
165
-
errors
166
-
.into_iter()
167
-
.map(|(name, error)| NodeError(name.clone(), error))
168
-
.collect(),
169
-
)
170
-
.into());
171
-
}
172
-
173
-
Ok(())
174
-
}
-305
wire/cli/src/cli.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use clap::builder::PossibleValue;
5
-
use clap::{Args, Parser, Subcommand, ValueEnum};
6
-
use clap::{ValueHint, crate_version};
7
-
use clap_complete::CompletionCandidate;
8
-
use clap_complete::engine::ArgValueCompleter;
9
-
use clap_num::number_range;
10
-
use clap_verbosity_flag::InfoLevel;
11
-
use lib::SubCommandModifiers;
12
-
use lib::commands::common::get_hive_node_names;
13
-
use lib::hive::node::{Goal as HiveGoal, HandleUnreachable, Name, SwitchToConfigurationGoal};
14
-
use lib::hive::{Hive, get_hive_location};
15
-
use tokio::runtime::Handle;
16
-
17
-
use std::io::IsTerminal;
18
-
use std::{
19
-
fmt::{self, Display, Formatter},
20
-
sync::Arc,
21
-
};
22
-
23
-
#[allow(clippy::struct_excessive_bools)]
24
-
#[derive(Parser)]
25
-
#[command(
26
-
name = "wire",
27
-
bin_name = "wire",
28
-
about = "a tool to deploy nixos systems",
29
-
version = format!("{}\nDebug: Hive::SCHEMA_VERSION {}", crate_version!(), Hive::SCHEMA_VERSION)
30
-
)]
31
-
pub struct Cli {
32
-
#[command(subcommand)]
33
-
pub command: Commands,
34
-
35
-
#[command(flatten)]
36
-
pub verbose: clap_verbosity_flag::Verbosity<InfoLevel>,
37
-
38
-
/// Path or flake reference
39
-
#[arg(long, global = true, default_value = std::env::current_dir().unwrap().into_os_string(), visible_alias("flake"))]
40
-
pub path: String,
41
-
42
-
/// Hide progress bars.
43
-
///
44
-
/// Defaults to true if stdin does not refer to a tty (unix pipelines, in CI).
45
-
#[arg(long, global = true, default_value_t = !std::io::stdin().is_terminal())]
46
-
pub no_progress: bool,
47
-
48
-
/// Never accept user input.
49
-
///
50
-
/// Defaults to true if stdin does not refer to a tty (unix pipelines, in CI).
51
-
#[arg(long, global = true, default_value_t = !std::io::stdin().is_terminal())]
52
-
pub non_interactive: bool,
53
-
54
-
/// Show trace logs
55
-
#[arg(long, global = true, default_value_t = false)]
56
-
pub show_trace: bool,
57
-
58
-
#[cfg(debug_assertions)]
59
-
#[arg(long, hide = true, global = true)]
60
-
pub markdown_help: bool,
61
-
}
62
-
63
-
#[derive(Clone, Debug)]
64
-
pub enum ApplyTarget {
65
-
Node(Name),
66
-
Tag(String),
67
-
Stdin,
68
-
}
69
-
70
-
impl From<String> for ApplyTarget {
71
-
fn from(value: String) -> Self {
72
-
if value == "-" {
73
-
return ApplyTarget::Stdin;
74
-
}
75
-
76
-
if let Some(stripped) = value.strip_prefix("@") {
77
-
ApplyTarget::Tag(stripped.to_string())
78
-
} else {
79
-
ApplyTarget::Node(Name(Arc::from(value.as_str())))
80
-
}
81
-
}
82
-
}
83
-
84
-
impl Display for ApplyTarget {
85
-
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
86
-
match self {
87
-
ApplyTarget::Node(name) => name.fmt(f),
88
-
ApplyTarget::Tag(tag) => write!(f, "@{tag}"),
89
-
ApplyTarget::Stdin => write!(f, "#stdin"),
90
-
}
91
-
}
92
-
}
93
-
94
-
fn more_than_zero(s: &str) -> Result<usize, String> {
95
-
number_range(s, 1, usize::MAX)
96
-
}
97
-
98
-
#[derive(Clone)]
99
-
pub enum HandleUnreachableArg {
100
-
Ignore,
101
-
FailNode,
102
-
}
103
-
104
-
impl Display for HandleUnreachableArg {
105
-
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
106
-
match self {
107
-
Self::Ignore => write!(f, "ignore"),
108
-
Self::FailNode => write!(f, "fail-node"),
109
-
}
110
-
}
111
-
}
112
-
113
-
impl clap::ValueEnum for HandleUnreachableArg {
114
-
fn value_variants<'a>() -> &'a [Self] {
115
-
&[Self::Ignore, Self::FailNode]
116
-
}
117
-
118
-
fn to_possible_value(&self) -> Option<clap::builder::PossibleValue> {
119
-
match self {
120
-
Self::Ignore => Some(PossibleValue::new("ignore")),
121
-
Self::FailNode => Some(PossibleValue::new("fail-node")),
122
-
}
123
-
}
124
-
}
125
-
126
-
impl From<HandleUnreachableArg> for HandleUnreachable {
127
-
fn from(value: HandleUnreachableArg) -> Self {
128
-
match value {
129
-
HandleUnreachableArg::Ignore => Self::Ignore,
130
-
HandleUnreachableArg::FailNode => Self::FailNode,
131
-
}
132
-
}
133
-
}
134
-
135
-
#[derive(Args)]
136
-
pub struct ApplyArgs {
137
-
#[arg(value_enum, default_value_t)]
138
-
pub goal: Goal,
139
-
140
-
/// List of literal node names, a literal `-`, or `@` prefixed tags.
141
-
///
142
-
/// `-` will read additional values from stdin, separated by whitespace.
143
-
/// Any `-` implies `--non-interactive`.
144
-
#[arg(short, long, value_name = "NODE | @TAG | `-`", num_args = 1.., add = ArgValueCompleter::new(node_names_completer), value_hint = ValueHint::Unknown)]
145
-
pub on: Vec<ApplyTarget>,
146
-
147
-
#[arg(short, long, default_value_t = 10, value_parser=more_than_zero)]
148
-
pub parallel: usize,
149
-
150
-
/// Skip key uploads. noop when [GOAL] = Keys
151
-
#[arg(short, long, default_value_t = false)]
152
-
pub no_keys: bool,
153
-
154
-
/// Overrides deployment.buildOnTarget.
155
-
#[arg(short, long, value_name = "NODE")]
156
-
pub always_build_local: Vec<String>,
157
-
158
-
/// Reboot the nodes after activation
159
-
#[arg(short, long, default_value_t = false)]
160
-
pub reboot: bool,
161
-
162
-
/// How to handle an unreachable node in the ping step.
163
-
///
164
-
/// This only affects the ping step.
165
-
/// wire will still fail the node if it becomes unreachable after activation
166
-
#[arg(long, default_value_t = HandleUnreachableArg::FailNode)]
167
-
pub handle_unreachable: HandleUnreachableArg,
168
-
169
-
/// Unconditionally accept SSH host keys [!!]
170
-
///
171
-
/// Sets `StrictHostKeyChecking` to `no`.
172
-
/// Vulnerable to man-in-the-middle attacks, use with caution.
173
-
#[arg(long, default_value_t = false)]
174
-
pub ssh_accept_host: bool,
175
-
}
176
-
177
-
#[derive(Subcommand)]
178
-
pub enum Commands {
179
-
/// Deploy nodes
180
-
Apply(ApplyArgs),
181
-
/// Inspect hive
182
-
#[clap(visible_alias = "show")]
183
-
Inspect {
184
-
#[arg(value_enum, default_value_t)]
185
-
selection: Inspection,
186
-
187
-
/// Return in JSON format
188
-
#[arg(short, long, default_value_t = false)]
189
-
json: bool,
190
-
},
191
-
}
192
-
193
-
#[derive(Clone, Debug, Default, ValueEnum, Display)]
194
-
pub enum Inspection {
195
-
/// Output all data wire has on the entire hive
196
-
#[default]
197
-
Full,
198
-
/// Only output a list of node names
199
-
Names,
200
-
}
201
-
202
-
#[derive(Clone, Debug, Default, ValueEnum, Display)]
203
-
pub enum Goal {
204
-
/// Make the configuration the boot default and activate now
205
-
#[default]
206
-
Switch,
207
-
/// Build the configuration but do nothing with it
208
-
Build,
209
-
/// Copy system derivation to remote hosts
210
-
Push,
211
-
/// Push deployment keys to remote hosts
212
-
Keys,
213
-
/// Activate system profile on next boot
214
-
Boot,
215
-
/// Activate the configuration, but don't make it the boot default
216
-
Test,
217
-
/// Show what would be done if this configuration were activated.
218
-
DryActivate,
219
-
}
220
-
221
-
impl TryFrom<Goal> for HiveGoal {
222
-
type Error = miette::Error;
223
-
224
-
fn try_from(value: Goal) -> Result<Self, Self::Error> {
225
-
match value {
226
-
Goal::Build => Ok(HiveGoal::Build),
227
-
Goal::Push => Ok(HiveGoal::Push),
228
-
Goal::Boot => Ok(HiveGoal::SwitchToConfiguration(
229
-
SwitchToConfigurationGoal::Boot,
230
-
)),
231
-
Goal::Switch => Ok(HiveGoal::SwitchToConfiguration(
232
-
SwitchToConfigurationGoal::Switch,
233
-
)),
234
-
Goal::Test => Ok(HiveGoal::SwitchToConfiguration(
235
-
SwitchToConfigurationGoal::Test,
236
-
)),
237
-
Goal::DryActivate => Ok(HiveGoal::SwitchToConfiguration(
238
-
SwitchToConfigurationGoal::DryActivate,
239
-
)),
240
-
Goal::Keys => Ok(HiveGoal::Keys),
241
-
}
242
-
}
243
-
}
244
-
245
-
pub trait ToSubCommandModifiers {
246
-
fn to_subcommand_modifiers(&self) -> SubCommandModifiers;
247
-
}
248
-
249
-
impl ToSubCommandModifiers for Cli {
250
-
fn to_subcommand_modifiers(&self) -> SubCommandModifiers {
251
-
SubCommandModifiers {
252
-
show_trace: self.show_trace,
253
-
non_interactive: self.non_interactive,
254
-
ssh_accept_host: match &self.command {
255
-
Commands::Apply(args) if args.ssh_accept_host => lib::StrictHostKeyChecking::No,
256
-
_ => lib::StrictHostKeyChecking::default(),
257
-
},
258
-
}
259
-
}
260
-
}
261
-
262
-
fn node_names_completer(current: &std::ffi::OsStr) -> Vec<CompletionCandidate> {
263
-
tokio::task::block_in_place(|| {
264
-
let handle = Handle::current();
265
-
let modifiers = SubCommandModifiers::default();
266
-
let mut completions = vec![];
267
-
268
-
if current.is_empty() || current == "-" {
269
-
completions.push(
270
-
CompletionCandidate::new("-").help(Some("Read stdin as --on arguments".into())),
271
-
);
272
-
}
273
-
274
-
let Ok(current_dir) = std::env::current_dir() else {
275
-
return completions;
276
-
};
277
-
278
-
let Ok(hive_location) = handle.block_on(get_hive_location(
279
-
current_dir.display().to_string(),
280
-
modifiers,
281
-
)) else {
282
-
return completions;
283
-
};
284
-
285
-
let Some(current) = current.to_str() else {
286
-
return completions;
287
-
};
288
-
289
-
if current.starts_with('@') {
290
-
return vec![];
291
-
}
292
-
293
-
if let Ok(names) =
294
-
handle.block_on(async { get_hive_node_names(&hive_location, modifiers).await })
295
-
{
296
-
for name in names {
297
-
if name.starts_with(current) {
298
-
completions.push(CompletionCandidate::new(name));
299
-
}
300
-
}
301
-
}
302
-
303
-
completions
304
-
})
305
-
}
-127
wire/cli/src/main.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
#![feature(sync_nonpoison)]
5
-
#![feature(nonpoison_mutex)]
6
-
7
-
use std::process::Command;
8
-
use std::sync::Arc;
9
-
use std::sync::atomic::AtomicBool;
10
-
11
-
use crate::cli::Cli;
12
-
use crate::cli::ToSubCommandModifiers;
13
-
use crate::sigint::handle_signals;
14
-
use crate::tracing_setup::setup_logging;
15
-
use clap::CommandFactory;
16
-
use clap::Parser;
17
-
use clap_complete::CompleteEnv;
18
-
use lib::cache::InspectionCache;
19
-
use lib::commands::common::get_hive_node_names;
20
-
use lib::hive::Hive;
21
-
use lib::hive::get_hive_location;
22
-
use miette::IntoDiagnostic;
23
-
use miette::Result;
24
-
use signal_hook::consts::SIGINT;
25
-
use signal_hook_tokio::Signals;
26
-
use tracing::error;
27
-
use tracing::warn;
28
-
29
-
#[macro_use]
30
-
extern crate enum_display_derive;
31
-
32
-
mod apply;
33
-
mod cli;
34
-
mod sigint;
35
-
mod tracing_setup;
36
-
37
-
#[cfg(feature = "dhat-heap")]
38
-
#[global_allocator]
39
-
static ALLOC: dhat::Alloc = dhat::Alloc;
40
-
41
-
#[tokio::main]
42
-
async fn main() -> Result<()> {
43
-
#[cfg(feature = "dhat-heap")]
44
-
let _profiler = dhat::Profiler::new_heap();
45
-
CompleteEnv::with_factory(Cli::command).complete();
46
-
47
-
let args = Cli::parse();
48
-
49
-
let modifiers = args.to_subcommand_modifiers();
50
-
// disable progress when running in inspect mode.
51
-
setup_logging(
52
-
&args.verbose,
53
-
!matches!(args.command, cli::Commands::Inspect { .. }) && !&args.no_progress,
54
-
);
55
-
56
-
#[cfg(debug_assertions)]
57
-
if args.markdown_help {
58
-
clap_markdown::print_help_markdown::<Cli>();
59
-
return Ok(());
60
-
}
61
-
62
-
if !check_nix_available() {
63
-
miette::bail!("Nix is not available on this system.");
64
-
}
65
-
66
-
let signals = Signals::new([SIGINT]).into_diagnostic()?;
67
-
let signals_handle = signals.handle();
68
-
let should_shutdown = Arc::new(AtomicBool::new(false));
69
-
let signals_task = tokio::spawn(handle_signals(signals, should_shutdown.clone()));
70
-
71
-
let location = get_hive_location(args.path, modifiers).await?;
72
-
let cache = InspectionCache::new().await;
73
-
74
-
match args.command {
75
-
cli::Commands::Apply(apply_args) => {
76
-
let mut hive = Hive::new_from_path(&location, cache.clone(), modifiers).await?;
77
-
apply::apply(&mut hive, should_shutdown, location, apply_args, modifiers).await?;
78
-
}
79
-
cli::Commands::Inspect { json, selection } => println!("{}", {
80
-
match selection {
81
-
cli::Inspection::Full => {
82
-
let hive = Hive::new_from_path(&location, cache.clone(), modifiers).await?;
83
-
if json {
84
-
serde_json::to_string(&hive).into_diagnostic()?
85
-
} else {
86
-
warn!("use --json to output something scripting suitable");
87
-
format!("{hive}")
88
-
}
89
-
}
90
-
cli::Inspection::Names => {
91
-
serde_json::to_string(&get_hive_node_names(&location, modifiers).await?)
92
-
.into_diagnostic()?
93
-
}
94
-
}
95
-
}),
96
-
}
97
-
98
-
if let Some(cache) = cache {
99
-
cache.gc().await.into_diagnostic()?;
100
-
}
101
-
102
-
signals_handle.close();
103
-
signals_task.await.into_diagnostic()?;
104
-
105
-
Ok(())
106
-
}
107
-
108
-
fn check_nix_available() -> bool {
109
-
match Command::new("nix")
110
-
.stdout(std::process::Stdio::null())
111
-
.stderr(std::process::Stdio::null())
112
-
.spawn()
113
-
{
114
-
Ok(_) => true,
115
-
Err(e) => {
116
-
if let std::io::ErrorKind::NotFound = e.kind() {
117
-
false
118
-
} else {
119
-
error!(
120
-
"Something weird happened checking for nix availability, {}",
121
-
e
122
-
);
123
-
false
124
-
}
125
-
}
126
-
}
127
-
}
-21
wire/cli/src/sigint.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::sync::{Arc, atomic::AtomicBool};
5
-
6
-
use signal_hook::consts::SIGINT;
7
-
use signal_hook_tokio::Signals;
8
-
9
-
use futures::stream::StreamExt;
10
-
use tracing::info;
11
-
12
-
pub(crate) async fn handle_signals(mut signals: Signals, should_shutdown: Arc<AtomicBool>) {
13
-
while let Some(signal) = signals.next().await {
14
-
if let SIGINT = signal
15
-
&& !should_shutdown.load(std::sync::atomic::Ordering::Relaxed)
16
-
{
17
-
info!("Received SIGINT, attempting to shut down executor tasks.");
18
-
should_shutdown.store(true, std::sync::atomic::Ordering::Relaxed);
19
-
}
20
-
}
21
-
}
-284
wire/cli/src/tracing_setup.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::{
5
-
collections::VecDeque,
6
-
io::{self, Stderr, Write, stderr},
7
-
time::Duration,
8
-
};
9
-
10
-
use clap_verbosity_flag::{LogLevel, Verbosity};
11
-
use lib::{STDIN_CLOBBER_LOCK, status::STATUS};
12
-
use owo_colors::{OwoColorize, Stream, Style};
13
-
use tracing::{Level, Subscriber};
14
-
use tracing_log::AsTrace;
15
-
use tracing_subscriber::{
16
-
Layer,
17
-
field::{RecordFields, VisitFmt},
18
-
fmt::{
19
-
FormatEvent, FormatFields, FormattedFields,
20
-
format::{self, DefaultFields, DefaultVisitor, Format, Full},
21
-
},
22
-
layer::{Context, SubscriberExt},
23
-
registry::LookupSpan,
24
-
util::SubscriberInitExt,
25
-
};
26
-
27
-
/// The non-clobbering writer ensures that log lines are held while interactive
28
-
/// prompts are shown to the user. If logs were shown, they would "clobber" the
29
-
/// sudo / ssh prompt.
30
-
///
31
-
/// Additionally, the `STDIN_CLOBBER_LOCK` is used to ensure that no two
32
-
/// interactive prompts are shown at the same time.
33
-
struct NonClobberingWriter {
34
-
queue: VecDeque<Vec<u8>>,
35
-
stderr: Stderr,
36
-
}
37
-
38
-
impl NonClobberingWriter {
39
-
fn new() -> Self {
40
-
NonClobberingWriter {
41
-
queue: VecDeque::with_capacity(100),
42
-
stderr: stderr(),
43
-
}
44
-
}
45
-
46
-
/// expects the caller to write the status line
47
-
fn dump_previous(&mut self) -> Result<(), io::Error> {
48
-
STATUS.lock().clear(&mut self.stderr);
49
-
50
-
for buf in self.queue.iter().rev() {
51
-
self.stderr.write(buf).map(|_| ())?;
52
-
}
53
-
54
-
Ok(())
55
-
}
56
-
}
57
-
58
-
impl Write for NonClobberingWriter {
59
-
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
60
-
if let 1.. = STDIN_CLOBBER_LOCK.available_permits() {
61
-
self.dump_previous().map(|()| 0)?;
62
-
63
-
STATUS.lock().write_above_status(buf, &mut self.stderr)
64
-
} else {
65
-
self.queue.push_front(buf.to_vec());
66
-
67
-
Ok(buf.len())
68
-
}
69
-
}
70
-
71
-
fn flush(&mut self) -> std::io::Result<()> {
72
-
self.stderr.flush()
73
-
}
74
-
}
75
-
76
-
/// Handles event formatting, which falls back to the default formatter
77
-
/// passed.
78
-
struct WireEventFormat(Format<Full, ()>);
79
-
/// Formats the node's name with `WireFieldVisitor`
80
-
struct WireFieldFormat;
81
-
struct WireFieldVisitor<'a>(DefaultVisitor<'a>);
82
-
/// `WireLayer` injects `WireFieldFormat` as an extension on the event
83
-
struct WireLayer;
84
-
85
-
impl<'a> WireFieldVisitor<'a> {
86
-
fn new(writer: format::Writer<'a>, is_empty: bool) -> Self {
87
-
Self(DefaultVisitor::new(writer, is_empty))
88
-
}
89
-
}
90
-
91
-
impl<'writer> FormatFields<'writer> for WireFieldFormat {
92
-
fn format_fields<R: RecordFields>(
93
-
&self,
94
-
writer: format::Writer<'writer>,
95
-
fields: R,
96
-
) -> std::fmt::Result {
97
-
let mut v = WireFieldVisitor::new(writer, true);
98
-
fields.record(&mut v);
99
-
Ok(())
100
-
}
101
-
}
102
-
103
-
impl tracing::field::Visit for WireFieldVisitor<'_> {
104
-
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
105
-
if field.name() == "node" {
106
-
let _ = write!(
107
-
self.0.writer(),
108
-
"{:?}",
109
-
value.if_supports_color(Stream::Stderr, |text| text.bold())
110
-
);
111
-
}
112
-
}
113
-
}
114
-
115
-
const fn get_style(level: Level) -> Style {
116
-
let mut style = Style::new();
117
-
118
-
style = match level {
119
-
Level::TRACE => style.purple(),
120
-
Level::DEBUG => style.blue(),
121
-
Level::INFO => style.green(),
122
-
Level::WARN => style.yellow(),
123
-
Level::ERROR => style.red(),
124
-
};
125
-
126
-
style
127
-
}
128
-
129
-
const fn fmt_level(level: Level) -> &'static str {
130
-
match level {
131
-
Level::TRACE => "TRACE",
132
-
Level::DEBUG => "DEBUG",
133
-
Level::INFO => " INFO",
134
-
Level::WARN => " WARN",
135
-
Level::ERROR => "ERROR",
136
-
}
137
-
}
138
-
139
-
impl<S, N> FormatEvent<S, N> for WireEventFormat
140
-
where
141
-
S: Subscriber + for<'a> LookupSpan<'a>,
142
-
N: for<'a> FormatFields<'a> + 'static,
143
-
{
144
-
fn format_event(
145
-
&self,
146
-
ctx: &tracing_subscriber::fmt::FmtContext<'_, S, N>,
147
-
mut writer: tracing_subscriber::fmt::format::Writer<'_>,
148
-
event: &tracing::Event<'_>,
149
-
) -> std::fmt::Result {
150
-
let metadata = event.metadata();
151
-
152
-
// skip events without an "event_scope"
153
-
let Some(scope) = ctx.event_scope() else {
154
-
return self.0.format_event(ctx, writer, event);
155
-
};
156
-
157
-
// skip spans without a parent
158
-
let Some(parent) = scope.last() else {
159
-
return self.0.format_event(ctx, writer, event);
160
-
};
161
-
162
-
// skip spans that don't refer to the goal step executor
163
-
if parent.name() != "execute" {
164
-
return self.0.format_event(ctx, writer, event);
165
-
}
166
-
167
-
// skip spans that don't refer to a specific node being executed
168
-
if parent.fields().field("node").is_none() {
169
-
return self.0.format_event(ctx, writer, event);
170
-
}
171
-
172
-
let style = get_style(*metadata.level());
173
-
174
-
// write the log level with colour
175
-
write!(
176
-
writer,
177
-
"{} ",
178
-
fmt_level(*metadata.level()).if_supports_color(Stream::Stderr, |x| { x.style(style) })
179
-
)?;
180
-
181
-
// extract the formatted node name into a string
182
-
let parent_ext = parent.extensions();
183
-
let node_name = &parent_ext
184
-
.get::<FormattedFields<WireFieldFormat>>()
185
-
.unwrap();
186
-
187
-
write!(writer, "{node_name}")?;
188
-
189
-
// write the step name
190
-
if let Some(step) = ctx.event_scope().unwrap().from_root().nth(1) {
191
-
write!(writer, " {}", step.name().italic())?;
192
-
}
193
-
194
-
write!(writer, " | ")?;
195
-
196
-
// write the default fields, including the actual message and other data
197
-
let mut fields = FormattedFields::<DefaultFields>::new(String::new());
198
-
199
-
ctx.format_fields(fields.as_writer(), event)?;
200
-
201
-
write!(writer, "{fields}")?;
202
-
writeln!(writer)?;
203
-
204
-
Ok(())
205
-
}
206
-
}
207
-
208
-
impl<S> Layer<S> for WireLayer
209
-
where
210
-
S: Subscriber + for<'a> LookupSpan<'a>,
211
-
{
212
-
fn on_new_span(
213
-
&self,
214
-
attrs: &tracing::span::Attributes<'_>,
215
-
id: &tracing::span::Id,
216
-
ctx: Context<'_, S>,
217
-
) {
218
-
let span = ctx.span(id).unwrap();
219
-
220
-
if span.extensions().get::<WireFieldFormat>().is_some() {
221
-
return;
222
-
}
223
-
224
-
let mut fields = FormattedFields::<WireFieldFormat>::new(String::new());
225
-
if WireFieldFormat
226
-
.format_fields(fields.as_writer(), attrs)
227
-
.is_ok()
228
-
{
229
-
span.extensions_mut().insert(fields);
230
-
}
231
-
}
232
-
}
233
-
234
-
async fn status_tick_worker() {
235
-
let mut interval = tokio::time::interval(Duration::from_secs(1));
236
-
let mut stderr = stderr();
237
-
238
-
loop {
239
-
interval.tick().await;
240
-
241
-
if STDIN_CLOBBER_LOCK.available_permits() < 1 {
242
-
continue;
243
-
}
244
-
245
-
let mut status = STATUS.lock();
246
-
247
-
status.clear(&mut stderr);
248
-
status.write_status(&mut stderr);
249
-
}
250
-
}
251
-
252
-
/// Set up logging for the application
253
-
/// Uses `WireFieldFormat` if -v was never passed
254
-
pub fn setup_logging<L: LogLevel>(verbosity: &Verbosity<L>, show_progress: bool) {
255
-
let filter = verbosity.log_level_filter().as_trace();
256
-
let registry = tracing_subscriber::registry();
257
-
258
-
STATUS.lock().show_progress(show_progress);
259
-
260
-
// spawn worker to tick the status bar
261
-
if show_progress {
262
-
tokio::spawn(status_tick_worker());
263
-
}
264
-
265
-
if verbosity.is_present() {
266
-
let layer = tracing_subscriber::fmt::layer()
267
-
.without_time()
268
-
.with_target(false)
269
-
.with_writer(NonClobberingWriter::new)
270
-
.with_filter(filter);
271
-
272
-
registry.with(layer).init();
273
-
return;
274
-
}
275
-
276
-
let event_formatter = WireEventFormat(format::format().without_time().with_target(false));
277
-
278
-
let layer = tracing_subscriber::fmt::layer()
279
-
.event_format(event_formatter)
280
-
.with_writer(NonClobberingWriter::new)
281
-
.with_filter(filter);
282
-
283
-
registry.with(layer).with(WireLayer).init();
284
-
}
-17
wire/key_agent/Cargo.toml
···
1
-
[package]
2
-
name = "key_agent"
3
-
edition.workspace = true
4
-
version.workspace = true
5
-
6
-
[dependencies]
7
-
tokio = { workspace = true }
8
-
tokio-util = { workspace = true }
9
-
anyhow = { workspace = true }
10
-
prost = { workspace = true }
11
-
nix = { workspace = true }
12
-
futures-util = { workspace = true }
13
-
sha2 = { workspace = true }
14
-
base64 = { workspace = true }
15
-
16
-
[build-dependencies]
17
-
prost-build = "0.14"
-8
wire/key_agent/build.rs
-17
wire/key_agent/default.nix
-17
wire/key_agent/src/keys.proto
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
syntax = "proto3";
5
-
6
-
package key_agent.keys;
7
-
8
-
message KeySpec {
9
-
string destination = 1;
10
-
string user = 2;
11
-
string group = 3;
12
-
uint32 permissions = 4;
13
-
uint32 length = 5;
14
-
bool last = 6;
15
-
/// Sha256 digest
16
-
bytes digest = 7;
17
-
}
-6
wire/key_agent/src/lib.rs
-94
wire/key_agent/src/main.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
#![deny(clippy::pedantic)]
5
-
use base64::Engine;
6
-
use base64::prelude::BASE64_STANDARD;
7
-
use futures_util::stream::StreamExt;
8
-
use key_agent::keys::KeySpec;
9
-
use nix::unistd::{Group, User};
10
-
use prost::Message;
11
-
use prost::bytes::Bytes;
12
-
use sha2::{Digest, Sha256};
13
-
use std::os::unix::fs::PermissionsExt;
14
-
use std::os::unix::fs::chown;
15
-
use std::path::{Path, PathBuf};
16
-
use tokio::fs::File;
17
-
use tokio::io::AsyncWriteExt;
18
-
use tokio_util::codec::{FramedRead, LengthDelimitedCodec};
19
-
20
-
fn create_path(key_path: &Path) -> Result<(), anyhow::Error> {
21
-
let prefix = key_path.parent().unwrap();
22
-
std::fs::create_dir_all(prefix)?;
23
-
24
-
Ok(())
25
-
}
26
-
27
-
fn pretty_keyspec(spec: &KeySpec) -> String {
28
-
format!(
29
-
"{} {}:{} {}",
30
-
spec.destination, spec.user, spec.group, spec.permissions
31
-
)
32
-
}
33
-
34
-
#[tokio::main]
35
-
async fn main() -> Result<(), anyhow::Error> {
36
-
let stdin = tokio::io::stdin();
37
-
38
-
let mut framed = FramedRead::new(stdin, LengthDelimitedCodec::new());
39
-
40
-
while let Some(spec_bytes) = framed.next().await {
41
-
let spec_bytes = Bytes::from(BASE64_STANDARD.decode(spec_bytes?)?);
42
-
let spec = KeySpec::decode(spec_bytes)?;
43
-
44
-
let key_bytes = BASE64_STANDARD.decode(
45
-
framed
46
-
.next()
47
-
.await
48
-
.expect("expected key_bytes to come after spec_bytes")?,
49
-
)?;
50
-
51
-
let digest = Sha256::digest(&key_bytes).to_vec();
52
-
53
-
println!(
54
-
"Writing {}, {:?} bytes of data",
55
-
pretty_keyspec(&spec),
56
-
key_bytes.len()
57
-
);
58
-
59
-
if digest != spec.digest {
60
-
return Err(anyhow::anyhow!(
61
-
"digest of {spec:?} did not match {digest:?}! Please create an issue!"
62
-
));
63
-
}
64
-
65
-
let path = PathBuf::from(&spec.destination);
66
-
create_path(&path)?;
67
-
68
-
let mut file = File::create(path).await?;
69
-
let mut permissions = file.metadata().await?.permissions();
70
-
71
-
permissions.set_mode(spec.permissions);
72
-
file.set_permissions(permissions).await?;
73
-
74
-
let user = User::from_name(&spec.user)?;
75
-
let group = Group::from_name(&spec.group)?;
76
-
77
-
chown(
78
-
spec.destination,
79
-
// Default uid/gid to 0. This is then wrapped in an Option again for
80
-
// the function.
81
-
Some(user.map_or(0, |user| user.uid.into())),
82
-
Some(group.map_or(0, |group| group.gid.into())),
83
-
)?;
84
-
85
-
file.write_all(&key_bytes).await?;
86
-
87
-
// last key, goodbye
88
-
if spec.last {
89
-
break;
90
-
}
91
-
}
92
-
93
-
Ok(())
94
-
}
-51
wire/lib/Cargo.toml
···
1
-
[package]
2
-
name = "lib"
3
-
version.workspace = true
4
-
edition.workspace = true
5
-
6
-
[lints]
7
-
workspace = true
8
-
9
-
[features]
10
-
no_web_tests = []
11
-
12
-
[dependencies]
13
-
tokio = { workspace = true }
14
-
serde = { workspace = true }
15
-
serde_json = { workspace = true }
16
-
tracing = { workspace = true }
17
-
im = { workspace = true }
18
-
thiserror = "2.0.17"
19
-
derive_more = { version = "2.0.1", features = ["display"] }
20
-
key_agent = { path = "../key_agent" }
21
-
futures = "0.3.31"
22
-
prost = { workspace = true }
23
-
gethostname = "1.1.0"
24
-
nix.workspace = true
25
-
miette = { workspace = true }
26
-
rand = "0.9.2"
27
-
tokio-util = { workspace = true }
28
-
portable-pty = "0.9.0"
29
-
anyhow.workspace = true
30
-
itertools = "0.14.0"
31
-
enum_dispatch = "0.3.13"
32
-
sha2 = { workspace = true }
33
-
base64 = { workspace = true }
34
-
nix-compat = { workspace = true }
35
-
strip-ansi-escapes = "0.2.1"
36
-
aho-corasick = "1.1.4"
37
-
num_enum = "0.7.5"
38
-
gjson = "0.8.1"
39
-
owo-colors = { workspace = true }
40
-
termion = "4.0.6"
41
-
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
42
-
zstd = "0.13.3"
43
-
44
-
[dev-dependencies]
45
-
tempdir = "0.3"
46
-
47
-
[build-dependencies]
48
-
miette = { workspace = true }
49
-
syn = "2.0.109"
50
-
proc-macro2 = "1.0.103"
51
-
itertools = "0.14.0"
-206
wire/lib/build.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use miette::{Context, IntoDiagnostic as _, Result, miette};
5
-
use std::fmt::Write;
6
-
use std::{
7
-
env,
8
-
fmt::{self, Display, Formatter},
9
-
fs::{self},
10
-
path::Path,
11
-
};
12
-
13
-
use itertools::Itertools;
14
-
use proc_macro2::TokenTree;
15
-
use syn::{Expr, Item, ItemEnum, Lit, Meta, MetaList, MetaNameValue, parse_file};
16
-
17
-
macro_rules! p {
18
-
($($tokens: tt)*) => {
19
-
println!("cargo::warning={}", format!($($tokens)*))
20
-
}
21
-
}
22
-
23
-
#[derive(Debug)]
24
-
struct DerivedError {
25
-
code: Option<String>,
26
-
help: Option<String>,
27
-
message: Option<String>,
28
-
doc_string: String,
29
-
}
30
-
31
-
impl Display for DerivedError {
32
-
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
33
-
write!(
34
-
f,
35
-
"## `{code}` {{#{code}}}
36
-
37
-
{doc}
38
-
{message}
39
-
{help}",
40
-
doc = self.doc_string,
41
-
code = self.code.as_ref().unwrap(),
42
-
help = match &self.help {
43
-
Some(help) => format!(
44
-
"
45
-
::: tip HELP
46
-
{help}
47
-
:::"
48
-
),
49
-
None => String::new(),
50
-
},
51
-
message = match &self.message {
52
-
Some(message) => format!(
53
-
"
54
-
```txt [message]
55
-
{message}
56
-
```"
57
-
),
58
-
None => String::new(),
59
-
}
60
-
)
61
-
}
62
-
}
63
-
64
-
impl DerivedError {
65
-
fn get_error(&mut self, list: &MetaList) -> Result<(), miette::Error> {
66
-
if list.path.segments.last().unwrap().ident != "error" {
67
-
return Err(miette!("Not an error"));
68
-
}
69
-
70
-
self.message = Some(
71
-
list.tokens
72
-
.clone()
73
-
.into_iter()
74
-
.filter(|tok| matches!(tok, TokenTree::Literal(tok) if tok.to_string().starts_with('"')))
75
-
.map(|tok| tok.to_string())
76
-
.join(""),
77
-
);
78
-
79
-
Err(miette!("No error msg found"))
80
-
}
81
-
82
-
fn update_diagnostic(&mut self, list: &MetaList) -> Result<(), miette::Error> {
83
-
if list.path.segments.last().unwrap().ident != "diagnostic" {
84
-
return Err(miette!("Not a diagnostic"));
85
-
}
86
-
87
-
let vec: Vec<_> = list.tokens.clone().into_iter().collect();
88
-
89
-
// Find `diagnostic(code(x::y::z))`
90
-
let code: Option<String> = if let Some((_, TokenTree::Group(group))) =
91
-
vec.iter().tuple_windows().find(|(ident, group)| {
92
-
matches!(ident, TokenTree::Ident(ident) if ident == "code")
93
-
&& matches!(group, TokenTree::Group(..))
94
-
}) {
95
-
Some(group.stream().to_string().replace(' ', ""))
96
-
} else {
97
-
None
98
-
};
99
-
100
-
// Find `diagnostic(help("hi"))`
101
-
let help: Option<String> = if let Some((_, TokenTree::Group(group))) =
102
-
vec.iter().tuple_windows().find(|(ident, group)| {
103
-
matches!(ident, TokenTree::Ident(ident) if ident == "help")
104
-
&& matches!(group, TokenTree::Group(..))
105
-
}) {
106
-
Some(group.stream().to_string())
107
-
} else {
108
-
None
109
-
};
110
-
111
-
if let Some(code) = code {
112
-
self.code = Some(code);
113
-
self.help = help;
114
-
return Ok(());
115
-
}
116
-
117
-
Err(miette!("Had no code."))
118
-
}
119
-
120
-
fn update_from_list(&mut self, list: &MetaList) {
121
-
let _ = self.get_error(list);
122
-
let _ = self.update_diagnostic(list);
123
-
}
124
-
125
-
fn update_from_namevalue(&mut self, list: MetaNameValue) -> Result<(), miette::Error> {
126
-
if list.path.segments.last().unwrap().ident != "doc" {
127
-
return Err(miette!("Not a doc string"));
128
-
}
129
-
130
-
if let Expr::Lit(lit) = list.value
131
-
&& let Lit::Str(str) = lit.lit
132
-
{
133
-
let _ = write!(self.doc_string, "{}\n\n", &str.value()[1..]);
134
-
}
135
-
136
-
Ok(())
137
-
}
138
-
}
139
-
140
-
fn main() -> Result<()> {
141
-
println!("cargo:rerun-if-changed=src/errors.rs");
142
-
143
-
let manifest_dir = env::var("CARGO_MANIFEST_DIR").into_diagnostic()?;
144
-
let Ok(md_out_dir) = env::var("DIAGNOSTICS_MD_OUTPUT") else {
145
-
return Ok(());
146
-
};
147
-
148
-
let src_path = Path::new(&manifest_dir).join("src/errors.rs");
149
-
let src = fs::read_to_string(&src_path)
150
-
.into_diagnostic()
151
-
.wrap_err("reading errors.rs")?;
152
-
153
-
let syntax_tree = parse_file(&src)
154
-
.into_diagnostic()
155
-
.wrap_err("parsing errors.rs")?;
156
-
let mut entries: Vec<DerivedError> = Vec::new();
157
-
158
-
for item in &syntax_tree.items {
159
-
if let Item::Enum(ItemEnum { variants, .. }) = item {
160
-
for variant in variants {
161
-
let mut entry = DerivedError {
162
-
code: None,
163
-
help: None,
164
-
message: None,
165
-
doc_string: String::new(),
166
-
};
167
-
168
-
for attribute in variant.attrs.clone() {
169
-
match attribute.meta {
170
-
Meta::List(list) => {
171
-
entry.update_from_list(&list);
172
-
}
173
-
Meta::NameValue(nv) => {
174
-
let _ = entry.update_from_namevalue(nv);
175
-
}
176
-
Meta::Path(_) => {}
177
-
}
178
-
}
179
-
180
-
if entry.code.is_some() {
181
-
entries.push(entry);
182
-
}
183
-
}
184
-
}
185
-
}
186
-
187
-
fs::create_dir_all(Path::new(&md_out_dir))
188
-
.into_diagnostic()
189
-
.wrap_err("creating target directory")?;
190
-
fs::write(
191
-
Path::new(&md_out_dir).join("DIAGNOSTICS.md"),
192
-
entries
193
-
.iter()
194
-
.map(std::string::ToString::to_string)
195
-
.join("\n\n"),
196
-
)
197
-
.into_diagnostic()
198
-
.wrap_err("writing DIAGNOSTICS.md")?;
199
-
200
-
p!(
201
-
"wrote to {:?}",
202
-
Path::new(&md_out_dir).join("DIAGNOSTICS.md")
203
-
);
204
-
205
-
Ok(())
206
-
}
-13
wire/lib/src/cache/migrations/20251124234730_init.sql
···
1
-
create table hive_inspection (
2
-
id integer primary key autoincrement,
3
-
json_value text not null unique
4
-
) strict;
5
-
6
-
create table cached_inspection (
7
-
store_path text,
8
-
hash text,
9
-
10
-
inspection_id integer references hive_inspection(id) not null,
11
-
12
-
primary key (store_path, hash)
13
-
) strict;
-16
wire/lib/src/cache/migrations/20251126222409_blobs.sql
···
1
-
create table inspection_blobs (
2
-
id integer primary key autoincrement,
3
-
json_value blob not null unique,
4
-
schema_version integer not null
5
-
) strict;
6
-
7
-
create table inspection_cache (
8
-
store_path text,
9
-
hash text,
10
-
blob_id integer references inspection_blobs (id) not null,
11
-
primary key (store_path, hash)
12
-
) strict;
13
-
14
-
drop table cached_inspection;
15
-
16
-
drop table hive_inspection;
-236
wire/lib/src/cache/mod.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::{
5
-
env,
6
-
path::{Path, PathBuf},
7
-
};
8
-
9
-
use sqlx::{
10
-
Pool, Sqlite,
11
-
migrate::Migrator,
12
-
sqlite::{SqliteConnectOptions, SqlitePoolOptions},
13
-
};
14
-
use tokio::fs::create_dir_all;
15
-
use tracing::{debug, error, trace};
16
-
17
-
use crate::hive::{FlakePrefetch, Hive};
18
-
19
-
#[derive(Clone)]
20
-
pub struct InspectionCache {
21
-
pool: Pool<Sqlite>,
22
-
}
23
-
24
-
static MIGRATOR: Migrator = sqlx::migrate!("src/cache/migrations");
25
-
26
-
async fn get_cache_directory() -> Option<PathBuf> {
27
-
let home = PathBuf::from(
28
-
env::var("HOME")
29
-
.inspect_err(|_| error!("HOME env var not found"))
30
-
.ok()?,
31
-
);
32
-
33
-
trace!(home = ?home);
34
-
35
-
let cache_home = env::var("XDG_CACHE_HOME")
36
-
.inspect_err(|_| debug!("XDG_CACHE_HOME not found"))
37
-
.ok()
38
-
.map(PathBuf::from)
39
-
.unwrap_or(home.join(".cache"));
40
-
41
-
let cache_directory = cache_home.join("wire");
42
-
43
-
trace!(cache_directory = ?cache_directory);
44
-
45
-
let _ = create_dir_all(&cache_directory).await;
46
-
47
-
Some(cache_directory)
48
-
}
49
-
50
-
impl InspectionCache {
51
-
pub async fn new() -> Option<Self> {
52
-
let cache_path = get_cache_directory().await?.join("inspect.db");
53
-
debug!(cache_path = ?cache_path);
54
-
55
-
let pool = SqlitePoolOptions::new()
56
-
.max_connections(1)
57
-
.connect_with(
58
-
SqliteConnectOptions::new()
59
-
.filename(cache_path)
60
-
.create_if_missing(true),
61
-
)
62
-
.await
63
-
.inspect_err(|x| error!("failed to open cache db: {x}"))
64
-
.ok()?;
65
-
66
-
MIGRATOR
67
-
.run(&pool)
68
-
.await
69
-
.inspect_err(|err| error!("failed to run cache migrations: {err:?}"))
70
-
.ok()?;
71
-
72
-
Some(Self { pool })
73
-
}
74
-
75
-
fn cache_invalid(store_path: &String) -> bool {
76
-
let path = Path::new(store_path);
77
-
78
-
// possible TOCTOU
79
-
!path.exists()
80
-
}
81
-
82
-
pub async fn get_hive(&self, prefetch: &FlakePrefetch) -> Option<Hive> {
83
-
struct Query {
84
-
json_value: Vec<u8>,
85
-
store_path: String,
86
-
}
87
-
88
-
let cached_blob = sqlx::query_as!(
89
-
Query,
90
-
"
91
-
select
92
-
inspection_blobs.json_value,
93
-
inspection_cache.store_path
94
-
from
95
-
inspection_blobs
96
-
join inspection_cache on inspection_cache.blob_id = inspection_blobs.id
97
-
where
98
-
inspection_cache.store_path = $1
99
-
and inspection_cache.hash = $2
100
-
and inspection_blobs.schema_version = $3
101
-
limit
102
-
1
103
-
",
104
-
prefetch.store_path,
105
-
prefetch.hash,
106
-
Hive::SCHEMA_VERSION
107
-
)
108
-
.fetch_optional(&self.pool)
109
-
.await
110
-
.inspect_err(|x| error!("failed to fetch cached hive: {x}"))
111
-
.ok()??;
112
-
113
-
// the cached path may have been garbage collected, discard it
114
-
// it is quite hard to replicate this bug but it's occurred to me
115
-
// at least once
116
-
if Self::cache_invalid(&cached_blob.store_path) {
117
-
trace!("discarding cache that does not exist in the nix store");
118
-
}
119
-
120
-
trace!(
121
-
"read {} bytes of zstd data from cache",
122
-
cached_blob.json_value.len()
123
-
);
124
-
125
-
let json_string = zstd::decode_all(cached_blob.json_value.as_slice())
126
-
.inspect_err(|err| error!("failed to decode cached zstd data: {err}"))
127
-
.ok()?;
128
-
129
-
trace!(
130
-
"inflated {} > {} in decoding",
131
-
cached_blob.json_value.len(),
132
-
json_string.len()
133
-
);
134
-
135
-
serde_json::from_slice(&json_string)
136
-
.inspect_err(|err| {
137
-
error!("could not use cached evaluation: {err}");
138
-
})
139
-
.ok()
140
-
}
141
-
142
-
pub async fn store_hive(&self, prefetch: &FlakePrefetch, json_value: &String) {
143
-
let Ok(json_value) = zstd::encode_all(json_value.as_bytes(), 0)
144
-
.inspect_err(|err| error!("failed to encode data w/ zstd: {err}"))
145
-
else {
146
-
return;
147
-
};
148
-
149
-
let hive_inspection = sqlx::query_scalar!(
150
-
"
151
-
insert into inspection_blobs (json_value, schema_version)
152
-
values ($1, $2)
153
-
on conflict(json_value)
154
-
do update set json_value = excluded.json_value
155
-
returning inspection_blobs.id
156
-
",
157
-
json_value,
158
-
Hive::SCHEMA_VERSION
159
-
)
160
-
.fetch_one(&self.pool)
161
-
.await
162
-
.inspect_err(|x| error!("could not insert hive_inspection: {x}"));
163
-
164
-
let Ok(blob_id) = hive_inspection else {
165
-
return;
166
-
};
167
-
168
-
let cached_inspection = sqlx::query!(
169
-
"
170
-
insert into
171
-
inspection_cache (store_path, hash, blob_id)
172
-
values
173
-
($1, $2, $3)
174
-
",
175
-
prefetch.store_path,
176
-
prefetch.hash,
177
-
blob_id
178
-
)
179
-
.execute(&self.pool)
180
-
.await;
181
-
182
-
if let Err(err) = cached_inspection {
183
-
error!("could not insert cached_inspection: {err}");
184
-
}
185
-
}
186
-
187
-
pub async fn gc(&self) -> Result<(), sqlx::Error> {
188
-
// keep newest 30 AND
189
-
// delete caches that refer to a blob w/ wrong schema
190
-
sqlx::query!(
191
-
"delete from inspection_cache
192
-
where
193
-
blob_id in (
194
-
select
195
-
id
196
-
from
197
-
inspection_blobs
198
-
where
199
-
schema_version != $1
200
-
)
201
-
or ROWID in (
202
-
select
203
-
ROWID
204
-
from
205
-
inspection_cache
206
-
order by
207
-
ROWID desc
208
-
limit
209
-
-1
210
-
offset
211
-
30
212
-
)",
213
-
Hive::SCHEMA_VERSION
214
-
)
215
-
.execute(&self.pool)
216
-
.await?;
217
-
218
-
// delete orphaned blobs
219
-
sqlx::query!(
220
-
"delete from inspection_blobs
221
-
where
222
-
not exists (
223
-
select
224
-
1
225
-
from
226
-
inspection_cache
227
-
where
228
-
inspection_cache.blob_id = inspection_blobs.id
229
-
)"
230
-
)
231
-
.execute(&self.pool)
232
-
.await?;
233
-
234
-
Ok(())
235
-
}
236
-
}
-162
wire/lib/src/commands/common.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::collections::HashMap;
5
-
6
-
use tracing::instrument;
7
-
8
-
use crate::{
9
-
EvalGoal, SubCommandModifiers,
10
-
commands::{CommandArguments, Either, WireCommandChip, run_command, run_command_with_env},
11
-
errors::{CommandError, HiveInitialisationError, HiveLibError},
12
-
hive::{
13
-
HiveLocation,
14
-
node::{Context, Push},
15
-
},
16
-
};
17
-
18
-
fn get_common_copy_path_help(error: &CommandError) -> Option<String> {
19
-
if let CommandError::CommandFailed { logs, .. } = error
20
-
&& (logs.contains("error: unexpected end-of-file"))
21
-
{
22
-
Some("wire requires the deploying user or wire binary cache is trusted on the remote server. if you're attempting to make that change, skip keys with --no-keys. please read https://wire.althaea.zone/guides/keys for more information".to_string())
23
-
} else {
24
-
None
25
-
}
26
-
}
27
-
28
-
pub async fn push(context: &Context<'_>, push: Push<'_>) -> Result<(), HiveLibError> {
29
-
let command_string = format!(
30
-
"nix --extra-experimental-features nix-command \
31
-
copy --substitute-on-destination --to ssh://{user}@{host} {path}",
32
-
user = context.node.target.user,
33
-
host = context.node.target.get_preferred_host()?,
34
-
path = match push {
35
-
Push::Derivation(drv) => format!("{drv} --derivation"),
36
-
Push::Path(path) => path.clone(),
37
-
}
38
-
);
39
-
40
-
let child = run_command_with_env(
41
-
&CommandArguments::new(command_string, context.modifiers)
42
-
.mode(crate::commands::ChildOutputMode::Nix),
43
-
HashMap::from([(
44
-
"NIX_SSHOPTS".into(),
45
-
context
46
-
.node
47
-
.target
48
-
.create_ssh_opts(context.modifiers, false)?,
49
-
)]),
50
-
)
51
-
.await?;
52
-
53
-
let status = child.wait_till_success().await;
54
-
55
-
let help = if let Err(ref error) = status {
56
-
get_common_copy_path_help(error).map(Box::new)
57
-
} else {
58
-
None
59
-
};
60
-
61
-
status.map_err(|error| HiveLibError::NixCopyError {
62
-
name: context.name.clone(),
63
-
path: push.to_string(),
64
-
error: Box::new(error),
65
-
help,
66
-
})?;
67
-
68
-
Ok(())
69
-
}
70
-
71
-
fn get_common_command_help(error: &CommandError) -> Option<String> {
72
-
if let CommandError::CommandFailed { logs, .. } = error
73
-
// marshmallow: you're using this repo as a hive you idiot
74
-
&& (logs.contains("attribute 'inspect' missing")
75
-
// using a flake that does not provide `wire`
76
-
|| logs.contains("does not provide attribute 'packages.x86_64-linux.wire'")
77
-
// using a file called `hive.nix` that is not actually a hive
78
-
|| logs.contains("attribute 'inspect' in selection path"))
79
-
{
80
-
Some("Double check this `--path` or `--flake` is a wire hive. You may be pointing to the wrong directory.".to_string())
81
-
} else {
82
-
None
83
-
}
84
-
}
85
-
86
-
pub async fn get_hive_node_names(
87
-
location: &HiveLocation,
88
-
modifiers: SubCommandModifiers,
89
-
) -> Result<Vec<String>, HiveLibError> {
90
-
let output = evaluate_hive_attribute(location, &EvalGoal::Names, modifiers).await?;
91
-
serde_json::from_str(&output).map_err(|err| {
92
-
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParseEvaluateError(err))
93
-
})
94
-
}
95
-
96
-
/// Evaluates the hive in flakeref with regards to the given goal,
97
-
/// and returns stdout.
98
-
#[instrument(ret(level = tracing::Level::TRACE), skip_all)]
99
-
pub async fn evaluate_hive_attribute(
100
-
location: &HiveLocation,
101
-
goal: &EvalGoal<'_>,
102
-
modifiers: SubCommandModifiers,
103
-
) -> Result<String, HiveLibError> {
104
-
let attribute = match location {
105
-
HiveLocation::Flake { uri, .. } => {
106
-
format!(
107
-
"{uri}#wire --apply \"hive: {}\"",
108
-
match goal {
109
-
EvalGoal::Inspect => "hive.inspect".to_string(),
110
-
EvalGoal::Names => "hive.names".to_string(),
111
-
EvalGoal::GetTopLevel(node) => format!("hive.topLevels.{node}"),
112
-
}
113
-
)
114
-
}
115
-
HiveLocation::HiveNix(path) => {
116
-
format!(
117
-
"--file {} {}",
118
-
&path.to_string_lossy(),
119
-
match goal {
120
-
EvalGoal::Inspect => "inspect".to_string(),
121
-
EvalGoal::Names => "names".to_string(),
122
-
EvalGoal::GetTopLevel(node) => format!("topLevels.{node}"),
123
-
}
124
-
)
125
-
}
126
-
};
127
-
128
-
let command_string = format!(
129
-
"nix --extra-experimental-features nix-command \
130
-
--extra-experimental-features flakes \
131
-
eval --json {mods} {attribute}",
132
-
mods = if modifiers.show_trace {
133
-
"--show-trace"
134
-
} else {
135
-
""
136
-
},
137
-
);
138
-
139
-
let child = run_command(
140
-
&CommandArguments::new(command_string, modifiers)
141
-
.mode(crate::commands::ChildOutputMode::Nix),
142
-
)
143
-
.await?;
144
-
145
-
let status = child.wait_till_success().await;
146
-
147
-
let help = if let Err(ref error) = status {
148
-
get_common_command_help(error).map(Box::new)
149
-
} else {
150
-
None
151
-
};
152
-
153
-
status
154
-
.map_err(|source| HiveLibError::NixEvalError {
155
-
attribute,
156
-
source,
157
-
help,
158
-
})
159
-
.map(|x| match x {
160
-
Either::Left((_, stdout)) | Either::Right((_, stdout)) => stdout,
161
-
})
162
-
}
-239
wire/lib/src/commands/mod.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use crate::commands::pty::{InteractiveChildChip, interactive_command_with_env};
5
-
use std::{collections::HashMap, str::from_utf8, sync::LazyLock};
6
-
7
-
use aho_corasick::AhoCorasick;
8
-
use gjson::Value;
9
-
use itertools::Itertools;
10
-
use nix_compat::log::{AT_NIX_PREFIX, VerbosityLevel};
11
-
use num_enum::TryFromPrimitive;
12
-
use tracing::{debug, error, info, trace, warn};
13
-
14
-
use crate::{
15
-
SubCommandModifiers,
16
-
commands::noninteractive::{NonInteractiveChildChip, non_interactive_command_with_env},
17
-
errors::{CommandError, HiveLibError},
18
-
hive::node::{Node, Target},
19
-
};
20
-
21
-
pub mod common;
22
-
pub(crate) mod noninteractive;
23
-
pub(crate) mod pty;
24
-
25
-
#[derive(Copy, Clone, Debug)]
26
-
pub(crate) enum ChildOutputMode {
27
-
Nix,
28
-
Generic,
29
-
Interactive,
30
-
}
31
-
32
-
#[derive(Debug)]
33
-
pub enum Either<L, R> {
34
-
Left(L),
35
-
Right(R),
36
-
}
37
-
38
-
#[derive(Debug)]
39
-
pub(crate) struct CommandArguments<'t, S: AsRef<str>> {
40
-
modifiers: SubCommandModifiers,
41
-
target: Option<&'t Target>,
42
-
output_mode: ChildOutputMode,
43
-
command_string: S,
44
-
keep_stdin_open: bool,
45
-
privilege_escalation_command: Option<String>,
46
-
log_stdout: bool,
47
-
}
48
-
49
-
static AHO_CORASICK: LazyLock<AhoCorasick> = LazyLock::new(|| {
50
-
AhoCorasick::builder()
51
-
.ascii_case_insensitive(false)
52
-
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
53
-
.build([AT_NIX_PREFIX])
54
-
.unwrap()
55
-
});
56
-
57
-
impl<'a, S: AsRef<str>> CommandArguments<'a, S> {
58
-
pub(crate) const fn new(command_string: S, modifiers: SubCommandModifiers) -> Self {
59
-
Self {
60
-
command_string,
61
-
keep_stdin_open: false,
62
-
privilege_escalation_command: None,
63
-
log_stdout: false,
64
-
target: None,
65
-
output_mode: ChildOutputMode::Generic,
66
-
modifiers,
67
-
}
68
-
}
69
-
70
-
pub(crate) const fn on_target(mut self, target: Option<&'a Target>) -> Self {
71
-
self.target = target;
72
-
self
73
-
}
74
-
75
-
pub(crate) const fn mode(mut self, mode: ChildOutputMode) -> Self {
76
-
self.output_mode = mode;
77
-
self
78
-
}
79
-
80
-
pub(crate) const fn keep_stdin_open(mut self) -> Self {
81
-
self.keep_stdin_open = true;
82
-
self
83
-
}
84
-
85
-
pub(crate) fn elevated(mut self, node: &Node) -> Self {
86
-
self.privilege_escalation_command =
87
-
Some(node.privilege_escalation_command.iter().join(" "));
88
-
self
89
-
}
90
-
91
-
pub(crate) const fn is_elevated(&self) -> bool {
92
-
self.privilege_escalation_command.is_some()
93
-
}
94
-
95
-
pub(crate) const fn log_stdout(mut self) -> Self {
96
-
self.log_stdout = true;
97
-
self
98
-
}
99
-
}
100
-
101
-
pub(crate) async fn run_command<S: AsRef<str>>(
102
-
arguments: &CommandArguments<'_, S>,
103
-
) -> Result<Either<InteractiveChildChip, NonInteractiveChildChip>, HiveLibError> {
104
-
run_command_with_env(arguments, HashMap::new()).await
105
-
}
106
-
107
-
pub(crate) async fn run_command_with_env<S: AsRef<str>>(
108
-
arguments: &CommandArguments<'_, S>,
109
-
envs: HashMap<String, String>,
110
-
) -> Result<Either<InteractiveChildChip, NonInteractiveChildChip>, HiveLibError> {
111
-
// use the non interactive command runner when forced
112
-
// ... or when there is no reason for interactivity, local and unprivileged
113
-
if arguments.modifiers.non_interactive
114
-
|| (arguments.target.is_none() && !arguments.is_elevated())
115
-
{
116
-
return Ok(Either::Right(non_interactive_command_with_env(
117
-
arguments, envs,
118
-
)?));
119
-
}
120
-
121
-
Ok(Either::Left(
122
-
interactive_command_with_env(arguments, envs).await?,
123
-
))
124
-
}
125
-
126
-
pub(crate) trait WireCommandChip {
127
-
type ExitStatus;
128
-
129
-
async fn wait_till_success(self) -> Result<Self::ExitStatus, CommandError>;
130
-
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError>;
131
-
}
132
-
133
-
type ExitStatus = Either<(portable_pty::ExitStatus, String), (std::process::ExitStatus, String)>;
134
-
135
-
impl WireCommandChip for Either<InteractiveChildChip, NonInteractiveChildChip> {
136
-
type ExitStatus = ExitStatus;
137
-
138
-
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
139
-
match self {
140
-
Self::Left(left) => left.write_stdin(data).await,
141
-
Self::Right(right) => right.write_stdin(data).await,
142
-
}
143
-
}
144
-
145
-
async fn wait_till_success(self) -> Result<Self::ExitStatus, CommandError> {
146
-
match self {
147
-
Self::Left(left) => left.wait_till_success().await.map(Either::Left),
148
-
Self::Right(right) => right.wait_till_success().await.map(Either::Right),
149
-
}
150
-
}
151
-
}
152
-
153
-
fn trace_gjson_str<'a>(log: &'a Value<'a>, msg: &'a str) -> Option<String> {
154
-
if msg.is_empty() {
155
-
return None;
156
-
}
157
-
158
-
let level = log.get("level");
159
-
160
-
if !level.exists() {
161
-
return None;
162
-
}
163
-
164
-
let level = match VerbosityLevel::try_from_primitive(level.u64()) {
165
-
Ok(level) => level,
166
-
Err(err) => {
167
-
error!("nix log `level` did not match to a VerbosityLevel: {err:?}");
168
-
return None;
169
-
}
170
-
};
171
-
172
-
let msg = strip_ansi_escapes::strip_str(msg);
173
-
174
-
match level {
175
-
VerbosityLevel::Info => info!("{msg}"),
176
-
VerbosityLevel::Warn | VerbosityLevel::Notice => warn!("{msg}"),
177
-
VerbosityLevel::Error => error!("{msg}"),
178
-
VerbosityLevel::Debug => debug!("{msg}"),
179
-
VerbosityLevel::Vomit | VerbosityLevel::Talkative | VerbosityLevel::Chatty => {
180
-
trace!("{msg}");
181
-
}
182
-
}
183
-
184
-
if matches!(
185
-
level,
186
-
VerbosityLevel::Error | VerbosityLevel::Warn | VerbosityLevel::Notice
187
-
) {
188
-
return Some(msg);
189
-
}
190
-
191
-
None
192
-
}
193
-
194
-
impl ChildOutputMode {
195
-
/// this function is by far the biggest hotspot in the whole tree
196
-
/// Returns a string if this log is notable to be stored as an error message
197
-
fn trace_slice(self, line: &mut [u8]) -> Option<String> {
198
-
let slice = match self {
199
-
Self::Generic | Self::Interactive => {
200
-
let string = String::from_utf8_lossy(line);
201
-
let stripped = strip_ansi_escapes::strip_str(&string);
202
-
warn!("{stripped}");
203
-
return Some(string.to_string());
204
-
}
205
-
Self::Nix => {
206
-
let position = AHO_CORASICK.find(&line).map(|x| &mut line[x.end()..]);
207
-
208
-
if let Some(json_buf) = position {
209
-
json_buf
210
-
} else {
211
-
// usually happens when ssh is outputting something
212
-
warn!("{}", String::from_utf8_lossy(line));
213
-
return None;
214
-
}
215
-
}
216
-
};
217
-
218
-
let Ok(str) = from_utf8(slice) else {
219
-
error!("nix log was not valid utf8!");
220
-
return None;
221
-
};
222
-
223
-
let log = gjson::parse(str);
224
-
225
-
let text = log.get("text");
226
-
227
-
if text.exists() {
228
-
return trace_gjson_str(&log, text.str());
229
-
}
230
-
231
-
let text = log.get("msg");
232
-
233
-
if text.exists() {
234
-
return trace_gjson_str(&log, text.str());
235
-
}
236
-
237
-
None
238
-
}
239
-
}
-199
wire/lib/src/commands/noninteractive.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::{
5
-
collections::{HashMap, VecDeque},
6
-
process::ExitStatus,
7
-
sync::Arc,
8
-
};
9
-
10
-
use crate::{
11
-
SubCommandModifiers,
12
-
commands::{ChildOutputMode, CommandArguments, WireCommandChip},
13
-
errors::{CommandError, HiveLibError},
14
-
hive::node::Target,
15
-
};
16
-
use itertools::Itertools;
17
-
use tokio::{
18
-
io::{AsyncWriteExt, BufReader},
19
-
process::{Child, ChildStdin, Command},
20
-
sync::Mutex,
21
-
task::JoinSet,
22
-
};
23
-
use tracing::{Instrument, debug, instrument, trace};
24
-
25
-
pub(crate) struct NonInteractiveChildChip {
26
-
error_collection: Arc<Mutex<VecDeque<String>>>,
27
-
stdout_collection: Arc<Mutex<VecDeque<String>>>,
28
-
child: Child,
29
-
joinset: JoinSet<()>,
30
-
original_command: String,
31
-
stdin: ChildStdin,
32
-
}
33
-
34
-
#[instrument(skip_all, name = "run", fields(elevated = %arguments.is_elevated()))]
35
-
pub(crate) fn non_interactive_command_with_env<S: AsRef<str>>(
36
-
arguments: &CommandArguments<S>,
37
-
envs: HashMap<String, String>,
38
-
) -> Result<NonInteractiveChildChip, HiveLibError> {
39
-
let mut command = if let Some(target) = arguments.target {
40
-
create_sync_ssh_command(target, arguments.modifiers)?
41
-
} else {
42
-
let mut command = Command::new("sh");
43
-
44
-
command.arg("-c");
45
-
46
-
command
47
-
};
48
-
49
-
let command_string = format!(
50
-
"{command_string}{extra}",
51
-
command_string = arguments.command_string.as_ref(),
52
-
extra = match arguments.output_mode {
53
-
ChildOutputMode::Generic | ChildOutputMode::Interactive => "",
54
-
ChildOutputMode::Nix => " --log-format internal-json",
55
-
}
56
-
);
57
-
58
-
let command_string = if let Some(escalation_command) = &arguments.privilege_escalation_command {
59
-
format!("{escalation_command} sh -c '{command_string}'")
60
-
} else {
61
-
command_string
62
-
};
63
-
64
-
debug!("{command_string}");
65
-
66
-
command.arg(&command_string);
67
-
command.stdin(std::process::Stdio::piped());
68
-
command.stderr(std::process::Stdio::piped());
69
-
command.stdout(std::process::Stdio::piped());
70
-
command.kill_on_drop(true);
71
-
// command.env_clear();
72
-
command.envs(envs);
73
-
74
-
let mut child = command.spawn().unwrap();
75
-
let error_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
76
-
let stdout_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
77
-
let stdin = child.stdin.take().unwrap();
78
-
79
-
let stdout_handle = child
80
-
.stdout
81
-
.take()
82
-
.ok_or(HiveLibError::CommandError(CommandError::NoHandle))?;
83
-
let stderr_handle = child
84
-
.stderr
85
-
.take()
86
-
.ok_or(HiveLibError::CommandError(CommandError::NoHandle))?;
87
-
88
-
let mut joinset = JoinSet::new();
89
-
let output_mode = Arc::new(arguments.output_mode);
90
-
91
-
joinset.spawn(
92
-
handle_io(
93
-
stderr_handle,
94
-
output_mode.clone(),
95
-
error_collection.clone(),
96
-
true,
97
-
true,
98
-
)
99
-
.in_current_span(),
100
-
);
101
-
joinset.spawn(
102
-
handle_io(
103
-
stdout_handle,
104
-
output_mode.clone(),
105
-
stdout_collection.clone(),
106
-
false,
107
-
arguments.log_stdout,
108
-
)
109
-
.in_current_span(),
110
-
);
111
-
112
-
Ok(NonInteractiveChildChip {
113
-
error_collection,
114
-
stdout_collection,
115
-
child,
116
-
joinset,
117
-
original_command: arguments.command_string.as_ref().to_string(),
118
-
stdin,
119
-
})
120
-
}
121
-
122
-
impl WireCommandChip for NonInteractiveChildChip {
123
-
type ExitStatus = (ExitStatus, String);
124
-
125
-
async fn wait_till_success(mut self) -> Result<Self::ExitStatus, CommandError> {
126
-
let status = self.child.wait().await.unwrap();
127
-
let _ = self.joinset.join_all().await;
128
-
129
-
if !status.success() {
130
-
let logs = self.error_collection.lock().await.iter().rev().join("\n");
131
-
132
-
return Err(CommandError::CommandFailed {
133
-
command_ran: self.original_command,
134
-
logs,
135
-
code: match status.code() {
136
-
Some(code) => format!("code {code}"),
137
-
None => "no exit code".to_string(),
138
-
},
139
-
reason: "known-status",
140
-
});
141
-
}
142
-
143
-
let stdout = self.stdout_collection.lock().await.iter().rev().join("\n");
144
-
145
-
Ok((status, stdout))
146
-
}
147
-
148
-
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
149
-
trace!("Writing {} bytes", data.len());
150
-
self.stdin.write_all(&data).await.unwrap();
151
-
Ok(())
152
-
}
153
-
}
154
-
155
-
#[instrument(skip_all, name = "log")]
156
-
pub async fn handle_io<R>(
157
-
reader: R,
158
-
output_mode: Arc<ChildOutputMode>,
159
-
collection: Arc<Mutex<VecDeque<String>>>,
160
-
is_error: bool,
161
-
should_log: bool,
162
-
) where
163
-
R: tokio::io::AsyncRead + Unpin,
164
-
{
165
-
let mut io_reader = tokio::io::AsyncBufReadExt::lines(BufReader::new(reader));
166
-
167
-
while let Some(line) = io_reader.next_line().await.unwrap() {
168
-
let mut line = line.into_bytes();
169
-
170
-
let log = if should_log {
171
-
Some(output_mode.trace_slice(&mut line))
172
-
} else {
173
-
None
174
-
};
175
-
176
-
if !is_error {
177
-
let mut queue = collection.lock().await;
178
-
queue.push_front(String::from_utf8_lossy(&line).to_string());
179
-
} else if let Some(error_msg) = log.flatten() {
180
-
let mut queue = collection.lock().await;
181
-
queue.push_front(error_msg);
182
-
// add at most 20 messages to the front, drop the rest.
183
-
queue.truncate(20);
184
-
}
185
-
}
186
-
187
-
debug!("io_handler: goodbye!");
188
-
}
189
-
190
-
fn create_sync_ssh_command(
191
-
target: &Target,
192
-
modifiers: SubCommandModifiers,
193
-
) -> Result<Command, HiveLibError> {
194
-
let mut command = Command::new("ssh");
195
-
command.args(target.create_ssh_args(modifiers, true, false)?);
196
-
command.arg(target.get_preferred_host()?.to_string());
197
-
198
-
Ok(command)
199
-
}
-102
wire/lib/src/commands/pty/input.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::os::fd::{AsFd, OwnedFd};
5
-
6
-
use nix::{
7
-
poll::{PollFd, PollFlags, PollTimeout, poll},
8
-
unistd::read,
9
-
};
10
-
use tracing::{Span, debug, error, instrument, trace};
11
-
12
-
use crate::{
13
-
commands::pty::{MasterWriter, THREAD_BEGAN_SIGNAL, THREAD_QUIT_SIGNAL},
14
-
errors::CommandError,
15
-
};
16
-
17
-
/// Exits on any data written to `cancel_pipe_r`
18
-
/// A pipe is used to cancel the function.
19
-
#[instrument(skip_all, level = "trace", parent = span)]
20
-
pub(super) fn watch_stdin_from_user(
21
-
cancel_pipe_r: &OwnedFd,
22
-
mut master_writer: MasterWriter,
23
-
write_pipe_r: &OwnedFd,
24
-
span: Span,
25
-
) -> Result<(), CommandError> {
26
-
const WRITER_POSITION: usize = 0;
27
-
const SIGNAL_POSITION: usize = 1;
28
-
const USER_POSITION: usize = 2;
29
-
30
-
let mut buffer = [0u8; 1024];
31
-
let stdin = std::io::stdin();
32
-
let mut cancel_pipe_buf = [0u8; 1];
33
-
34
-
let user_stdin_fd = stdin.as_fd();
35
-
let cancel_pipe_r_fd = cancel_pipe_r.as_fd();
36
-
37
-
let mut all_fds = vec![
38
-
PollFd::new(write_pipe_r.as_fd(), PollFlags::POLLIN),
39
-
PollFd::new(cancel_pipe_r.as_fd(), PollFlags::POLLIN),
40
-
PollFd::new(user_stdin_fd, PollFlags::POLLIN),
41
-
];
42
-
43
-
loop {
44
-
match poll(&mut all_fds, PollTimeout::NONE) {
45
-
Ok(0) => {} // timeout, impossible
46
-
Ok(_) => {
47
-
// The user stdin pipe can be removed
48
-
if all_fds.get(USER_POSITION).is_some()
49
-
&& let Some(events) = all_fds[USER_POSITION].revents()
50
-
&& events.contains(PollFlags::POLLIN)
51
-
{
52
-
trace!("Got stdin from user...");
53
-
let n = read(user_stdin_fd, &mut buffer).map_err(CommandError::PosixPipe)?;
54
-
master_writer
55
-
.write_all(&buffer[..n])
56
-
.map_err(CommandError::WritingMasterStdout)?;
57
-
master_writer
58
-
.flush()
59
-
.map_err(CommandError::WritingMasterStdout)?;
60
-
}
61
-
62
-
if let Some(events) = all_fds[WRITER_POSITION].revents()
63
-
&& events.contains(PollFlags::POLLIN)
64
-
{
65
-
trace!("Got stdin from writer...");
66
-
let n = read(write_pipe_r, &mut buffer).map_err(CommandError::PosixPipe)?;
67
-
master_writer
68
-
.write_all(&buffer[..n])
69
-
.map_err(CommandError::WritingMasterStdout)?;
70
-
master_writer
71
-
.flush()
72
-
.map_err(CommandError::WritingMasterStdout)?;
73
-
}
74
-
75
-
if let Some(events) = all_fds[SIGNAL_POSITION].revents()
76
-
&& events.contains(PollFlags::POLLIN)
77
-
{
78
-
let n = read(cancel_pipe_r_fd, &mut cancel_pipe_buf)
79
-
.map_err(CommandError::PosixPipe)?;
80
-
let message = &cancel_pipe_buf[..n];
81
-
82
-
trace!("Got byte from signal pipe: {message:?}");
83
-
84
-
if message == THREAD_QUIT_SIGNAL {
85
-
return Ok(());
86
-
}
87
-
88
-
if message == THREAD_BEGAN_SIGNAL {
89
-
all_fds.remove(USER_POSITION);
90
-
}
91
-
}
92
-
}
93
-
Err(e) => {
94
-
error!("Poll error: {e}");
95
-
break;
96
-
}
97
-
}
98
-
}
99
-
100
-
debug!("stdin_thread: goodbye");
101
-
Ok(())
102
-
}
-63
wire/lib/src/commands/pty/logbuffer.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
/// Split into its own struct so it can be tested more easily
5
-
pub(crate) struct LogBuffer {
6
-
buffer: Vec<u8>,
7
-
}
8
-
9
-
impl LogBuffer {
10
-
pub const fn new() -> Self {
11
-
Self { buffer: Vec::new() }
12
-
}
13
-
14
-
pub fn process_slice(&mut self, slice: &[u8]) {
15
-
self.buffer.extend_from_slice(slice);
16
-
}
17
-
18
-
pub fn next_line(&mut self) -> Option<Vec<u8>> {
19
-
let line_end = self.buffer.iter().position(|x| *x == b'\n')?;
20
-
21
-
let drained = self.buffer.drain(..line_end).collect();
22
-
self.buffer.remove(0);
23
-
Some(drained)
24
-
}
25
-
26
-
#[cfg(test)]
27
-
fn take_lines(&mut self) -> Vec<Vec<u8>> {
28
-
let mut lines = vec![];
29
-
30
-
while let Some(line) = self.next_line() {
31
-
lines.push(line);
32
-
}
33
-
34
-
lines
35
-
}
36
-
}
37
-
38
-
#[cfg(test)]
39
-
mod tests {
40
-
use super::*;
41
-
42
-
#[test]
43
-
fn test_split_line_processing() {
44
-
let mut log_buffer = LogBuffer::new();
45
-
46
-
log_buffer.process_slice(b"Writing key KeySpec { destination: \"/et");
47
-
log_buffer.process_slice(b"c/keys/buildbot.aws.key\", user: \"buildbot\", group: \"buildbot-worker\", permissions: 384, length: 32, last: false, crc: 1370815231 }, 32 bytes of data");
48
-
log_buffer.process_slice(b"\n");
49
-
log_buffer.process_slice(b"xxx");
50
-
log_buffer.process_slice(b"xx_WIRE");
51
-
log_buffer.process_slice(b"_QUIT\n");
52
-
let lines = log_buffer.take_lines();
53
-
assert_eq!(lines.len(), 2);
54
-
assert_eq!(
55
-
String::from_utf8_lossy(lines.first().unwrap()),
56
-
"Writing key KeySpec { destination: \"/etc/keys/buildbot.aws.key\", user: \"buildbot\", group: \"buildbot-worker\", permissions: 384, length: 32, last: false, crc: 1370815231 }, 32 bytes of data"
57
-
);
58
-
assert_eq!(lines.get(1), Some(&"xxxxx_WIRE_QUIT".as_bytes().to_vec()));
59
-
60
-
// taking leaves none
61
-
assert_eq!(log_buffer.take_lines().len(), 0);
62
-
}
63
-
}
-566
wire/lib/src/commands/pty/mod.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use crate::commands::pty::output::{WatchStdoutArguments, handle_pty_stdout};
5
-
use crate::status::STATUS;
6
-
use aho_corasick::PatternID;
7
-
use itertools::Itertools;
8
-
use nix::sys::termios::{LocalFlags, SetArg, Termios, tcgetattr, tcsetattr};
9
-
use nix::unistd::pipe;
10
-
use nix::unistd::write as posix_write;
11
-
use portable_pty::{CommandBuilder, NativePtySystem, PtyPair, PtySize};
12
-
use rand::distr::Alphabetic;
13
-
use std::collections::VecDeque;
14
-
use std::io::stderr;
15
-
use std::sync::{LazyLock, Mutex};
16
-
use std::{
17
-
io::{Read, Write},
18
-
os::fd::{AsFd, OwnedFd},
19
-
sync::Arc,
20
-
};
21
-
use tokio::sync::{oneshot, watch};
22
-
use tracing::instrument;
23
-
use tracing::{Span, debug, trace};
24
-
25
-
use crate::commands::CommandArguments;
26
-
use crate::commands::pty::input::watch_stdin_from_user;
27
-
use crate::errors::CommandError;
28
-
use crate::{SubCommandModifiers, acquire_stdin_lock};
29
-
use crate::{
30
-
commands::{ChildOutputMode, WireCommandChip},
31
-
errors::HiveLibError,
32
-
hive::node::Target,
33
-
};
34
-
35
-
mod input;
36
-
mod logbuffer;
37
-
mod output;
38
-
39
-
type MasterWriter = Box<dyn Write + Send>;
40
-
type MasterReader = Box<dyn Read + Send>;
41
-
42
-
/// the underlying command began
43
-
const THREAD_BEGAN_SIGNAL: &[u8; 1] = b"b";
44
-
const THREAD_QUIT_SIGNAL: &[u8; 1] = b"q";
45
-
46
-
type Child = Box<dyn portable_pty::Child + Send + Sync>;
47
-
48
-
pub(crate) struct InteractiveChildChip {
49
-
child: Child,
50
-
51
-
cancel_stdin_pipe_w: OwnedFd,
52
-
write_stdin_pipe_w: OwnedFd,
53
-
54
-
stderr_collection: Arc<Mutex<VecDeque<String>>>,
55
-
stdout_collection: Arc<Mutex<VecDeque<String>>>,
56
-
57
-
original_command: String,
58
-
59
-
status_receiver: watch::Receiver<Status>,
60
-
stdout_handle: tokio::task::JoinHandle<Result<(), CommandError>>,
61
-
}
62
-
63
-
/// sets and reverts terminal options (the terminal user interaction is performed)
64
-
/// reverts data when dropped
65
-
struct StdinTermiosAttrGuard(Termios);
66
-
67
-
#[derive(Debug)]
68
-
enum Status {
69
-
Running,
70
-
Done { success: bool },
71
-
}
72
-
73
-
#[derive(Debug)]
74
-
enum SearchFindings {
75
-
None,
76
-
Started,
77
-
Terminate,
78
-
}
79
-
80
-
static STARTED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(0));
81
-
static SUCCEEDED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(1));
82
-
static FAILED_PATTERN: LazyLock<PatternID> = LazyLock::new(|| PatternID::must(2));
83
-
84
-
/// substitutes STDOUT with #$line. stdout is far less common than stderr.
85
-
const IO_SUBS: &str = "1> >(while IFS= read -r line; do echo \"#$line\"; done)";
86
-
87
-
fn create_ending_segment<S: AsRef<str>>(
88
-
arguments: &CommandArguments<'_, S>,
89
-
needles: &Needles,
90
-
) -> String {
91
-
let Needles {
92
-
succeed,
93
-
fail,
94
-
start,
95
-
} = needles;
96
-
97
-
format!(
98
-
"echo -e '{succeed}' || echo '{failed}'",
99
-
succeed = if matches!(arguments.output_mode, ChildOutputMode::Interactive) {
100
-
format!(
101
-
"{start}\\n{succeed}",
102
-
start = String::from_utf8_lossy(start),
103
-
succeed = String::from_utf8_lossy(succeed)
104
-
)
105
-
} else {
106
-
String::from_utf8_lossy(succeed).to_string()
107
-
},
108
-
failed = String::from_utf8_lossy(fail)
109
-
)
110
-
}
111
-
112
-
fn create_starting_segment<S: AsRef<str>>(
113
-
arguments: &CommandArguments<'_, S>,
114
-
start_needle: &Arc<Vec<u8>>,
115
-
) -> String {
116
-
if matches!(arguments.output_mode, ChildOutputMode::Interactive) {
117
-
String::new()
118
-
} else {
119
-
format!(
120
-
"echo '{start}' && ",
121
-
start = String::from_utf8_lossy(start_needle)
122
-
)
123
-
}
124
-
}
125
-
126
-
#[instrument(skip_all, name = "run-int", fields(elevated = %arguments.is_elevated(), mode = ?arguments.output_mode))]
127
-
pub(crate) async fn interactive_command_with_env<S: AsRef<str>>(
128
-
arguments: &CommandArguments<'_, S>,
129
-
envs: std::collections::HashMap<String, String>,
130
-
) -> Result<InteractiveChildChip, HiveLibError> {
131
-
print_authenticate_warning(arguments)?;
132
-
133
-
let needles = create_needles();
134
-
let pty_system = NativePtySystem::default();
135
-
let pty_pair = portable_pty::PtySystem::openpty(&pty_system, PtySize::default()).unwrap();
136
-
setup_master(&pty_pair)?;
137
-
138
-
let command_string = &format!(
139
-
"{starting}{command} {flags} {IO_SUBS} && {ending}",
140
-
command = arguments.command_string.as_ref(),
141
-
flags = match arguments.output_mode {
142
-
ChildOutputMode::Nix => "--log-format internal-json",
143
-
ChildOutputMode::Generic | ChildOutputMode::Interactive => "",
144
-
},
145
-
starting = create_starting_segment(arguments, &needles.start),
146
-
ending = create_ending_segment(arguments, &needles)
147
-
);
148
-
149
-
debug!("{command_string}");
150
-
151
-
let mut command = build_command(arguments, command_string)?;
152
-
153
-
// give command all env vars
154
-
for (key, value) in envs {
155
-
command.env(key, value);
156
-
}
157
-
158
-
let clobber_guard = acquire_stdin_lock().await;
159
-
let _guard = StdinTermiosAttrGuard::new().map_err(HiveLibError::CommandError)?;
160
-
let child = pty_pair
161
-
.slave
162
-
.spawn_command(command)
163
-
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
164
-
165
-
// Release any handles owned by the slave: we don't need it now
166
-
// that we've spawned the child.
167
-
drop(pty_pair.slave);
168
-
169
-
let reader = pty_pair
170
-
.master
171
-
.try_clone_reader()
172
-
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
173
-
let master_writer = pty_pair
174
-
.master
175
-
.take_writer()
176
-
.map_err(|x| HiveLibError::CommandError(CommandError::PortablePty(x)))?;
177
-
178
-
let stderr_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
179
-
let stdout_collection = Arc::new(Mutex::new(VecDeque::<String>::with_capacity(10)));
180
-
let (began_tx, began_rx) = oneshot::channel::<()>();
181
-
let (status_sender, status_receiver) = watch::channel(Status::Running);
182
-
183
-
let stdout_handle = {
184
-
let arguments = WatchStdoutArguments {
185
-
began_tx,
186
-
reader,
187
-
needles,
188
-
output_mode: arguments.output_mode,
189
-
stderr_collection: stderr_collection.clone(),
190
-
stdout_collection: stdout_collection.clone(),
191
-
span: Span::current(),
192
-
log_stdout: arguments.log_stdout,
193
-
status_sender,
194
-
};
195
-
196
-
tokio::task::spawn_blocking(move || handle_pty_stdout(arguments))
197
-
};
198
-
199
-
let (write_stdin_pipe_r, write_stdin_pipe_w) =
200
-
pipe().map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
201
-
let (cancel_stdin_pipe_r, cancel_stdin_pipe_w) =
202
-
pipe().map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
203
-
204
-
tokio::task::spawn_blocking(move || {
205
-
watch_stdin_from_user(
206
-
&cancel_stdin_pipe_r,
207
-
master_writer,
208
-
&write_stdin_pipe_r,
209
-
Span::current(),
210
-
)
211
-
});
212
-
213
-
debug!("Setup threads");
214
-
215
-
let () = began_rx
216
-
.await
217
-
.map_err(|x| HiveLibError::CommandError(CommandError::OneshotRecvError(x)))?;
218
-
219
-
drop(clobber_guard);
220
-
221
-
if arguments.keep_stdin_open {
222
-
trace!("Sending THREAD_BEGAN_SIGNAL");
223
-
224
-
posix_write(&cancel_stdin_pipe_w, THREAD_BEGAN_SIGNAL)
225
-
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
226
-
} else {
227
-
trace!("Sending THREAD_QUIT_SIGNAL");
228
-
229
-
posix_write(&cancel_stdin_pipe_w, THREAD_QUIT_SIGNAL)
230
-
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
231
-
}
232
-
233
-
Ok(InteractiveChildChip {
234
-
child,
235
-
cancel_stdin_pipe_w,
236
-
write_stdin_pipe_w,
237
-
stderr_collection,
238
-
stdout_collection,
239
-
original_command: arguments.command_string.as_ref().to_string(),
240
-
status_receiver,
241
-
stdout_handle,
242
-
})
243
-
}
244
-
245
-
fn print_authenticate_warning<S: AsRef<str>>(
246
-
arguments: &CommandArguments<S>,
247
-
) -> Result<(), HiveLibError> {
248
-
if !arguments.is_elevated() {
249
-
return Ok(());
250
-
}
251
-
252
-
let _ = STATUS.lock().write_above_status(
253
-
&format!(
254
-
"{} | Authenticate for \"sudo {}\":\n",
255
-
arguments
256
-
.target
257
-
.map_or(Ok("localhost (!)".to_string()), |target| Ok(format!(
258
-
"{}@{}:{}",
259
-
target.user,
260
-
target.get_preferred_host()?,
261
-
target.port
262
-
)))?,
263
-
arguments.command_string.as_ref()
264
-
)
265
-
.into_bytes(),
266
-
&mut stderr(),
267
-
);
268
-
269
-
Ok(())
270
-
}
271
-
272
-
struct Needles {
273
-
succeed: Arc<Vec<u8>>,
274
-
fail: Arc<Vec<u8>>,
275
-
start: Arc<Vec<u8>>,
276
-
}
277
-
278
-
fn create_needles() -> Needles {
279
-
let tmp_prefix = rand::distr::SampleString::sample_string(&Alphabetic, &mut rand::rng(), 5);
280
-
281
-
Needles {
282
-
succeed: Arc::new(format!("{tmp_prefix}_W_Q").as_bytes().to_vec()),
283
-
fail: Arc::new(format!("{tmp_prefix}_W_F").as_bytes().to_vec()),
284
-
start: Arc::new(format!("{tmp_prefix}_W_S").as_bytes().to_vec()),
285
-
}
286
-
}
287
-
288
-
fn setup_master(pty_pair: &PtyPair) -> Result<(), HiveLibError> {
289
-
if let Some(fd) = pty_pair.master.as_raw_fd() {
290
-
// convert raw fd to a BorrowedFd
291
-
// safe as `fd` is dropped well before `pty_pair.master`
292
-
let fd = unsafe { std::os::unix::io::BorrowedFd::borrow_raw(fd) };
293
-
let mut termios =
294
-
tcgetattr(fd).map_err(|x| HiveLibError::CommandError(CommandError::TermAttrs(x)))?;
295
-
296
-
termios.local_flags &= !LocalFlags::ECHO;
297
-
// Key agent does not work well without canonical mode
298
-
termios.local_flags &= !LocalFlags::ICANON;
299
-
// Actually quit
300
-
termios.local_flags &= !LocalFlags::ISIG;
301
-
302
-
tcsetattr(fd, SetArg::TCSANOW, &termios)
303
-
.map_err(|x| HiveLibError::CommandError(CommandError::TermAttrs(x)))?;
304
-
}
305
-
306
-
Ok(())
307
-
}
308
-
309
-
fn build_command<S: AsRef<str>>(
310
-
arguments: &CommandArguments<'_, S>,
311
-
command_string: &String,
312
-
) -> Result<CommandBuilder, HiveLibError> {
313
-
let mut command = if let Some(target) = arguments.target {
314
-
let mut command = create_int_ssh_command(target, arguments.modifiers)?;
315
-
316
-
// force ssh to use our pseudo terminal
317
-
command.arg("-tt");
318
-
319
-
command
320
-
} else {
321
-
let mut command = portable_pty::CommandBuilder::new("sh");
322
-
323
-
command.arg("-c");
324
-
325
-
command
326
-
};
327
-
328
-
if arguments.is_elevated() {
329
-
command.arg(format!("sudo -u root -- sh -c '{command_string}'"));
330
-
} else {
331
-
command.arg(command_string);
332
-
}
333
-
334
-
Ok(command)
335
-
}
336
-
337
-
impl WireCommandChip for InteractiveChildChip {
338
-
type ExitStatus = (portable_pty::ExitStatus, String);
339
-
340
-
#[instrument(skip_all)]
341
-
async fn wait_till_success(mut self) -> Result<Self::ExitStatus, CommandError> {
342
-
drop(self.write_stdin_pipe_w);
343
-
344
-
let exit_status = tokio::task::spawn_blocking(move || self.child.wait())
345
-
.await
346
-
.map_err(CommandError::JoinError)?
347
-
.map_err(CommandError::WaitForStatus)?;
348
-
349
-
debug!("exit_status: {exit_status:?}");
350
-
351
-
self.stdout_handle
352
-
.await
353
-
.map_err(|_| CommandError::ThreadPanic)??;
354
-
355
-
let status = self
356
-
.status_receiver
357
-
.wait_for(|value| matches!(value, Status::Done { .. }))
358
-
.await
359
-
.unwrap();
360
-
361
-
let _ = posix_write(&self.cancel_stdin_pipe_w, THREAD_QUIT_SIGNAL);
362
-
363
-
if let Status::Done { success: true } = *status {
364
-
let logs = self
365
-
.stdout_collection
366
-
.lock()
367
-
.unwrap()
368
-
.iter()
369
-
.rev()
370
-
.map(|x| x.trim())
371
-
.join("\n");
372
-
373
-
return Ok((exit_status, logs));
374
-
}
375
-
376
-
debug!("child did not succeed");
377
-
378
-
let logs = self
379
-
.stderr_collection
380
-
.lock()
381
-
.unwrap()
382
-
.iter()
383
-
.rev()
384
-
.join("\n");
385
-
386
-
Err(CommandError::CommandFailed {
387
-
command_ran: self.original_command,
388
-
logs,
389
-
code: format!("code {}", exit_status.exit_code()),
390
-
reason: match *status {
391
-
Status::Done { .. } => "marked-unsuccessful",
392
-
Status::Running => "child-crashed-before-succeeding",
393
-
},
394
-
})
395
-
}
396
-
397
-
async fn write_stdin(&mut self, data: Vec<u8>) -> Result<(), HiveLibError> {
398
-
trace!("Writing {} bytes to stdin", data.len());
399
-
400
-
posix_write(&self.write_stdin_pipe_w, &data)
401
-
.map_err(|x| HiveLibError::CommandError(CommandError::PosixPipe(x)))?;
402
-
403
-
Ok(())
404
-
}
405
-
}
406
-
407
-
impl StdinTermiosAttrGuard {
408
-
fn new() -> Result<Self, CommandError> {
409
-
let stdin = std::io::stdin();
410
-
let stdin_fd = stdin.as_fd();
411
-
412
-
let mut termios = tcgetattr(stdin_fd).map_err(CommandError::TermAttrs)?;
413
-
let original_termios = termios.clone();
414
-
415
-
termios.local_flags &= !(LocalFlags::ECHO | LocalFlags::ICANON);
416
-
tcsetattr(stdin_fd, SetArg::TCSANOW, &termios).map_err(CommandError::TermAttrs)?;
417
-
418
-
Ok(StdinTermiosAttrGuard(original_termios))
419
-
}
420
-
}
421
-
422
-
impl Drop for StdinTermiosAttrGuard {
423
-
fn drop(&mut self) {
424
-
let stdin = std::io::stdin();
425
-
let stdin_fd = stdin.as_fd();
426
-
427
-
let _ = tcsetattr(stdin_fd, SetArg::TCSANOW, &self.0);
428
-
}
429
-
}
430
-
431
-
fn create_int_ssh_command(
432
-
target: &Target,
433
-
modifiers: SubCommandModifiers,
434
-
) -> Result<portable_pty::CommandBuilder, HiveLibError> {
435
-
let mut command = portable_pty::CommandBuilder::new("ssh");
436
-
command.args(target.create_ssh_args(modifiers, false, false)?);
437
-
command.arg(target.get_preferred_host()?.to_string());
438
-
Ok(command)
439
-
}
440
-
441
-
#[cfg(test)]
442
-
mod tests {
443
-
use aho_corasick::AhoCorasick;
444
-
use tokio::sync::oneshot::error::TryRecvError;
445
-
446
-
use crate::commands::pty::output::handle_rawmode_data;
447
-
448
-
use super::*;
449
-
use std::assert_matches::assert_matches;
450
-
451
-
#[test]
452
-
fn test_rawmode_data() {
453
-
let aho_corasick = AhoCorasick::builder()
454
-
.ascii_case_insensitive(false)
455
-
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
456
-
.build(["START_NEEDLE", "SUCCEEDED_NEEDLE", "FAILED_NEEDLE"])
457
-
.unwrap();
458
-
let mut stderr = vec![];
459
-
let (began_tx, mut began_rx) = oneshot::channel::<()>();
460
-
let mut began_tx = Some(began_tx);
461
-
let (status_sender, _) = watch::channel(Status::Running);
462
-
463
-
// each "Bla" is 4 bytes.
464
-
let buffer = "bla bla bla START_NEEDLE bla bla bla".as_bytes();
465
-
let mut raw_mode_buffer = vec![];
466
-
467
-
// handle 1 "bla"
468
-
assert_matches!(
469
-
handle_rawmode_data(
470
-
&mut stderr,
471
-
buffer,
472
-
4,
473
-
&mut raw_mode_buffer,
474
-
&aho_corasick,
475
-
&status_sender,
476
-
&mut began_tx
477
-
),
478
-
Ok(SearchFindings::None)
479
-
);
480
-
assert_matches!(began_rx.try_recv(), Err(TryRecvError::Empty));
481
-
assert!(began_tx.is_some());
482
-
assert_eq!(raw_mode_buffer, b"bla ");
483
-
assert_matches!(*status_sender.borrow(), Status::Running);
484
-
485
-
let buffer = &buffer[4..];
486
-
487
-
// handle 2 "bla"'s and half a "START_NEEDLE"
488
-
let n = 4 + 4 + 6;
489
-
assert_matches!(
490
-
handle_rawmode_data(
491
-
&mut stderr,
492
-
buffer,
493
-
n,
494
-
&mut raw_mode_buffer,
495
-
&aho_corasick,
496
-
&status_sender,
497
-
&mut began_tx
498
-
),
499
-
Ok(SearchFindings::None)
500
-
);
501
-
assert_matches!(began_rx.try_recv(), Err(TryRecvError::Empty));
502
-
assert!(began_tx.is_some());
503
-
assert_matches!(*status_sender.borrow(), Status::Running);
504
-
assert_eq!(raw_mode_buffer, b"bla bla bla START_");
505
-
506
-
let buffer = &buffer[n..];
507
-
508
-
// handle rest of the data
509
-
let n = buffer.len();
510
-
assert_matches!(
511
-
handle_rawmode_data(
512
-
&mut stderr,
513
-
buffer,
514
-
n,
515
-
&mut raw_mode_buffer,
516
-
&aho_corasick,
517
-
&status_sender,
518
-
&mut began_tx
519
-
),
520
-
Ok(SearchFindings::Started)
521
-
);
522
-
assert_matches!(began_rx.try_recv(), Ok(()));
523
-
assert_matches!(began_tx, None);
524
-
assert_eq!(raw_mode_buffer, b"bla bla bla START_NEEDLE bla bla bla");
525
-
assert_matches!(*status_sender.borrow(), Status::Running);
526
-
527
-
// test failed needle
528
-
let buffer = "bla FAILED_NEEDLE bla".as_bytes();
529
-
let mut raw_mode_buffer = vec![];
530
-
531
-
let n = buffer.len();
532
-
assert_matches!(
533
-
handle_rawmode_data(
534
-
&mut stderr,
535
-
buffer,
536
-
n,
537
-
&mut raw_mode_buffer,
538
-
&aho_corasick,
539
-
&status_sender,
540
-
&mut began_tx
541
-
),
542
-
Ok(SearchFindings::Terminate)
543
-
);
544
-
assert_matches!(*status_sender.borrow(), Status::Done { success: false });
545
-
546
-
// test succeed needle
547
-
let buffer = "bla SUCCEEDED_NEEDLE bla".as_bytes();
548
-
let mut raw_mode_buffer = vec![];
549
-
let (status_sender, _) = watch::channel(Status::Running);
550
-
551
-
let n = buffer.len();
552
-
assert_matches!(
553
-
handle_rawmode_data(
554
-
&mut stderr,
555
-
buffer,
556
-
n,
557
-
&mut raw_mode_buffer,
558
-
&aho_corasick,
559
-
&status_sender,
560
-
&mut began_tx
561
-
),
562
-
Ok(SearchFindings::Terminate)
563
-
);
564
-
assert_matches!(*status_sender.borrow(), Status::Done { success: true });
565
-
}
566
-
}
-264
wire/lib/src/commands/pty/output.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use crate::{
5
-
commands::{
6
-
ChildOutputMode,
7
-
pty::{
8
-
FAILED_PATTERN, Needles, STARTED_PATTERN, SUCCEEDED_PATTERN, SearchFindings, Status,
9
-
logbuffer::LogBuffer,
10
-
},
11
-
},
12
-
errors::CommandError,
13
-
};
14
-
use aho_corasick::AhoCorasick;
15
-
use std::{
16
-
collections::VecDeque,
17
-
io::Write,
18
-
sync::{Arc, Mutex},
19
-
};
20
-
use tokio::sync::{oneshot, watch};
21
-
use tracing::{Span, debug, instrument};
22
-
23
-
pub(super) struct WatchStdoutArguments {
24
-
pub began_tx: oneshot::Sender<()>,
25
-
pub reader: super::MasterReader,
26
-
pub needles: Needles,
27
-
pub output_mode: ChildOutputMode,
28
-
pub stderr_collection: Arc<Mutex<VecDeque<String>>>,
29
-
pub stdout_collection: Arc<Mutex<VecDeque<String>>>,
30
-
pub status_sender: watch::Sender<Status>,
31
-
pub span: Span,
32
-
pub log_stdout: bool,
33
-
}
34
-
35
-
/// Handles data from the PTY, and logs or prompts the user depending on the state
36
-
/// of the command.
37
-
///
38
-
/// Emits a message on the `began_tx` when the command is considered started.
39
-
///
40
-
/// Records stderr and stdout when it is considered notable (all stdout, last few stderr messages)
41
-
#[instrument(skip_all, name = "log", parent = arguments.span)]
42
-
pub(super) fn handle_pty_stdout(arguments: WatchStdoutArguments) -> Result<(), CommandError> {
43
-
let WatchStdoutArguments {
44
-
began_tx,
45
-
mut reader,
46
-
needles,
47
-
output_mode,
48
-
stdout_collection,
49
-
stderr_collection,
50
-
status_sender,
51
-
log_stdout,
52
-
..
53
-
} = arguments;
54
-
55
-
let aho_corasick = AhoCorasick::builder()
56
-
.ascii_case_insensitive(false)
57
-
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
58
-
.build([
59
-
needles.start.as_ref(),
60
-
needles.succeed.as_ref(),
61
-
needles.fail.as_ref(),
62
-
])
63
-
.unwrap();
64
-
65
-
let mut buffer = [0u8; 1024];
66
-
let mut stderr = std::io::stderr();
67
-
let mut began = false;
68
-
let mut log_buffer = LogBuffer::new();
69
-
let mut raw_mode_buffer = Vec::new();
70
-
let mut belled = false;
71
-
let mut began_tx = Some(began_tx);
72
-
73
-
'outer: loop {
74
-
match reader.read(&mut buffer) {
75
-
Ok(0) => break 'outer,
76
-
Ok(n) => {
77
-
// this block is responsible for outputting the "raw" data,
78
-
// mostly sudo prompts.
79
-
if !began {
80
-
let findings = handle_rawmode_data(
81
-
&mut stderr,
82
-
&buffer,
83
-
n,
84
-
&mut raw_mode_buffer,
85
-
&aho_corasick,
86
-
&status_sender,
87
-
&mut began_tx,
88
-
)?;
89
-
90
-
match findings {
91
-
SearchFindings::Terminate => break 'outer,
92
-
SearchFindings::Started => {
93
-
began = true;
94
-
continue;
95
-
}
96
-
SearchFindings::None => {}
97
-
}
98
-
99
-
if belled {
100
-
continue;
101
-
}
102
-
103
-
stderr
104
-
.write(b"\x07") // bell
105
-
.map_err(CommandError::WritingClientStderr)?;
106
-
stderr.flush().map_err(CommandError::WritingClientStderr)?;
107
-
108
-
belled = true;
109
-
110
-
continue;
111
-
}
112
-
113
-
log_buffer.process_slice(&buffer[..n]);
114
-
115
-
while let Some(mut line) = log_buffer.next_line() {
116
-
let findings =
117
-
search_string(&aho_corasick, &line, &status_sender, &mut began_tx);
118
-
119
-
match findings {
120
-
SearchFindings::Terminate => break 'outer,
121
-
SearchFindings::Started => {
122
-
began = true;
123
-
continue;
124
-
}
125
-
SearchFindings::None => {}
126
-
}
127
-
128
-
handle_normal_data(
129
-
&stderr_collection,
130
-
&stdout_collection,
131
-
&mut line,
132
-
log_stdout,
133
-
output_mode,
134
-
);
135
-
}
136
-
}
137
-
Err(e) => {
138
-
eprintln!("Error reading from PTY: {e}");
139
-
break;
140
-
}
141
-
}
142
-
}
143
-
144
-
began_tx.map(|began_tx| began_tx.send(()));
145
-
146
-
// failsafe if there were errors or the reader stopped
147
-
if matches!(*status_sender.borrow(), Status::Running) {
148
-
status_sender.send_replace(Status::Done { success: false });
149
-
}
150
-
151
-
debug!("stdout: goodbye");
152
-
153
-
Ok(())
154
-
}
155
-
156
-
/// handles raw data, prints to stderr when a prompt is detected
157
-
pub(super) fn handle_rawmode_data<W: std::io::Write>(
158
-
stderr: &mut W,
159
-
buffer: &[u8],
160
-
n: usize,
161
-
raw_mode_buffer: &mut Vec<u8>,
162
-
aho_corasick: &AhoCorasick,
163
-
status_sender: &watch::Sender<Status>,
164
-
began_tx: &mut Option<oneshot::Sender<()>>,
165
-
) -> Result<SearchFindings, CommandError> {
166
-
raw_mode_buffer.extend_from_slice(&buffer[..n]);
167
-
168
-
let findings = search_string(aho_corasick, raw_mode_buffer, status_sender, began_tx);
169
-
170
-
if matches!(
171
-
findings,
172
-
SearchFindings::Started | SearchFindings::Terminate
173
-
) {
174
-
return Ok(findings);
175
-
}
176
-
177
-
stderr
178
-
.write_all(&buffer[..n])
179
-
.map_err(CommandError::WritingClientStderr)?;
180
-
181
-
stderr.flush().map_err(CommandError::WritingClientStderr)?;
182
-
183
-
Ok(findings)
184
-
}
185
-
186
-
/// handles data when the command is considered "started", logs and records errors as appropriate
187
-
fn handle_normal_data(
188
-
stderr_collection: &Arc<Mutex<VecDeque<String>>>,
189
-
stdout_collection: &Arc<Mutex<VecDeque<String>>>,
190
-
line: &mut [u8],
191
-
log_stdout: bool,
192
-
output_mode: ChildOutputMode,
193
-
) {
194
-
if line.starts_with(b"#") {
195
-
let stripped = &mut line[1..];
196
-
197
-
if log_stdout {
198
-
output_mode.trace_slice(stripped);
199
-
}
200
-
201
-
let mut queue = stdout_collection.lock().unwrap();
202
-
queue.push_front(String::from_utf8_lossy(stripped).to_string());
203
-
return;
204
-
}
205
-
206
-
let log = output_mode.trace_slice(line);
207
-
208
-
if let Some(error_msg) = log {
209
-
let mut queue = stderr_collection.lock().unwrap();
210
-
211
-
// add at most 20 messages to the front, drop the rest.
212
-
queue.push_front(error_msg);
213
-
queue.truncate(20);
214
-
}
215
-
}
216
-
217
-
/// returns true if the command is considered stopped
218
-
fn search_string(
219
-
aho_corasick: &AhoCorasick,
220
-
haystack: &[u8],
221
-
status_sender: &watch::Sender<Status>,
222
-
began_tx: &mut Option<oneshot::Sender<()>>,
223
-
) -> SearchFindings {
224
-
let searched = aho_corasick
225
-
.find_iter(haystack)
226
-
.map(|x| x.pattern())
227
-
.collect::<Vec<_>>();
228
-
229
-
let started = if searched.contains(&STARTED_PATTERN) {
230
-
debug!("start needle was found, switching mode...");
231
-
if let Some(began_tx) = began_tx.take() {
232
-
let _ = began_tx.send(());
233
-
}
234
-
true
235
-
} else {
236
-
false
237
-
};
238
-
239
-
let succeeded = if searched.contains(&SUCCEEDED_PATTERN) {
240
-
debug!("succeed needle was found, marking child as succeeding.");
241
-
status_sender.send_replace(Status::Done { success: true });
242
-
true
243
-
} else {
244
-
false
245
-
};
246
-
247
-
let failed = if searched.contains(&FAILED_PATTERN) {
248
-
debug!("failed needle was found, elevated child did not succeed.");
249
-
status_sender.send_replace(Status::Done { success: false });
250
-
true
251
-
} else {
252
-
false
253
-
};
254
-
255
-
if succeeded || failed {
256
-
return SearchFindings::Terminate;
257
-
}
258
-
259
-
if started {
260
-
return SearchFindings::Started;
261
-
}
262
-
263
-
SearchFindings::None
264
-
}
-376
wire/lib/src/errors.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
#![allow(unused_assignments)]
5
-
6
-
use std::{num::ParseIntError, path::PathBuf, process::ExitStatus, sync::mpsc::RecvError};
7
-
8
-
use miette::{Diagnostic, SourceSpan};
9
-
use nix_compat::flakeref::{FlakeRef, FlakeRefError};
10
-
use thiserror::Error;
11
-
use tokio::task::JoinError;
12
-
13
-
use crate::hive::node::{Name, SwitchToConfigurationGoal};
14
-
15
-
#[cfg(debug_assertions)]
16
-
const DOCS_URL: &str = "http://localhost:5173/reference/errors.html";
17
-
#[cfg(not(debug_assertions))]
18
-
const DOCS_URL: &str = "https://wire.althaea.zone/reference/errors.html";
19
-
20
-
#[derive(Debug, Diagnostic, Error)]
21
-
pub enum KeyError {
22
-
#[diagnostic(
23
-
code(wire::key::File),
24
-
url("{DOCS_URL}#{}", self.code().unwrap())
25
-
)]
26
-
#[error("error reading file")]
27
-
File(#[source] std::io::Error),
28
-
29
-
#[diagnostic(
30
-
code(wire::key::SpawningCommand),
31
-
help("Ensure wire has the correct $PATH for this command"),
32
-
url("{DOCS_URL}#{}", self.code().unwrap())
33
-
)]
34
-
#[error("error spawning key command")]
35
-
CommandSpawnError {
36
-
#[source]
37
-
error: std::io::Error,
38
-
39
-
#[source_code]
40
-
command: String,
41
-
42
-
#[label(primary, "Program ran")]
43
-
command_span: Option<SourceSpan>,
44
-
},
45
-
46
-
#[diagnostic(
47
-
code(wire::key::Resolving),
48
-
url("{DOCS_URL}#{}", self.code().unwrap())
49
-
)]
50
-
#[error("Error resolving key command child process")]
51
-
CommandResolveError {
52
-
#[source]
53
-
error: std::io::Error,
54
-
55
-
#[source_code]
56
-
command: String,
57
-
},
58
-
59
-
#[diagnostic(
60
-
code(wire::key::CommandExit),
61
-
url("{DOCS_URL}#{}", self.code().unwrap())
62
-
)]
63
-
#[error("key command failed with status {}: {}", .0,.1)]
64
-
CommandError(ExitStatus, String),
65
-
66
-
#[diagnostic(
67
-
code(wire::key::Empty),
68
-
url("{DOCS_URL}#{}", self.code().unwrap())
69
-
)]
70
-
#[error("Command list empty")]
71
-
Empty,
72
-
73
-
#[diagnostic(
74
-
code(wire::key::ParseKeyPermissions),
75
-
help("Refer to the documentation for the format of key file permissions."),
76
-
url("{DOCS_URL}#{}", self.code().unwrap())
77
-
)]
78
-
#[error("Failed to parse key permissions")]
79
-
ParseKeyPermissions(#[source] ParseIntError),
80
-
}
81
-
82
-
#[derive(Debug, Diagnostic, Error)]
83
-
pub enum ActivationError {
84
-
#[diagnostic(
85
-
code(wire::activation::SwitchToConfiguration),
86
-
url("{DOCS_URL}#{}", self.code().unwrap())
87
-
)]
88
-
#[error("failed to run switch-to-configuration {0} on node {1}")]
89
-
SwitchToConfigurationError(SwitchToConfigurationGoal, Name, #[source] CommandError),
90
-
}
91
-
92
-
#[derive(Debug, Diagnostic, Error)]
93
-
pub enum NetworkError {
94
-
#[diagnostic(
95
-
code(wire::network::HostUnreachable),
96
-
help(
97
-
"If you failed due to a fault in DNS, note that a node can have multiple targets defined."
98
-
),
99
-
url("{DOCS_URL}#{}", self.code().unwrap())
100
-
)]
101
-
#[error("Cannot reach host {host}")]
102
-
HostUnreachable {
103
-
host: String,
104
-
#[source]
105
-
source: CommandError,
106
-
},
107
-
108
-
#[diagnostic(
109
-
code(wire::network::HostUnreachableAfterReboot),
110
-
url("{DOCS_URL}#{}", self.code().unwrap())
111
-
)]
112
-
#[error("Failed to get regain connection to {0} after activation.")]
113
-
HostUnreachableAfterReboot(String),
114
-
115
-
#[diagnostic(
116
-
code(wire::network::HostsExhausted),
117
-
url("{DOCS_URL}#{}", self.code().unwrap())
118
-
)]
119
-
#[error("Ran out of contactable hosts")]
120
-
HostsExhausted,
121
-
}
122
-
123
-
#[derive(Debug, Diagnostic, Error)]
124
-
pub enum HiveInitialisationError {
125
-
#[diagnostic(
126
-
code(wire::hive_init::NoHiveFound),
127
-
help(
128
-
"Double check the path is correct. You can adjust the hive path with `--path` when the hive lies outside of the CWD."
129
-
),
130
-
url("{DOCS_URL}#{}", self.code().unwrap())
131
-
)]
132
-
#[error("No hive could be found in {}", .0.display())]
133
-
NoHiveFound(PathBuf),
134
-
135
-
#[diagnostic(
136
-
code(wire::hive_init::Parse),
137
-
help("If you cannot resolve this problem, please create an issue."),
138
-
url("{DOCS_URL}#{}", self.code().unwrap())
139
-
)]
140
-
#[error("Failed to parse internal wire json.")]
141
-
ParseEvaluateError(#[source] serde_json::Error),
142
-
143
-
#[diagnostic(
144
-
code(wire::hive_init::ParsePrefetch),
145
-
help("please create an issue."),
146
-
url("{DOCS_URL}#{}", self.code().unwrap())
147
-
)]
148
-
#[error("Failed to parse `nix flake prefetch --json`.")]
149
-
ParsePrefetchError(#[source] serde_json::Error),
150
-
151
-
#[diagnostic(
152
-
code(wire::hive_init::NodeDoesNotExist),
153
-
help("Please create an issue!"),
154
-
url("{DOCS_URL}#{}", self.code().unwrap())
155
-
)]
156
-
#[error("node {0} not exist in hive")]
157
-
NodeDoesNotExist(String),
158
-
}
159
-
160
-
#[derive(Debug, Diagnostic, Error)]
161
-
pub enum HiveLocationError {
162
-
#[diagnostic(
163
-
code(wire::hive_location::MalformedPath),
164
-
url("{DOCS_URL}#{}", self.code().unwrap())
165
-
)]
166
-
#[error("Path was malformed: {}", .0.display())]
167
-
MalformedPath(PathBuf),
168
-
169
-
#[diagnostic(
170
-
code(wire::hive_location::Malformed),
171
-
url("{DOCS_URL}#{}", self.code().unwrap())
172
-
)]
173
-
#[error("--path was malformed")]
174
-
Malformed(#[source] FlakeRefError),
175
-
176
-
#[diagnostic(
177
-
code(wire::hive_location::TypeUnsupported),
178
-
url("{DOCS_URL}#{}", self.code().unwrap())
179
-
)]
180
-
#[error("The flakref had an unsupported type: {:#?}", .0)]
181
-
TypeUnsupported(Box<FlakeRef>),
182
-
}
183
-
184
-
#[derive(Debug, Diagnostic, Error)]
185
-
pub enum CommandError {
186
-
#[diagnostic(
187
-
code(wire::command::TermAttrs),
188
-
url("{DOCS_URL}#{}", self.code().unwrap())
189
-
)]
190
-
#[error("Failed to set PTY attrs")]
191
-
TermAttrs(#[source] nix::errno::Errno),
192
-
193
-
#[diagnostic(
194
-
code(wire::command::PosixPipe),
195
-
url("{DOCS_URL}#{}", self.code().unwrap())
196
-
)]
197
-
#[error("There was an error in regards to a pipe")]
198
-
PosixPipe(#[source] nix::errno::Errno),
199
-
200
-
/// Error wrapped around `portable_pty`'s anyhow
201
-
/// errors
202
-
#[diagnostic(
203
-
code(wire::command::PortablePty),
204
-
url("{DOCS_URL}#{}", self.code().unwrap())
205
-
)]
206
-
#[error("There was an error from the portable_pty crate")]
207
-
PortablePty(#[source] anyhow::Error),
208
-
209
-
#[diagnostic(
210
-
code(wire::command::Joining),
211
-
url("{DOCS_URL}#{}", self.code().unwrap())
212
-
)]
213
-
#[error("Failed to join on some tokio task")]
214
-
JoinError(#[source] JoinError),
215
-
216
-
#[diagnostic(
217
-
code(wire::command::WaitForStatus),
218
-
url("{DOCS_URL}#{}", self.code().unwrap())
219
-
)]
220
-
#[error("Failed to wait for the child's status")]
221
-
WaitForStatus(#[source] std::io::Error),
222
-
223
-
#[diagnostic(
224
-
code(wire::detached::NoHandle),
225
-
help("This should never happen, please create an issue!"),
226
-
url("{DOCS_URL}#{}", self.code().unwrap())
227
-
)]
228
-
#[error("There was no handle to child io")]
229
-
NoHandle,
230
-
231
-
#[diagnostic(
232
-
code(wire::command::WritingClientStdout),
233
-
url("{DOCS_URL}#{}", self.code().unwrap())
234
-
)]
235
-
#[error("Failed to write to client stderr.")]
236
-
WritingClientStderr(#[source] std::io::Error),
237
-
238
-
#[diagnostic(
239
-
code(wire::command::WritingMasterStdin),
240
-
url("{DOCS_URL}#{}", self.code().unwrap())
241
-
)]
242
-
#[error("Failed to write to PTY master stdout.")]
243
-
WritingMasterStdout(#[source] std::io::Error),
244
-
245
-
#[diagnostic(
246
-
code(wire::command::Recv),
247
-
url("{DOCS_URL}#{}", self.code().unwrap()),
248
-
help("please create an issue!"),
249
-
)]
250
-
#[error("Failed to receive a message from the begin channel")]
251
-
RecvError(#[source] RecvError),
252
-
253
-
#[diagnostic(
254
-
code(wire::command::ThreadPanic),
255
-
url("{DOCS_URL}#{}", self.code().unwrap()),
256
-
help("please create an issue!"),
257
-
)]
258
-
#[error("Thread panicked")]
259
-
ThreadPanic,
260
-
261
-
#[diagnostic(
262
-
code(wire::command::CommandFailed),
263
-
url("{DOCS_URL}#{}", self.code().unwrap()),
264
-
help("`nix` commands are filtered, run with -vvv to view all"),
265
-
)]
266
-
#[error("{command_ran} failed ({reason}) with {code} (last 20 lines):\n{logs}")]
267
-
CommandFailed {
268
-
command_ran: String,
269
-
logs: String,
270
-
code: String,
271
-
reason: &'static str,
272
-
},
273
-
274
-
#[diagnostic(
275
-
code(wire::command::RuntimeDirectory),
276
-
url("{DOCS_URL}#{}", self.code().unwrap())
277
-
)]
278
-
#[error("error creating $XDG_RUNTIME_DIR/wire")]
279
-
RuntimeDirectory(#[source] std::io::Error),
280
-
281
-
#[diagnostic(
282
-
code(wire::command::RuntimeDirectoryMissing),
283
-
url("{DOCS_URL}#{}", self.code().unwrap())
284
-
)]
285
-
#[error("$XDG_RUNTIME_DIR could not be used.")]
286
-
RuntimeDirectoryMissing(#[source] std::env::VarError),
287
-
288
-
#[diagnostic(
289
-
code(wire::command::OneshotRecvError),
290
-
url("{DOCS_URL}#{}", self.code().unwrap())
291
-
)]
292
-
#[error("Error waiting for begin message")]
293
-
OneshotRecvError(#[source] tokio::sync::oneshot::error::RecvError),
294
-
}
295
-
296
-
#[derive(Debug, Diagnostic, Error)]
297
-
pub enum HiveLibError {
298
-
#[error(transparent)]
299
-
#[diagnostic(transparent)]
300
-
HiveInitialisationError(HiveInitialisationError),
301
-
302
-
#[error(transparent)]
303
-
#[diagnostic(transparent)]
304
-
NetworkError(NetworkError),
305
-
306
-
#[error(transparent)]
307
-
#[diagnostic(transparent)]
308
-
ActivationError(ActivationError),
309
-
310
-
#[error(transparent)]
311
-
#[diagnostic(transparent)]
312
-
CommandError(CommandError),
313
-
314
-
#[error(transparent)]
315
-
#[diagnostic(transparent)]
316
-
HiveLocationError(HiveLocationError),
317
-
318
-
#[error("Failed to apply key {}", .0)]
319
-
KeyError(
320
-
String,
321
-
#[source]
322
-
#[diagnostic_source]
323
-
KeyError,
324
-
),
325
-
326
-
#[diagnostic(
327
-
code(wire::BuildNode),
328
-
url("{DOCS_URL}#{}", self.code().unwrap())
329
-
)]
330
-
#[error("failed to build node {name}")]
331
-
NixBuildError {
332
-
name: Name,
333
-
#[source]
334
-
source: CommandError,
335
-
},
336
-
337
-
#[diagnostic(
338
-
code(wire::CopyPath),
339
-
url("{DOCS_URL}#{}", self.code().unwrap())
340
-
)]
341
-
#[error("failed to copy path {path} to node {name}")]
342
-
NixCopyError {
343
-
name: Name,
344
-
path: String,
345
-
#[source]
346
-
error: Box<CommandError>,
347
-
#[help]
348
-
help: Option<Box<String>>,
349
-
},
350
-
351
-
#[diagnostic(code(wire::Evaluate))]
352
-
#[error("failed to evaluate `{attribute}` from the context of a hive.")]
353
-
NixEvalError {
354
-
attribute: String,
355
-
356
-
#[source]
357
-
source: CommandError,
358
-
359
-
#[help]
360
-
help: Option<Box<String>>,
361
-
},
362
-
363
-
#[diagnostic(
364
-
code(wire::Encoding),
365
-
url("{DOCS_URL}#{}", self.code().unwrap())
366
-
)]
367
-
#[error("error encoding length delimited data")]
368
-
Encoding(#[source] std::io::Error),
369
-
370
-
#[diagnostic(
371
-
code(wire::SIGINT),
372
-
url("{DOCS_URL}#{}", self.code().unwrap())
373
-
)]
374
-
#[error("SIGINT received, shut down")]
375
-
Sigint,
376
-
}
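
The deleted error types above attach a stable code, an optional help string, and a documentation URL to each variant through miette's #[diagnostic] attribute. A minimal sketch of that pattern follows, assuming the miette and thiserror crates; the DOCS_URL value and the ExampleError type are illustrative, not taken from the diff.

// Sketch only: the miette + thiserror pattern used by the deleted enums.
use miette::Diagnostic;
use thiserror::Error;

const DOCS_URL: &str = "https://example.invalid/errors";

#[derive(Debug, Diagnostic, Error)]
pub enum ExampleError {
    #[diagnostic(
        code(example::Io),
        help("check file permissions"),
        url("{DOCS_URL}#{}", self.code().unwrap())
    )]
    #[error("an I/O operation failed")]
    Io(#[source] std::io::Error),
}
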
-467
wire/lib/src/hive/mod.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use itertools::Itertools;
5
-
use nix_compat::flakeref::FlakeRef;
6
-
use node::{Name, Node};
7
-
use owo_colors::{OwoColorize, Stream};
8
-
use serde::de::Error;
9
-
use serde::{Deserialize, Deserializer, Serialize};
10
-
use std::collections::HashMap;
11
-
use std::collections::hash_map::OccupiedEntry;
12
-
use std::ffi::OsStr;
13
-
use std::fmt::Display;
14
-
use std::fs;
15
-
use std::path::PathBuf;
16
-
use std::str::FromStr;
17
-
use std::sync::Arc;
18
-
use tracing::{debug, info, instrument};
19
-
20
-
use crate::cache::InspectionCache;
21
-
use crate::commands::common::evaluate_hive_attribute;
22
-
use crate::commands::{CommandArguments, Either, WireCommandChip, run_command};
23
-
use crate::errors::{HiveInitialisationError, HiveLocationError};
24
-
use crate::{EvalGoal, HiveLibError, SubCommandModifiers};
25
-
pub mod node;
26
-
pub mod steps;
27
-
28
-
#[derive(Serialize, Deserialize, Debug, PartialEq)]
29
-
#[serde(deny_unknown_fields)]
30
-
pub struct Hive {
31
-
pub nodes: HashMap<Name, Node>,
32
-
33
-
#[serde(deserialize_with = "check_schema_version", rename = "_schema")]
34
-
pub schema: u32,
35
-
}
36
-
37
-
pub enum Action<'a> {
38
-
Inspect,
39
-
EvaluateNode(OccupiedEntry<'a, String, Node>),
40
-
}
41
-
42
-
fn check_schema_version<'de, D: Deserializer<'de>>(d: D) -> Result<u32, D::Error> {
43
-
let version = u32::deserialize(d)?;
44
-
if version != Hive::SCHEMA_VERSION {
45
-
return Err(D::Error::custom(
46
-
"Version mismatch for Hive. Please ensure the binary and your wire input match!",
47
-
));
48
-
}
49
-
Ok(version)
50
-
}
51
-
52
-
impl Hive {
53
-
pub const SCHEMA_VERSION: u32 = 1;
54
-
55
-
#[instrument(skip_all, name = "eval_hive")]
56
-
pub async fn new_from_path(
57
-
location: &HiveLocation,
58
-
cache: Option<InspectionCache>,
59
-
modifiers: SubCommandModifiers,
60
-
) -> Result<Hive, HiveLibError> {
61
-
info!("evaluating hive {location:?}");
62
-
63
-
if let Some(ref cache) = cache
64
-
&& let HiveLocation::Flake { prefetch, .. } = location
65
-
&& let Some(hive) = cache.get_hive(prefetch).await
66
-
{
67
-
return Ok(hive);
68
-
}
69
-
70
-
let output = evaluate_hive_attribute(location, &EvalGoal::Inspect, modifiers).await?;
71
-
72
-
let hive: Hive = serde_json::from_str(&output).map_err(|err| {
73
-
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParseEvaluateError(err))
74
-
})?;
75
-
76
-
if let Some(cache) = cache
77
-
&& let HiveLocation::Flake { prefetch, .. } = location
78
-
{
79
-
cache.store_hive(prefetch, &output).await;
80
-
}
81
-
82
-
Ok(hive)
83
-
}
84
-
85
-
/// # Errors
86
-
///
87
-
/// Returns an error if a node in nodes does not exist in the hive.
88
-
pub fn force_always_local(&mut self, nodes: Vec<String>) -> Result<(), HiveLibError> {
89
-
for node in nodes {
90
-
info!("Forcing a local build for {node}");
91
-
92
-
self.nodes
93
-
.get_mut(&Name(Arc::from(node.clone())))
94
-
.ok_or(HiveLibError::HiveInitialisationError(
95
-
HiveInitialisationError::NodeDoesNotExist(node.clone()),
96
-
))?
97
-
.build_remotely = false;
98
-
}
99
-
100
-
Ok(())
101
-
}
102
-
}
103
-
104
-
impl Display for Hive {
105
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
106
-
for (name, node) in &self.nodes {
107
-
writeln!(
108
-
f,
109
-
"Node {} {}:\n",
110
-
name.bold(),
111
-
format!("({})", node.host_platform)
112
-
.italic()
113
-
.if_supports_color(Stream::Stdout, |x| x.dimmed()),
114
-
)?;
115
-
116
-
if !node.tags.is_empty() {
117
-
write!(f, " > {}", "Tags:".bold())?;
118
-
writeln!(f, " {:?}", node.tags)?;
119
-
}
120
-
121
-
write!(f, " > {}", "Connection:".bold())?;
122
-
writeln!(f, " {{{}}}", node.target)?;
123
-
124
-
write!(
125
-
f,
126
-
" > {} {}{}",
127
-
"Build remotely".bold(),
128
-
"`deployment.buildOnTarget`"
129
-
.if_supports_color(Stream::Stdout, |x| x.dimmed())
130
-
.italic(),
131
-
":".bold()
132
-
)?;
133
-
writeln!(f, " {}", node.build_remotely)?;
134
-
135
-
write!(
136
-
f,
137
-
" > {} {}{}",
138
-
"Local apply allowed".bold(),
139
-
"`deployment.allowLocalDeployment`"
140
-
.if_supports_color(Stream::Stdout, |x| x.dimmed())
141
-
.italic(),
142
-
":".bold()
143
-
)?;
144
-
writeln!(f, " {}", node.allow_local_deployment)?;
145
-
146
-
if !node.keys.is_empty() {
147
-
write!(f, " > {}", "Keys:".bold())?;
148
-
writeln!(f, " {} key(s)", node.keys.len())?;
149
-
150
-
for key in &node.keys {
151
-
writeln!(f, " > {key}")?;
152
-
}
153
-
}
154
-
155
-
writeln!(f)?;
156
-
}
157
-
158
-
let total_keys = self
159
-
.nodes
160
-
.values()
161
-
.flat_map(|node| node.keys.iter())
162
-
.collect::<Vec<_>>();
163
-
let distinct_keys = self
164
-
.nodes
165
-
.values()
166
-
.flat_map(|node| node.keys.iter())
167
-
.unique()
168
-
.collect::<Vec<_>>()
169
-
.len();
170
-
171
-
write!(f, "{}", "Summary:".bold())?;
172
-
writeln!(
173
-
f,
174
-
" {} total node(s), totalling {} keys ({distinct_keys} distinct).",
175
-
self.nodes.len(),
176
-
total_keys.len()
177
-
)?;
178
-
writeln!(
179
-
f,
180
-
"{}",
181
-
"Note: Listed connections are tried from Left to Right".italic(),
182
-
)?;
183
-
184
-
Ok(())
185
-
}
186
-
}
187
-
188
-
#[derive(Debug, PartialEq, Eq, Deserialize)]
189
-
pub struct FlakePrefetch {
190
-
pub(crate) hash: String,
191
-
#[serde(rename = "storePath")]
192
-
pub(crate) store_path: String,
193
-
}
194
-
195
-
#[derive(Debug, PartialEq, Eq)]
196
-
pub enum HiveLocation {
197
-
HiveNix(PathBuf),
198
-
Flake {
199
-
uri: String,
200
-
prefetch: FlakePrefetch,
201
-
},
202
-
}
203
-
204
-
impl HiveLocation {
205
-
async fn get_flake(
206
-
uri: String,
207
-
modifiers: SubCommandModifiers,
208
-
) -> Result<HiveLocation, HiveLibError> {
209
-
let command = run_command(
210
-
&CommandArguments::new(format!("nix flake prefetch --extra-experimental-features nix-command --extra-experimental-features flakes --json {uri}"), modifiers)
211
-
.mode(crate::commands::ChildOutputMode::Generic),
212
-
)
213
-
.await?;
214
-
215
-
let result = command
216
-
.wait_till_success()
217
-
.await
218
-
.map_err(HiveLibError::CommandError)?;
219
-
220
-
debug!(hash_json = ?result);
221
-
222
-
let prefetch = serde_json::from_str(&match result {
223
-
Either::Left((.., output)) | Either::Right((.., output)) => output,
224
-
})
225
-
.map_err(|x| {
226
-
HiveLibError::HiveInitialisationError(HiveInitialisationError::ParsePrefetchError(x))
227
-
})?;
228
-
229
-
debug!(prefetch = ?prefetch);
230
-
231
-
Ok(HiveLocation::Flake { uri, prefetch })
232
-
}
233
-
}
234
-
235
-
pub async fn get_hive_location(
236
-
path: String,
237
-
modifiers: SubCommandModifiers,
238
-
) -> Result<HiveLocation, HiveLibError> {
239
-
let flakeref = FlakeRef::from_str(&path);
240
-
241
-
let path_to_location = async |path: PathBuf| {
242
-
Ok(match path.file_name().and_then(OsStr::to_str) {
243
-
Some("hive.nix") => HiveLocation::HiveNix(path.clone()),
244
-
Some(_) => {
245
-
if fs::metadata(path.join("flake.nix")).is_ok() {
246
-
HiveLocation::get_flake(path.display().to_string(), modifiers).await?
247
-
} else {
248
-
HiveLocation::HiveNix(path.join("hive.nix"))
249
-
}
250
-
}
251
-
None => {
252
-
return Err(HiveLibError::HiveLocationError(
253
-
HiveLocationError::MalformedPath(path.clone()),
254
-
));
255
-
}
256
-
})
257
-
};
258
-
259
-
match flakeref {
260
-
Err(nix_compat::flakeref::FlakeRefError::UrlParseError(_err)) => {
261
-
let path = PathBuf::from(path);
262
-
Ok(path_to_location(path).await?)
263
-
}
264
-
Ok(FlakeRef::Path { path, .. }) => Ok(path_to_location(path).await?),
265
-
Ok(
266
-
FlakeRef::Git { .. }
267
-
| FlakeRef::GitHub { .. }
268
-
| FlakeRef::GitLab { .. }
269
-
| FlakeRef::Tarball { .. }
270
-
| FlakeRef::Mercurial { .. }
271
-
| FlakeRef::SourceHut { .. },
272
-
) => Ok(HiveLocation::get_flake(path, modifiers).await?),
273
-
Err(err) => Err(HiveLibError::HiveLocationError(
274
-
HiveLocationError::Malformed(err),
275
-
)),
276
-
Ok(flakeref) => Err(HiveLibError::HiveLocationError(
277
-
HiveLocationError::TypeUnsupported(Box::new(flakeref)),
278
-
)),
279
-
}
280
-
}
281
-
282
-
#[cfg(test)]
283
-
mod tests {
284
-
use im::vector;
285
-
286
-
use crate::{
287
-
errors::CommandError,
288
-
get_test_path,
289
-
hive::steps::keys::{Key, Source, UploadKeyAt},
290
-
location,
291
-
test_support::make_flake_sandbox,
292
-
};
293
-
294
-
use super::*;
295
-
use std::{assert_matches::assert_matches, env};
296
-
297
-
// flake should always come before hive.nix
298
-
#[tokio::test]
299
-
async fn test_hive_dot_nix_priority() {
300
-
let location = location!(get_test_path!());
301
-
302
-
assert_matches!(location, HiveLocation::Flake { .. });
303
-
}
304
-
305
-
#[tokio::test]
306
-
#[cfg_attr(feature = "no_web_tests", ignore)]
307
-
async fn test_hive_file() {
308
-
let location = location!(get_test_path!());
309
-
310
-
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
311
-
.await
312
-
.unwrap();
313
-
314
-
let node = Node {
315
-
target: node::Target::from_host("192.168.122.96"),
316
-
..Default::default()
317
-
};
318
-
319
-
let mut nodes = HashMap::new();
320
-
nodes.insert(Name("node-a".into()), node);
321
-
322
-
assert_eq!(
323
-
hive,
324
-
Hive {
325
-
nodes,
326
-
schema: Hive::SCHEMA_VERSION
327
-
}
328
-
);
329
-
}
330
-
331
-
#[tokio::test]
332
-
#[cfg_attr(feature = "no_web_tests", ignore)]
333
-
async fn non_trivial_hive() {
334
-
let location = location!(get_test_path!());
335
-
336
-
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
337
-
.await
338
-
.unwrap();
339
-
340
-
let node = Node {
341
-
target: node::Target::from_host("name"),
342
-
keys: vector![Key {
343
-
name: "different-than-a".into(),
344
-
dest_dir: "/run/keys/".into(),
345
-
path: "/run/keys/different-than-a".into(),
346
-
group: "root".into(),
347
-
user: "root".into(),
348
-
permissions: "0600".into(),
349
-
source: Source::String("hi".into()),
350
-
upload_at: UploadKeyAt::PreActivation,
351
-
environment: im::HashMap::new()
352
-
}],
353
-
build_remotely: true,
354
-
..Default::default()
355
-
};
356
-
357
-
let mut nodes = HashMap::new();
358
-
nodes.insert(Name("node-a".into()), node);
359
-
360
-
assert_eq!(
361
-
hive,
362
-
Hive {
363
-
nodes,
364
-
schema: Hive::SCHEMA_VERSION
365
-
}
366
-
);
367
-
}
368
-
369
-
#[tokio::test]
370
-
#[cfg_attr(feature = "no_web_tests", ignore)]
371
-
async fn flake_hive() {
372
-
let tmp_dir = make_flake_sandbox(&get_test_path!()).unwrap();
373
-
374
-
let location = get_hive_location(
375
-
tmp_dir.path().display().to_string(),
376
-
SubCommandModifiers::default(),
377
-
)
378
-
.await
379
-
.unwrap();
380
-
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
381
-
.await
382
-
.unwrap();
383
-
384
-
let mut nodes = HashMap::new();
385
-
386
-
// a merged node
387
-
nodes.insert(Name("node-a".into()), Node::from_host("node-a"));
388
-
// a non-merged node
389
-
nodes.insert(Name("node-b".into()), Node::from_host("node-b"));
390
-
391
-
assert_eq!(
392
-
hive,
393
-
Hive {
394
-
nodes,
395
-
schema: Hive::SCHEMA_VERSION
396
-
}
397
-
);
398
-
399
-
tmp_dir.close().unwrap();
400
-
}
401
-
402
-
#[tokio::test]
403
-
async fn no_nixpkgs() {
404
-
let location = location!(get_test_path!());
405
-
406
-
assert_matches!(
407
-
Hive::new_from_path(&location, None, SubCommandModifiers::default()).await,
408
-
Err(HiveLibError::NixEvalError {
409
-
source: CommandError::CommandFailed {
410
-
logs,
411
-
..
412
-
},
413
-
..
414
-
})
415
-
if logs.contains("makeHive called without meta.nixpkgs specified")
416
-
);
417
-
}
418
-
419
-
#[tokio::test]
420
-
async fn _keys_should_fail() {
421
-
let location = location!(get_test_path!());
422
-
423
-
assert_matches!(
424
-
Hive::new_from_path(&location, None, SubCommandModifiers::default()).await,
425
-
Err(HiveLibError::NixEvalError {
426
-
source: CommandError::CommandFailed {
427
-
logs,
428
-
..
429
-
},
430
-
..
431
-
})
432
-
if logs.contains("The option `deployment._keys' is read-only, but it's set multiple times.")
433
-
);
434
-
}
435
-
436
-
#[tokio::test]
437
-
async fn test_force_always_local() {
438
-
let mut location: PathBuf = env::var("WIRE_TEST_DIR").unwrap().into();
439
-
location.push("non_trivial_hive");
440
-
let location = location!(location);
441
-
442
-
let mut hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
443
-
.await
444
-
.unwrap();
445
-
446
-
assert_matches!(
447
-
hive.force_always_local(vec!["non-existent".to_string()]),
448
-
Err(HiveLibError::HiveInitialisationError(
449
-
HiveInitialisationError::NodeDoesNotExist(node)
450
-
)) if node == "non-existent"
451
-
);
452
-
453
-
for node in hive.nodes.values() {
454
-
assert!(node.build_remotely);
455
-
}
456
-
457
-
assert_matches!(hive.force_always_local(vec!["node-a".to_string()]), Ok(()));
458
-
459
-
assert!(
460
-
!hive
461
-
.nodes
462
-
.get(&Name("node-a".into()))
463
-
.unwrap()
464
-
.build_remotely
465
-
);
466
-
}
467
-
}
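
The removed hive/mod.rs guards deserialization with a custom serde deserializer that rejects hives whose _schema field does not match the binary's expected version. A self-contained sketch of that deserialize_with pattern, assuming serde and serde_json as dependencies and an illustrative EXPECTED constant in place of the real schema version:

// Sketch of the schema-version guard used by the deleted Hive struct.
use serde::de::Error as _;
use serde::{Deserialize, Deserializer};

const EXPECTED: u32 = 1;

fn check_version<'de, D: Deserializer<'de>>(d: D) -> Result<u32, D::Error> {
    let version = u32::deserialize(d)?;
    if version != EXPECTED {
        return Err(D::Error::custom("schema version mismatch"));
    }
    Ok(version)
}

#[derive(Deserialize, Debug)]
struct Versioned {
    #[serde(deserialize_with = "check_version", rename = "_schema")]
    schema: u32,
}

fn main() {
    // A matching version deserializes normally.
    let ok: Versioned = serde_json::from_str(r#"{"_schema": 1}"#).unwrap();
    assert_eq!(ok.schema, 1);
    // A mismatched version surfaces as a deserialization error.
    assert!(serde_json::from_str::<Versioned>(r#"{"_schema": 2}"#).is_err());
}
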
-830
wire/lib/src/hive/node.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
#![allow(clippy::missing_errors_doc)]
5
-
use enum_dispatch::enum_dispatch;
6
-
use gethostname::gethostname;
7
-
use serde::{Deserialize, Serialize};
8
-
use std::assert_matches::debug_assert_matches;
9
-
use std::fmt::Display;
10
-
use std::sync::Arc;
11
-
use std::sync::atomic::AtomicBool;
12
-
use tokio::sync::oneshot;
13
-
use tracing::{Instrument, Level, Span, debug, error, event, instrument, trace};
14
-
15
-
use crate::commands::common::evaluate_hive_attribute;
16
-
use crate::commands::{CommandArguments, WireCommandChip, run_command};
17
-
use crate::errors::NetworkError;
18
-
use crate::hive::HiveLocation;
19
-
use crate::hive::steps::build::Build;
20
-
use crate::hive::steps::cleanup::CleanUp;
21
-
use crate::hive::steps::evaluate::Evaluate;
22
-
use crate::hive::steps::keys::{Key, Keys, PushKeyAgent, UploadKeyAt};
23
-
use crate::hive::steps::ping::Ping;
24
-
use crate::hive::steps::push::{PushBuildOutput, PushEvaluatedOutput};
25
-
use crate::status::STATUS;
26
-
use crate::{EvalGoal, StrictHostKeyChecking, SubCommandModifiers};
27
-
28
-
use super::HiveLibError;
29
-
use super::steps::activate::SwitchToConfiguration;
30
-
31
-
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq, derive_more::Display)]
32
-
pub struct Name(pub Arc<str>);
33
-
34
-
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
35
-
pub struct Target {
36
-
pub hosts: Vec<Arc<str>>,
37
-
pub user: Arc<str>,
38
-
pub port: u32,
39
-
40
-
#[serde(skip)]
41
-
current_host: usize,
42
-
}
43
-
44
-
impl Target {
45
-
#[instrument(ret(level = tracing::Level::DEBUG), skip_all)]
46
-
pub fn create_ssh_opts(
47
-
&self,
48
-
modifiers: SubCommandModifiers,
49
-
master: bool,
50
-
) -> Result<String, HiveLibError> {
51
-
self.create_ssh_args(modifiers, false, master)
52
-
.map(|x| x.join(" "))
53
-
}
54
-
55
-
#[instrument(ret(level = tracing::Level::DEBUG))]
56
-
pub fn create_ssh_args(
57
-
&self,
58
-
modifiers: SubCommandModifiers,
59
-
non_interactive_forced: bool,
60
-
master: bool,
61
-
) -> Result<Vec<String>, HiveLibError> {
62
-
let mut vector = vec![
63
-
"-l".to_string(),
64
-
self.user.to_string(),
65
-
"-p".to_string(),
66
-
self.port.to_string(),
67
-
];
68
-
let mut options = vec![
69
-
format!(
70
-
"StrictHostKeyChecking={}",
71
-
match modifiers.ssh_accept_host {
72
-
StrictHostKeyChecking::AcceptNew => "accept-new",
73
-
StrictHostKeyChecking::No => "no",
74
-
}
75
-
)
76
-
.to_string(),
77
-
];
78
-
79
-
options.extend(["PasswordAuthentication=no".to_string()]);
80
-
options.extend(["KbdInteractiveAuthentication=no".to_string()]);
81
-
82
-
vector.push("-o".to_string());
83
-
vector.extend(options.into_iter().intersperse("-o".to_string()));
84
-
85
-
Ok(vector)
86
-
}
87
-
}
88
-
89
-
#[cfg(test)]
90
-
impl Default for Target {
91
-
fn default() -> Self {
92
-
Target {
93
-
hosts: vec!["NAME".into()],
94
-
user: "root".into(),
95
-
port: 22,
96
-
current_host: 0,
97
-
}
98
-
}
99
-
}
100
-
101
-
#[cfg(test)]
102
-
impl<'a> Context<'a> {
103
-
fn create_test_context(
104
-
hive_location: HiveLocation,
105
-
name: &'a Name,
106
-
node: &'a mut Node,
107
-
) -> Self {
108
-
Context {
109
-
name,
110
-
node,
111
-
hive_location: Arc::new(hive_location),
112
-
modifiers: SubCommandModifiers::default(),
113
-
no_keys: false,
114
-
state: StepState::default(),
115
-
goal: Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch),
116
-
reboot: false,
117
-
should_apply_locally: false,
118
-
handle_unreachable: HandleUnreachable::default(),
119
-
should_shutdown: Arc::new(AtomicBool::new(false)),
120
-
}
121
-
}
122
-
}
123
-
124
-
impl Target {
125
-
pub fn get_preferred_host(&self) -> Result<&Arc<str>, HiveLibError> {
126
-
self.hosts
127
-
.get(self.current_host)
128
-
.ok_or(HiveLibError::NetworkError(NetworkError::HostsExhausted))
129
-
}
130
-
131
-
pub const fn host_failed(&mut self) {
132
-
self.current_host += 1;
133
-
}
134
-
135
-
#[cfg(test)]
136
-
#[must_use]
137
-
pub fn from_host(host: &str) -> Self {
138
-
Target {
139
-
hosts: vec![host.into()],
140
-
..Default::default()
141
-
}
142
-
}
143
-
}
144
-
145
-
impl Display for Target {
146
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
147
-
let hosts = itertools::Itertools::join(
148
-
&mut self
149
-
.hosts
150
-
.iter()
151
-
.map(|host| format!("{}@{host}:{}", self.user, self.port)),
152
-
", ",
153
-
);
154
-
155
-
write!(f, "{hosts}")
156
-
}
157
-
}
158
-
159
-
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
160
-
pub struct Node {
161
-
#[serde(rename = "target")]
162
-
pub target: Target,
163
-
164
-
#[serde(rename = "buildOnTarget")]
165
-
pub build_remotely: bool,
166
-
167
-
#[serde(rename = "allowLocalDeployment")]
168
-
pub allow_local_deployment: bool,
169
-
170
-
#[serde(default)]
171
-
pub tags: im::HashSet<String>,
172
-
173
-
#[serde(rename(deserialize = "_keys", serialize = "keys"))]
174
-
pub keys: im::Vector<Key>,
175
-
176
-
#[serde(rename(deserialize = "_hostPlatform", serialize = "host_platform"))]
177
-
pub host_platform: Arc<str>,
178
-
179
-
#[serde(rename(
180
-
deserialize = "privilegeEscalationCommand",
181
-
serialize = "privilege_escalation_command"
182
-
))]
183
-
pub privilege_escalation_command: im::Vector<Arc<str>>,
184
-
}
185
-
186
-
#[cfg(test)]
187
-
impl Default for Node {
188
-
fn default() -> Self {
189
-
Node {
190
-
target: Target::default(),
191
-
keys: im::Vector::new(),
192
-
tags: im::HashSet::new(),
193
-
privilege_escalation_command: vec!["sudo".into(), "--".into()].into(),
194
-
allow_local_deployment: true,
195
-
build_remotely: false,
196
-
host_platform: "x86_64-linux".into(),
197
-
}
198
-
}
199
-
}
200
-
201
-
impl Node {
202
-
#[cfg(test)]
203
-
#[must_use]
204
-
pub fn from_host(host: &str) -> Self {
205
-
Node {
206
-
target: Target::from_host(host),
207
-
..Default::default()
208
-
}
209
-
}
210
-
211
-
/// Tests the connection to a node
212
-
pub async fn ping(&self, modifiers: SubCommandModifiers) -> Result<(), HiveLibError> {
213
-
let host = self.target.get_preferred_host()?;
214
-
215
-
let command_string = format!(
216
-
"ssh {}@{host} {} exit",
217
-
self.target.user,
218
-
self.target.create_ssh_opts(modifiers, true)?
219
-
);
220
-
221
-
let output = run_command(
222
-
&CommandArguments::new(command_string, modifiers)
223
-
.log_stdout()
224
-
.mode(crate::commands::ChildOutputMode::Interactive),
225
-
)
226
-
.await?;
227
-
228
-
output.wait_till_success().await.map_err(|source| {
229
-
HiveLibError::NetworkError(NetworkError::HostUnreachable {
230
-
host: host.to_string(),
231
-
source,
232
-
})
233
-
})?;
234
-
235
-
Ok(())
236
-
}
237
-
}
238
-
239
-
#[must_use]
240
-
pub fn should_apply_locally(allow_local_deployment: bool, name: &str) -> bool {
241
-
*name == *gethostname() && allow_local_deployment
242
-
}
243
-
244
-
#[derive(derive_more::Display)]
245
-
pub enum Push<'a> {
246
-
Derivation(&'a Derivation),
247
-
Path(&'a String),
248
-
}
249
-
250
-
#[derive(Deserialize, Clone, Debug)]
251
-
pub struct Derivation(String);
252
-
253
-
impl Display for Derivation {
254
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
255
-
self.0.fmt(f).and_then(|()| write!(f, "^*"))
256
-
}
257
-
}
258
-
259
-
#[derive(derive_more::Display, Debug, Clone, Copy)]
260
-
pub enum SwitchToConfigurationGoal {
261
-
Switch,
262
-
Boot,
263
-
Test,
264
-
DryActivate,
265
-
}
266
-
267
-
#[derive(derive_more::Display, Clone, Copy)]
268
-
pub enum Goal {
269
-
SwitchToConfiguration(SwitchToConfigurationGoal),
270
-
Build,
271
-
Push,
272
-
Keys,
273
-
}
274
-
275
-
#[enum_dispatch]
276
-
pub(crate) trait ExecuteStep: Send + Sync + Display + std::fmt::Debug {
277
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError>;
278
-
279
-
fn should_execute(&self, context: &Context) -> bool;
280
-
}
281
-
282
-
// may include other options such as FailAll in the future
283
-
#[non_exhaustive]
284
-
#[derive(Clone, Default)]
285
-
pub enum HandleUnreachable {
286
-
Ignore,
287
-
#[default]
288
-
FailNode,
289
-
}
290
-
291
-
#[derive(Default)]
292
-
pub struct StepState {
293
-
pub evaluation: Option<Derivation>,
294
-
pub evaluation_rx: Option<oneshot::Receiver<Result<Derivation, HiveLibError>>>,
295
-
pub build: Option<String>,
296
-
pub key_agent_directory: Option<String>,
297
-
}
298
-
299
-
pub struct Context<'a> {
300
-
pub name: &'a Name,
301
-
pub node: &'a mut Node,
302
-
pub hive_location: Arc<HiveLocation>,
303
-
pub modifiers: SubCommandModifiers,
304
-
pub no_keys: bool,
305
-
pub state: StepState,
306
-
pub goal: Goal,
307
-
pub reboot: bool,
308
-
pub should_apply_locally: bool,
309
-
pub handle_unreachable: HandleUnreachable,
310
-
pub should_shutdown: Arc<AtomicBool>,
311
-
}
312
-
313
-
#[enum_dispatch(ExecuteStep)]
314
-
#[derive(Debug, PartialEq)]
315
-
enum Step {
316
-
Ping,
317
-
PushKeyAgent,
318
-
Keys,
319
-
Evaluate,
320
-
PushEvaluatedOutput,
321
-
Build,
322
-
PushBuildOutput,
323
-
SwitchToConfiguration,
324
-
CleanUp,
325
-
}
326
-
327
-
impl Display for Step {
328
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
329
-
match self {
330
-
Self::Ping(step) => step.fmt(f),
331
-
Self::PushKeyAgent(step) => step.fmt(f),
332
-
Self::Keys(step) => step.fmt(f),
333
-
Self::Evaluate(step) => step.fmt(f),
334
-
Self::PushEvaluatedOutput(step) => step.fmt(f),
335
-
Self::Build(step) => step.fmt(f),
336
-
Self::PushBuildOutput(step) => step.fmt(f),
337
-
Self::SwitchToConfiguration(step) => step.fmt(f),
338
-
Self::CleanUp(step) => step.fmt(f),
339
-
}
340
-
}
341
-
}
342
-
343
-
pub struct GoalExecutor<'a> {
344
-
steps: Vec<Step>,
345
-
context: Context<'a>,
346
-
}
347
-
348
-
/// returns Err if the application should shut down.
349
-
fn app_shutdown_guard(context: &Context) -> Result<(), HiveLibError> {
350
-
if context
351
-
.should_shutdown
352
-
.load(std::sync::atomic::Ordering::Relaxed)
353
-
{
354
-
return Err(HiveLibError::Sigint);
355
-
}
356
-
357
-
Ok(())
358
-
}
359
-
360
-
impl<'a> GoalExecutor<'a> {
361
-
#[must_use]
362
-
pub fn new(context: Context<'a>) -> Self {
363
-
Self {
364
-
steps: vec![
365
-
Step::Ping(Ping),
366
-
Step::PushKeyAgent(PushKeyAgent),
367
-
Step::Keys(Keys {
368
-
filter: UploadKeyAt::NoFilter,
369
-
}),
370
-
Step::Keys(Keys {
371
-
filter: UploadKeyAt::PreActivation,
372
-
}),
373
-
Step::Evaluate(super::steps::evaluate::Evaluate),
374
-
Step::PushEvaluatedOutput(super::steps::push::PushEvaluatedOutput),
375
-
Step::Build(super::steps::build::Build),
376
-
Step::PushBuildOutput(super::steps::push::PushBuildOutput),
377
-
Step::SwitchToConfiguration(SwitchToConfiguration),
378
-
Step::Keys(Keys {
379
-
filter: UploadKeyAt::PostActivation,
380
-
}),
381
-
Step::CleanUp(CleanUp),
382
-
],
383
-
context,
384
-
}
385
-
}
386
-
387
-
#[instrument(skip_all, name = "eval")]
388
-
async fn evaluate_task(
389
-
tx: oneshot::Sender<Result<Derivation, HiveLibError>>,
390
-
hive_location: Arc<HiveLocation>,
391
-
name: Name,
392
-
modifiers: SubCommandModifiers,
393
-
) {
394
-
let output =
395
-
evaluate_hive_attribute(&hive_location, &EvalGoal::GetTopLevel(&name), modifiers)
396
-
.await
397
-
.map(|output| {
398
-
serde_json::from_str::<Derivation>(&output).expect("failed to parse derivation")
399
-
});
400
-
401
-
debug!(output = ?output, done = true);
402
-
403
-
let _ = tx.send(output);
404
-
}
405
-
406
-
#[instrument(skip_all, fields(node = %self.context.name))]
407
-
pub async fn execute(mut self) -> Result<(), HiveLibError> {
408
-
app_shutdown_guard(&self.context)?;
409
-
410
-
let (tx, rx) = oneshot::channel();
411
-
self.context.state.evaluation_rx = Some(rx);
412
-
413
-
// The name of this span should never be changed without updating
414
-
// `wire/cli/tracing_setup.rs`
415
-
debug_assert_matches!(Span::current().metadata().unwrap().name(), "execute");
416
-
// This span should always have a `node` field by the same file
417
-
debug_assert!(
418
-
Span::current()
419
-
.metadata()
420
-
.unwrap()
421
-
.fields()
422
-
.field("node")
423
-
.is_some()
424
-
);
425
-
426
-
if !matches!(self.context.goal, Goal::Keys) {
427
-
tokio::spawn(
428
-
GoalExecutor::evaluate_task(
429
-
tx,
430
-
self.context.hive_location.clone(),
431
-
self.context.name.clone(),
432
-
self.context.modifiers,
433
-
)
434
-
.in_current_span(),
435
-
);
436
-
}
437
-
438
-
let steps = self
439
-
.steps
440
-
.iter()
441
-
.filter(|step| step.should_execute(&self.context))
442
-
.inspect(|step| {
443
-
trace!("Will execute step `{step}` for {}", self.context.name);
444
-
})
445
-
.collect::<Vec<_>>();
446
-
let length = steps.len();
447
-
448
-
for (position, step) in steps.iter().enumerate() {
449
-
app_shutdown_guard(&self.context)?;
450
-
451
-
event!(
452
-
Level::INFO,
453
-
step = step.to_string(),
454
-
progress = format!("{}/{length}", position + 1)
455
-
);
456
-
457
-
STATUS
458
-
.lock()
459
-
.set_node_step(self.context.name, step.to_string());
460
-
461
-
if let Err(err) = step.execute(&mut self.context).await.inspect_err(|_| {
462
-
error!("Failed to execute `{step}`");
463
-
}) {
464
-
// discard error from cleanup
465
-
let _ = CleanUp.execute(&mut self.context).await;
466
-
467
-
if matches!(step, Step::Ping(..))
468
-
&& matches!(self.context.handle_unreachable, HandleUnreachable::Ignore)
469
-
{
470
-
return Ok(());
471
-
}
472
-
473
-
STATUS.lock().mark_node_failed(self.context.name);
474
-
475
-
return Err(err);
476
-
}
477
-
}
478
-
479
-
STATUS.lock().mark_node_succeeded(self.context.name);
480
-
481
-
Ok(())
482
-
}
483
-
}
484
-
485
-
#[cfg(test)]
486
-
mod tests {
487
-
use rand::distr::Alphabetic;
488
-
489
-
use super::*;
490
-
use crate::{
491
-
function_name, get_test_path,
492
-
hive::{Hive, get_hive_location},
493
-
location,
494
-
};
495
-
use std::{assert_matches::assert_matches, path::PathBuf};
496
-
use std::{collections::HashMap, env};
497
-
498
-
fn get_steps(goal_executor: GoalExecutor) -> std::vec::Vec<Step> {
499
-
goal_executor
500
-
.steps
501
-
.into_iter()
502
-
.filter(|step| step.should_execute(&goal_executor.context))
503
-
.collect::<Vec<_>>()
504
-
}
505
-
506
-
#[tokio::test]
507
-
#[cfg_attr(feature = "no_web_tests", ignore)]
508
-
async fn default_values_match() {
509
-
let mut path = get_test_path!();
510
-
511
-
let location =
512
-
get_hive_location(path.display().to_string(), SubCommandModifiers::default())
513
-
.await
514
-
.unwrap();
515
-
let hive = Hive::new_from_path(&location, None, SubCommandModifiers::default())
516
-
.await
517
-
.unwrap();
518
-
519
-
let node = Node::default();
520
-
521
-
let mut nodes = HashMap::new();
522
-
nodes.insert(Name("NAME".into()), node);
523
-
524
-
path.push("hive.nix");
525
-
526
-
assert_eq!(
527
-
hive,
528
-
Hive {
529
-
nodes,
530
-
schema: Hive::SCHEMA_VERSION
531
-
}
532
-
);
533
-
}
534
-
535
-
#[tokio::test]
536
-
async fn order_build_locally() {
537
-
let location = location!(get_test_path!());
538
-
let mut node = Node {
539
-
build_remotely: false,
540
-
..Default::default()
541
-
};
542
-
let name = &Name(function_name!().into());
543
-
let executor = GoalExecutor::new(Context::create_test_context(location, name, &mut node));
544
-
let steps = get_steps(executor);
545
-
546
-
assert_eq!(
547
-
steps,
548
-
vec![
549
-
Ping.into(),
550
-
PushKeyAgent.into(),
551
-
Keys {
552
-
filter: UploadKeyAt::PreActivation
553
-
}
554
-
.into(),
555
-
crate::hive::steps::evaluate::Evaluate.into(),
556
-
crate::hive::steps::build::Build.into(),
557
-
crate::hive::steps::push::PushBuildOutput.into(),
558
-
SwitchToConfiguration.into(),
559
-
Keys {
560
-
filter: UploadKeyAt::PostActivation
561
-
}
562
-
.into(),
563
-
CleanUp.into()
564
-
]
565
-
);
566
-
}
567
-
568
-
#[tokio::test]
569
-
async fn order_keys_only() {
570
-
let location = location!(get_test_path!());
571
-
let mut node = Node::default();
572
-
let name = &Name(function_name!().into());
573
-
let mut context = Context::create_test_context(location, name, &mut node);
574
-
575
-
context.goal = Goal::Keys;
576
-
577
-
let executor = GoalExecutor::new(context);
578
-
let steps = get_steps(executor);
579
-
580
-
assert_eq!(
581
-
steps,
582
-
vec![
583
-
Ping.into(),
584
-
PushKeyAgent.into(),
585
-
Keys {
586
-
filter: UploadKeyAt::NoFilter
587
-
}
588
-
.into(),
589
-
CleanUp.into()
590
-
]
591
-
);
592
-
}
593
-
594
-
#[tokio::test]
595
-
async fn order_build_only() {
596
-
let location = location!(get_test_path!());
597
-
let mut node = Node::default();
598
-
let name = &Name(function_name!().into());
599
-
let mut context = Context::create_test_context(location, name, &mut node);
600
-
601
-
context.goal = Goal::Build;
602
-
603
-
let executor = GoalExecutor::new(context);
604
-
let steps = get_steps(executor);
605
-
606
-
assert_eq!(
607
-
steps,
608
-
vec![
609
-
Ping.into(),
610
-
crate::hive::steps::evaluate::Evaluate.into(),
611
-
crate::hive::steps::build::Build.into(),
612
-
crate::hive::steps::push::PushBuildOutput.into(),
613
-
CleanUp.into()
614
-
]
615
-
);
616
-
}
617
-
618
-
#[tokio::test]
619
-
async fn order_push_only() {
620
-
let location = location!(get_test_path!());
621
-
let mut node = Node::default();
622
-
let name = &Name(function_name!().into());
623
-
let mut context = Context::create_test_context(location, name, &mut node);
624
-
625
-
context.goal = Goal::Push;
626
-
627
-
let executor = GoalExecutor::new(context);
628
-
let steps = get_steps(executor);
629
-
630
-
assert_eq!(
631
-
steps,
632
-
vec![
633
-
Ping.into(),
634
-
crate::hive::steps::evaluate::Evaluate.into(),
635
-
crate::hive::steps::push::PushEvaluatedOutput.into(),
636
-
CleanUp.into()
637
-
]
638
-
);
639
-
}
640
-
641
-
#[tokio::test]
642
-
async fn order_remote_build() {
643
-
let location = location!(get_test_path!());
644
-
let mut node = Node {
645
-
build_remotely: true,
646
-
..Default::default()
647
-
};
648
-
649
-
let name = &Name(function_name!().into());
650
-
let executor = GoalExecutor::new(Context::create_test_context(location, name, &mut node));
651
-
let steps = get_steps(executor);
652
-
653
-
assert_eq!(
654
-
steps,
655
-
vec![
656
-
Ping.into(),
657
-
PushKeyAgent.into(),
658
-
Keys {
659
-
filter: UploadKeyAt::PreActivation
660
-
}
661
-
.into(),
662
-
crate::hive::steps::evaluate::Evaluate.into(),
663
-
crate::hive::steps::push::PushEvaluatedOutput.into(),
664
-
crate::hive::steps::build::Build.into(),
665
-
SwitchToConfiguration.into(),
666
-
Keys {
667
-
filter: UploadKeyAt::PostActivation
668
-
}
669
-
.into(),
670
-
CleanUp.into()
671
-
]
672
-
);
673
-
}
674
-
675
-
#[test]
676
-
fn target_fails_increments() {
677
-
let mut target = Target::from_host("localhost");
678
-
679
-
assert_eq!(target.current_host, 0);
680
-
681
-
for i in 0..100 {
682
-
target.host_failed();
683
-
assert_eq!(target.current_host, i + 1);
684
-
}
685
-
}
686
-
687
-
#[test]
688
-
fn get_preferred_host_fails() {
689
-
let mut target = Target {
690
-
hosts: vec![
691
-
"un.reachable.1".into(),
692
-
"un.reachable.2".into(),
693
-
"un.reachable.3".into(),
694
-
"un.reachable.4".into(),
695
-
"un.reachable.5".into(),
696
-
],
697
-
..Default::default()
698
-
};
699
-
700
-
assert_ne!(
701
-
target.get_preferred_host().unwrap().to_string(),
702
-
"un.reachable.5"
703
-
);
704
-
705
-
for i in 1..=5 {
706
-
assert_eq!(
707
-
target.get_preferred_host().unwrap().to_string(),
708
-
format!("un.reachable.{i}")
709
-
);
710
-
target.host_failed();
711
-
}
712
-
713
-
for _ in 0..5 {
714
-
assert_matches!(
715
-
target.get_preferred_host(),
716
-
Err(HiveLibError::NetworkError(NetworkError::HostsExhausted))
717
-
);
718
-
}
719
-
}
720
-
721
-
#[test]
722
-
fn test_ssh_opts() {
723
-
let target = Target::from_host("hello-world");
724
-
let subcommand_modifiers = SubCommandModifiers {
725
-
non_interactive: false,
726
-
..Default::default()
727
-
};
728
-
let tmp = format!(
729
-
"/tmp/{}",
730
-
rand::distr::SampleString::sample_string(&Alphabetic, &mut rand::rng(), 10)
731
-
);
732
-
733
-
std::fs::create_dir(&tmp).unwrap();
734
-
735
-
unsafe { env::set_var("XDG_RUNTIME_DIR", &tmp) }
736
-
737
-
let args = [
738
-
"-l".to_string(),
739
-
target.user.to_string(),
740
-
"-p".to_string(),
741
-
target.port.to_string(),
742
-
"-o".to_string(),
743
-
"StrictHostKeyChecking=accept-new".to_string(),
744
-
"-o".to_string(),
745
-
"PasswordAuthentication=no".to_string(),
746
-
"-o".to_string(),
747
-
"KbdInteractiveAuthentication=no".to_string(),
748
-
];
749
-
750
-
assert_eq!(
751
-
target
752
-
.create_ssh_args(subcommand_modifiers, false, false)
753
-
.unwrap(),
754
-
args
755
-
);
756
-
assert_eq!(
757
-
target.create_ssh_opts(subcommand_modifiers, false).unwrap(),
758
-
args.join(" ")
759
-
);
760
-
761
-
assert_eq!(
762
-
target
763
-
.create_ssh_args(subcommand_modifiers, false, true)
764
-
.unwrap(),
765
-
[
766
-
"-l".to_string(),
767
-
target.user.to_string(),
768
-
"-p".to_string(),
769
-
target.port.to_string(),
770
-
"-o".to_string(),
771
-
"StrictHostKeyChecking=accept-new".to_string(),
772
-
"-o".to_string(),
773
-
"PasswordAuthentication=no".to_string(),
774
-
"-o".to_string(),
775
-
"KbdInteractiveAuthentication=no".to_string(),
776
-
]
777
-
);
778
-
779
-
assert_eq!(
780
-
target
781
-
.create_ssh_args(subcommand_modifiers, true, true)
782
-
.unwrap(),
783
-
[
784
-
"-l".to_string(),
785
-
target.user.to_string(),
786
-
"-p".to_string(),
787
-
target.port.to_string(),
788
-
"-o".to_string(),
789
-
"StrictHostKeyChecking=accept-new".to_string(),
790
-
"-o".to_string(),
791
-
"PasswordAuthentication=no".to_string(),
792
-
"-o".to_string(),
793
-
"KbdInteractiveAuthentication=no".to_string(),
794
-
]
795
-
);
796
-
797
-
// forced non interactive is the same as --non-interactive
798
-
assert_eq!(
799
-
target
800
-
.create_ssh_args(subcommand_modifiers, true, false)
801
-
.unwrap(),
802
-
target
803
-
.create_ssh_args(
804
-
SubCommandModifiers {
805
-
non_interactive: true,
806
-
..Default::default()
807
-
},
808
-
false,
809
-
false
810
-
)
811
-
.unwrap()
812
-
);
813
-
}
814
-
815
-
#[tokio::test]
816
-
async fn context_quits_sigint() {
817
-
let location = location!(get_test_path!());
818
-
let mut node = Node::default();
819
-
820
-
let name = &Name(function_name!().into());
821
-
let context = Context::create_test_context(location, name, &mut node);
822
-
context
823
-
.should_shutdown
824
-
.store(true, std::sync::atomic::Ordering::Relaxed);
825
-
let executor = GoalExecutor::new(context);
826
-
let status = executor.execute().await;
827
-
828
-
assert_matches!(status, Err(HiveLibError::Sigint));
829
-
}
830
-
}
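
create_ssh_args in the removed node.rs builds the ssh argument vector by prefixing every option with a -o flag. A dependency-free sketch that produces the same list on stable Rust (the helper name and option values are illustrative):

// Sketch: produce ["-o", opt1, "-o", opt2, ...] as create_ssh_args does,
// using flat_map instead of intersperse so it compiles on stable Rust.
fn ssh_option_args(options: &[&str]) -> Vec<String> {
    options
        .iter()
        .flat_map(|opt| ["-o".to_string(), (*opt).to_string()])
        .collect()
}

fn main() {
    let args = ssh_option_args(&[
        "StrictHostKeyChecking=accept-new",
        "PasswordAuthentication=no",
    ]);
    assert_eq!(
        args,
        [
            "-o",
            "StrictHostKeyChecking=accept-new",
            "-o",
            "PasswordAuthentication=no",
        ]
    );
}
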
-205
wire/lib/src/hive/steps/activate.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::fmt::Display;
5
-
6
-
use tracing::{error, info, instrument, warn};
7
-
8
-
use crate::{
9
-
HiveLibError,
10
-
commands::{CommandArguments, WireCommandChip, run_command},
11
-
errors::{ActivationError, NetworkError},
12
-
hive::node::{Context, ExecuteStep, Goal, SwitchToConfigurationGoal},
13
-
};
14
-
15
-
#[derive(Debug, PartialEq)]
16
-
pub struct SwitchToConfiguration;
17
-
18
-
impl Display for SwitchToConfiguration {
19
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
20
-
write!(f, "switch-to-configuration")
21
-
}
22
-
}
23
-
24
-
async fn wait_for_ping(ctx: &Context<'_>) -> Result<(), HiveLibError> {
25
-
let host = ctx.node.target.get_preferred_host()?;
26
-
let mut result = ctx.node.ping(ctx.modifiers).await;
27
-
28
-
for num in 0..2 {
29
-
warn!("Trying to ping {host} (attempt {}/3)", num + 1);
30
-
31
-
result = ctx.node.ping(ctx.modifiers).await;
32
-
33
-
if result.is_ok() {
34
-
info!("Regained connection to {} via {host}", ctx.name);
35
-
36
-
break;
37
-
}
38
-
}
39
-
40
-
result
41
-
}
42
-
43
-
async fn set_profile(
44
-
goal: SwitchToConfigurationGoal,
45
-
built_path: &String,
46
-
ctx: &Context<'_>,
47
-
) -> Result<(), HiveLibError> {
48
-
info!("Setting profiles in anticipation for switch-to-configuration {goal}");
49
-
50
-
let command_string = format!("nix-env -p /nix/var/nix/profiles/system/ --set {built_path}");
51
-
52
-
let child = run_command(
53
-
&CommandArguments::new(command_string, ctx.modifiers)
54
-
.mode(crate::commands::ChildOutputMode::Nix)
55
-
.on_target(if ctx.should_apply_locally {
56
-
None
57
-
} else {
58
-
Some(&ctx.node.target)
59
-
})
60
-
.elevated(ctx.node),
61
-
)
62
-
.await?;
63
-
64
-
let _ = child
65
-
.wait_till_success()
66
-
.await
67
-
.map_err(HiveLibError::CommandError)?;
68
-
69
-
info!("Set system profile");
70
-
71
-
Ok(())
72
-
}
73
-
74
-
impl ExecuteStep for SwitchToConfiguration {
75
-
fn should_execute(&self, ctx: &Context) -> bool {
76
-
matches!(ctx.goal, Goal::SwitchToConfiguration(..))
77
-
}
78
-
79
-
#[instrument(skip_all, name = "activate")]
80
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
81
-
let built_path = ctx.state.build.as_ref().unwrap();
82
-
83
-
let Goal::SwitchToConfiguration(goal) = &ctx.goal else {
84
-
unreachable!("Cannot reach as guarded by should_execute")
85
-
};
86
-
87
-
if matches!(
88
-
goal,
89
-
// switch profile if switch or boot
90
-
// https://github.com/NixOS/nixpkgs/blob/a2c92aa34735a04010671e3378e2aa2d109b2a72/pkgs/by-name/ni/nixos-rebuild-ng/src/nixos_rebuild/services.py#L224
91
-
SwitchToConfigurationGoal::Switch | SwitchToConfigurationGoal::Boot
92
-
) {
93
-
set_profile(*goal, built_path, ctx).await?;
94
-
}
95
-
96
-
info!("Running switch-to-configuration {goal}");
97
-
98
-
let command_string = format!(
99
-
"{built_path}/bin/switch-to-configuration {}",
100
-
match goal {
101
-
SwitchToConfigurationGoal::Switch => "switch",
102
-
SwitchToConfigurationGoal::Boot => "boot",
103
-
SwitchToConfigurationGoal::Test => "test",
104
-
SwitchToConfigurationGoal::DryActivate => "dry-activate",
105
-
}
106
-
);
107
-
108
-
let child = run_command(
109
-
&CommandArguments::new(command_string, ctx.modifiers)
110
-
.on_target(if ctx.should_apply_locally {
111
-
None
112
-
} else {
113
-
Some(&ctx.node.target)
114
-
})
115
-
.elevated(ctx.node)
116
-
.log_stdout(),
117
-
)
118
-
.await?;
119
-
120
-
let result = child.wait_till_success().await;
121
-
122
-
match result {
123
-
Ok(_) => {
124
-
if !ctx.reboot {
125
-
return Ok(());
126
-
}
127
-
128
-
if ctx.should_apply_locally {
129
-
error!("Refusing to reboot local machine!");
130
-
131
-
return Ok(());
132
-
}
133
-
134
-
warn!("Rebooting {name}!", name = ctx.name);
135
-
136
-
let reboot = run_command(
137
-
&CommandArguments::new("reboot now", ctx.modifiers)
138
-
.log_stdout()
139
-
.on_target(Some(&ctx.node.target))
140
-
.elevated(ctx.node),
141
-
)
142
-
.await?;
143
-
144
-
// consume result, impossible to know if the machine failed to reboot or we
145
-
// simply disconnected
146
-
let _ = reboot
147
-
.wait_till_success()
148
-
.await
149
-
.map_err(HiveLibError::CommandError)?;
150
-
151
-
info!("Rebooted {name}, waiting to reconnect...", name = ctx.name);
152
-
153
-
if wait_for_ping(ctx).await.is_ok() {
154
-
return Ok(());
155
-
}
156
-
157
-
error!(
158
-
"Failed to get regain connection to {name} via {host} after reboot.",
159
-
name = ctx.name,
160
-
host = ctx.node.target.get_preferred_host()?
161
-
);
162
-
163
-
return Err(HiveLibError::NetworkError(
164
-
NetworkError::HostUnreachableAfterReboot(
165
-
ctx.node.target.get_preferred_host()?.to_string(),
166
-
),
167
-
));
168
-
}
169
-
Err(error) => {
170
-
warn!(
171
-
"Activation command for {name} exited unsuccessfully.",
172
-
name = ctx.name
173
-
);
174
-
175
-
// Bail if the command couldn't have broken the system
176
-
// and don't try to regain connection to localhost
177
-
if matches!(goal, SwitchToConfigurationGoal::DryActivate)
178
-
|| ctx.should_apply_locally
179
-
{
180
-
return Err(HiveLibError::ActivationError(
181
-
ActivationError::SwitchToConfigurationError(*goal, ctx.name.clone(), error),
182
-
));
183
-
}
184
-
185
-
if wait_for_ping(ctx).await.is_ok() {
186
-
return Err(HiveLibError::ActivationError(
187
-
ActivationError::SwitchToConfigurationError(*goal, ctx.name.clone(), error),
188
-
));
189
-
}
190
-
191
-
error!(
192
-
"Failed to get regain connection to {name} via {host} after {goal} activation.",
193
-
name = ctx.name,
194
-
host = ctx.node.target.get_preferred_host()?
195
-
);
196
-
197
-
return Err(HiveLibError::NetworkError(
198
-
NetworkError::HostUnreachableAfterReboot(
199
-
ctx.node.target.get_preferred_host()?.to_string(),
200
-
),
201
-
));
202
-
}
203
-
}
204
-
}
205
-
}
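
The removed activate.rs activates a built closure in two shell steps: it points the system profile at the store path with nix-env (for the switch and boot goals), then runs the closure's own switch-to-configuration script. A sketch of the two command strings it formats, using an illustrative store path; the SSH, elevation, and reboot handling are omitted:

// Sketch of the command strings activate.rs assembles before running them.
fn activation_commands(built_path: &str, goal: &str) -> (String, String) {
    let set_profile = format!("nix-env -p /nix/var/nix/profiles/system/ --set {built_path}");
    let activate = format!("{built_path}/bin/switch-to-configuration {goal}");
    (set_profile, activate)
}

fn main() {
    let (set_profile, activate) =
        activation_commands("/nix/store/abc123-nixos-system-example", "switch");
    println!("{set_profile}");
    println!("{activate}");
}
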
-68
wire/lib/src/hive/steps/build.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::fmt::Display;
5
-
6
-
use tracing::{info, instrument};
7
-
8
-
use crate::{
9
-
HiveLibError,
10
-
commands::{CommandArguments, Either, WireCommandChip, run_command_with_env},
11
-
hive::node::{Context, ExecuteStep, Goal},
12
-
};
13
-
14
-
#[derive(Debug, PartialEq)]
15
-
pub struct Build;
16
-
17
-
impl Display for Build {
18
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
19
-
write!(f, "Build the node")
20
-
}
21
-
}
22
-
23
-
impl ExecuteStep for Build {
24
-
fn should_execute(&self, ctx: &Context) -> bool {
25
-
!matches!(ctx.goal, Goal::Keys | Goal::Push)
26
-
}
27
-
28
-
#[instrument(skip_all, name = "build")]
29
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
30
-
let top_level = ctx.state.evaluation.as_ref().unwrap();
31
-
32
-
let command_string = format!(
33
-
"nix --extra-experimental-features nix-command \
34
-
build --print-build-logs --no-link --print-out-paths {top_level}"
35
-
);
36
-
37
-
let status = run_command_with_env(
38
-
&CommandArguments::new(command_string, ctx.modifiers)
39
-
// build remotely if asked for AND we aren't applying locally
40
-
// building remotely but applying locally does not logically
41
-
// make any sense
42
-
.on_target(if ctx.node.build_remotely && !ctx.should_apply_locally {
43
-
Some(&ctx.node.target)
44
-
} else {
45
-
None
46
-
})
47
-
.mode(crate::commands::ChildOutputMode::Nix)
48
-
.log_stdout(),
49
-
std::collections::HashMap::new(),
50
-
)
51
-
.await?
52
-
.wait_till_success()
53
-
.await
54
-
.map_err(|source| HiveLibError::NixBuildError {
55
-
name: ctx.name.clone(),
56
-
source,
57
-
})?;
58
-
59
-
let stdout = match status {
60
-
Either::Left((_, stdout)) | Either::Right((_, stdout)) => stdout,
61
-
};
62
-
63
-
info!("Built output: {stdout:?}");
64
-
ctx.state.build = Some(stdout);
65
-
66
-
Ok(())
67
-
}
68
-
}
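
The removed build.rs shells out to nix build and captures stdout as the built out path. A sketch of the invocation string it formats, with an illustrative derivation path (the ^* output selector comes from the Derivation Display impl in node.rs above):

// Sketch of the `nix build` invocation Build formats; remote/local dispatch
// and output capture are omitted.
fn build_command(top_level: &str) -> String {
    format!(
        "nix --extra-experimental-features nix-command \
         build --print-build-logs --no-link --print-out-paths {top_level}"
    )
}

fn main() {
    println!(
        "{}",
        build_command("/nix/store/abc123-nixos-system-example.drv^*")
    );
}
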
-28
wire/lib/src/hive/steps/cleanup.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::fmt::Display;
5
-
6
-
use crate::{
7
-
errors::HiveLibError,
8
-
hive::node::{Context, ExecuteStep},
9
-
};
10
-
11
-
#[derive(PartialEq, Debug)]
12
-
pub(crate) struct CleanUp;
13
-
14
-
impl Display for CleanUp {
15
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
16
-
write!(f, "Clean up")
17
-
}
18
-
}
19
-
20
-
impl ExecuteStep for CleanUp {
21
-
fn should_execute(&self, ctx: &Context) -> bool {
22
-
!ctx.should_apply_locally
23
-
}
24
-
25
-
async fn execute(&self, _ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
26
-
Ok(())
27
-
}
28
-
}
-35
wire/lib/src/hive/steps/evaluate.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use std::fmt::Display;
5
-
6
-
use tracing::instrument;
7
-
8
-
use crate::{
9
-
HiveLibError,
10
-
hive::node::{Context, ExecuteStep, Goal},
11
-
};
12
-
13
-
#[derive(Debug, PartialEq)]
14
-
pub struct Evaluate;
15
-
16
-
impl Display for Evaluate {
17
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
18
-
write!(f, "Evaluate the node")
19
-
}
20
-
}
21
-
22
-
impl ExecuteStep for Evaluate {
23
-
fn should_execute(&self, ctx: &Context) -> bool {
24
-
!matches!(ctx.goal, Goal::Keys)
25
-
}
26
-
27
-
#[instrument(skip_all, name = "eval")]
28
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
29
-
let rx = ctx.state.evaluation_rx.take().unwrap();
30
-
31
-
ctx.state.evaluation = Some(rx.await.unwrap()?);
32
-
33
-
Ok(())
34
-
}
35
-
}
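
The removed evaluate.rs does not run the evaluation itself; it takes the oneshot receiver that the background evaluation task was handed and awaits the result. A minimal sketch of that hand-off, assuming tokio with the rt and macros features; the payload string is illustrative:

// Sketch of the oneshot hand-off Evaluate relies on: a spawned task sends
// the evaluation result once, and the step takes the receiver and awaits it.
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<String>();
    let mut state: Option<oneshot::Receiver<String>> = Some(rx);

    tokio::spawn(async move {
        // Stand-in for the hive evaluation producing a derivation path.
        let _ = tx.send("/nix/store/abc123-example.drv".to_string());
    });

    let rx = state.take().expect("receiver already consumed");
    let derivation = rx.await.expect("evaluation task dropped the sender");
    assert_eq!(derivation, "/nix/store/abc123-example.drv");
}
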
-423
wire/lib/src/hive/steps/keys.rs
···
1
-
// SPDX-License-Identifier: AGPL-3.0-or-later
2
-
// Copyright 2024-2025 wire Contributors
3
-
4
-
use base64::Engine;
5
-
use base64::prelude::BASE64_STANDARD;
6
-
use futures::future::join_all;
7
-
use im::Vector;
8
-
use itertools::{Itertools, Position};
9
-
use owo_colors::OwoColorize;
10
-
use prost::Message;
11
-
use prost::bytes::BytesMut;
12
-
use serde::{Deserialize, Serialize};
13
-
use sha2::{Digest, Sha256};
14
-
use std::env;
15
-
use std::fmt::Display;
16
-
use std::io::Cursor;
17
-
use std::iter::Peekable;
18
-
use std::path::PathBuf;
19
-
use std::pin::Pin;
20
-
use std::process::Stdio;
21
-
use std::str::from_utf8;
22
-
use std::vec::IntoIter;
23
-
use tokio::io::AsyncReadExt as _;
24
-
use tokio::process::Command;
25
-
use tokio::{fs::File, io::AsyncRead};
26
-
use tokio_util::codec::LengthDelimitedCodec;
27
-
use tracing::{debug, instrument};
28
-
29
-
use crate::HiveLibError;
30
-
use crate::commands::common::push;
31
-
use crate::commands::{CommandArguments, WireCommandChip, run_command};
32
-
use crate::errors::KeyError;
33
-
use crate::hive::node::{Context, ExecuteStep, Goal, Push, SwitchToConfigurationGoal};
34
-
35
-
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
36
-
#[serde(tag = "t", content = "c")]
37
-
pub enum Source {
38
-
String(String),
39
-
Path(PathBuf),
40
-
Command(Vec<String>),
41
-
}
42
-
43
-
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
44
-
pub enum UploadKeyAt {
45
-
#[serde(rename = "pre-activation")]
46
-
PreActivation,
47
-
#[serde(rename = "post-activation")]
48
-
PostActivation,
49
-
#[serde(skip)]
50
-
NoFilter,
51
-
}
52
-
53
-
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
54
-
pub struct Key {
55
-
pub name: String,
56
-
#[serde(rename = "destDir")]
57
-
pub dest_dir: String,
58
-
pub path: PathBuf,
59
-
pub group: String,
60
-
pub user: String,
61
-
pub permissions: String,
62
-
pub source: Source,
63
-
#[serde(rename = "uploadAt")]
64
-
pub upload_at: UploadKeyAt,
65
-
#[serde(default)]
66
-
pub environment: im::HashMap<String, String>,
67
-
}
68
-
69
-
impl Display for Key {
70
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
71
-
write!(
72
-
f,
73
-
"{} {} {}:{} {}",
74
-
match self.source {
75
-
Source::String(_) => "Literal",
76
-
Source::Path(_) => "Path",
77
-
Source::Command(_) => "Command",
78
-
}
79
-
.if_supports_color(owo_colors::Stream::Stdout, |x| x.dimmed()),
80
-
[self.dest_dir.clone(), self.name.clone()]
81
-
.iter()
82
-
.collect::<PathBuf>()
83
-
.display(),
84
-
self.user,
85
-
self.group,
86
-
self.permissions,
87
-
)
88
-
}
89
-
}
90
-
91
-
#[cfg(test)]
92
-
impl Default for Key {
93
-
fn default() -> Self {
94
-
use im::HashMap;
95
-
96
-
Self {
97
-
name: "key".into(),
98
-
dest_dir: "/somewhere/".into(),
99
-
path: "key".into(),
100
-
group: "root".into(),
101
-
user: "root".into(),
102
-
permissions: "0600".into(),
103
-
source: Source::String("test key".into()),
104
-
upload_at: UploadKeyAt::PreActivation,
105
-
environment: HashMap::new(),
106
-
}
107
-
}
108
-
}
109
-
110
-
fn get_u32_permission(key: &Key) -> Result<u32, KeyError> {
111
-
u32::from_str_radix(&key.permissions, 8).map_err(KeyError::ParseKeyPermissions)
112
-
}
113
-
114
-
async fn create_reader(key: &'_ Key) -> Result<Pin<Box<dyn AsyncRead + Send + '_>>, KeyError> {
115
-
match &key.source {
116
-
Source::Path(path) => Ok(Box::pin(File::open(path).await.map_err(KeyError::File)?)),
117
-
Source::String(string) => Ok(Box::pin(Cursor::new(string))),
118
-
Source::Command(args) => {
119
-
let output = Command::new(args.first().ok_or(KeyError::Empty)?)
120
-
.args(&args[1..])
121
-
.stdin(Stdio::null())
122
-
.stdout(Stdio::piped())
123
-
.stderr(Stdio::piped())
124
-
.envs(key.environment.clone())
125
-
.spawn()
126
-
.map_err(|err| KeyError::CommandSpawnError {
127
-
error: err,
128
-
command: args.join(" "),
129
-
command_span: Some((0..args.first().unwrap().len()).into()),
130
-
})?
131
-
.wait_with_output()
132
-
.await
133
-
.map_err(|err| KeyError::CommandResolveError {
134
-
error: err,
135
-
command: args.join(" "),
136
-
})?;
137
-
138
-
if output.status.success() {
139
-
return Ok(Box::pin(Cursor::new(output.stdout)));
140
-
}
141
-
142
-
Err(KeyError::CommandError(
143
-
output.status,
144
-
from_utf8(&output.stderr).unwrap().to_string(),
145
-
))
146
-
}
147
-
}
148
-
}
149
-
150
-
async fn process_key(key: &Key) -> Result<(key_agent::keys::KeySpec, Vec<u8>), KeyError> {
151
-
let mut reader = create_reader(key).await?;
152
-
153
-
let mut buf = Vec::new();
154
-
155
-
reader
156
-
.read_to_end(&mut buf)
157
-
.await
158
-
.expect("failed to read into buffer");
159
-
160
-
let destination: PathBuf = [key.dest_dir.clone(), key.name.clone()].iter().collect();
161
-
162
-
debug!("Staging push to {}", destination.clone().display());
163
-
164
-
Ok((
165
-
key_agent::keys::KeySpec {
166
-
length: buf
167
-
.len()
168
-
.try_into()
169
-
.expect("Failed to convert usize buf length to i32"),
170
-
user: key.user.clone(),
171
-
group: key.group.clone(),
172
-
permissions: get_u32_permission(key)?,
173
-
destination: destination.into_os_string().into_string().unwrap(),
174
-
digest: Sha256::digest(&buf).to_vec(),
175
-
last: false,
176
-
},
177
-
buf,
178
-
))
179
-
}
180
-
181
-
#[derive(Debug, PartialEq)]
182
-
pub struct Keys {
183
-
pub filter: UploadKeyAt,
184
-
}
185
-
#[derive(Debug, PartialEq)]
186
-
pub struct PushKeyAgent;
187
-
188
-
impl Display for Keys {
189
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
190
-
write!(f, "Upload key @ {:?}", self.filter)
191
-
}
192
-
}
193
-
194
-
impl Display for PushKeyAgent {
195
-
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
196
-
write!(f, "Push the key agent")
197
-
}
198
-
}
199
-
200
-
pub struct SimpleLengthDelimWriter<F> {
201
-
codec: LengthDelimitedCodec,
202
-
write_fn: F,
203
-
}
204
-
205
-
impl<F> SimpleLengthDelimWriter<F>
206
-
where
207
-
F: AsyncFnMut(Vec<u8>) -> Result<(), HiveLibError>,
208
-
{
209
-
fn new(write_fn: F) -> Self {
210
-
Self {
211
-
codec: LengthDelimitedCodec::new(),
212
-
write_fn,
213
-
}
214
-
}
215
-
216
-
async fn send(&mut self, data: prost::bytes::Bytes) -> Result<(), HiveLibError> {
217
-
let mut buffer = BytesMut::new();
218
-
tokio_util::codec::Encoder::encode(&mut self.codec, data, &mut buffer)
219
-
.map_err(HiveLibError::Encoding)?;
220
-
221
-
(self.write_fn)(buffer.to_vec()).await?;
222
-
Ok(())
223
-
}
224
-
}
225
-
226
-
impl ExecuteStep for Keys {
227
-
fn should_execute(&self, ctx: &Context) -> bool {
228
-
if ctx.no_keys {
229
-
return false;
230
-
}
231
-
232
-
// should execute when there is no filter and the goal is keys;
234
-
// otherwise, only execute when the goal is switch and the key is filtered to pre- or post-activation
234
-
matches!(
235
-
(&self.filter, &ctx.goal),
236
-
(UploadKeyAt::NoFilter, Goal::Keys)
237
-
| (
238
-
UploadKeyAt::PreActivation | UploadKeyAt::PostActivation,
239
-
Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch)
240
-
)
241
-
)
242
-
}
243
-
244
-
#[instrument(skip_all, name = "keys")]
245
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
246
-
let agent_directory = ctx.state.key_agent_directory.as_ref().unwrap();
247
-
248
-
let mut keys = self.select_keys(&ctx.node.keys).await?;
249
-
250
-
if keys.peek().is_none() {
251
-
debug!("Had no keys to push, ending KeyStep early.");
252
-
return Ok(());
253
-
}
254
-
255
-
let command_string = format!("{agent_directory}/bin/key_agent");
256
-
257
-
let mut child = run_command(
258
-
&CommandArguments::new(command_string, ctx.modifiers)
259
-
.on_target(if ctx.should_apply_locally {
260
-
None
261
-
} else {
262
-
Some(&ctx.node.target)
263
-
})
264
-
.elevated(ctx.node)
265
-
.keep_stdin_open()
266
-
.log_stdout(),
267
-
)
268
-
.await?;
269
-
270
-
let mut writer = SimpleLengthDelimWriter::new(async |data| child.write_stdin(data).await);
271
-
272
-
for (position, (mut spec, buf)) in keys.with_position() {
273
-
if matches!(position, Position::Last | Position::Only) {
274
-
spec.last = true;
275
-
}
276
-
277
-
debug!("Writing spec & buf for {:?}", spec);
278
-
279
-
writer
280
-
.send(BASE64_STANDARD.encode(spec.encode_to_vec()).into())
281
-
.await?;
282
-
writer.send(BASE64_STANDARD.encode(buf).into()).await?;
283
-
}
284
-
285
-
let status = child
286
-
.wait_till_success()
287
-
.await
288
-
.map_err(HiveLibError::CommandError)?;
289
-
290
-
debug!("status: {status:?}");
291
-
292
-
Ok(())
293
-
}
294
-
}
295
-
296
-
impl Keys {
297
-
async fn select_keys(
298
-
&self,
299
-
keys: &Vector<Key>,
300
-
) -> Result<Peekable<IntoIter<(key_agent::keys::KeySpec, std::vec::Vec<u8>)>>, HiveLibError>
301
-
{
302
-
let futures = keys
303
-
.iter()
304
-
.filter(|key| self.filter == UploadKeyAt::NoFilter || (key.upload_at == self.filter))
305
-
.map(|key| async move {
306
-
process_key(key)
307
-
.await
308
-
.map_err(|err| HiveLibError::KeyError(key.name.clone(), err))
309
-
});
310
-
311
-
Ok(join_all(futures)
312
-
.await
313
-
.into_iter()
314
-
.collect::<Result<Vec<_>, HiveLibError>>()?
315
-
.into_iter()
316
-
.peekable())
317
-
}
318
-
}
319
-
320
-
impl ExecuteStep for PushKeyAgent {
321
-
fn should_execute(&self, ctx: &Context) -> bool {
322
-
if ctx.no_keys {
323
-
return false;
324
-
}
325
-
326
-
matches!(
327
-
&ctx.goal,
328
-
Goal::Keys | Goal::SwitchToConfiguration(SwitchToConfigurationGoal::Switch)
329
-
)
330
-
}
331
-
332
-
#[instrument(skip_all, name = "push_agent")]
333
-
async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
334
-
let arg_name = format!(
335
-
"WIRE_KEY_AGENT_{platform}",
336
-
platform = ctx.node.host_platform.replace('-', "_")
337
-
);
338
-
339
-
let agent_directory = match env::var_os(&arg_name) {
340
-
Some(agent) => agent.into_string().unwrap(),
341
-
None => panic!(
342
-
"{arg_name} environment variable not set! \n
343
-
wire was not built with the ability to deploy keys to this platform. \n
344
-
Please create an issue: https://github.com/mrshmllow/wire/issues/new?template=bug_report.md"
345
-
),
346
-
};
347
-
348
-
if !ctx.should_apply_locally {
349
-
push(ctx, Push::Path(&agent_directory)).await?;
350
-
}
351
-
352
-
ctx.state.key_agent_directory = Some(agent_directory);
353
-
354
-
Ok(())
355
-
}
356
-
}
357
-
358
-
#[cfg(test)]
359
-
mod tests {
360
-
use im::Vector;
361
-
362
-
use crate::hive::steps::keys::{Key, Keys, UploadKeyAt, process_key};
363
-
364
-
fn new_key(upload_at: &UploadKeyAt) -> Key {
365
-
Key {
366
-
upload_at: upload_at.clone(),
367
-
source: super::Source::String(match upload_at {
368
-
UploadKeyAt::PreActivation => "pre".into(),
369
-
UploadKeyAt::PostActivation => "post".into(),
370
-
UploadKeyAt::NoFilter => "none".into(),
371
-
}),
372
-
..Default::default()
373
-
}
374
-
}
375
-
376
-
#[tokio::test]
377
-
async fn key_filtering() {
378
-
let keys = Vector::from(vec![
379
-
new_key(&UploadKeyAt::PreActivation),
380
-
new_key(&UploadKeyAt::PostActivation),
381
-
new_key(&UploadKeyAt::PreActivation),
382
-
new_key(&UploadKeyAt::PostActivation),
383
-
]);
384
-
385
-
for (_, buf) in (Keys {
386
-
filter: crate::hive::steps::keys::UploadKeyAt::PreActivation,
387
-
})
388
-
.select_keys(&keys)
389
-
.await
390
-
.unwrap()
391
-
{
392
-
assert_eq!(String::from_utf8_lossy(&buf), "pre");
393
-
}
394
-
395
-
for (_, buf) in (Keys {
396
-
filter: crate::hive::steps::keys::UploadKeyAt::PostActivation,
397
-
})
398
-
.select_keys(&keys)
399
-
.await
400
-
.unwrap()
401
-
{
402
-
assert_eq!(String::from_utf8_lossy(&buf), "post");
403
-
}
404
-
405
-
// test that NoFilter processes all keys.
406
-
let processed_all =
407
-
futures::future::join_all(keys.iter().map(async |x| process_key(x).await))
408
-
.await
409
-
.iter()
410
-
.flatten()
411
-
.cloned()
412
-
.collect::<Vec<_>>();
413
-
let no_filter = (Keys {
414
-
filter: crate::hive::steps::keys::UploadKeyAt::NoFilter,
415
-
})
416
-
.select_keys(&keys)
417
-
.await
418
-
.unwrap()
419
-
.collect::<Vec<_>>();
420
-
421
-
assert_eq!(processed_all, no_filter);
422
-
}
423
-
}
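
The tuple match in `should_execute` above gates key pushes on both the step's filter and the deployment goal: the unfiltered step fires only for the explicit keys goal, while the pre/post-activation steps fire only during a full switch. A reduced, self-contained sketch of that decision, using stand-in enums rather than the real `UploadKeyAt` and `Goal` types:

// Stand-ins for the real UploadKeyAt / Goal types, for illustration only.
enum Filter { NoFilter, PreActivation, PostActivation }
enum Goal { Keys, Switch, Other }

fn should_execute(filter: &Filter, goal: &Goal) -> bool {
    matches!(
        (filter, goal),
        (Filter::NoFilter, Goal::Keys)
            | (Filter::PreActivation | Filter::PostActivation, Goal::Switch)
    )
}

fn main() {
    // the `keys` goal only runs the unfiltered step...
    assert!(should_execute(&Filter::NoFilter, &Goal::Keys));
    assert!(!should_execute(&Filter::PreActivation, &Goal::Keys));
    // ...while a full switch only runs the pre/post-activation steps.
    assert!(should_execute(&Filter::PostActivation, &Goal::Switch));
    assert!(!should_execute(&Filter::NoFilter, &Goal::Switch));
    // any other goal never pushes keys from this step.
    assert!(!should_execute(&Filter::PreActivation, &Goal::Other));
}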
-10
wire/lib/src/hive/steps/mod.rs
-54
wire/lib/src/hive/steps/ping.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

use std::fmt::Display;

use tracing::{Level, event, instrument};

use crate::{
    HiveLibError,
    hive::node::{Context, ExecuteStep},
};

#[derive(Debug, PartialEq)]
pub struct Ping;

impl Display for Ping {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Ping node")
    }
}

impl ExecuteStep for Ping {
    fn should_execute(&self, ctx: &Context) -> bool {
        !ctx.should_apply_locally
    }

    #[instrument(skip_all, name = "ping")]
    async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
        loop {
            event!(
                Level::INFO,
                status = "attempting",
                host = ctx.node.target.get_preferred_host()?.to_string()
            );

            if ctx.node.ping(ctx.modifiers).await.is_ok() {
                event!(
                    Level::INFO,
                    status = "success",
                    host = ctx.node.target.get_preferred_host()?.to_string()
                );
                return Ok(());
            }

            // ? will take us out if we ran out of hosts
            event!(
                Level::WARN,
                status = "failed to ping",
                host = ctx.node.target.get_preferred_host()?.to_string()
            );
            ctx.node.target.host_failed();
        }
    }
}
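
The loop above only exits through the `?` on `get_preferred_host()`: every failed ping marks the current host as failed, and once all candidate hosts are exhausted the getter's error propagates out of `execute`. A self-contained sketch of that retry shape, with a hypothetical `Target` that simply advances through a list of hosts:

// Hypothetical stand-in for the node target: a list of hosts and a cursor.
struct Target {
    hosts: Vec<&'static str>,
    failed: usize,
}

impl Target {
    // Errors once every candidate host has been marked failed,
    // mirroring how `get_preferred_host()?` ends the ping loop.
    fn get_preferred_host(&self) -> Result<&'static str, &'static str> {
        self.hosts.get(self.failed).copied().ok_or("ran out of hosts")
    }

    fn host_failed(&mut self) {
        self.failed += 1;
    }
}

fn ping(host: &str) -> bool {
    // pretend only the second host answers
    host == "10.0.0.2"
}

fn ping_step(target: &mut Target) -> Result<&'static str, &'static str> {
    loop {
        let host = target.get_preferred_host()?;
        if ping(host) {
            return Ok(host);
        }
        target.host_failed();
    }
}

fn main() {
    let mut target = Target { hosts: vec!["10.0.0.1", "10.0.0.2"], failed: 0 };
    assert_eq!(ping_step(&mut target), Ok("10.0.0.2"));
}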
-76
wire/lib/src/hive/steps/push.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

use std::fmt::Display;

use tracing::instrument;

use crate::{
    HiveLibError,
    commands::common::push,
    hive::node::{Context, ExecuteStep, Goal},
};

#[derive(Debug, PartialEq)]
pub struct PushEvaluatedOutput;
#[derive(Debug, PartialEq)]
pub struct PushBuildOutput;

impl Display for PushEvaluatedOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Push the evaluated output")
    }
}

impl Display for PushBuildOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Push the build output")
    }
}

impl ExecuteStep for PushEvaluatedOutput {
    fn should_execute(&self, ctx: &Context) -> bool {
        !matches!(ctx.goal, Goal::Keys)
            && !ctx.should_apply_locally
            && (ctx.node.build_remotely | matches!(ctx.goal, Goal::Push))
    }

    #[instrument(skip_all, name = "push_eval")]
    async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
        let top_level = ctx.state.evaluation.as_ref().unwrap();

        push(ctx, crate::hive::node::Push::Derivation(top_level)).await?;

        Ok(())
    }
}

impl ExecuteStep for PushBuildOutput {
    fn should_execute(&self, ctx: &Context) -> bool {
        if matches!(ctx.goal, Goal::Keys | Goal::Push) {
            // skip if we are not building
            return false;
        }

        if ctx.node.build_remotely {
            // skip if we are building remotely
            return false;
        }

        if ctx.should_apply_locally {
            // skip step if we are applying locally
            return false;
        }

        true
    }

    #[instrument(skip_all, name = "push_build")]
    async fn execute(&self, ctx: &mut Context<'_>) -> Result<(), HiveLibError> {
        let built_path = ctx.state.build.as_ref().unwrap();

        push(ctx, crate::hive::node::Push::Path(built_path)).await?;

        Ok(())
    }
}
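
The three early returns in `PushBuildOutput::should_execute` reduce to a single predicate: the build output is pushed only when the goal actually builds something, the node was built locally, and activation happens on a remote target. A minimal sketch of that predicate, with plain booleans standing in for the real context fields:

// goal_builds: goal is neither `keys` nor `push`
// build_remotely: the target builds the closure itself
// apply_locally: activation happens on this machine
fn should_push_build(goal_builds: bool, build_remotely: bool, apply_locally: bool) -> bool {
    goal_builds && !build_remotely && !apply_locally
}

fn main() {
    // a locally built closure for a remote target gets pushed...
    assert!(should_push_build(true, false, false));
    // ...but not when the target built it itself, when nothing was built,
    // or when we are activating on the local machine.
    assert!(!should_push_build(true, true, false));
    assert!(!should_push_build(false, false, false));
    assert!(!should_push_build(true, false, true));
}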
-71
wire/lib/src/lib.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

#![feature(assert_matches)]
#![feature(iter_intersperse)]
#![feature(sync_nonpoison)]
#![feature(nonpoison_mutex)]

use std::{
    io::{IsTerminal, stderr},
    sync::LazyLock,
};

use tokio::sync::{AcquireError, Semaphore, SemaphorePermit};

use crate::{errors::HiveLibError, hive::node::Name, status::STATUS};

pub mod cache;
pub mod commands;
pub mod hive;
pub mod status;

#[cfg(test)]
mod test_macros;

#[cfg(test)]
mod test_support;

pub mod errors;

#[derive(Clone, Debug, Copy, Default)]
pub enum StrictHostKeyChecking {
    /// do not accept new host. dangerous!
    No,

    /// accept-new, default
    #[default]
    AcceptNew,
}

#[derive(Debug, Clone, Copy)]
pub struct SubCommandModifiers {
    pub show_trace: bool,
    pub non_interactive: bool,
    pub ssh_accept_host: StrictHostKeyChecking,
}

impl Default for SubCommandModifiers {
    fn default() -> Self {
        SubCommandModifiers {
            show_trace: false,
            non_interactive: !std::io::stdin().is_terminal(),
            ssh_accept_host: StrictHostKeyChecking::default(),
        }
    }
}

pub enum EvalGoal<'a> {
    Inspect,
    Names,
    GetTopLevel(&'a Name),
}

pub static STDIN_CLOBBER_LOCK: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(1));

pub async fn acquire_stdin_lock<'a>() -> Result<SemaphorePermit<'a>, AcquireError> {
    let result = STDIN_CLOBBER_LOCK.acquire().await?;
    STATUS.lock().wipe_out(&mut stderr());

    Ok(result)
}
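
`STDIN_CLOBBER_LOCK` is a one-permit `tokio::sync::Semaphore` acting as an async mutex over stdin: `acquire_stdin_lock` wipes the progress line before handing out the permit, and `Status::write_above_status` checks `available_permits()` so it stops drawing while a prompt holds it. A minimal, self-contained sketch of the same pattern (the prompt body is a stand-in):

use std::sync::LazyLock;
use tokio::sync::Semaphore;

// One permit: at most one task may own stdin at a time.
static STDIN_LOCK: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(1));

async fn prompt_user(question: &str) -> String {
    // Holding the permit for the whole prompt keeps a progress bar
    // from drawing over the user's input.
    let _permit = STDIN_LOCK.acquire().await.expect("semaphore closed");
    println!("{question}");
    // stand-in for actually reading stdin
    "yes".to_string()
}

fn progress_may_draw() -> bool {
    // Mirrors the `available_permits() != 1` check in `write_above_status`.
    STDIN_LOCK.available_permits() == 1
}

#[tokio::main]
async fn main() {
    assert!(progress_may_draw());
    let answer = prompt_user("continue deployment?").await;
    assert_eq!(answer, "yes");
    // the permit is released when `_permit` goes out of scope
    assert!(progress_may_draw());
}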
-173
wire/lib/src/status.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

use owo_colors::OwoColorize;
use std::{fmt::Write, time::Instant};
use termion::{clear, cursor};

use crate::{STDIN_CLOBBER_LOCK, hive::node::Name};

use std::{
    collections::HashMap,
    sync::{LazyLock, nonpoison::Mutex},
};

#[derive(Default)]
pub enum NodeStatus {
    #[default]
    Pending,
    Running(String),
    Succeeded,
    Failed,
}

pub struct Status {
    statuses: HashMap<String, NodeStatus>,
    began: Instant,
    show_progress: bool,
}

/// global status used for the progress bar in the cli crate
pub static STATUS: LazyLock<Mutex<Status>> = LazyLock::new(|| Mutex::new(Status::new()));

impl Status {
    fn new() -> Self {
        Self {
            statuses: HashMap::default(),
            began: Instant::now(),
            show_progress: false,
        }
    }

    pub const fn show_progress(&mut self, show_progress: bool) {
        self.show_progress = show_progress;
    }

    pub fn add_many(&mut self, names: &[&Name]) {
        self.statuses.extend(
            names
                .iter()
                .map(|name| (name.0.to_string(), NodeStatus::Pending)),
        );
    }

    pub fn set_node_step(&mut self, node: &Name, step: String) {
        self.statuses
            .insert(node.0.to_string(), NodeStatus::Running(step));
    }

    pub fn mark_node_failed(&mut self, node: &Name) {
        self.statuses.insert(node.0.to_string(), NodeStatus::Failed);
    }

    pub fn mark_node_succeeded(&mut self, node: &Name) {
        self.statuses
            .insert(node.0.to_string(), NodeStatus::Succeeded);
    }

    #[must_use]
    fn num_finished(&self) -> usize {
        self.statuses
            .iter()
            .filter(|(_, status)| matches!(status, NodeStatus::Succeeded | NodeStatus::Failed))
            .count()
    }

    #[must_use]
    fn num_running(&self) -> usize {
        self.statuses
            .iter()
            .filter(|(_, status)| matches!(status, NodeStatus::Running(..)))
            .count()
    }

    #[must_use]
    fn num_failed(&self) -> usize {
        self.statuses
            .iter()
            .filter(|(_, status)| matches!(status, NodeStatus::Failed))
            .count()
    }

    #[must_use]
    pub fn get_msg(&self) -> String {
        if self.statuses.is_empty() {
            return String::new();
        }

        let mut msg = format!("[{} / {}", self.num_finished(), self.statuses.len(),);

        let num_failed = self.num_failed();
        let num_running = self.num_running();

        let failed = if num_failed >= 1 {
            Some(format!("{} Failed", num_failed.red()))
        } else {
            None
        };

        let running = if num_running >= 1 {
            Some(format!("{} Deploying", num_running.blue()))
        } else {
            None
        };

        let _ = match (failed, running) {
            (None, None) => write!(&mut msg, ""),
            (Some(message), None) | (None, Some(message)) => write!(&mut msg, " ({message})"),
            (Some(failed), Some(running)) => write!(&mut msg, " ({failed}, {running})"),
        };

        let _ = write!(&mut msg, "]");

        let _ = write!(&mut msg, " {}s", self.began.elapsed().as_secs());

        msg
    }

    pub fn clear<T: std::io::Write>(&self, writer: &mut T) {
        if !self.show_progress {
            return;
        }

        let _ = write!(writer, "{}", cursor::Save);
        // let _ = write!(writer, "{}", cursor::Down(1));
        let _ = write!(writer, "{}", cursor::Left(999));
        let _ = write!(writer, "{}", clear::CurrentLine);
    }

    /// used when there is an interactive prompt
    pub fn wipe_out<T: std::io::Write>(&self, writer: &mut T) {
        if !self.show_progress {
            return;
        }

        let _ = write!(writer, "{}", cursor::Save);
        let _ = write!(writer, "{}", cursor::Left(999));
        let _ = write!(writer, "{}", clear::CurrentLine);
        let _ = writer.flush();
    }

    pub fn write_status<T: std::io::Write>(&mut self, writer: &mut T) {
        if self.show_progress {
            let _ = write!(writer, "{}", self.get_msg());
        }
    }

    pub fn write_above_status<T: std::io::Write>(
        &mut self,
        buf: &[u8],
        writer: &mut T,
    ) -> std::io::Result<usize> {
        if STDIN_CLOBBER_LOCK.available_permits() != 1 {
            // skip
            return Ok(0);
        }

        self.clear(writer);
        let written = writer.write(buf)?;
        self.write_status(writer);

        Ok(written)
    }
}
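
For a sense of what `get_msg` renders: with two of five nodes finished, one failed, two still deploying and twelve seconds elapsed, the line comes out as `[2 / 5 (1 Failed, 2 Deploying)] 12s`, with the counts coloured by `owo_colors`. A plain-text sketch of the same assembly with fixed inputs (no colouring, no global state):

use std::fmt::Write;

// Mirrors the message assembly in `get_msg`, with the counters passed in directly.
fn progress_line(finished: usize, total: usize, failed: usize, running: usize, secs: u64) -> String {
    let mut msg = format!("[{finished} / {total}");

    let failed = (failed >= 1).then(|| format!("{failed} Failed"));
    let running = (running >= 1).then(|| format!("{running} Deploying"));

    let _ = match (failed, running) {
        (None, None) => write!(&mut msg, ""),
        (Some(m), None) | (None, Some(m)) => write!(&mut msg, " ({m})"),
        (Some(f), Some(r)) => write!(&mut msg, " ({f}, {r})"),
    };

    let _ = write!(&mut msg, "] {secs}s");
    msg
}

fn main() {
    assert_eq!(progress_line(2, 5, 1, 2, 12), "[2 / 5 (1 Failed, 2 Deploying)] 12s");
}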
-43
wire/lib/src/test_macros.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

#[macro_export]
macro_rules! function_name {
    () => {{
        fn f() {}
        fn type_name_of<T>(_: T) -> &'static str {
            std::any::type_name::<T>()
        }
        let name = type_name_of(f);
        // closure for async functions
        &name[..name.len() - 3]
    }};
}

#[macro_export]
macro_rules! get_test_path {
    () => {{
        let mut path: PathBuf = env::var("WIRE_TEST_DIR").unwrap().into();
        let full_name = $crate::function_name!();
        let function_name = full_name
            .trim_end_matches("::{{closure}}")
            .split("::")
            .last()
            .unwrap();
        path.push(function_name);

        path
    }};
}

#[macro_export]
macro_rules! location {
    ($path:expr) => {{
        $crate::hive::get_hive_location(
            $path.display().to_string(),
            $crate::SubCommandModifiers::default(),
        )
        .await
        .unwrap()
    }};
}
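
`function_name!` relies on the fact that a function item declared inside another function carries the enclosing path in its type name: `type_name_of(f)` yields something like `my_crate::tests::key_filtering::f`, and slicing off the last three characters drops the trailing `::f` (async fns add a `::{{closure}}` segment, which `get_test_path!` trims afterwards). A standalone sketch of the trick, expanded by hand inside `main`:

fn main() {
    // Expanded by hand inside `main`, exactly as the macro would be.
    fn f() {}
    fn type_name_of<T>(_: T) -> &'static str {
        std::any::type_name::<T>()
    }
    let name = type_name_of(f);
    let current = &name[..name.len() - 3]; // drop the trailing "::f"

    // For a binary named `example` this prints "example::main".
    println!("{current}");
    assert!(current.ends_with("main"));
}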
-67
wire/lib/src/test_support.rs
···
// SPDX-License-Identifier: AGPL-3.0-or-later
// Copyright 2024-2025 wire Contributors

use std::{
    fs::{self, create_dir},
    io,
    path::Path,
    process::Command,
};

use tempdir::TempDir;

pub fn make_flake_sandbox(path: &Path) -> Result<TempDir, io::Error> {
    let tmp_dir = TempDir::new("wire-test")?;

    Command::new("git")
        .args(["init", "-b", "tmp"])
        .current_dir(tmp_dir.path())
        .status()?;

    for entry in fs::read_dir(path)? {
        let entry = entry?;

        fs::copy(entry.path(), tmp_dir.as_ref().join(entry.file_name()))?;
    }

    let root = path.parent().unwrap().parent().unwrap().parent().unwrap();

    create_dir(tmp_dir.as_ref().join("module/"))?;

    fs::copy(
        root.join(Path::new("runtime/evaluate.nix")),
        tmp_dir.as_ref().join("evaluate.nix"),
    )?;
    fs::copy(
        root.join(Path::new("runtime/module/config.nix")),
        tmp_dir.as_ref().join("module/config.nix"),
    )?;
    fs::copy(
        root.join(Path::new("runtime/module/options.nix")),
        tmp_dir.as_ref().join("module/options.nix"),
    )?;
    fs::copy(
        root.join(Path::new("runtime/module/default.nix")),
        tmp_dir.as_ref().join("module/default.nix"),
    )?;
    fs::copy(
        root.join(Path::new("runtime/makeHive.nix")),
        tmp_dir.as_ref().join("makeHive.nix"),
    )?;
    fs::copy(
        root.join(Path::new("flake.lock")),
        tmp_dir.as_ref().join("flake.lock"),
    )?;

    Command::new("git")
        .args(["add", "-A"])
        .current_dir(tmp_dir.path())
        .status()?;

    Command::new("nix")
        .args(["flake", "lock"])
        .current_dir(tmp_dir.path())
        .status()?;

    Ok(tmp_dir)
}
}