+173
abciapp/app.go
+173
abciapp/app.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"fmt"
5
+
"os"
6
+
"path/filepath"
7
+
"sync"
8
+
"time"
9
+
10
+
abcitypes "github.com/cometbft/cometbft/abci/types"
11
+
"github.com/cosmos/iavl"
12
+
"github.com/dgraph-io/badger/v4"
13
+
"github.com/palantir/stacktrace"
14
+
"github.com/samber/lo"
15
+
"tangled.org/gbl08ma/didplcbft/badgeradapter"
16
+
"tangled.org/gbl08ma/didplcbft/plc"
17
+
"tangled.org/gbl08ma/didplcbft/store"
18
+
)
19
+
20
+
// DIDPLCApplication is the ABCI application backing the did:plc directory.
// Application state lives in an IAVL tree persisted through a Badger-backed
// node database.
type DIDPLCApplication struct {
	plc  plc.PLC
	tree *iavl.MutableTree
	// fullyClearTree wipes the tree's backing storage and swaps in a fresh
	// empty tree; used when restoring state from a state-sync snapshot.
	fullyClearTree func() error

	// snapshotDirectory holds the *.snapshot and *.chunksums files.
	snapshotDirectory string
	// snapshotApplier is non-nil while a state-sync snapshot restore is in
	// progress (between OfferSnapshot and the final ApplySnapshotChunk).
	snapshotApplier *snapshotApplier

	// lastProcessedProposalHash and lastProcessedProposalExecTxResults cache
	// the outcome of the last ProcessProposal call that voted ACCEPT, so
	// FinalizeBlock can reuse the already-applied (uncommitted) tree changes
	// when the same block is decided.
	lastProcessedProposalHash          []byte
	lastProcessedProposalExecTxResults []*abcitypes.ExecTxResult
}
31
+
32
+
// store and plc must be able to share transaction objects
33
+
func NewDIDPLCApplication(badgerDB *badger.DB, snapshotDirectory string) (*DIDPLCApplication, plc.PLC, func(), error) {
34
+
treePrefix := []byte{}
35
+
mkTree := func() *iavl.MutableTree {
36
+
return iavl.NewMutableTree(badgeradapter.AdaptBadger(badgerDB, treePrefix), 2048, false, iavl.NewNopLogger())
37
+
}
38
+
39
+
tree := mkTree()
40
+
41
+
_, err := tree.Load()
42
+
if err != nil {
43
+
return nil, nil, func() {}, stacktrace.Propagate(err, "error loading latest version of the tree from storage")
44
+
}
45
+
46
+
err = os.MkdirAll(snapshotDirectory, os.FileMode(0755))
47
+
if err != nil {
48
+
return nil, nil, func() {}, stacktrace.Propagate(err, "")
49
+
}
50
+
51
+
d := &DIDPLCApplication{
52
+
tree: tree,
53
+
snapshotDirectory: snapshotDirectory,
54
+
}
55
+
d.fullyClearTree = func() error {
56
+
// we assume this is called in a single-threaded context, which should be a safe assumption since we'll only call this during snapshot import
57
+
// and CometBFT only calls one ABCI method at a time
58
+
err := d.tree.Close()
59
+
if err != nil {
60
+
return stacktrace.Propagate(err, "")
61
+
}
62
+
63
+
if len(treePrefix) == 0 {
64
+
// this is probably slightly more efficient when we don't actually need to clear a prefix
65
+
err = badgerDB.DropAll()
66
+
if err != nil {
67
+
return stacktrace.Propagate(err, "")
68
+
}
69
+
} else {
70
+
err = badgerDB.DropPrefix(treePrefix)
71
+
if err != nil {
72
+
return stacktrace.Propagate(err, "")
73
+
}
74
+
}
75
+
76
+
*d.tree = *mkTree()
77
+
return nil
78
+
}
79
+
80
+
d.plc = plc.NewPLC(d)
81
+
82
+
fmt.Println("TREE SIZE", tree.Size())
83
+
84
+
var wg sync.WaitGroup
85
+
closeCh := make(chan struct{})
86
+
wg.Go(func() {
87
+
lastSnapshotVersion := int64(0)
88
+
for {
89
+
select {
90
+
case <-closeCh:
91
+
return
92
+
case <-time.After(1 * time.Minute):
93
+
}
94
+
treeVersion := tree.Version()
95
+
if lastSnapshotVersion == 0 || treeVersion > int64(lastSnapshotVersion+1000) {
96
+
err = d.createSnapshot(treeVersion, filepath.Join(snapshotDirectory, "snapshot.tmp"))
97
+
if err != nil {
98
+
fmt.Println("FAILED TO TAKE SNAPSHOT", stacktrace.Propagate(err, ""))
99
+
}
100
+
fmt.Println("TOOK SNAPSHOT OF VERSION", treeVersion)
101
+
lastSnapshotVersion = treeVersion
102
+
}
103
+
}
104
+
105
+
})
106
+
107
+
/*err = d.createSnapshot(tree.Version(), filepath.Join(snapshotDirectory, "snapshot.tmp"))
108
+
if err != nil {
109
+
return nil, nil, func() {}, stacktrace.Propagate(err, "")
110
+
}*/
111
+
112
+
/*
113
+
tree2 := iavl.NewMutableTree(dbm.NewMemDB(), 2048, false, iavl.NewNopLogger())
114
+
importer, err := tree2.Import(tree.Version())
115
+
if err != nil {
116
+
return nil, nil, func() {}, stacktrace.Propagate(err, "")
117
+
}
118
+
cimporter := iavl.NewCompressImporter(importer)
119
+
120
+
st = time.Now()
121
+
for _, node := range nodes {
122
+
err := cimporter.Add(&node)
123
+
if err != nil {
124
+
return nil, nil, func() {}, stacktrace.Propagate(err, "")
125
+
}
126
+
}
127
+
err = importer.Commit()
128
+
if err != nil {
129
+
return nil, nil, func() {}, stacktrace.Propagate(err, "")
130
+
}
131
+
132
+
fmt.Println("Took", time.Since(st), "to import", len(nodes), "nodes")
133
+
fmt.Println("Imported tree hash", hex.EncodeToString(tree2.Hash()), "and version", tree2.Version())
134
+
*/
135
+
136
+
return d, d.plc, func() {
137
+
closeCh <- struct{}{}
138
+
wg.Wait()
139
+
lo.Must0(tree.Close())
140
+
}, nil
141
+
}
142
+
143
+
// Compile-time checks that DIDPLCApplication satisfies the interfaces it is
// expected to implement.
var _ abcitypes.Application = (*DIDPLCApplication)(nil)
var _ plc.TreeProvider = (*DIDPLCApplication)(nil)
145
+
146
+
// ImmutableTree implements [plc.TreeProvider].
147
+
func (d *DIDPLCApplication) ImmutableTree(version plc.TreeVersion) (store.PossiblyMutableTree, error) {
148
+
if version.IsMutable() {
149
+
return store.AdaptMutableTree(d.tree), nil
150
+
}
151
+
var v int64
152
+
if version.IsCommitted() {
153
+
var err error
154
+
v, err = d.tree.GetLatestVersion()
155
+
if err != nil {
156
+
return nil, stacktrace.Propagate(err, "")
157
+
}
158
+
} else {
159
+
var ok bool
160
+
v, ok = version.SpecificVersion()
161
+
if !ok {
162
+
return nil, stacktrace.NewError("unsupported TreeVersion")
163
+
}
164
+
}
165
+
166
+
it, err := d.tree.GetImmutable(v)
167
+
return store.AdaptImmutableTree(it), stacktrace.Propagate(err, "")
168
+
}
169
+
170
+
// MutableTree implements [plc.TreeProvider].
171
+
func (d *DIDPLCApplication) MutableTree() (*iavl.MutableTree, error) {
172
+
return d.tree, nil
173
+
}
+72
abciapp/app_test.go
+72
abciapp/app_test.go
···
···
1
+
package abciapp_test
2
+
3
+
import (
4
+
"encoding/json"
5
+
"testing"
6
+
7
+
"github.com/cometbft/cometbft/abci/types"
8
+
"github.com/dgraph-io/badger/v4"
9
+
cbornode "github.com/ipfs/go-ipld-cbor"
10
+
"github.com/stretchr/testify/require"
11
+
"tangled.org/gbl08ma/didplcbft/abciapp"
12
+
)
13
+
14
+
func txJSONToCBOR(t *testing.T, jsonBytes []byte) []byte {
15
+
var v any
16
+
err := json.Unmarshal(jsonBytes, &v)
17
+
require.NoError(t, err)
18
+
out, err := cbornode.DumpObject(v)
19
+
require.NoError(t, err)
20
+
return out
21
+
}
22
+
23
+
// TestCheckTx exercises the mempool admission path end to end: it builds the
// application on an in-memory Badger DB, commits one empty version so a
// committed tree version exists, and checks that a well-formed CreatePlcOp
// transaction (JSON converted to CBOR) is accepted with code 0.
func TestCheckTx(t *testing.T) {
	badgerDB, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
	require.NoError(t, err)
	t.Cleanup(func() {
		err := badgerDB.Close()
		require.NoError(t, err)
	})

	app, _, cleanup, err := abciapp.NewDIDPLCApplication(badgerDB, "")
	require.NoError(t, err)
	t.Cleanup(cleanup)

	// force one tree version to exist
	_, err = app.Commit(t.Context(), &types.RequestCommit{})
	require.NoError(t, err)

	txJSONBytes := []byte(`
	{
		"action": "CreatePlcOp",
		"arguments": {
			"did": "did:plc:uyauirpjzk6le4ygqzatcwnq",
			"operation": {
				"sig": "JICl9boFJK12rmBbnuKLHdV51f_CMsrsmWUrgu17DDwtsqSiSazidWA_RZbJplYNr34bTwXyY7kkJ7oJHsbOjQ",
				"prev": null,
				"type": "plc_operation",
				"services": {
					"atproto_pds": {
						"type": "AtprotoPersonalDataServer",
						"endpoint": "https://at.tny.im"
					}
				},
				"alsoKnownAs": [
					"at://pdslabeler.at.tny.im"
				],
				"rotationKeys": [
					"did:key:zQ3shhguVfzmkfgXHzrnSeDxzbAvw7NjiVUcu2nmkeiQUrZUM"
				],
				"verificationMethods": {
					"atproto": "did:key:zQ3shsHRci5EP9nr7dzsy5QkzTbmm8uJBXbXdghbLc8JAqogb"
				}
			}
		}
	}`)

	response, err := app.CheckTx(t.Context(), &types.RequestCheckTx{
		Tx: txJSONToCBOR(t, txJSONBytes),
	})
	require.NoError(t, err)
	require.Equal(t, uint32(0), response.Code)
}
+178
abciapp/execution.go
+178
abciapp/execution.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"bytes"
5
+
"context"
6
+
"slices"
7
+
"time"
8
+
9
+
abcitypes "github.com/cometbft/cometbft/abci/types"
10
+
"github.com/palantir/stacktrace"
11
+
)
12
+
13
+
// InitChain implements [types.Application].
14
+
func (d *DIDPLCApplication) InitChain(context.Context, *abcitypes.RequestInitChain) (*abcitypes.ResponseInitChain, error) {
15
+
// TODO
16
+
return &abcitypes.ResponseInitChain{}, nil
17
+
}
18
+
19
+
// PrepareProposal implements [types.Application].
// It speculatively executes the mempool transactions to decide which ones to
// include in the proposed block. Because a transaction may depend on another
// one that appears later in the list, rejected transactions are retried in
// further passes until a pass makes no progress or a time budget is exceeded.
// All speculative tree changes are rolled back before returning.
func (d *DIDPLCApplication) PrepareProposal(ctx context.Context, req *abcitypes.RequestPrepareProposal) (*abcitypes.ResponsePrepareProposal, error) {
	// execution here is only for selection; discard any working-tree changes
	defer d.tree.Rollback()

	st := time.Now()
	acceptedTx := make([][]byte, 0, len(req.Txs))
	toProcess := req.Txs
	for {
		toTryNext := [][]byte{}
		for _, tx := range toProcess {
			result, err := processTx(ctx, d.plc, tx, req.Time, true)
			if err != nil {
				return nil, stacktrace.Propagate(err, "")
			}

			if result.Code == 0 {
				acceptedTx = append(acceptedTx, tx)
			} else {
				// if a transaction is invalid, it _might_ be because it depends on a transaction that's further up in the list
				// process it after all the others
				toTryNext = append(toTryNext, tx)
			}
		}
		if len(toProcess) == len(toTryNext) {
			// we made no progress in this iteration - all transactions left to process fail to do so
			// so they can't be depending on anything that would be included in this block, at this point
			// just continue while dropping the transactions that would never succeed in this block
			break
		}
		if time.Since(st) > 800*time.Millisecond {
			// this is taking too long, just continue with what's already in acceptedTx
			break
		}
		toProcess = toTryNext
	}

	return &abcitypes.ResponsePrepareProposal{Txs: acceptedTx}, nil
}
57
+
58
+
// ProcessProposal implements [types.Application].
// It executes the proposed block's transactions against the working tree and
// votes ACCEPT only if every transaction succeeds. On ACCEPT, the proposal
// hash and per-transaction results are cached (and the working-tree changes
// kept uncommitted) so FinalizeBlock can reuse them if this block is decided.
func (d *DIDPLCApplication) ProcessProposal(ctx context.Context, req *abcitypes.RequestProcessProposal) (*abcitypes.ResponseProcessProposal, error) {
	// do not rollback tree in this method, in case the changes can be reused in FinalizeBlock
	if req.Height != d.tree.WorkingVersion() {
		// our tree went out of sync, this should never happen
		return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_REJECT}, nil
	}

	// if we return early, ensure we don't use incomplete results where we haven't voted ACCEPT
	d.lastProcessedProposalHash = nil
	d.lastProcessedProposalExecTxResults = nil
	defer func() {
		if d.lastProcessedProposalHash == nil {
			// we didn't vote ACCEPT
			// we could rollback only eventually on FinalizeBlock, but why wait - rollback now for safety
			d.tree.Rollback()
		}
	}()

	txResults := make([]*abcitypes.ExecTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		result, err := processTx(ctx, d.plc, tx, req.Time, true)
		if err != nil {
			return nil, stacktrace.Propagate(err, "")
		}
		// apply the transaction's state changes to the working tree
		for _, c := range result.TreeChanges {
			_, err := d.tree.Set(c.Key, c.Value)
			if err != nil {
				return nil, stacktrace.Propagate(err, "")
			}
		}
		// when preparing a proposal, invalid transactions should have been discarded
		// so, if something doesn't succeed now, something has gone wrong and we should not vote in agreement of the proposal
		if result.Code != 0 {
			return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_REJECT}, nil
		}

		txResults[i] = &abcitypes.ExecTxResult{
			Code:      result.Code,
			Data:      result.Data,
			Log:       result.Log,
			Info:      result.Info,
			GasWanted: result.GasWanted,
			GasUsed:   result.GasUsed,
			Events:    result.Events,
			Codespace: result.Codespace,
		}
	}

	// remember this proposal so FinalizeBlock can reuse the results if the
	// same block ends up being decided
	d.lastProcessedProposalHash = slices.Clone(req.Hash)
	d.lastProcessedProposalExecTxResults = txResults

	return &abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}, nil
}
112
+
113
+
// ExtendVote implements [types.Application].
114
+
func (d *DIDPLCApplication) ExtendVote(context.Context, *abcitypes.RequestExtendVote) (*abcitypes.ResponseExtendVote, error) {
115
+
// TODO
116
+
return &abcitypes.ResponseExtendVote{}, nil
117
+
}
118
+
119
+
// VerifyVoteExtension implements [types.Application].
120
+
func (d *DIDPLCApplication) VerifyVoteExtension(context.Context, *abcitypes.RequestVerifyVoteExtension) (*abcitypes.ResponseVerifyVoteExtension, error) {
121
+
// TODO
122
+
return &abcitypes.ResponseVerifyVoteExtension{}, nil
123
+
}
124
+
125
+
// FinalizeBlock implements [types.Application].
// If the decided block is the one this node already executed in
// ProcessProposal, the cached results and the uncommitted working-tree
// changes are reused. Otherwise the working tree is rolled back and the
// decided block's transactions are executed from scratch.
func (d *DIDPLCApplication) FinalizeBlock(ctx context.Context, req *abcitypes.RequestFinalizeBlock) (*abcitypes.ResponseFinalizeBlock, error) {
	if bytes.Equal(req.Hash, d.lastProcessedProposalHash) && d.lastProcessedProposalExecTxResults != nil {
		// the block that was decided was the one we processed in ProcessProposal, and ProcessProposal processed successfully
		// reuse the uncommitted results
		return &abcitypes.ResponseFinalizeBlock{
			TxResults: d.lastProcessedProposalExecTxResults,
			AppHash:   d.tree.WorkingHash(),
		}, nil
	}
	// a block other than the one we processed in ProcessProposal was decided
	// discard the current modified state, and process the decided block
	d.tree.Rollback()

	txResults := make([]*abcitypes.ExecTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		result, err := processTx(ctx, d.plc, tx, req.Time, true)
		if err != nil {
			return nil, stacktrace.Propagate(err, "")
		}
		// apply the transaction's state changes to the working tree
		for _, c := range result.TreeChanges {
			_, err := d.tree.Set(c.Key, c.Value)
			if err != nil {
				return nil, stacktrace.Propagate(err, "")
			}
		}
		txResults[i] = &abcitypes.ExecTxResult{
			Code:      result.Code,
			Data:      result.Data,
			Log:       result.Log,
			Info:      result.Info,
			GasWanted: result.GasWanted,
			GasUsed:   result.GasUsed,
			Events:    result.Events,
			Codespace: result.Codespace,
		}
	}

	return &abcitypes.ResponseFinalizeBlock{
		TxResults: txResults,
		AppHash:   d.tree.WorkingHash(),
	}, nil
}
168
+
169
+
// Commit implements [types.Application].
170
+
func (d *DIDPLCApplication) Commit(context.Context, *abcitypes.RequestCommit) (*abcitypes.ResponseCommit, error) {
171
+
_, _, err := d.tree.SaveVersion()
172
+
if err != nil {
173
+
return nil, stacktrace.Propagate(err, "")
174
+
}
175
+
176
+
// TODO(later) consider whether we can set some RetainHeight in the response
177
+
return &abcitypes.ResponseCommit{}, nil
178
+
}
+158
abciapp/info.go
+158
abciapp/info.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"errors"
7
+
"net/http"
8
+
"net/url"
9
+
10
+
abcitypes "github.com/cometbft/cometbft/abci/types"
11
+
"github.com/palantir/stacktrace"
12
+
"github.com/ucarion/urlpath"
13
+
"tangled.org/gbl08ma/didplcbft/plc"
14
+
)
15
+
16
+
// Info implements [types.Application].
// LastBlockHeight and LastBlockAppHash come from the latest committed tree
// version, which lets CometBFT determine how far the application state is
// behind the chain during handshake/replay.
func (d *DIDPLCApplication) Info(ctx context.Context, req *abcitypes.RequestInfo) (*abcitypes.ResponseInfo, error) {
	return &abcitypes.ResponseInfo{
		Data:             "", // TODO some status report as JSON? Information about CometBFT itself can be obtained from RequestInfo
		Version:          "0.1.1",
		AppVersion:       0, // TODO, included in the header of every block. move these values to constants
		LastBlockHeight:  d.tree.Version(),
		LastBlockAppHash: d.tree.Hash(),
	}, nil
}
26
+
27
+
// Query implements [types.Application].
28
+
func (d *DIDPLCApplication) Query(ctx context.Context, req *abcitypes.RequestQuery) (*abcitypes.ResponseQuery, error) {
29
+
url, err := url.ParseRequestURI(req.Path)
30
+
if err != nil || url.Host != "" || url.Scheme != "" {
31
+
return &abcitypes.ResponseQuery{
32
+
Code: 6000,
33
+
Info: "Invalid path",
34
+
}, nil
35
+
}
36
+
37
+
treeVersion := plc.CommittedTreeVersion
38
+
if req.Height != 0 {
39
+
treeVersion = plc.SpecificTreeVersion(req.Height)
40
+
}
41
+
42
+
handlers := []struct {
43
+
matcher urlpath.Path
44
+
handler func(match urlpath.Match) (*abcitypes.ResponseQuery, error)
45
+
}{
46
+
{
47
+
matcher: urlpath.New("/plc/:did"),
48
+
handler: func(match urlpath.Match) (*abcitypes.ResponseQuery, error) {
49
+
did := match.Params["did"]
50
+
doc, err := d.plc.Resolve(ctx, treeVersion, did)
51
+
if err != nil {
52
+
switch {
53
+
case errors.Is(err, plc.ErrDIDNotFound):
54
+
return &abcitypes.ResponseQuery{
55
+
Key: []byte(did),
56
+
Code: http.StatusNotFound,
57
+
Info: "DID not registered: " + did,
58
+
}, nil
59
+
case errors.Is(err, plc.ErrDIDGone):
60
+
return &abcitypes.ResponseQuery{
61
+
Key: []byte(did),
62
+
Code: http.StatusGone,
63
+
Info: "DID not available: " + did,
64
+
}, nil
65
+
default:
66
+
return nil, stacktrace.Propagate(err, "")
67
+
}
68
+
}
69
+
70
+
docJSON, err := json.Marshal(doc)
71
+
if err != nil {
72
+
return nil, stacktrace.Propagate(err, "")
73
+
}
74
+
75
+
return &abcitypes.ResponseQuery{
76
+
Key: []byte(did),
77
+
Value: []byte(docJSON),
78
+
Code: 0,
79
+
}, nil
80
+
},
81
+
},
82
+
{
83
+
matcher: urlpath.New("/plc/:did/log"),
84
+
handler: func(match urlpath.Match) (*abcitypes.ResponseQuery, error) {
85
+
return &abcitypes.ResponseQuery{
86
+
Code: 1,
87
+
Info: "Not implemented",
88
+
}, nil
89
+
},
90
+
},
91
+
{
92
+
matcher: urlpath.New("/plc/:did/log/audit"),
93
+
handler: func(match urlpath.Match) (*abcitypes.ResponseQuery, error) {
94
+
return &abcitypes.ResponseQuery{
95
+
Code: 1,
96
+
Info: "Not implemented",
97
+
}, nil
98
+
},
99
+
},
100
+
{
101
+
matcher: urlpath.New("/plc/:did/log/last"),
102
+
handler: func(match urlpath.Match) (*abcitypes.ResponseQuery, error) {
103
+
return &abcitypes.ResponseQuery{
104
+
Code: 1,
105
+
Info: "Not implemented",
106
+
}, nil
107
+
},
108
+
},
109
+
{
110
+
matcher: urlpath.New("/plc/:did/data"),
111
+
handler: func(match urlpath.Match) (*abcitypes.ResponseQuery, error) {
112
+
did := match.Params["did"]
113
+
data, err := d.plc.Data(ctx, treeVersion, did)
114
+
if err != nil {
115
+
switch {
116
+
case errors.Is(err, plc.ErrDIDNotFound):
117
+
return &abcitypes.ResponseQuery{
118
+
Key: []byte(did),
119
+
Code: http.StatusNotFound,
120
+
Info: "DID not registered: " + did,
121
+
}, nil
122
+
case errors.Is(err, plc.ErrDIDGone):
123
+
return &abcitypes.ResponseQuery{
124
+
Key: []byte(did),
125
+
Code: http.StatusGone,
126
+
Info: "DID not available: " + did,
127
+
}, nil
128
+
default:
129
+
return nil, stacktrace.Propagate(err, "")
130
+
}
131
+
}
132
+
133
+
dataJSON, err := json.Marshal(&data)
134
+
if err != nil {
135
+
return nil, stacktrace.Propagate(err, "")
136
+
}
137
+
138
+
return &abcitypes.ResponseQuery{
139
+
Key: []byte(did),
140
+
Value: []byte(dataJSON),
141
+
Code: 0,
142
+
}, nil
143
+
},
144
+
},
145
+
}
146
+
147
+
for _, h := range handlers {
148
+
if match, ok := h.matcher.Match(url.Path); ok {
149
+
return h.handler(match)
150
+
}
151
+
}
152
+
153
+
return &abcitypes.ResponseQuery{
154
+
Code: 6000,
155
+
Info: "Invalid path",
156
+
}, nil
157
+
158
+
}
+27
abciapp/mempool.go
+27
abciapp/mempool.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"context"
5
+
"time"
6
+
7
+
abcitypes "github.com/cometbft/cometbft/abci/types"
8
+
"github.com/palantir/stacktrace"
9
+
)
10
+
11
+
// CheckTx implements [types.Application].
12
+
func (d *DIDPLCApplication) CheckTx(ctx context.Context, req *abcitypes.RequestCheckTx) (*abcitypes.ResponseCheckTx, error) {
13
+
result, err := processTx(ctx, d.plc, req.Tx, time.Now(), false)
14
+
if err != nil {
15
+
return nil, stacktrace.Propagate(err, "")
16
+
}
17
+
return &abcitypes.ResponseCheckTx{
18
+
Code: result.Code,
19
+
Data: result.Data,
20
+
Log: result.Log,
21
+
Info: result.Info,
22
+
GasWanted: result.GasWanted,
23
+
GasUsed: result.GasUsed,
24
+
Events: result.Events,
25
+
Codespace: result.Codespace,
26
+
}, nil
27
+
}
+721
abciapp/snapshots.go
+721
abciapp/snapshots.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"bufio"
5
+
"bytes"
6
+
"context"
7
+
"crypto/sha256"
8
+
"encoding/binary"
9
+
"errors"
10
+
"fmt"
11
+
"io"
12
+
"os"
13
+
"path/filepath"
14
+
"slices"
15
+
"strconv"
16
+
"strings"
17
+
"sync"
18
+
"time"
19
+
20
+
abcitypes "github.com/cometbft/cometbft/abci/types"
21
+
"github.com/cosmos/iavl"
22
+
"github.com/klauspost/compress/zstd"
23
+
"github.com/palantir/stacktrace"
24
+
)
25
+
26
+
// snapshotChunkSize is the size of each state-sync chunk served to peers.
const snapshotChunkSize = 10 * 1024 * 1024 // 10 MB

// snapshotChunkHashSize is the size of each SHA-256 chunk hash stored in the
// *.chunksums file (one hash per chunk).
const snapshotChunkHashSize = 32
28
+
29
+
// ListSnapshots implements [types.Application].
30
+
func (d *DIDPLCApplication) ListSnapshots(context.Context, *abcitypes.RequestListSnapshots) (*abcitypes.ResponseListSnapshots, error) {
31
+
files, err := filepath.Glob(filepath.Join(d.snapshotDirectory, "*.snapshot"))
32
+
if err != nil {
33
+
return nil, stacktrace.Propagate(err, "")
34
+
}
35
+
36
+
snapshots := make([]*abcitypes.Snapshot, 0, len(files))
37
+
for _, filename := range files {
38
+
s, err := readSnapshotMetadata(filename)
39
+
if err != nil {
40
+
return nil, stacktrace.Propagate(err, "")
41
+
}
42
+
43
+
snapshots = append(snapshots, s)
44
+
}
45
+
46
+
return &abcitypes.ResponseListSnapshots{
47
+
Snapshots: snapshots,
48
+
}, nil
49
+
}
50
+
51
+
func readSnapshotMetadata(filename string) (*abcitypes.Snapshot, error) {
52
+
// Extract height from filename pattern: %020d.snapshot
53
+
base := filepath.Base(filename)
54
+
if !strings.HasSuffix(base, ".snapshot") {
55
+
return nil, stacktrace.NewError("invalid snapshot filename format: %s", filename)
56
+
}
57
+
heightStr := strings.TrimSuffix(base, ".snapshot")
58
+
height, err := strconv.ParseInt(heightStr, 10, 64)
59
+
if err != nil {
60
+
return nil, stacktrace.Propagate(err, "failed to parse height from filename: %s", filename)
61
+
}
62
+
63
+
// Open and read snapshot file header
64
+
f, err := os.Open(filename)
65
+
if err != nil {
66
+
return nil, stacktrace.Propagate(err, "failed to open snapshot file: %s", filename)
67
+
}
68
+
defer f.Close()
69
+
70
+
// Read file magic (18 bytes)
71
+
magic := make([]byte, 18)
72
+
_, err = io.ReadFull(f, magic)
73
+
if err != nil {
74
+
return nil, stacktrace.Propagate(err, "failed to read file magic")
75
+
}
76
+
if string(magic) != "didplcbft-snapshot" {
77
+
return nil, stacktrace.NewError("invalid file magic: expected 'didplcbft-snapshot', got '%s'", string(magic))
78
+
}
79
+
80
+
// Read version bytes (6 bytes)
81
+
versionBytes := make([]byte, 6)
82
+
_, err = io.ReadFull(f, versionBytes)
83
+
if err != nil {
84
+
return nil, stacktrace.Propagate(err, "failed to read version bytes")
85
+
}
86
+
format := binary.BigEndian.Uint32(versionBytes[2:])
87
+
88
+
// Read height (8 bytes, big-endian)
89
+
heightBytes := make([]byte, 8)
90
+
_, err = io.ReadFull(f, heightBytes)
91
+
if err != nil {
92
+
return nil, stacktrace.Propagate(err, "failed to read height")
93
+
}
94
+
fileHeight := int64(binary.BigEndian.Uint64(heightBytes))
95
+
if fileHeight != height {
96
+
return nil, stacktrace.NewError("height mismatch: filename indicates %d, file header contains %d", height, fileHeight)
97
+
}
98
+
99
+
// Read tree hash (32 bytes)
100
+
hash := make([]byte, 32)
101
+
_, err = io.ReadFull(f, hash)
102
+
if err != nil {
103
+
return nil, stacktrace.Propagate(err, "failed to read tree hash")
104
+
}
105
+
106
+
// Read corresponding chunksums file
107
+
chunksumsFilename := strings.TrimSuffix(filename, ".snapshot") + ".chunksums"
108
+
chunksumsData, err := os.ReadFile(chunksumsFilename)
109
+
if err != nil {
110
+
return nil, stacktrace.Propagate(err, "failed to read chunksums file: %s", chunksumsFilename)
111
+
}
112
+
113
+
// Calculate number of chunks (each chunk hash is 32 bytes)
114
+
chunks := int64(len(chunksumsData)) / snapshotChunkHashSize
115
+
116
+
return &abcitypes.Snapshot{
117
+
Height: uint64(height),
118
+
Format: format,
119
+
Chunks: uint32(chunks),
120
+
Hash: hash,
121
+
Metadata: chunksumsData,
122
+
}, nil
123
+
}
124
+
125
+
// LoadSnapshotChunk implements [types.Application].
126
+
func (d *DIDPLCApplication) LoadSnapshotChunk(_ context.Context, req *abcitypes.RequestLoadSnapshotChunk) (*abcitypes.ResponseLoadSnapshotChunk, error) {
127
+
if req.Format != 1 {
128
+
// just in case CometBFT asks us to load a chunk of a format we didn't declare to support in ListSnapshots...
129
+
return nil, stacktrace.NewError("unsupported snapshot format")
130
+
}
131
+
132
+
// Construct filename from height using the same pattern as createSnapshot
133
+
snapshotFilename := filepath.Join(d.snapshotDirectory, fmt.Sprintf("%020d.snapshot", req.Height))
134
+
135
+
// Open the snapshot file
136
+
f, err := os.Open(snapshotFilename)
137
+
if err != nil {
138
+
return nil, stacktrace.Propagate(err, "failed to open snapshot file: %s", snapshotFilename)
139
+
}
140
+
defer f.Close()
141
+
142
+
// Calculate the offset for the requested chunk (start from beginning of file, including header)
143
+
offset := int64(req.Chunk) * snapshotChunkSize
144
+
_, err = f.Seek(offset, io.SeekStart)
145
+
if err != nil {
146
+
return nil, stacktrace.Propagate(err, "failed to seek to chunk offset")
147
+
}
148
+
149
+
// Read up to snapshotChunkSize bytes
150
+
chunkData := make([]byte, snapshotChunkSize)
151
+
n, err := f.Read(chunkData)
152
+
if err != nil && err != io.EOF {
153
+
return nil, stacktrace.Propagate(err, "failed to read chunk data")
154
+
}
155
+
156
+
// If we read less than snapshotChunkSize, trim the slice
157
+
if n < snapshotChunkSize {
158
+
chunkData = chunkData[:n]
159
+
}
160
+
161
+
return &abcitypes.ResponseLoadSnapshotChunk{
162
+
Chunk: chunkData,
163
+
}, nil
164
+
}
165
+
166
+
// ApplySnapshotChunk implements [types.Application].
167
+
func (d *DIDPLCApplication) ApplySnapshotChunk(_ context.Context, req *abcitypes.RequestApplySnapshotChunk) (*abcitypes.ResponseApplySnapshotChunk, error) {
168
+
if d.snapshotApplier == nil {
169
+
return nil, stacktrace.NewError("snapshot not offered yet, can't apply chunk")
170
+
}
171
+
172
+
err := d.snapshotApplier.Apply(int(req.Index), req.Chunk)
173
+
if err != nil {
174
+
fmt.Println("SNAPSHOT APPLY FAILED:", err.Error())
175
+
if errors.Is(err, errMalformedChunk) {
176
+
return &abcitypes.ResponseApplySnapshotChunk{
177
+
Result: abcitypes.ResponseApplySnapshotChunk_RETRY,
178
+
RefetchChunks: []uint32{req.Index},
179
+
RejectSenders: []string{req.Sender},
180
+
}, nil
181
+
} else if errors.Is(err, errTreeHashMismatch) {
182
+
return &abcitypes.ResponseApplySnapshotChunk{
183
+
Result: abcitypes.ResponseApplySnapshotChunk_REJECT_SNAPSHOT,
184
+
RejectSenders: []string{req.Sender},
185
+
}, nil
186
+
}
187
+
return nil, stacktrace.NewError("failed to apply")
188
+
}
189
+
190
+
if d.snapshotApplier.Done() {
191
+
d.snapshotApplier = nil
192
+
}
193
+
194
+
return &abcitypes.ResponseApplySnapshotChunk{
195
+
Result: abcitypes.ResponseApplySnapshotChunk_ACCEPT,
196
+
}, nil
197
+
}
198
+
199
+
// OfferSnapshot implements [types.Application].
200
+
func (d *DIDPLCApplication) OfferSnapshot(_ context.Context, req *abcitypes.RequestOfferSnapshot) (*abcitypes.ResponseOfferSnapshot, error) {
201
+
if d.snapshotApplier != nil {
202
+
err := d.snapshotApplier.Abort()
203
+
if err != nil {
204
+
return nil, stacktrace.Propagate(err, "")
205
+
}
206
+
}
207
+
208
+
if req.Snapshot.Format != 1 {
209
+
return &abcitypes.ResponseOfferSnapshot{
210
+
Result: abcitypes.ResponseOfferSnapshot_REJECT_FORMAT,
211
+
}, nil
212
+
}
213
+
214
+
var err error
215
+
d.snapshotApplier, err = d.beginApplyingSnapshot(int64(req.Snapshot.Height), req.AppHash, int(req.Snapshot.Chunks), req.Snapshot.Metadata)
216
+
if err != nil {
217
+
d.snapshotApplier = nil
218
+
if errors.Is(err, errInvalidMetadata) {
219
+
return &abcitypes.ResponseOfferSnapshot{
220
+
Result: abcitypes.ResponseOfferSnapshot_REJECT_SENDER,
221
+
}, nil
222
+
}
223
+
return nil, stacktrace.Propagate(err, "")
224
+
}
225
+
226
+
return &abcitypes.ResponseOfferSnapshot{
227
+
Result: abcitypes.ResponseOfferSnapshot_ACCEPT,
228
+
}, nil
229
+
}
230
+
231
+
func (d *DIDPLCApplication) createSnapshot(treeVersion int64, tempFilename string) error {
232
+
it, err := d.tree.GetImmutable(treeVersion)
233
+
if err != nil {
234
+
return stacktrace.Propagate(err, "")
235
+
}
236
+
237
+
// Delete tempFilename if it exists to ensure a fresh file is created
238
+
_ = os.Remove(tempFilename)
239
+
240
+
f, err := os.Create(tempFilename)
241
+
if err != nil {
242
+
return stacktrace.Propagate(err, "")
243
+
}
244
+
defer f.Close()
245
+
246
+
st := time.Now()
247
+
248
+
err = writeSnapshot(f, it)
249
+
if err != nil {
250
+
return stacktrace.Propagate(err, "")
251
+
}
252
+
253
+
err = f.Sync()
254
+
if err != nil {
255
+
return stacktrace.Propagate(err, "")
256
+
}
257
+
258
+
hf, err := os.Create(filepath.Join(d.snapshotDirectory, fmt.Sprintf("%020d.chunksums", treeVersion)))
259
+
if err != nil {
260
+
return stacktrace.Propagate(err, "")
261
+
}
262
+
defer hf.Close()
263
+
264
+
err = writeChunkHashes(f, hf)
265
+
if err != nil {
266
+
return stacktrace.Propagate(err, "")
267
+
}
268
+
269
+
err = hf.Sync()
270
+
if err != nil {
271
+
return stacktrace.Propagate(err, "")
272
+
}
273
+
274
+
err = f.Close()
275
+
if err != nil {
276
+
return stacktrace.Propagate(err, "")
277
+
}
278
+
279
+
os.Rename(tempFilename, filepath.Join(d.snapshotDirectory, fmt.Sprintf("%020d.snapshot", treeVersion)))
280
+
281
+
fmt.Println("Took", time.Since(st), "to export")
282
+
283
+
return nil
284
+
}
285
+
286
+
// writeSnapshot serializes the immutable tree to writerSeeker in the
// didplcbft snapshot format:
//
//	magic "didplcbft-snapshot" (18 bytes)
//	version/format bytes (6 bytes, last 4 = format, currently 1)
//	tree version (8 bytes, big-endian)
//	tree hash (32 bytes)
//	compressed node list size in bytes (8 bytes, backpatched)
//	number of nodes (8 bytes, backpatched)
//	zstd-compressed node list (see exportNodes)
//
// The two size fields are reserved with zeros up front and written after the
// node list is complete, by seeking back on the underlying writer. Buffered
// writes go through bw; the backpatch bypasses it (after a Flush) and writes
// directly through writerSeeker, so ordering here is deliberate.
func writeSnapshot(writerSeeker io.WriteSeeker, it *iavl.ImmutableTree) error {
	// byte count of everything written before the reserved size fields, so we
	// know where to seek back to
	writtenUntilReservedFields := 0

	bw := bufio.NewWriter(writerSeeker)

	// file magic and version
	c, err := bw.Write([]byte("didplcbft-snapshot"))
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	writtenUntilReservedFields += c

	c, err = bw.Write([]byte{0, 0, 0, 0, 0, 1})
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	writtenUntilReservedFields += c

	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(it.Version()))
	c, err = bw.Write(b)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	writtenUntilReservedFields += c

	c, err = bw.Write(it.Hash())
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	writtenUntilReservedFields += c

	// reserve space for writing number of bytes, number of nodes
	// 8 bytes for node list size in bytes
	// 8 bytes for number of nodes
	sizeOfReservedFields := 8 + 8
	b = make([]byte, sizeOfReservedFields)
	_, err = bw.Write(b)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	zstdw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	numNodes, err := exportNodes(it, zstdw)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	err = zstdw.Close()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	err = bw.Flush()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	// find total compressed node list file size
	offset, err := writerSeeker.Seek(0, io.SeekCurrent)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	compressedNodeListSize := offset - int64(writtenUntilReservedFields) - int64(sizeOfReservedFields)

	// seek back and write empty header fields

	offset, err = writerSeeker.Seek(int64(writtenUntilReservedFields), io.SeekStart)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}
	if offset != int64(writtenUntilReservedFields) {
		return stacktrace.NewError("unexpected seek result")
	}

	b = make([]byte, sizeOfReservedFields)
	binary.BigEndian.PutUint64(b, uint64(compressedNodeListSize))
	binary.BigEndian.PutUint64(b[8:], uint64(numNodes))
	_, err = writerSeeker.Write(b)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	return nil
}
375
+
376
+
func exportNodes(it *iavl.ImmutableTree, w io.Writer) (int64, error) {
377
+
exporter, err := it.Export()
378
+
if err != nil {
379
+
return 0, stacktrace.Propagate(err, "")
380
+
}
381
+
defer exporter.Close()
382
+
cexporter := iavl.NewCompressExporter(exporter)
383
+
384
+
numNodes := int64(0)
385
+
for {
386
+
node, err := cexporter.Next()
387
+
if errors.Is(err, iavl.ErrorExportDone) {
388
+
break
389
+
}
390
+
if err != nil {
391
+
return 0, stacktrace.Propagate(err, "")
392
+
}
393
+
394
+
b := make([]byte, 9)
395
+
b[0] = byte(node.Height)
396
+
397
+
binary.BigEndian.PutUint64(b[1:], uint64(node.Version))
398
+
_, err = w.Write(b)
399
+
if err != nil {
400
+
return 0, stacktrace.Propagate(err, "")
401
+
}
402
+
403
+
// nil node values are different from 0-byte values
404
+
b = []byte{0xff, 0xff, 0xff, 0xff}
405
+
if node.Key != nil {
406
+
binary.BigEndian.PutUint32(b, uint32(len(node.Key)))
407
+
}
408
+
_, err = w.Write(b)
409
+
if err != nil {
410
+
return 0, stacktrace.Propagate(err, "")
411
+
}
412
+
413
+
b = []byte{0xff, 0xff, 0xff, 0xff}
414
+
if node.Value != nil {
415
+
binary.BigEndian.PutUint32(b, uint32(len(node.Value)))
416
+
}
417
+
_, err = w.Write(b)
418
+
if err != nil {
419
+
return 0, stacktrace.Propagate(err, "")
420
+
}
421
+
422
+
_, err = w.Write(node.Key)
423
+
if err != nil {
424
+
return 0, stacktrace.Propagate(err, "")
425
+
}
426
+
427
+
_, err = w.Write(node.Value)
428
+
if err != nil {
429
+
return 0, stacktrace.Propagate(err, "")
430
+
}
431
+
numNodes++
432
+
}
433
+
434
+
return numNodes, nil
435
+
}
436
+
437
+
func writeChunkHashes(snapshotFile io.ReadSeeker, w io.Writer) error {
438
+
bw := bufio.NewWriter(w)
439
+
defer bw.Flush()
440
+
441
+
_, err := snapshotFile.Seek(0, io.SeekStart)
442
+
if err != nil {
443
+
return stacktrace.Propagate(err, "")
444
+
}
445
+
446
+
buf := make([]byte, snapshotChunkSize)
447
+
for {
448
+
n, err := io.ReadFull(snapshotFile, buf)
449
+
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
450
+
return stacktrace.Propagate(err, "")
451
+
}
452
+
if n == 0 {
453
+
break
454
+
}
455
+
456
+
hash := sha256.Sum256(buf[:n])
457
+
c, err := w.Write(hash[:])
458
+
if err != nil {
459
+
return stacktrace.Propagate(err, "")
460
+
}
461
+
462
+
if c != snapshotChunkHashSize {
463
+
return stacktrace.NewError("unexpected chunk hash size")
464
+
}
465
+
466
+
if n < snapshotChunkSize {
467
+
break
468
+
}
469
+
}
470
+
471
+
return nil
472
+
}
473
+
474
+
// snapshotApplier incrementally rebuilds the IAVL tree from state-sync chunks.
// Chunk bytes are pushed into an io.Pipe, decompressed by a zstd reader, and a
// background goroutine (streamingImporter) decodes nodes from the stream and
// feeds them to the iavl importer.
type snapshotApplier struct {
	tree        *iavl.MutableTree
	treeVersion int64 // tree version the snapshot claims to restore
	// expectedFinalHash is the root hash the restored tree must end up with.
	expectedFinalHash []byte
	// expectedChunkHashes holds one SHA-256 per chunk, from snapshot metadata.
	expectedChunkHashes [][]byte

	// pipeWriter receives raw chunk bytes; pipeReader feeds zstdReader.
	pipeWriter *io.PipeWriter
	pipeReader *io.PipeReader
	zstdReader io.ReadCloser

	importer         *iavl.Importer
	compressImporter iavl.NodeImporter
	importerWg       sync.WaitGroup // tracks the streamingImporter goroutine

	numImportedNodes int  // nodes imported so far; written by streamingImporter, read after importerWg.Wait()
	claimedNodeCount int  // node count declared in the snapshot header
	done             bool // set once the snapshot has been committed and verified
}
492
+
493
+
// Sentinel errors for snapshot application; callers can distinguish malformed
// remote input from local failures with errors.Is.
var errMalformedChunk = errors.New("malformed chunk")
var errInvalidMetadata = errors.New("invalid metadata")
var errTreeHashMismatch = errors.New("tree hash mismatch")
496
+
497
+
// beginApplyingSnapshot validates the snapshot metadata, clears the local tree,
// and wires up the pipe -> zstd -> iavl importer pipeline that Apply will feed
// chunk by chunk. It returns the applier that tracks the in-progress restore.
func (d *DIDPLCApplication) beginApplyingSnapshot(treeVersion int64, expectedFinalHash []byte, expectedNumChunks int, chunksums []byte) (*snapshotApplier, error) {
	// chunksums must be exactly one hash per expected chunk
	if len(chunksums)%snapshotChunkHashSize != 0 || len(chunksums)/snapshotChunkHashSize != expectedNumChunks {
		return nil, stacktrace.Propagate(errInvalidMetadata, "")
	}

	// iavl's Import requires an empty tree, so wipe whatever state we have
	if !d.tree.IsEmpty() {
		err := d.fullyClearTree()
		if err != nil {
			return nil, stacktrace.Propagate(err, "")
		}
	}

	importer, err := d.tree.Import(treeVersion)
	if err != nil {
		return nil, stacktrace.Propagate(err, "")
	}

	pipeReader, pipeWriter := io.Pipe()

	zstdReader, err := zstd.NewReader(pipeReader)
	if err != nil {
		return nil, stacktrace.Propagate(err, "")
	}

	// split the flat checksum blob into per-chunk hashes
	// (the subslices alias chunksums; chunksums must not be mutated afterwards)
	chunkHashes := make([][]byte, 0, expectedNumChunks)
	for hash := range slices.Chunk(chunksums, snapshotChunkHashSize) {
		chunkHashes = append(chunkHashes, hash)
	}

	return &snapshotApplier{
		tree:                d.tree,
		treeVersion:         treeVersion,
		expectedFinalHash:   expectedFinalHash,
		expectedChunkHashes: chunkHashes,

		pipeWriter: pipeWriter,
		pipeReader: pipeReader,
		zstdReader: zstdReader.IOReadCloser(),

		importer:         importer,
		compressImporter: iavl.NewCompressImporter(importer),
	}, nil
}
540
+
541
+
func (a *snapshotApplier) Apply(chunkIndex int, chunkBytes []byte) error {
542
+
if len(chunkBytes) > snapshotChunkSize {
543
+
return stacktrace.Propagate(errMalformedChunk, "chunk too large")
544
+
}
545
+
hash := sha256.Sum256(chunkBytes)
546
+
if !bytes.Equal(a.expectedChunkHashes[chunkIndex], hash[:]) {
547
+
return stacktrace.Propagate(errMalformedChunk, "hash mismatch")
548
+
}
549
+
550
+
if chunkIndex == 0 {
551
+
if len(chunkBytes) < 80 {
552
+
return stacktrace.Propagate(errMalformedChunk, "chunk too small")
553
+
}
554
+
555
+
if string(chunkBytes[0:18]) != "didplcbft-snapshot" {
556
+
return stacktrace.Propagate(errMalformedChunk, "invalid file magic")
557
+
}
558
+
559
+
if binary.BigEndian.Uint32(chunkBytes[20:]) != 1 {
560
+
return stacktrace.Propagate(errMalformedChunk, "invalid snapshot format")
561
+
}
562
+
563
+
if binary.BigEndian.Uint64(chunkBytes[24:]) != uint64(a.treeVersion) {
564
+
return stacktrace.Propagate(errMalformedChunk, "mismatched tree version")
565
+
}
566
+
567
+
if !bytes.Equal(chunkBytes[32:64], a.expectedFinalHash) {
568
+
return stacktrace.Propagate(errMalformedChunk, "mismatched declared tree hash")
569
+
}
570
+
571
+
declaredFileSize := 80 + binary.BigEndian.Uint64(chunkBytes[64:])
572
+
minExpectedSize := uint64((len(a.expectedChunkHashes) - 1) * snapshotChunkSize)
573
+
maxExpectedSize := uint64(len(a.expectedChunkHashes) * snapshotChunkSize)
574
+
if declaredFileSize < minExpectedSize ||
575
+
declaredFileSize > maxExpectedSize {
576
+
return stacktrace.Propagate(errMalformedChunk, "unexpected compressed node list length")
577
+
}
578
+
579
+
a.claimedNodeCount = int(binary.BigEndian.Uint64(chunkBytes[72:]))
580
+
581
+
// move to the start of the compressed portion
582
+
chunkBytes = chunkBytes[80:]
583
+
584
+
a.importerWg.Go(a.streamingImporter)
585
+
}
586
+
587
+
isLastChunk := chunkIndex == len(a.expectedChunkHashes)-1
588
+
go func(b []byte) {
589
+
// From the docs:
590
+
// It is safe to call Read and Write in parallel with each other or with Close.
591
+
// Parallel calls to Read and parallel calls to Write are also safe:
592
+
// the individual calls will be gated sequentially.
593
+
594
+
// so even if not everything gets written from this chunk (e.g. because the zstd decoder decided not to advance)
595
+
// it'll eventually be written, in the correct order
596
+
_, _ = a.pipeWriter.Write(b)
597
+
if isLastChunk {
598
+
_ = a.pipeWriter.Close()
599
+
}
600
+
}(chunkBytes)
601
+
602
+
if isLastChunk {
603
+
// wait for importer to finish reading and importing everything
604
+
a.importerWg.Wait()
605
+
606
+
if a.numImportedNodes != a.claimedNodeCount {
607
+
return stacktrace.Propagate(errTreeHashMismatch, "imported node count mismatch")
608
+
}
609
+
610
+
err := a.importer.Commit()
611
+
if err != nil {
612
+
if strings.Contains(err.Error(), "invalid node structure") {
613
+
return stacktrace.Propagate(errors.Join(errMalformedChunk, err), "")
614
+
}
615
+
return stacktrace.Propagate(err, "")
616
+
}
617
+
618
+
a.closeCommons()
619
+
a.done = true
620
+
621
+
if !bytes.Equal(a.tree.Hash(), a.expectedFinalHash) {
622
+
return stacktrace.Propagate(errTreeHashMismatch, "")
623
+
}
624
+
}
625
+
626
+
return nil
627
+
}
628
+
629
+
// streamingImporter runs in its own goroutine. It decodes nodes from the
// decompressed snapshot stream (a.zstdReader) and adds them to the importer,
// incrementing numImportedNodes per node. It exits silently on EOF or on any
// malformed data: a short import makes the node-count/tree-hash checks in
// Apply fail, so no error needs to be surfaced from here.
func (a *snapshotApplier) streamingImporter() {
	for {
		// per-node header: 1 byte height, 8 bytes version, 4+4 bytes key/value lengths
		nodeHeader := make([]byte, 9+4+4)
		n, err := io.ReadFull(a.zstdReader, nodeHeader)
		if err != nil || n != 9+4+4 {
			// err may be EOF here, which is expected
			return
		}

		// validate lengths against sensible limits to prevent OOM DoS by malicious third parties
		keyLength := binary.BigEndian.Uint32(nodeHeader[9:13])
		var key []byte
		// 0xffffffff encodes a nil key (distinct from a 0-byte key)
		if keyLength != 0xffffffff {
			if keyLength > 1024*1024 {
				return
			}
			key = make([]byte, keyLength)

			n, err = io.ReadFull(a.zstdReader, key)
			if err != nil || n != len(key) {
				// this shouldn't happen unless the data is corrupt
				// we can return silently here because since we didn't import all nodes, the tree hash won't match anyway
				return
			}
		}

		valueLength := binary.BigEndian.Uint32(nodeHeader[13:17])
		var value []byte
		if valueLength != 0xffffffff {
			if valueLength > 1024*1024 {
				return
			}
			value = make([]byte, valueLength)
			n, err = io.ReadFull(a.zstdReader, value)
			if err != nil || n != len(value) {
				return
			}
		}

		err = a.compressImporter.Add(&iavl.ExportNode{
			Height:  int8(nodeHeader[0]),
			Version: int64(binary.BigEndian.Uint64(nodeHeader[1:9])),
			Key:     key,
			Value:   value,
		})
		if err != nil {
			// this shouldn't happen unless the data is corrupt
			// we can return silently here because since we didn't import all nodes, the tree hash won't match anyway
			return
		}
		a.numImportedNodes++
	}
}
682
+
683
+
// Abort tears down the import pipeline and deletes any partially-imported
// tree versions, leaving the tree ready for a fresh import attempt.
func (a *snapshotApplier) Abort() error {
	err := a.closeCommons()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	// drop everything the aborted import may have written
	err = a.tree.DeleteVersionsFrom(0)
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	return nil
}

// closeCommons shuts down the streaming pipeline. Order matters: closing the
// zstd reader and both pipe ends unblocks the importer goroutine so the
// subsequent Wait cannot hang, and only then is the iavl importer closed.
func (a *snapshotApplier) closeCommons() error {
	err := a.zstdReader.Close()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	err = a.pipeReader.Close()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	err = a.pipeWriter.Close()
	if err != nil {
		return stacktrace.Propagate(err, "")
	}

	// wait for the importer goroutine to notice the closed stream and exit
	a.importerWg.Wait()
	a.importer.Close()

	return nil
}

// Done reports whether the snapshot was fully applied and verified.
func (a *snapshotApplier) Done() bool {
	return a.done
}
+136
abciapp/tx.go
+136
abciapp/tx.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"bytes"
5
+
"context"
6
+
"time"
7
+
8
+
abcitypes "github.com/cometbft/cometbft/abci/types"
9
+
cbornode "github.com/ipfs/go-ipld-cbor"
10
+
"github.com/palantir/stacktrace"
11
+
"tangled.org/gbl08ma/didplcbft/plc"
12
+
)
13
+
14
+
// ArgumentType is implemented by every transaction-argument struct; ForAction
// ties the argument type to the action it carries arguments for.
type ArgumentType interface {
	ForAction() TransactionAction
}

// TransactionAction identifies what a transaction does (e.g. "CreatePlcOp").
type TransactionAction string

var (
	// knownActions is the registry of all action names, used to detect
	// duplicate registrations.
	knownActions = map[TransactionAction]struct{}{}
	TransactionActionCreatePlcOp = registerTransactionAction[CreatePlcOpArguments]("CreatePlcOp")
)

// registerTransactionAction registers a new action name and sanity-checks that
// ArgType is the argument type meant for it. It panics on programmer error
// (duplicate registration or mismatched argument type); it only runs during
// package variable initialization.
func registerTransactionAction[ArgType ArgumentType](action string) TransactionAction {
	ta := TransactionAction(action)
	if _, present := knownActions[ta]; present {
		panic("action already registered")
	}
	var argType ArgType
	// The `!= ""` guard matters: this runs while package vars are still being
	// initialized, so the TransactionActionXxx variable that ForAction returns
	// may itself still be the zero value (""). Only a non-empty, different
	// action is a real mismatch.
	if argType.ForAction() != "" && argType.ForAction() != ta {
		panic("mismatched argument types")
	}
	knownActions[ta] = struct{}{}
	return ta
}

// Transaction is the on-chain envelope: an action name plus action-specific
// arguments, serialized as canonical CBOR (refmt tags) and, secondarily, JSON.
type Transaction[ArgType any] struct {
	Action    TransactionAction `json:"action" refmt:"action"`
	Arguments ArgType           `json:"arguments,omitempty" refmt:"arguments,omitempty"`
}
42
+
43
+
func UnmarshalTransaction[ArgType ArgumentType](txBytes []byte) (Transaction[ArgType], error) {
44
+
var t Transaction[ArgType]
45
+
err := cbornode.DecodeInto(txBytes, &t)
46
+
if err != nil {
47
+
return Transaction[ArgType]{}, stacktrace.Propagate(err, "")
48
+
}
49
+
50
+
var argType ArgType
51
+
if argType.ForAction() != t.Action {
52
+
return Transaction[ArgType]{}, stacktrace.NewError("mismatched argument types")
53
+
}
54
+
55
+
return t, nil
56
+
}
57
+
58
+
// SanitizeTransaction should always be called to obtain the canonical bytes of a transaction
59
+
func SanitizeTransaction(txBytes []byte) ([]byte, error) {
60
+
var v any
61
+
err := cbornode.DecodeInto(txBytes, &v)
62
+
if err != nil {
63
+
return nil, stacktrace.Propagate(err, "invalid transaction")
64
+
}
65
+
b, err := cbornode.DumpObject(v)
66
+
if err != nil {
67
+
return nil, stacktrace.Propagate(err, "")
68
+
}
69
+
return b, nil
70
+
}
71
+
72
+
func IsTransactionSanitized(txBytes []byte) bool {
73
+
s, err := SanitizeTransaction(txBytes)
74
+
if err != nil {
75
+
return false
76
+
}
77
+
return bytes.Equal(txBytes, s)
78
+
}
79
+
80
+
// treeChange is a single key/value write to apply to the IAVL tree if the
// transaction commits.
type treeChange struct {
	Key   []byte
	Value []byte
}

// processResult carries the outcome of processing one transaction: the tree
// writes it produced plus the ABCI ExecTxResult-style fields. Code 0 means
// success.
type processResult struct {
	TreeChanges []treeChange
	Code        uint32
	Data        []byte
	Log         string
	Info        string
	GasWanted   int64
	GasUsed     int64
	Events      []abcitypes.Event
	Codespace   string
}
95
+
96
+
// processTx decodes a transaction, dispatches it to the handler for its
// action, and returns the result. Client errors are reported in
// processResult.Code (4000 = non-canonical bytes, 4001 = undecodable or
// unknown action); a non-nil error is an internal failure. When execute is
// false the transaction is only validated; when true its effects are applied.
func processTx(ctx context.Context, p plc.PLC, txBytes []byte, atTime time.Time, execute bool) (*processResult, error) {
	// reject bytes that are not in canonical CBOR form, so a given logical
	// transaction has exactly one on-chain representation
	if !IsTransactionSanitized(txBytes) {
		return &processResult{
			Code: 4000,
			Info: "Transaction bytes do not follow canonical serialization format",
		}, nil
	}
	// decode only far enough to discover the action; the action handler
	// re-decodes into its strongly-typed arguments
	var v map[string]interface{}
	err := cbornode.DecodeInto(txBytes, &v)
	if err != nil {
		return &processResult{
			Code: 4001,
			Info: "Invalid transaction",
		}, nil
	}
	actionInterface, ok := v["action"]
	if !ok {
		return &processResult{
			Code: 4001,
			Info: "Unknown transaction action",
		}, nil
	}
	action, ok := actionInterface.(string)
	if !ok {
		return &processResult{
			Code: 4001,
			Info: "Unknown transaction action",
		}, nil
	}

	switch TransactionAction(action) {
	case TransactionActionCreatePlcOp:
		result, err := processCreatePlcOpTx(ctx, p, txBytes, atTime, execute)
		return result, stacktrace.Propagate(err, "")
	default:
		return &processResult{
			Code: 4001,
			Info: "Unknown transaction action",
		}, nil
	}
}
+78
abciapp/tx_create_plc_op.go
+78
abciapp/tx_create_plc_op.go
···
···
1
+
package abciapp
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"time"
7
+
8
+
"github.com/did-method-plc/go-didplc"
9
+
"github.com/ipfs/go-cid"
10
+
cbornode "github.com/ipfs/go-ipld-cbor"
11
+
"github.com/palantir/stacktrace"
12
+
"tangled.org/gbl08ma/didplcbft/plc"
13
+
)
14
+
15
+
// CreatePlcOpArguments are the arguments of the CreatePlcOp transaction
// action: the DID being operated on and the PLC operation itself.
type CreatePlcOpArguments struct {
	DID string `json:"did" refmt:"did"`
	// NOTE(review): unlike DID, Operation has no json tag, so JSON marshaling
	// would use the key "Operation". CBOR (refmt) appears to be the actual wire
	// format, but confirm the asymmetry is intentional.
	Operation *didplc.OpEnum `refmt:"operation"`
}

// ForAction implements ArgumentType, tying this struct to CreatePlcOp.
func (CreatePlcOpArguments) ForAction() TransactionAction {
	return TransactionActionCreatePlcOp
}

// cbornode requires explicit registration of every type it (de)serializes.
func init() {
	cbornode.RegisterCborType(CreatePlcOpArguments{})
	cbornode.RegisterCborType(Transaction[CreatePlcOpArguments]{})
}
28
+
29
+
// processCreatePlcOpTx validates (and, when execute is true, applies) a
// CreatePlcOp transaction. Invalid operations are reported via
// processResult.Code; a non-nil error is an internal failure.
func processCreatePlcOpTx(ctx context.Context, p plc.PLC, txBytes []byte, atTime time.Time, execute bool) (*processResult, error) {
	tx, err := UnmarshalTransaction[CreatePlcOpArguments](txBytes)
	if err != nil {
		return &processResult{
			Code: 4000,
			Info: err.Error(),
		}, nil
	}

	// sadly didplc is really designed to unmarshal JSON, not CBOR
	// so JSON ends up being the lingua franca for operations inside our PLC implementation too
	// we also can't instance didplc.Operations directly from the CBOR unmarshaller (the MakeUnmarshalTransformFunc thing)
	// because the interface makes us lose data (it is not powerful enough to detect the type of a transaction, for instance)
	// so our PLC internals end up depending on OpEnum, too
	// the decision to use CBOR for the entire thing at the blockchain transaction level is:
	// - to make transactions more compact
	// - to have more of a canonical format for them (we specifically use the stable CBOR format already used by the PLC for signing)

	// there is one advantage to this approach: by ensuring we first unmarshal the operations into strongly defined types
	// (e.g. the OpEnum struct of the didplc package)
	// we avoid accepting malformed data like what happened in https://github.com/did-method-plc/did-method-plc/issues/71
	opBytes, err := json.Marshal(tx.Arguments.Operation)
	if err != nil {
		return nil, stacktrace.Propagate(err, "internal error")
	}

	var cid cid.Cid
	if execute {
		cid, err = p.ExecuteOperation(ctx, atTime, tx.Arguments.DID, opBytes)
	} else {
		// validate-only path: cid stays the zero value
		err = p.ValidateOperation(ctx, plc.CommittedTreeVersion, atTime, tx.Arguments.DID, opBytes)
	}
	if err != nil {
		if code, ok := plc.InvalidOperationErrorCode(err); ok {
			// the PLC classified this as a client error; report it as such
			return &processResult{
				Code: code,
				Info: err.Error(),
			}, nil
		}
		return nil, stacktrace.Propagate(err, "internal error")
	}

	// NOTE(review): on the validate-only path Value is the zero cid's bytes;
	// presumably TreeChanges are ignored when execute is false — confirm.
	return &processResult{
		TreeChanges: []treeChange{{
			Key:   []byte(tx.Arguments.DID),
			Value: cid.Bytes(),
		}},
		Code: 0,
	}, nil
}
+407
badgeradapter/adapter.go
+407
badgeradapter/adapter.go
···
···
1
+
package badgeradapter
2
+
3
+
import (
	"bytes"
	"errors"
	"slices"

	"cosmossdk.io/core/store"
	"github.com/cosmos/iavl/db"
	"github.com/palantir/stacktrace"

	badger "github.com/dgraph-io/badger/v4"
)
13
+
14
+
// BadgerAdapter exposes a badger database, optionally namespaced under a key
// prefix, through the iavl db.DB interface.
type BadgerAdapter struct {
	badgerDB  *badger.DB
	keyPrefix []byte // prepended to every key before it reaches badger
}

// AdaptBadger wraps badgerDB so iavl can use it. All keys are transparently
// namespaced under keyPrefix; callers only ever see unprefixed keys.
func AdaptBadger(badgerDB *badger.DB, keyPrefix []byte) *BadgerAdapter {
	return &BadgerAdapter{
		badgerDB:  badgerDB,
		keyPrefix: keyPrefix,
	}
}

// compile-time check that BadgerAdapter satisfies the iavl db.DB interface
var _ db.DB = (*BadgerAdapter)(nil)
27
+
28
+
// prefixKey adds the keyPrefix to the given key
29
+
func (b *BadgerAdapter) prefixKey(key []byte) []byte {
30
+
result := make([]byte, 0, len(b.keyPrefix)+len(key))
31
+
result = append(result, b.keyPrefix...)
32
+
result = append(result, key...)
33
+
return result
34
+
}
35
+
36
+
// Close implements [db.DB].
//
// Note this closes the underlying shared *badger.DB itself, not just this
// adapter view; any other adapters over the same DB become unusable.
func (b *BadgerAdapter) Close() error {
	return b.badgerDB.Close()
}
40
+
41
+
// Get implements [db.DB].
42
+
func (b *BadgerAdapter) Get(key []byte) ([]byte, error) {
43
+
prefixedKey := b.prefixKey(key)
44
+
45
+
var value []byte
46
+
err := b.badgerDB.View(func(txn *badger.Txn) error {
47
+
item, err := txn.Get(prefixedKey)
48
+
if err != nil {
49
+
return err
50
+
}
51
+
value, err = item.ValueCopy(nil)
52
+
return err
53
+
})
54
+
55
+
if err == badger.ErrKeyNotFound {
56
+
return nil, nil
57
+
}
58
+
if err != nil {
59
+
return nil, stacktrace.Propagate(err, "failed to get key from badger")
60
+
}
61
+
62
+
return value, nil
63
+
}
64
+
65
+
// Has implements [db.DB].
66
+
func (b *BadgerAdapter) Has(key []byte) (bool, error) {
67
+
prefixedKey := b.prefixKey(key)
68
+
69
+
var has bool
70
+
err := b.badgerDB.View(func(txn *badger.Txn) error {
71
+
_, err := txn.Get(prefixedKey)
72
+
if err == badger.ErrKeyNotFound {
73
+
has = false
74
+
return nil
75
+
}
76
+
if err != nil {
77
+
return err
78
+
}
79
+
has = true
80
+
return nil
81
+
})
82
+
83
+
if err != nil {
84
+
return false, stacktrace.Propagate(err, "failed to check key existence in badger")
85
+
}
86
+
87
+
return has, nil
88
+
}
89
+
90
+
// BadgerIterator adapts badger.Iterator to store.Iterator.
// It strips the adapter's keyPrefix from every key it yields and enforces the
// [start, end) bounds (end exclusive) on top of badger's own positioning.
type BadgerIterator struct {
	badgerIter *badger.Iterator
	txn        *badger.Txn // read-only txn owning badgerIter; discarded on Close
	start      []byte      // unprefixed inclusive lower bound (as given by the caller)
	end        []byte      // unprefixed exclusive upper bound (nil = unbounded)
	reverse    bool        // true if this is a reverse iterator
	valid      bool
	keyPrefix  []byte
}
100
+
101
+
// hasPrefix checks if a prefixed key actually has the expected keyPrefix
102
+
func (i *BadgerIterator) hasPrefix(prefixedKey []byte) bool {
103
+
return len(prefixedKey) >= len(i.keyPrefix) && bytes.Equal(prefixedKey[:len(i.keyPrefix)], i.keyPrefix)
104
+
}
105
+
106
+
// stripPrefix removes the keyPrefix from a prefixed key
107
+
func (i *BadgerIterator) stripPrefix(prefixedKey []byte) []byte {
108
+
if len(prefixedKey) < len(i.keyPrefix) {
109
+
return prefixedKey // Shouldn't happen, but defensive programming
110
+
}
111
+
stripped := make([]byte, len(prefixedKey)-len(i.keyPrefix))
112
+
copy(stripped, prefixedKey[len(i.keyPrefix):])
113
+
return stripped
114
+
}
115
+
116
+
func (i *BadgerIterator) Domain() (start, end []byte) {
117
+
// Return copies to ensure they're safe for modification
118
+
startCopy := make([]byte, len(i.start))
119
+
endCopy := make([]byte, len(i.end))
120
+
copy(startCopy, i.start)
121
+
copy(endCopy, i.end)
122
+
return startCopy, endCopy
123
+
}
124
+
125
+
// Valid reports whether the iterator is positioned on an in-range, correctly
// prefixed entry. It does not advance the iterator or mutate i.valid.
// NOTE(review): in reverse mode only the lower (start) bound is re-checked
// here; exclusion of the upper bound relies on the initial positioning done by
// ReverseIterator — confirm keys just above end cannot slip through.
func (i *BadgerIterator) Valid() bool {
	if !i.valid || !i.badgerIter.Valid() {
		return false
	}

	// Ensure the current key has the correct keyPrefix
	// If not, skip to the next valid key
	item := i.badgerIter.Item()
	prefixedKey := item.Key()
	if !i.hasPrefix(prefixedKey) {
		// We've gone out of the bounds of "our" prefixes
		return false
	}

	// For forward iteration, check if we've reached the end (end is exclusive)
	if i.end != nil && !i.reverse {
		currentKey := i.stripPrefix(prefixedKey)
		// If current key >= end key, we're done
		if bytes.Compare(currentKey, i.end) >= 0 {
			return false
		}
	}

	// For reverse iteration, check if we've gone below the start (start is inclusive)
	if i.start != nil && i.reverse {
		currentKey := i.stripPrefix(prefixedKey)
		// If current key < start key, we're done
		if bytes.Compare(currentKey, i.start) < 0 {
			return false
		}
	}

	return true
}

// Next advances the iterator and updates i.valid, applying the same
// prefix/bounds checks as Valid (the logic is intentionally duplicated so
// invalidity sticks once the iterator walks out of range).
// It panics when called on an already-invalid iterator, per store.Iterator.
func (i *BadgerIterator) Next() {
	if !i.valid {
		panic("iterator is not valid")
	}
	i.badgerIter.Next()

	// Check if the badger iterator is still valid
	if !i.badgerIter.Valid() {
		i.valid = false
		return
	}

	item := i.badgerIter.Item()
	prefixedKey := item.Key()
	if !i.hasPrefix(prefixedKey) {
		// We've gone out of the bounds of "our" prefixes
		i.valid = false
		return
	}

	// For forward iteration, check if we've reached the end (end is exclusive)
	if i.end != nil && !i.reverse {
		currentKey := i.stripPrefix(prefixedKey)
		// If current key >= end key, we're done
		if bytes.Compare(currentKey, i.end) >= 0 {
			i.valid = false
			return
		}
	}

	// For reverse iteration, check if we've gone below the start (start is inclusive)
	if i.start != nil && i.reverse {
		currentKey := i.stripPrefix(prefixedKey)
		// If current key < start key, we're done
		if bytes.Compare(currentKey, i.start) < 0 {
			i.valid = false
			return
		}
	}

	i.valid = true
}
202
+
203
+
// Key returns the current key with the keyPrefix stripped.
// It panics when the iterator is not valid, per store.Iterator.
func (i *BadgerIterator) Key() []byte {
	if !i.valid {
		panic("iterator is not valid")
	}
	item := i.badgerIter.Item()
	return i.stripPrefix(item.Key())
}

// Value returns a copy of the current value. It panics when the iterator is
// not valid, and also on a value-copy failure (store.Iterator has no way to
// surface an error here).
func (i *BadgerIterator) Value() []byte {
	if !i.valid {
		panic("iterator is not valid")
	}
	item := i.badgerIter.Item()
	value, err := item.ValueCopy(nil)
	if err != nil {
		panic("failed to copy value: " + err.Error())
	}
	return value
}

// Error implements store.Iterator.
func (i *BadgerIterator) Error() error {
	// Badger iterator doesn't have a separate error method
	// Errors are typically caught during iteration setup
	return nil
}

// Close releases the badger iterator and its transaction. Safe to call more
// than once.
func (i *BadgerIterator) Close() error {
	// Close the badger iterator first - this is critical to avoid panics
	if i.badgerIter != nil {
		i.badgerIter.Close()
	}

	// Mark as invalid
	i.valid = false

	// Discard the transaction to release resources
	if i.txn != nil {
		i.txn.Discard()
		i.txn = nil
	}

	return nil
}
246
+
247
+
// Iterator implements [db.DB]. It returns a forward iterator over unprefixed
// keys in [start, end) (end exclusive; nil bounds mean unbounded).
func (b *BadgerAdapter) Iterator(start []byte, end []byte) (store.Iterator, error) {
	// Create a read-only transaction to hold the iterator
	txn := b.badgerDB.NewTransaction(false)

	// Create prefixed version of start (prefixKey(nil) is just the prefix, so a
	// nil start seeks to the first prefixed key)
	prefixedStart := b.prefixKey(start)

	opts := badger.IteratorOptions{
		PrefetchValues: true,
		Reverse:        false,
		AllVersions:    false,
	}
	badgerIter := txn.NewIterator(opts)

	badgerIter.Seek(prefixedStart)

	// The initial valid flag only reflects badger positioning; prefix and end
	// bounds are enforced by BadgerIterator.Valid/Next.
	iterator := &BadgerIterator{
		badgerIter: badgerIter,
		txn:        txn,
		start:      start, // Store original start/end for Domain() method
		end:        end,
		reverse:    false, // This is a forward iterator
		valid:      badgerIter.Valid(),
		keyPrefix:  b.keyPrefix,
	}

	return iterator, nil
}
276
+
277
+
// incrementSlice treats b as a big-endian unsigned integer and adds one to it
// in place, propagating the carry leftwards. It assumes that the first byte of
// b is not 0xff (otherwise an all-0xff slice would silently wrap to all
// zeros). An empty slice is left unchanged.
func incrementSlice(b []byte) {
	for i := len(b) - 1; i >= 0; i-- {
		b[i]++
		if b[i] != 0 {
			return // no carry left to propagate
		}
	}
}
286
+
287
+
// ReverseIterator implements [db.DB].
288
+
func (b *BadgerAdapter) ReverseIterator(start []byte, end []byte) (store.Iterator, error) {
289
+
// Create a read-only transaction to hold the iterator
290
+
txn := b.badgerDB.NewTransaction(false)
291
+
292
+
opts := badger.IteratorOptions{
293
+
PrefetchValues: true,
294
+
Reverse: true, // This enables reverse iteration
295
+
AllVersions: false,
296
+
}
297
+
badgerIter := txn.NewIterator(opts)
298
+
299
+
prefixedEnd := b.prefixKey(end)
300
+
incrementedEnd := slices.Clone(prefixedEnd)
301
+
incrementSlice(incrementedEnd) // Badger's Seek is inclusive but in these iterators end is exclusive (except if nil)
302
+
303
+
badgerIter.Seek(incrementedEnd)
304
+
// if end is nil, then Badger might be (depending on whether end matches an existing key)
305
+
// already giving us the key we want and there's no need to skip
306
+
if end != nil && badgerIter.Valid() && bytes.Equal(badgerIter.Item().Key(), prefixedEnd) {
307
+
badgerIter.Next()
308
+
}
309
+
310
+
iterator := &BadgerIterator{
311
+
badgerIter: badgerIter,
312
+
txn: txn,
313
+
start: start,
314
+
end: end,
315
+
reverse: true, // This is a reverse iterator
316
+
valid: badgerIter.Valid(),
317
+
keyPrefix: b.keyPrefix,
318
+
}
319
+
320
+
return iterator, nil
321
+
}
322
+
323
+
// BadgerBatch implements store.Batch.
// BadgerBatch writes are atomic up until the point where they'd exceed the badger max transaction size,
// at which point they are split into multiple non-atomic writes.
type BadgerBatch struct {
	wb        *badger.WriteBatch
	closed    bool   // set once the batch has been written or canceled
	keyPrefix []byte // prepended to every key, mirroring BadgerAdapter
}
331
+
332
+
func (b *BadgerBatch) Set(key, value []byte) error {
333
+
if b.closed {
334
+
return stacktrace.NewError("batch has been written or closed")
335
+
}
336
+
if len(key) == 0 {
337
+
return stacktrace.NewError("key cannot be empty")
338
+
}
339
+
if value == nil {
340
+
return stacktrace.NewError("value cannot be nil")
341
+
}
342
+
343
+
prefixedKey := make([]byte, 0, len(b.keyPrefix)+len(key))
344
+
prefixedKey = append(prefixedKey, b.keyPrefix...)
345
+
prefixedKey = append(prefixedKey, key...)
346
+
347
+
err := b.wb.Set(prefixedKey, value)
348
+
return stacktrace.Propagate(err, "failed to set key in batch")
349
+
}
350
+
351
+
func (b *BadgerBatch) Delete(key []byte) error {
352
+
if b.closed {
353
+
return stacktrace.NewError("batch has been written or closed")
354
+
}
355
+
if len(key) == 0 {
356
+
return stacktrace.NewError("key cannot be empty")
357
+
}
358
+
359
+
prefixedKey := make([]byte, 0, len(b.keyPrefix)+len(key))
360
+
prefixedKey = append(prefixedKey, b.keyPrefix...)
361
+
prefixedKey = append(prefixedKey, key...)
362
+
363
+
err := b.wb.Delete(prefixedKey)
364
+
return stacktrace.Propagate(err, "failed to delete key in batch")
365
+
}
366
+
367
+
// Write flushes all staged operations to badger and marks the batch closed;
// further Set/Delete/Write calls will fail.
func (b *BadgerBatch) Write() error {
	if b.closed {
		return stacktrace.NewError("batch has been written or closed")
	}
	b.closed = true
	err := b.wb.Flush()
	return stacktrace.Propagate(err, "failed to write batch")
}

// WriteSync implements store.Batch.
func (b *BadgerBatch) WriteSync() error {
	// Badger doesn't have separate WriteSync, so we just use Write
	return b.Write()
}

// Close cancels the batch if it was never written. Calling it after Write or
// a previous Close is a no-op.
func (b *BadgerBatch) Close() error {
	if !b.closed {
		b.wb.Cancel()
		b.closed = true
	}
	return nil
}

// GetByteSize implements store.Batch.
func (b *BadgerBatch) GetByteSize() (int, error) {
	// Badger doesn't provide byte size tracking for batches
	// Return 0 as a placeholder
	return 0, nil
}

// NewBatch implements [db.DB]. The batch inherits the adapter's keyPrefix.
func (b *BadgerAdapter) NewBatch() store.Batch {
	return &BadgerBatch{
		wb:        b.badgerDB.NewWriteBatch(),
		keyPrefix: b.keyPrefix,
	}
}

// NewBatchWithSize implements [db.DB].
func (b *BadgerAdapter) NewBatchWithSize(size int) store.Batch {
	// Badger doesn't support pre-allocated batch sizes, so we just create a regular batch
	return b.NewBatch()
}
+454
badgeradapter/adapter_test.go
+454
badgeradapter/adapter_test.go
···
···
1
+
package badgeradapter
2
+
3
+
import (
4
+
"testing"
5
+
6
+
badger "github.com/dgraph-io/badger/v4"
7
+
"github.com/stretchr/testify/require"
8
+
)
9
+
10
+
// TestBadgerAdapter_KeyPrefixStripping verifies both halves of the prefixing
// contract: keys land in badger WITH the prefix, and Get/Iterator expose them
// WITHOUT it.
func TestBadgerAdapter_KeyPrefixStripping(t *testing.T) {
	// Create a temporary badger database
	opts := badger.DefaultOptions("").WithInMemory(true)
	db, err := badger.Open(opts)
	require.NoError(t, err)
	defer db.Close()

	// Create adapter with a specific key prefix
	keyPrefix := []byte("test:")
	adapter := AdaptBadger(db, keyPrefix)

	// Write some test data
	batch := adapter.NewBatch()
	err = batch.Set([]byte("key1"), []byte("value1"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	// Verify that the underlying badger database stores the key WITH the prefix
	var foundPrefixedKey bool
	err = db.View(func(txn *badger.Txn) error {
		opts := badger.IteratorOptions{
			PrefetchValues: true,
			Reverse:        false,
			AllVersions:    false,
		}
		iter := txn.NewIterator(opts)
		defer iter.Close()

		// scan raw badger keys, bypassing the adapter
		for iter.Seek([]byte("test:")); iter.Valid(); iter.Next() {
			item := iter.Item()
			key := item.KeyCopy(nil)
			if string(key) == "test:key1" {
				foundPrefixedKey = true
				value, err := item.ValueCopy(nil)
				require.NoError(t, err)
				require.Equal(t, []byte("value1"), value)
				break
			}
		}
		return nil
	})
	require.NoError(t, err)
	require.True(t, foundPrefixedKey, "Expected to find prefixed key 'test:key1' in underlying badger database")

	// Test Get operation - should work with unprefixed key
	value, err := adapter.Get([]byte("key1"))
	require.NoError(t, err)
	require.Equal(t, []byte("value1"), value)

	// Test iterator - should iterate over prefixed keys but return unprefixed keys
	iter, err := adapter.Iterator([]byte("key1"), []byte("key2"))
	require.NoError(t, err)
	defer iter.Close()

	require.True(t, iter.Valid())
	returnedKey := iter.Key()
	returnedValue := iter.Value()

	// The returned key should NOT have the prefix
	require.Equal(t, []byte("key1"), returnedKey)
	require.Equal(t, []byte("value1"), returnedValue)

	// only one key was written, so advancing must exhaust the iterator
	iter.Next()
	require.False(t, iter.Valid())
}
76
+
77
+
func TestBadgerAdapter_ReverseIteratorPrefixStripping(t *testing.T) {
78
+
// Create a temporary badger database
79
+
opts := badger.DefaultOptions("").WithInMemory(true)
80
+
db, err := badger.Open(opts)
81
+
require.NoError(t, err)
82
+
defer db.Close()
83
+
84
+
// Create adapter with a specific key prefix
85
+
keyPrefix := []byte("prefix:")
86
+
adapter := AdaptBadger(db, keyPrefix)
87
+
88
+
// Write multiple test data entries
89
+
batch := adapter.NewBatch()
90
+
for i := 1; i <= 3; i++ {
91
+
key := []byte("key" + string(rune('0'+i)))
92
+
value := []byte("value" + string(rune('0'+i)))
93
+
err = batch.Set(key, value)
94
+
require.NoError(t, err)
95
+
}
96
+
err = batch.Write()
97
+
require.NoError(t, err)
98
+
99
+
// Verify that the underlying badger database stores the keys WITH the prefix
100
+
var foundPrefixedKeys []string
101
+
err = db.View(func(txn *badger.Txn) error {
102
+
opts := badger.IteratorOptions{
103
+
PrefetchValues: true,
104
+
Reverse: false,
105
+
AllVersions: false,
106
+
}
107
+
iter := txn.NewIterator(opts)
108
+
defer iter.Close()
109
+
110
+
for iter.Seek([]byte("prefix:")); iter.Valid(); iter.Next() {
111
+
item := iter.Item()
112
+
key := item.KeyCopy(nil)
113
+
keyStr := string(key)
114
+
if len(keyStr) > len("prefix:") && keyStr[:len("prefix:")] == "prefix:" {
115
+
foundPrefixedKeys = append(foundPrefixedKeys, keyStr)
116
+
}
117
+
}
118
+
return nil
119
+
})
120
+
require.NoError(t, err)
121
+
require.Len(t, foundPrefixedKeys, 3, "Expected to find 3 prefixed keys in underlying badger database")
122
+
require.Contains(t, foundPrefixedKeys, "prefix:key1")
123
+
require.Contains(t, foundPrefixedKeys, "prefix:key2")
124
+
require.Contains(t, foundPrefixedKeys, "prefix:key3")
125
+
126
+
// Test reverse iterator - should iterate over prefixed keys but return unprefixed keys
127
+
iter, err := adapter.ReverseIterator([]byte("key1"), []byte("key4"))
128
+
require.NoError(t, err)
129
+
defer iter.Close()
130
+
131
+
// Should start with the last key in range
132
+
require.True(t, iter.Valid())
133
+
returnedKey := iter.Key()
134
+
returnedValue := iter.Value()
135
+
136
+
// The returned key should NOT have the prefix
137
+
require.Equal(t, []byte("key3"), returnedKey)
138
+
require.Equal(t, []byte("value3"), returnedValue)
139
+
140
+
// Move to previous key
141
+
iter.Next()
142
+
require.True(t, iter.Valid())
143
+
returnedKey = iter.Key()
144
+
returnedValue = iter.Value()
145
+
require.Equal(t, []byte("key2"), returnedKey)
146
+
require.Equal(t, []byte("value2"), returnedValue)
147
+
148
+
// Move to previous key again
149
+
iter.Next()
150
+
require.True(t, iter.Valid())
151
+
returnedKey = iter.Key()
152
+
returnedValue = iter.Value()
153
+
require.Equal(t, []byte("key1"), returnedKey)
154
+
require.Equal(t, []byte("value1"), returnedValue)
155
+
156
+
// Should be at the beginning of range
157
+
iter.Next()
158
+
require.False(t, iter.Valid())
159
+
}
160
+
161
+
func TestBadgerAdapter_IteratorRespectsEnd(t *testing.T) {
162
+
// Create a temporary badger database
163
+
opts := badger.DefaultOptions("").WithInMemory(true)
164
+
db, err := badger.Open(opts)
165
+
require.NoError(t, err)
166
+
defer db.Close()
167
+
168
+
// Create adapter with a specific key prefix
169
+
keyPrefix := []byte("test:")
170
+
adapter := AdaptBadger(db, keyPrefix)
171
+
172
+
// Write test data
173
+
batch := adapter.NewBatch()
174
+
data := map[string]string{
175
+
"apple": "fruit1",
176
+
"banana": "fruit2",
177
+
"cherry": "fruit3",
178
+
"date": "fruit4",
179
+
"elderberry": "fruit5",
180
+
}
181
+
for key, value := range data {
182
+
err = batch.Set([]byte(key), []byte(value))
183
+
require.NoError(t, err)
184
+
}
185
+
err = batch.Write()
186
+
require.NoError(t, err)
187
+
188
+
// Test forward iteration with end boundary
189
+
iter, err := adapter.Iterator([]byte("apple"), []byte("cherry"))
190
+
require.NoError(t, err)
191
+
defer iter.Close()
192
+
193
+
// Should include "apple" and "banana" but stop before "cherry"
194
+
require.True(t, iter.Valid())
195
+
require.Equal(t, []byte("apple"), iter.Key())
196
+
require.Equal(t, []byte("fruit1"), iter.Value())
197
+
198
+
iter.Next()
199
+
require.True(t, iter.Valid())
200
+
require.Equal(t, []byte("banana"), iter.Key())
201
+
require.Equal(t, []byte("fruit2"), iter.Value())
202
+
203
+
// Next should stop before "cherry" since end is exclusive
204
+
iter.Next()
205
+
require.False(t, iter.Valid(), "Iterator should be invalid after reaching end boundary")
206
+
207
+
// Test forward iteration with nil end (should iterate to the end)
208
+
iter, err = adapter.Iterator([]byte("apple"), nil)
209
+
require.NoError(t, err)
210
+
defer iter.Close()
211
+
212
+
count := 0
213
+
for iter.Valid() {
214
+
count++
215
+
iter.Next()
216
+
}
217
+
require.Equal(t, 5, count, "Should iterate over all 5 keys when end is nil")
218
+
219
+
// Test forward iteration with start = nil (should start from first key)
220
+
iter, err = adapter.Iterator(nil, []byte("cherry"))
221
+
require.NoError(t, err)
222
+
defer iter.Close()
223
+
224
+
count = 0
225
+
for iter.Valid() {
226
+
count++
227
+
iter.Next()
228
+
}
229
+
require.Equal(t, 2, count, "Should iterate over 2 keys (apple, banana) before cherry")
230
+
}
231
+
232
+
func TestBadgerAdapter_ReverseIteratorRespectsStart(t *testing.T) {
233
+
// Create a temporary badger database
234
+
opts := badger.DefaultOptions("").WithInMemory(true)
235
+
db, err := badger.Open(opts)
236
+
require.NoError(t, err)
237
+
defer db.Close()
238
+
239
+
// Create adapter with a specific key prefix
240
+
keyPrefix := []byte("test:")
241
+
adapter := AdaptBadger(db, keyPrefix)
242
+
243
+
// Write test data
244
+
batch := adapter.NewBatch()
245
+
data := map[string]string{
246
+
"apple": "fruit1",
247
+
"banana": "fruit2",
248
+
"cherry": "fruit3",
249
+
"date": "fruit4",
250
+
"elderberry": "fruit5",
251
+
}
252
+
for key, value := range data {
253
+
err = batch.Set([]byte(key), []byte(value))
254
+
require.NoError(t, err)
255
+
}
256
+
err = batch.Write()
257
+
require.NoError(t, err)
258
+
259
+
// Test reverse iteration with start boundary
260
+
iter, err := adapter.ReverseIterator([]byte("banana"), []byte("elderberry"))
261
+
require.NoError(t, err)
262
+
defer iter.Close()
263
+
264
+
// Should start from "date" and go backwards to "banana" (inclusive)
265
+
require.True(t, iter.Valid())
266
+
require.Equal(t, []byte("date"), iter.Key())
267
+
require.Equal(t, []byte("fruit4"), iter.Value())
268
+
269
+
iter.Next()
270
+
require.True(t, iter.Valid())
271
+
require.Equal(t, []byte("cherry"), iter.Key())
272
+
require.Equal(t, []byte("fruit3"), iter.Value())
273
+
274
+
iter.Next()
275
+
require.True(t, iter.Valid())
276
+
require.Equal(t, []byte("banana"), iter.Key())
277
+
require.Equal(t, []byte("fruit2"), iter.Value())
278
+
279
+
// Next should stop since we've reached the start boundary (inclusive)
280
+
iter.Next()
281
+
require.False(t, iter.Valid(), "Iterator should be invalid after reaching start boundary")
282
+
283
+
// Test reverse iteration with nil start (should go to the beginning)
284
+
iter, err = adapter.ReverseIterator(nil, []byte("cherry"))
285
+
require.NoError(t, err)
286
+
defer iter.Close()
287
+
288
+
count := 0
289
+
for iter.Valid() {
290
+
count++
291
+
iter.Next()
292
+
}
293
+
require.Equal(t, 2, count, "Should iterate over 2 keys (banana, apple) before cherry")
294
+
295
+
// Test reverse iteration with nil end (should start from the last key)
296
+
iter, err = adapter.ReverseIterator([]byte("banana"), nil)
297
+
require.NoError(t, err)
298
+
defer iter.Close()
299
+
300
+
count = 0
301
+
for iter.Valid() {
302
+
count++
303
+
iter.Next()
304
+
}
305
+
require.Equal(t, 4, count, "Should iterate over 4 keys (elderberry, date, cherry, banana) when end is nil")
306
+
}
307
+
308
+
func TestBadgerAdapter_IteratorRespectsKeyPrefix(t *testing.T) {
309
+
// Create a temporary badger database
310
+
opts := badger.DefaultOptions("").WithInMemory(true)
311
+
db, err := badger.Open(opts)
312
+
require.NoError(t, err)
313
+
defer db.Close()
314
+
315
+
// Create adapter with a specific key prefix
316
+
keyPrefix := []byte("table1:")
317
+
adapter := AdaptBadger(db, keyPrefix)
318
+
319
+
// Write test data directly to badger with different prefixes to simulate multiple "tables"
320
+
err = db.Update(func(txn *badger.Txn) error {
321
+
// Write keys with the correct prefix (what the adapter should see)
322
+
err := txn.Set([]byte("table1:apple"), []byte("fruit1"))
323
+
require.NoError(t, err)
324
+
err = txn.Set([]byte("table1:banana"), []byte("fruit2"))
325
+
require.NoError(t, err)
326
+
err = txn.Set([]byte("table1:cherry"), []byte("fruit3"))
327
+
require.NoError(t, err)
328
+
329
+
// Write keys with a different prefix (what the adapter should NOT see)
330
+
err = txn.Set([]byte("table2:apple"), []byte("other1"))
331
+
require.NoError(t, err)
332
+
err = txn.Set([]byte("table2:date"), []byte("other2"))
333
+
require.NoError(t, err)
334
+
335
+
// Write keys with no prefix (what the adapter should NOT see)
336
+
err = txn.Set([]byte("apple"), []byte("raw1"))
337
+
require.NoError(t, err)
338
+
err = txn.Set([]byte("zebra"), []byte("raw2"))
339
+
require.NoError(t, err)
340
+
341
+
return nil
342
+
})
343
+
require.NoError(t, err)
344
+
345
+
// Test forward iteration - should only see keys with "table1:" prefix
346
+
iter, err := adapter.Iterator(nil, nil)
347
+
require.NoError(t, err)
348
+
defer iter.Close()
349
+
350
+
var keys []string
351
+
for iter.Valid() {
352
+
keys = append(keys, string(iter.Key()))
353
+
iter.Next()
354
+
}
355
+
356
+
// Should only see the 3 keys with the correct prefix, stripped of the prefix
357
+
require.Equal(t, []string{"apple", "banana", "cherry"}, keys)
358
+
359
+
// Test forward iteration with range - should only see keys with "table1:" prefix in range
360
+
iter, err = adapter.Iterator([]byte("banana"), []byte("cherry"))
361
+
require.NoError(t, err)
362
+
defer iter.Close()
363
+
364
+
keys = nil
365
+
for iter.Valid() {
366
+
keys = append(keys, string(iter.Key()))
367
+
iter.Next()
368
+
}
369
+
370
+
// Should only see "banana" (inclusive) but not "cherry" (exclusive)
371
+
require.Equal(t, []string{"banana"}, keys)
372
+
373
+
// Test reverse iteration - should only see keys with "table1:" prefix
374
+
iter, err = adapter.ReverseIterator(nil, nil)
375
+
require.NoError(t, err)
376
+
defer iter.Close()
377
+
378
+
keys = nil
379
+
for iter.Valid() {
380
+
keys = append(keys, string(iter.Key()))
381
+
iter.Next()
382
+
}
383
+
384
+
// Should see the 3 keys in reverse order, stripped of the prefix
385
+
require.Equal(t, []string{"cherry", "banana", "apple"}, keys)
386
+
387
+
// Test reverse iteration with range - should only see keys with "table1:" prefix in range
388
+
iter, err = adapter.ReverseIterator([]byte("apple"), []byte("cherry"))
389
+
require.NoError(t, err)
390
+
defer iter.Close()
391
+
392
+
keys = nil
393
+
for iter.Valid() {
394
+
keys = append(keys, string(iter.Key()))
395
+
iter.Next()
396
+
}
397
+
398
+
// Should see keys from cherry (exclusive) down to apple (inclusive)
399
+
require.Equal(t, []string{"banana", "apple"}, keys)
400
+
401
+
// Test reverse iteration with wider range - should only see keys with "table1:" prefix in range
402
+
iter, err = adapter.ReverseIterator([]byte("apple"), []byte("zzz"))
403
+
require.NoError(t, err)
404
+
defer iter.Close()
405
+
406
+
keys = nil
407
+
for iter.Valid() {
408
+
keys = append(keys, string(iter.Key()))
409
+
iter.Next()
410
+
}
411
+
412
+
// Should see keys from cherry (exclusive) down to apple (inclusive)
413
+
require.Equal(t, []string{"cherry", "banana", "apple"}, keys)
414
+
415
+
// An adapter without key prefix should be able to iterate over all keys
416
+
adapter = AdaptBadger(db, []byte{})
417
+
418
+
iter, err = adapter.ReverseIterator(nil, nil)
419
+
require.NoError(t, err)
420
+
defer iter.Close()
421
+
422
+
keys = nil
423
+
for iter.Valid() {
424
+
keys = append(keys, string(iter.Key()))
425
+
iter.Next()
426
+
}
427
+
428
+
// Should see all keys in reverse order, regardless of prefix
429
+
require.Len(t, keys, 7)
430
+
431
+
iter, err = adapter.ReverseIterator([]byte("table2:date"), []byte("zebra"))
432
+
require.NoError(t, err)
433
+
defer iter.Close()
434
+
435
+
keys = nil
436
+
for iter.Valid() {
437
+
keys = append(keys, string(iter.Key()))
438
+
iter.Next()
439
+
}
440
+
441
+
require.Equal(t, []string{"table2:date"}, keys)
442
+
443
+
iter, err = adapter.ReverseIterator([]byte("table2:date"), []byte("zzz"))
444
+
require.NoError(t, err)
445
+
defer iter.Close()
446
+
447
+
keys = nil
448
+
for iter.Valid() {
449
+
keys = append(keys, string(iter.Key()))
450
+
iter.Next()
451
+
}
452
+
453
+
require.Equal(t, []string{"zebra", "table2:date"}, keys)
454
+
}
+128
go.mod
+128
go.mod
···
···
1
+
module tangled.org/gbl08ma/didplcbft
2
+
3
+
go 1.25.4
4
+
5
+
require (
6
+
cosmossdk.io/core v0.12.1-0.20240725072823-6a2d039e1212
7
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe
8
+
github.com/cometbft/cometbft v0.38.19
9
+
github.com/cosmos/iavl v1.3.5
10
+
github.com/cosmos/ics23/go v0.10.0
11
+
github.com/dgraph-io/badger/v4 v4.9.0
12
+
github.com/did-method-plc/go-didplc v0.0.0-20251125183445-342320c327e2
13
+
github.com/google/uuid v1.6.0
14
+
github.com/ipfs/go-cid v0.4.1
15
+
github.com/ipfs/go-ipld-cbor v0.1.0
16
+
github.com/klauspost/compress v1.18.0
17
+
github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177
18
+
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f
19
+
github.com/puzpuzpuz/xsync/v4 v4.2.0
20
+
github.com/rs/cors v1.11.1
21
+
github.com/samber/lo v1.52.0
22
+
github.com/samber/mo v1.16.0
23
+
github.com/spf13/viper v1.19.0
24
+
github.com/stretchr/testify v1.11.1
25
+
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb
26
+
)
27
+
28
+
require (
29
+
github.com/DataDog/zstd v1.4.5 // indirect
30
+
github.com/beorn7/perks v1.0.1 // indirect
31
+
github.com/cespare/xxhash/v2 v2.3.0 // indirect
32
+
github.com/cockroachdb/errors v1.11.3 // indirect
33
+
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
34
+
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
35
+
github.com/cockroachdb/pebble v1.1.5 // indirect
36
+
github.com/cockroachdb/redact v1.1.5 // indirect
37
+
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
38
+
github.com/cometbft/cometbft-db v0.14.1 // indirect
39
+
github.com/cosmos/gogoproto v1.7.0 // indirect
40
+
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
41
+
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
42
+
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
43
+
github.com/dustin/go-humanize v1.0.1 // indirect
44
+
github.com/emicklei/dot v1.6.2 // indirect
45
+
github.com/fsnotify/fsnotify v1.7.0 // indirect
46
+
github.com/getsentry/sentry-go v0.27.0 // indirect
47
+
github.com/go-kit/kit v0.13.0 // indirect
48
+
github.com/go-kit/log v0.2.1 // indirect
49
+
github.com/go-logfmt/logfmt v0.6.0 // indirect
50
+
github.com/go-logr/logr v1.4.3 // indirect
51
+
github.com/go-logr/stdr v1.2.2 // indirect
52
+
github.com/gogo/protobuf v1.3.2 // indirect
53
+
github.com/golang/protobuf v1.5.4 // indirect
54
+
github.com/golang/snappy v0.0.4 // indirect
55
+
github.com/google/btree v1.1.3 // indirect
56
+
github.com/google/flatbuffers v25.2.10+incompatible // indirect
57
+
github.com/google/go-cmp v0.7.0 // indirect
58
+
github.com/google/orderedcode v0.0.1 // indirect
59
+
github.com/gorilla/websocket v1.5.3 // indirect
60
+
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
61
+
github.com/hashicorp/hcl v1.0.0 // indirect
62
+
github.com/ipfs/go-block-format v0.2.0 // indirect
63
+
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
64
+
github.com/ipfs/go-ipld-format v0.6.0 // indirect
65
+
github.com/jmhodges/levigo v1.0.0 // indirect
66
+
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
67
+
github.com/kr/pretty v0.3.1 // indirect
68
+
github.com/kr/text v0.2.0 // indirect
69
+
github.com/lib/pq v1.10.9 // indirect
70
+
github.com/linxGnu/grocksdb v1.8.14 // indirect
71
+
github.com/magiconair/properties v1.8.7 // indirect
72
+
github.com/minio/highwayhash v1.0.3 // indirect
73
+
github.com/minio/sha256-simd v1.0.1 // indirect
74
+
github.com/mitchellh/mapstructure v1.5.0 // indirect
75
+
github.com/mr-tron/base58 v1.2.0 // indirect
76
+
github.com/multiformats/go-base32 v0.1.0 // indirect
77
+
github.com/multiformats/go-base36 v0.2.0 // indirect
78
+
github.com/multiformats/go-multibase v0.2.0 // indirect
79
+
github.com/multiformats/go-multihash v0.2.3 // indirect
80
+
github.com/multiformats/go-varint v0.0.7 // indirect
81
+
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
82
+
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae // indirect
83
+
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
84
+
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect
85
+
github.com/pkg/errors v0.9.1 // indirect
86
+
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
87
+
github.com/prometheus/client_golang v1.21.0 // indirect
88
+
github.com/prometheus/client_model v0.6.1 // indirect
89
+
github.com/prometheus/common v0.62.0 // indirect
90
+
github.com/prometheus/procfs v0.15.1 // indirect
91
+
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
92
+
github.com/rogpeppe/go-internal v1.13.1 // indirect
93
+
github.com/sagikazarmark/locafero v0.4.0 // indirect
94
+
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
95
+
github.com/sasha-s/go-deadlock v0.3.5 // indirect
96
+
github.com/sourcegraph/conc v0.3.0 // indirect
97
+
github.com/spaolacci/murmur3 v1.1.0 // indirect
98
+
github.com/spf13/afero v1.11.0 // indirect
99
+
github.com/spf13/cast v1.8.0 // indirect
100
+
github.com/spf13/pflag v1.0.6 // indirect
101
+
github.com/subosito/gotenv v1.6.0 // indirect
102
+
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
103
+
github.com/whyrusleeping/cbor-gen v0.2.1-0.20241030202151-b7a6831be65e // indirect
104
+
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
105
+
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
106
+
go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect
107
+
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
108
+
go.opentelemetry.io/otel v1.37.0 // indirect
109
+
go.opentelemetry.io/otel/metric v1.37.0 // indirect
110
+
go.opentelemetry.io/otel/trace v1.37.0 // indirect
111
+
go.uber.org/multierr v1.11.0 // indirect
112
+
golang.org/x/crypto v0.41.0 // indirect
113
+
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
114
+
golang.org/x/net v0.43.0 // indirect
115
+
golang.org/x/sync v0.16.0 // indirect
116
+
golang.org/x/sys v0.36.0 // indirect
117
+
golang.org/x/text v0.28.0 // indirect
118
+
golang.org/x/tools v0.36.0 // indirect
119
+
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
120
+
google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect
121
+
google.golang.org/grpc v1.70.0 // indirect
122
+
google.golang.org/protobuf v1.36.7 // indirect
123
+
gopkg.in/ini.v1 v1.67.0 // indirect
124
+
gopkg.in/yaml.v3 v3.0.1 // indirect
125
+
lukechampine.com/blake3 v1.2.1 // indirect
126
+
)
127
+
128
+
replace github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177 => github.com/gsgalloway/stacktrace v0.0.0-20200507040314-ca3802f754c7
+405
go.sum
+405
go.sum
···
···
1
+
cosmossdk.io/core v0.12.1-0.20240725072823-6a2d039e1212 h1:yLjl2aq6XMa5Zb1aKfwNHn1U+fFmmAtZyWKNWm/yVH8=
2
+
cosmossdk.io/core v0.12.1-0.20240725072823-6a2d039e1212/go.mod h1:sLzMwAW9HW+Nm3GltUVHDRSRZbcXLy9+2AYgi2bwt/s=
3
+
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
4
+
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
5
+
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
6
+
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
7
+
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
8
+
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
9
+
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
10
+
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
11
+
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
12
+
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
13
+
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
14
+
github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I=
15
+
github.com/adlio/schema v1.3.6/go.mod h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg=
16
+
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
17
+
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
18
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe h1:VBhaqE5ewQgXbY5SfSWFZC/AwHFo7cHxZKFYi2ce9Yo=
19
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe/go.mod h1:RuQVrCGm42QNsgumKaR6se+XkFKfCPNwdCiTvqKRUck=
20
+
github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c=
21
+
github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE=
22
+
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
23
+
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
24
+
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
25
+
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
26
+
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
27
+
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
28
+
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
29
+
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
30
+
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
31
+
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
32
+
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
33
+
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
34
+
github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=
35
+
github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo=
36
+
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
37
+
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
38
+
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
39
+
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
40
+
github.com/cometbft/cometbft v0.38.19 h1:vNdtCkvhuwUlrcLPAyigV7lQpmmo+tAq8CsB8gZjEYw=
41
+
github.com/cometbft/cometbft v0.38.19/go.mod h1:UCu8dlHqvkAsmAFmWDRWNZJPlu6ya2fTWZlDrWsivwo=
42
+
github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ=
43
+
github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ=
44
+
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
45
+
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
46
+
github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro=
47
+
github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0=
48
+
github.com/cosmos/iavl v1.3.5 h1:wTDFbaa/L0FVUrwTlzMnjN3fphtKgWxgcZmTc45MZuA=
49
+
github.com/cosmos/iavl v1.3.5/go.mod h1:T6SfBcyhulVIY2G/ZtAtQm/QiJvsuhIos52V4dWYk88=
50
+
github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM=
51
+
github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0=
52
+
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
53
+
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
54
+
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
55
+
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
56
+
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
57
+
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
58
+
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
59
+
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
60
+
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
61
+
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
62
+
github.com/dgraph-io/badger/v4 v4.9.0 h1:tpqWb0NewSrCYqTvywbcXOhQdWcqephkVkbBmaaqHzc=
63
+
github.com/dgraph-io/badger/v4 v4.9.0/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0=
64
+
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
65
+
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
66
+
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
67
+
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
68
+
github.com/did-method-plc/go-didplc v0.0.0-20251125183445-342320c327e2 h1:ofb700k/ZtxvDoh+rLdO2BHbFa2KWKATlKoaEQ45GYI=
69
+
github.com/did-method-plc/go-didplc v0.0.0-20251125183445-342320c327e2/go.mod h1:KriMp3m/YfciVcyUoQdXr5klXHEasvPr4yQurhTSJr4=
70
+
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
71
+
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
72
+
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
73
+
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
74
+
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
75
+
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
76
+
github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
77
+
github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
78
+
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
79
+
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
80
+
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
81
+
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
82
+
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
83
+
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
84
+
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
85
+
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
86
+
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
87
+
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
88
+
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
89
+
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
90
+
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
91
+
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
92
+
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
93
+
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
94
+
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
95
+
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
96
+
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
97
+
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
98
+
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
99
+
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
100
+
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
101
+
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
102
+
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
103
+
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
104
+
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
105
+
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
106
+
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
107
+
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
108
+
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
109
+
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
110
+
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
111
+
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
112
+
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
113
+
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
114
+
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
115
+
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
116
+
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
117
+
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
118
+
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
119
+
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
120
+
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
121
+
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
122
+
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
123
+
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
124
+
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
125
+
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
126
+
github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
127
+
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
128
+
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
129
+
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
130
+
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
131
+
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
132
+
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
133
+
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
134
+
github.com/gsgalloway/stacktrace v0.0.0-20200507040314-ca3802f754c7 h1:hEyBOiA4Zv1nIstYO3VU4p/t/m5ikHZWnyQOgteoc+E=
135
+
github.com/gsgalloway/stacktrace v0.0.0-20200507040314-ca3802f754c7/go.mod h1:AWPtRYnH+Coi03yskhmgZHPcZrv89PoxagXJr1TsFyc=
136
+
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
137
+
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
138
+
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
139
+
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
140
+
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
141
+
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
142
+
github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
143
+
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
144
+
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
145
+
github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0=
146
+
github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs=
147
+
github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs=
148
+
github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk=
149
+
github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U=
150
+
github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg=
151
+
github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
152
+
github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
153
+
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
154
+
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
155
+
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
156
+
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
157
+
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
158
+
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
159
+
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
160
+
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
161
+
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
162
+
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
163
+
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
164
+
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
165
+
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
166
+
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
167
+
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
168
+
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
169
+
github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ=
170
+
github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
171
+
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
172
+
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
173
+
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
174
+
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
175
+
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
176
+
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
177
+
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
178
+
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
179
+
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
180
+
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
181
+
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
182
+
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
183
+
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
184
+
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
185
+
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
186
+
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
187
+
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
188
+
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
189
+
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
190
+
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
191
+
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
192
+
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
193
+
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
194
+
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
195
+
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I=
196
+
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s=
197
+
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
198
+
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
199
+
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
200
+
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
201
+
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
202
+
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
203
+
github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
204
+
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
205
+
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
206
+
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
207
+
github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
208
+
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
209
+
github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
210
+
github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
211
+
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
212
+
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
213
+
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
214
+
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
215
+
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw=
216
+
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
217
+
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
218
+
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
219
+
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
220
+
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
221
+
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
222
+
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
223
+
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
224
+
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
225
+
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f h1:VXTQfuJj9vKR4TCkEuWIckKvdHFeJH/huIFJ9/cXOB0=
226
+
github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
227
+
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
228
+
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
229
+
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
230
+
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
231
+
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
232
+
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
233
+
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
234
+
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
235
+
github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
236
+
github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
237
+
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
238
+
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
239
+
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
240
+
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
241
+
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
242
+
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
243
+
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
244
+
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
245
+
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
246
+
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
247
+
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
248
+
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
249
+
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
250
+
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
251
+
github.com/samber/mo v1.16.0 h1:qpEPCI63ou6wXlsNDMLE0IIN8A+devbGX/K1xdgr4b4=
252
+
github.com/samber/mo v1.16.0/go.mod h1:DlgzJ4SYhOh41nP1L9kh9rDNERuf8IqWSAs+gj2Vxag=
253
+
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
254
+
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
255
+
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
256
+
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
257
+
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
258
+
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
259
+
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
260
+
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
261
+
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
262
+
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
263
+
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
264
+
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
265
+
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
266
+
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
267
+
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
268
+
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
269
+
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
270
+
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
271
+
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
272
+
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
273
+
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
274
+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
275
+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
276
+
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
277
+
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
278
+
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
279
+
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
280
+
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
281
+
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
282
+
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
283
+
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
284
+
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
285
+
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
286
+
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
287
+
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
288
+
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
289
+
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
290
+
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ=
291
+
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM=
292
+
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
293
+
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
294
+
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
295
+
github.com/whyrusleeping/cbor-gen v0.2.1-0.20241030202151-b7a6831be65e h1:28X54ciEwwUxyHn9yrZfl5ojgF4CBNLWX7LR0rvBkf4=
296
+
github.com/whyrusleeping/cbor-gen v0.2.1-0.20241030202151-b7a6831be65e/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
297
+
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
298
+
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
299
+
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA=
300
+
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8=
301
+
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q=
302
+
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02/go.mod h1:JTnUj0mpYiAsuZLmKjTx/ex3AtMowcCgnE7YNyCEP0I=
303
+
go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo=
304
+
go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY=
305
+
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
306
+
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
307
+
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
308
+
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
309
+
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
310
+
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
311
+
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
312
+
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
313
+
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
314
+
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
315
+
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
316
+
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
317
+
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
318
+
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
319
+
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
320
+
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
321
+
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
322
+
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
323
+
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
324
+
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
325
+
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
326
+
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
327
+
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
328
+
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
329
+
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
330
+
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
331
+
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
332
+
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
333
+
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
334
+
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
335
+
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
336
+
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
337
+
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
338
+
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
339
+
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
340
+
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
341
+
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
342
+
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
343
+
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
344
+
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
345
+
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
346
+
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
347
+
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
348
+
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
349
+
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
350
+
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
351
+
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
352
+
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
353
+
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
354
+
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
355
+
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
356
+
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
357
+
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
358
+
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
359
+
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
360
+
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
361
+
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
362
+
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
363
+
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
364
+
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
365
+
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
366
+
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
367
+
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
368
+
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
369
+
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
370
+
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
371
+
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
372
+
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
373
+
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
374
+
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
375
+
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
376
+
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
377
+
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
378
+
google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o=
379
+
google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
380
+
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
381
+
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
382
+
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
383
+
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
384
+
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
385
+
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
386
+
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
387
+
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
388
+
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
389
+
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
390
+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
391
+
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
392
+
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
393
+
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
394
+
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
395
+
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
396
+
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
397
+
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
398
+
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
399
+
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
400
+
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
401
+
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
402
+
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
403
+
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
404
+
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
405
+
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+83
httpapi/broadcast.go
+83
httpapi/broadcast.go
···
···
1
+
package httpapi
2
+
3
+
import (
4
+
"context"
5
+
"time"
6
+
7
+
abci "github.com/cometbft/cometbft/abci/types"
8
+
mempl "github.com/cometbft/cometbft/mempool"
9
+
"github.com/cometbft/cometbft/node"
10
+
"github.com/cometbft/cometbft/rpc/core"
11
+
ctypes "github.com/cometbft/cometbft/rpc/core/types"
12
+
"github.com/cometbft/cometbft/types"
13
+
"github.com/palantir/stacktrace"
14
+
)
15
+
16
+
// broadcastTxCommit submits tx to the local node's mempool and blocks until
// the transaction is rejected by CheckTx, committed in a block, canceled via
// ctx, or the node's RPC broadcast-commit timeout elapses. It is analogous to
// CometBFT's broadcast_tx_commit RPC, using subscriber as the event-bus
// subscriber ID. On a CheckTx rejection the result carries the CheckTx
// response and a nil error; on timeout or subscription loss the partial
// result is returned together with a non-nil error.
func broadcastTxCommit(ctx context.Context, node *node.Node, subscriber string, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
	eventBus := node.EventBus()
	mempool := node.Mempool()
	// Subscribe to tx being committed in block.
	subCtx, cancel := context.WithTimeout(ctx, core.SubscribeTimeout)
	defer cancel()
	q := types.EventQueryTxFor(tx)
	txSub, err := eventBus.Subscribe(subCtx, subscriber, q)
	if err != nil {
		return nil, stacktrace.Propagate(err, "failed to subscribe to tx")
	}
	defer func() {
		// Best-effort cleanup: deliberately ignore the error, as there is
		// nothing actionable if unsubscribing fails (e.g. node shutting down).
		// Uses a fresh context because ctx may already be canceled here.
		err := eventBus.Unsubscribe(context.Background(), subscriber, q)
		_ = err
	}()

	// Broadcast tx and wait for CheckTx result
	checkTxResCh := make(chan *abci.ResponseCheckTx, 1)
	err = mempool.CheckTx(tx, func(res *abci.ResponseCheckTx) {
		// The select keeps the mempool callback from blocking forever when
		// the caller has already given up on this request.
		select {
		case <-ctx.Done():
		case checkTxResCh <- res:
		}
	}, mempl.TxInfo{})
	if err != nil {
		return nil, stacktrace.Propagate(err, "error on broadcastTxCommit")
	}
	select {
	case <-ctx.Done():
		return nil, stacktrace.Propagate(ctx.Err(), "broadcast confirmation not received")
	case checkTxRes := <-checkTxResCh:
		// CheckTx rejected the tx: report the rejection immediately without
		// waiting for block inclusion. Callers must inspect CheckTx.Code.
		if checkTxRes.Code != abci.CodeTypeOK {
			return &ctypes.ResultBroadcastTxCommit{
				CheckTx:  *checkTxRes,
				TxResult: abci.ExecTxResult{},
				Hash:     tx.Hash(),
			}, nil
		}

		// Wait for the tx to be included in a block or timeout.
		select {
		case msg := <-txSub.Out(): // The tx was included in a block.
			txResultEvent := msg.Data().(types.EventDataTx)
			return &ctypes.ResultBroadcastTxCommit{
				CheckTx:  *checkTxRes,
				TxResult: txResultEvent.Result,
				Hash:     tx.Hash(),
				Height:   txResultEvent.Height,
			}, nil
		case <-txSub.Canceled():
			// The subscription was torn down before the tx was observed
			// (typically because the node is exiting).
			err := txSub.Err()
			if err == nil {
				err = stacktrace.NewError("CometBFT exited")
			}
			return &ctypes.ResultBroadcastTxCommit{
				CheckTx:  *checkTxRes,
				TxResult: abci.ExecTxResult{},
				Hash:     tx.Hash(),
			}, stacktrace.Propagate(err, "txSub was canceled")
		case <-time.After(node.Config().RPC.TimeoutBroadcastTxCommit):
			// Gave up waiting for inclusion; the tx may still be committed
			// later, so this is a timeout error, not a rejection.
			return &ctypes.ResultBroadcastTxCommit{
				CheckTx:  *checkTxRes,
				TxResult: abci.ExecTxResult{},
				Hash:     tx.Hash(),
			}, stacktrace.NewError("timed out waiting for tx to be included in a block")
		}
	}
}
+242
httpapi/server.go
+242
httpapi/server.go
···
···
1
+
package httpapi
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"errors"
7
+
"fmt"
8
+
"net/http"
9
+
"time"
10
+
11
+
"github.com/bluesky-social/indigo/atproto/syntax"
12
+
"github.com/cometbft/cometbft/node"
13
+
"github.com/did-method-plc/go-didplc"
14
+
"github.com/google/uuid"
15
+
cbornode "github.com/ipfs/go-ipld-cbor"
16
+
"github.com/rs/cors"
17
+
18
+
"tangled.org/gbl08ma/didplcbft/abciapp"
19
+
"tangled.org/gbl08ma/didplcbft/plc"
20
+
)
21
+
22
+
// Server represents the HTTP server for the PLC directory.
// It serves directory queries from a read-only PLC view and, when a CometBFT
// node is attached, accepts new operations for broadcast.
type Server struct {
	// plc is the read-only view of the PLC directory used to answer queries.
	plc plc.ReadPLC
	// router dispatches requests to the handlers registered in setupRoutes.
	router *http.ServeMux
	// node is the CometBFT node used to submit operations; when nil, POSTed
	// operations are only validated, not broadcast.
	node *node.Node
	// handlerTimeout bounds how long a single request handler may run;
	// enforced via http.TimeoutHandler in Serve.
	handlerTimeout time.Duration
}
29
+
30
+
// NewServer creates a new instance of the Server.
31
+
func NewServer(plc plc.ReadPLC, node *node.Node, handlerTimeout time.Duration) *Server {
32
+
s := &Server{
33
+
plc: plc,
34
+
router: http.NewServeMux(),
35
+
node: node,
36
+
handlerTimeout: handlerTimeout,
37
+
}
38
+
s.setupRoutes()
39
+
return s
40
+
}
41
+
42
+
// setupRoutes configures the routes for the server.
43
+
func (s *Server) setupRoutes() {
44
+
s.router.HandleFunc("GET /{did}", s.makeDIDHandler(s.handleResolveDID))
45
+
s.router.HandleFunc("POST /{did}", s.makeDIDHandler(s.handleCreatePLC))
46
+
s.router.HandleFunc("GET /{did}/log", s.makeDIDHandler(s.handleGetPLCLog))
47
+
s.router.HandleFunc("GET /{did}/log/audit", s.makeDIDHandler(s.handleGetPLCAuditLog))
48
+
s.router.HandleFunc("GET /{did}/log/last", s.makeDIDHandler(s.handleGetLastOp))
49
+
s.router.HandleFunc("GET /{did}/data", s.makeDIDHandler(s.handleGetPLCData))
50
+
s.router.HandleFunc("GET /export", s.handleExport)
51
+
}
52
+
53
+
// makeDIDHandler creates a wrapper handler that extracts DID from URL path
54
+
func (s *Server) makeDIDHandler(handler func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
55
+
return func(w http.ResponseWriter, r *http.Request) {
56
+
handler(w, r, r.PathValue("did"))
57
+
}
58
+
}
59
+
60
+
// Serve starts the HTTP server on the specified address.
61
+
func (s *Server) Serve(addr string) error {
62
+
handler := cors.Default().Handler(s.router)
63
+
64
+
timeoutMsg, _ := json.Marshal(map[string]string{"message": "Internal server timeout"})
65
+
66
+
handler = http.TimeoutHandler(handler, s.handlerTimeout, string(timeoutMsg))
67
+
return http.ListenAndServe(addr, handler)
68
+
}
69
+
70
+
// handleResolveDID handles the GET /{did} endpoint.
71
+
func (s *Server) handleResolveDID(w http.ResponseWriter, r *http.Request, did string) {
72
+
73
+
ctx := context.Background()
74
+
doc, err := s.plc.Resolve(ctx, plc.CommittedTreeVersion, did)
75
+
if handlePLCError(w, err, did) {
76
+
return
77
+
}
78
+
79
+
w.Header().Set("Content-Type", "application/did+ld+json")
80
+
json.NewEncoder(w).Encode(doc)
81
+
}
82
+
83
+
// handleCreatePLC handles the POST /{did} endpoint.
84
+
func (s *Server) handleCreatePLC(w http.ResponseWriter, r *http.Request, did string) {
85
+
86
+
var op didplc.OpEnum
87
+
if err := json.NewDecoder(r.Body).Decode(&op); err != nil {
88
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid operation")
89
+
return
90
+
}
91
+
92
+
if s.node == nil {
93
+
// Validate only
94
+
// Marshal the operation to JSON bytes for validation
95
+
opBytes, err := json.Marshal(op)
96
+
if err != nil {
97
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid operation")
98
+
return
99
+
}
100
+
101
+
if err := s.plc.ValidateOperation(r.Context(), plc.CommittedTreeVersion, time.Now(), did, opBytes); err != nil {
102
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid operation")
103
+
return
104
+
}
105
+
w.WriteHeader(http.StatusOK)
106
+
return
107
+
}
108
+
109
+
uuid, err := uuid.NewRandom()
110
+
if handlePLCError(w, err, "") {
111
+
return
112
+
}
113
+
114
+
tx := abciapp.Transaction[abciapp.CreatePlcOpArguments]{
115
+
Action: abciapp.TransactionActionCreatePlcOp,
116
+
Arguments: abciapp.CreatePlcOpArguments{
117
+
DID: did,
118
+
Operation: &op,
119
+
},
120
+
}
121
+
122
+
txBytes, err := cbornode.DumpObject(tx)
123
+
if handlePLCError(w, err, "") {
124
+
return
125
+
}
126
+
127
+
// broadcastTxCommit will wait for inclusion, up until the TimeoutBroadcastTxCommit configured for the node, or until the context deadline expires
128
+
// in practice we expect operations to be included in about one second
129
+
result, err := broadcastTxCommit(r.Context(), s.node, uuid.String(), txBytes)
130
+
// TODO more robust error handling
131
+
if handlePLCError(w, err, "") {
132
+
return
133
+
}
134
+
135
+
if result.CheckTx.Code != 0 {
136
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid operation")
137
+
return
138
+
}
139
+
140
+
if result.TxResult.Code != 0 {
141
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid operation")
142
+
return
143
+
}
144
+
145
+
w.WriteHeader(http.StatusOK)
146
+
147
+
}
148
+
149
+
// handleGetPLCLog handles the GET /{did}/log endpoint.
150
+
func (s *Server) handleGetPLCLog(w http.ResponseWriter, r *http.Request, did string) {
151
+
ops, err := s.plc.OperationLog(r.Context(), plc.CommittedTreeVersion, did)
152
+
if handlePLCError(w, err, did) {
153
+
return
154
+
}
155
+
156
+
w.Header().Set("Content-Type", "application/json")
157
+
json.NewEncoder(w).Encode(ops)
158
+
}
159
+
160
+
// handleGetPLCAuditLog handles the GET /{did}/log/audit endpoint.
161
+
func (s *Server) handleGetPLCAuditLog(w http.ResponseWriter, r *http.Request, did string) {
162
+
entries, err := s.plc.AuditLog(r.Context(), plc.CommittedTreeVersion, did)
163
+
if handlePLCError(w, err, did) {
164
+
return
165
+
}
166
+
167
+
w.Header().Set("Content-Type", "application/json")
168
+
json.NewEncoder(w).Encode(entries)
169
+
}
170
+
171
+
// handleGetLastOp handles the GET /{did}/log/last endpoint.
172
+
func (s *Server) handleGetLastOp(w http.ResponseWriter, r *http.Request, did string) {
173
+
op, err := s.plc.LastOperation(r.Context(), plc.CommittedTreeVersion, did)
174
+
if handlePLCError(w, err, did) {
175
+
return
176
+
}
177
+
178
+
w.Header().Set("Content-Type", "application/json")
179
+
json.NewEncoder(w).Encode(op)
180
+
}
181
+
182
+
// handleGetPLCData handles the GET /{did}/data endpoint.
183
+
func (s *Server) handleGetPLCData(w http.ResponseWriter, r *http.Request, did string) {
184
+
data, err := s.plc.Data(r.Context(), plc.CommittedTreeVersion, did)
185
+
if handlePLCError(w, err, did) {
186
+
return
187
+
}
188
+
189
+
w.Header().Set("Content-Type", "application/json")
190
+
json.NewEncoder(w).Encode(data)
191
+
}
192
+
193
+
// handleExport handles the GET /export endpoint.
194
+
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
195
+
query := r.URL.Query()
196
+
count := 1000 // The OpenAPI spec says the default is 10, but in practice the official server defaults to 1000
197
+
if c := query.Get("count"); c != "" {
198
+
if _, err := fmt.Sscanf(c, "%d", &count); err != nil {
199
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid count parameter")
200
+
return
201
+
}
202
+
}
203
+
204
+
afterStr := query.Get("after")
205
+
after, err := syntax.ParseDatetime(afterStr)
206
+
if err != nil && afterStr != "" {
207
+
sendErrorResponse(w, http.StatusBadRequest, "Invalid after parameter")
208
+
return
209
+
}
210
+
211
+
entries, err := s.plc.Export(r.Context(), plc.CommittedTreeVersion, after.Time(), count)
212
+
if handlePLCError(w, err, "") {
213
+
return
214
+
}
215
+
216
+
w.Header().Set("Content-Type", "application/jsonlines")
217
+
for _, entry := range entries {
218
+
json.NewEncoder(w).Encode(entry)
219
+
}
220
+
}
221
+
222
+
// handlePLCError handles errors from the PLC interface and sends the appropriate HTTP response.
223
+
func handlePLCError(w http.ResponseWriter, err error, did string) bool {
224
+
if err == nil {
225
+
return false
226
+
}
227
+
switch {
228
+
case errors.Is(err, plc.ErrDIDNotFound):
229
+
sendErrorResponse(w, http.StatusNotFound, fmt.Sprintf("DID not registered: %s", did))
230
+
case errors.Is(err, plc.ErrDIDGone):
231
+
sendErrorResponse(w, http.StatusGone, fmt.Sprintf("DID not available: %s", did))
232
+
default:
233
+
sendErrorResponse(w, http.StatusInternalServerError, "Internal server error")
234
+
}
235
+
return true
236
+
}
237
+
238
+
// sendErrorResponse sends an error response with the specified status code and message.
239
+
func sendErrorResponse(w http.ResponseWriter, statusCode int, message string) {
240
+
w.WriteHeader(statusCode)
241
+
json.NewEncoder(w).Encode(map[string]string{"message": message})
242
+
}
+298
httpapi/server_test.go
+298
httpapi/server_test.go
···
···
1
+
package httpapi
2
+
3
+
import (
4
+
"bytes"
5
+
"context"
6
+
"encoding/json"
7
+
"fmt"
8
+
"net/http"
9
+
"net/http/httptest"
10
+
"testing"
11
+
"time"
12
+
13
+
"github.com/did-method-plc/go-didplc"
14
+
"github.com/stretchr/testify/assert"
15
+
"tangled.org/gbl08ma/didplcbft/plc"
16
+
)
17
+
18
+
// MockReadPLC is a mock implementation of the ReadPLC interface for testing.
type MockReadPLC struct {
	shouldReturnError bool   // when true, every method returns an error
	errorType         string // "notfound", "gone", or anything else for a generic internal error
}
23
+
24
+
func (m *MockReadPLC) ValidateOperation(ctx context.Context, atHeight plc.TreeVersion, at time.Time, did string, opBytes []byte) error {
25
+
if m.shouldReturnError {
26
+
switch m.errorType {
27
+
case "notfound":
28
+
return plc.ErrDIDNotFound
29
+
case "gone":
30
+
return plc.ErrDIDGone
31
+
}
32
+
return fmt.Errorf("internal error")
33
+
}
34
+
return nil
35
+
}
36
+
37
+
func (m *MockReadPLC) Resolve(ctx context.Context, atHeight plc.TreeVersion, did string) (didplc.Doc, error) {
38
+
if m.shouldReturnError {
39
+
switch m.errorType {
40
+
case "notfound":
41
+
return didplc.Doc{}, plc.ErrDIDNotFound
42
+
case "gone":
43
+
return didplc.Doc{}, plc.ErrDIDGone
44
+
}
45
+
return didplc.Doc{}, fmt.Errorf("internal error")
46
+
}
47
+
return didplc.Doc{
48
+
ID: "did:plc:test",
49
+
}, nil
50
+
}
51
+
52
+
func (m *MockReadPLC) OperationLog(ctx context.Context, atHeight plc.TreeVersion, did string) ([]didplc.OpEnum, error) {
53
+
if m.shouldReturnError {
54
+
if m.errorType == "notfound" {
55
+
return []didplc.OpEnum{}, plc.ErrDIDNotFound
56
+
}
57
+
return []didplc.OpEnum{}, fmt.Errorf("internal error")
58
+
}
59
+
return []didplc.OpEnum{}, nil
60
+
}
61
+
62
+
func (m *MockReadPLC) AuditLog(ctx context.Context, atHeight plc.TreeVersion, did string) ([]didplc.LogEntry, error) {
63
+
if m.shouldReturnError {
64
+
if m.errorType == "notfound" {
65
+
return []didplc.LogEntry{}, plc.ErrDIDNotFound
66
+
}
67
+
return []didplc.LogEntry{}, fmt.Errorf("internal error")
68
+
}
69
+
return []didplc.LogEntry{}, nil
70
+
}
71
+
72
+
func (m *MockReadPLC) LastOperation(ctx context.Context, atHeight plc.TreeVersion, did string) (didplc.OpEnum, error) {
73
+
if m.shouldReturnError {
74
+
if m.errorType == "notfound" {
75
+
return didplc.OpEnum{}, plc.ErrDIDNotFound
76
+
}
77
+
return didplc.OpEnum{}, fmt.Errorf("internal error")
78
+
}
79
+
return didplc.OpEnum{}, nil
80
+
}
81
+
82
+
func (m *MockReadPLC) Data(ctx context.Context, atHeight plc.TreeVersion, did string) (didplc.RegularOp, error) {
83
+
if m.shouldReturnError {
84
+
switch m.errorType {
85
+
case "notfound":
86
+
return didplc.RegularOp{}, plc.ErrDIDNotFound
87
+
case "gone":
88
+
return didplc.RegularOp{}, plc.ErrDIDGone
89
+
}
90
+
return didplc.RegularOp{}, fmt.Errorf("internal error")
91
+
}
92
+
return didplc.RegularOp{}, nil
93
+
}
94
+
95
+
func (m *MockReadPLC) Export(ctx context.Context, atHeight plc.TreeVersion, after time.Time, count int) ([]didplc.LogEntry, error) {
96
+
if m.shouldReturnError {
97
+
return []didplc.LogEntry{}, fmt.Errorf("internal error")
98
+
}
99
+
return []didplc.LogEntry{}, nil
100
+
}
101
+
102
+
// TestServer exercises each HTTP endpoint through the router directly
// (no listener), using MockReadPLC to simulate success, not-found, gone,
// and internal-error conditions. All servers are built with a nil node,
// so POST /{did} goes through the validate-only path.
func TestServer(t *testing.T) {
	mockPLC := &MockReadPLC{}

	t.Run("Test Resolve DID", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
		assert.Contains(t, rr.Body.String(), "did:plc:test")
	})

	t.Run("Test Resolve DID Not Found", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusNotFound, rr.Code)
		assert.Contains(t, rr.Body.String(), "DID not registered: did:plc:test")
	})

	t.Run("Test Resolve DID Gone", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "gone"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusGone, rr.Code)
		assert.Contains(t, rr.Body.String(), "DID not available: did:plc:test")
	})

	t.Run("Test Resolve DID Internal Error", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "internal"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusInternalServerError, rr.Code)
		assert.Contains(t, rr.Body.String(), "Internal server error")
	})

	t.Run("Test Create PLC Operation", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		// Minimal syntactically-valid operation body; the mock's
		// ValidateOperation accepts anything, so OK is expected.
		op := map[string]interface{}{
			"type":                "plc_operation",
			"rotationKeys":        []string{"did:key:test"},
			"verificationMethods": map[string]string{"atproto": "did:key:test"},
			"alsoKnownAs":         []string{"at://test"},
			"services":            map[string]interface{}{"atproto_pds": map[string]string{"type": "AtprotoPersonalDataServer", "endpoint": "https://test.com"}},
			"prev":                nil,
			"sig":                 "test",
		}
		opBytes, _ := json.Marshal(op)

		req, err := http.NewRequest("POST", "/did:plc:test", bytes.NewBuffer(opBytes))
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Get PLC Log", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/log", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Get PLC Log Not Found", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/log", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusNotFound, rr.Code)
		assert.Contains(t, rr.Body.String(), "DID not registered: did:plc:test")
	})

	t.Run("Test Get PLC Audit Log", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/log/audit", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Get Last Operation", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/log/last", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Get Last Operation Internal Error", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "internal"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/log/last", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusInternalServerError, rr.Code)
		assert.Contains(t, rr.Body.String(), "Internal server error")
	})

	t.Run("Test Get PLC Data", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/data", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Get PLC Data Not Found", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "notfound"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/did:plc:test/data", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusNotFound, rr.Code)
		assert.Contains(t, rr.Body.String(), "DID not registered: did:plc:test")
	})

	t.Run("Test Export", func(t *testing.T) {
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/export?count=10", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusOK, rr.Code)
	})

	t.Run("Test Export Internal Error", func(t *testing.T) {
		mockPLC := &MockReadPLC{shouldReturnError: true, errorType: "internal"}
		server := NewServer(mockPLC, nil, 15*time.Second)

		req, err := http.NewRequest("GET", "/export?count=10", nil)
		assert.NoError(t, err)

		rr := httptest.NewRecorder()
		server.router.ServeHTTP(rr, req)

		assert.Equal(t, http.StatusInternalServerError, rr.Code)
		assert.Contains(t, rr.Body.String(), "Internal server error")
	})
}
+265
importer/importer_test.go
+265
importer/importer_test.go
···
···
1
+
package importer_test
2
+
3
+
import (
4
+
"bufio"
5
+
"context"
6
+
"encoding/json"
7
+
"fmt"
8
+
"iter"
9
+
"net/http"
10
+
"strings"
11
+
"sync"
12
+
"sync/atomic"
13
+
"testing"
14
+
"time"
15
+
16
+
"github.com/cometbft/cometbft/crypto/tmhash"
17
+
"github.com/cometbft/cometbft/libs/bytes"
18
+
rpchttp "github.com/cometbft/cometbft/rpc/client/http"
19
+
"github.com/did-method-plc/go-didplc"
20
+
cbornode "github.com/ipfs/go-ipld-cbor"
21
+
"github.com/puzpuzpuz/xsync/v4"
22
+
"github.com/samber/lo"
23
+
"github.com/stretchr/testify/require"
24
+
"tangled.org/gbl08ma/didplcbft/abciapp"
25
+
)
26
+
27
+
// TestImportV2 is an integration/import driver rather than a unit test: it
// streams the upstream plc.directory export into a locally running CometBFT
// node (expected at localhost:26100), skipping transactions that are already
// on chain and retrying operations whose predecessors have not landed yet.
//
// Pipeline shape:
//   - producer goroutine: pages through the export and feeds entryCh,
//     throttling when too many operations are awaiting retry;
//   - bookkeeping goroutine: tracks per-DID queues of failed operations and
//     periodically re-enqueues the head of each queue onto reprocessCh;
//   - main loop: fans in entryCh+reprocessCh and broadcasts each operation.
func TestImportV2(t *testing.T) {
	c, err := rpchttp.New("http://localhost:26100", "/websocket")
	require.NoError(t, err)

	ctx := t.Context()

	entryCh := make(chan *didplc.LogEntry)
	reprocessCh := make(chan *didplc.LogEntry, 1000)
	awaitCh := make(chan *didplc.LogEntry, 100)
	doneCh := make(chan *didplc.LogEntry, 100)
	exitCh := make(chan struct{})

	// CIDs of operations that failed broadcast and are awaiting retry.
	totalAwaiting := xsync.NewMap[string, struct{}]()

	var wg sync.WaitGroup
	noMoreNewEntries := atomic.Bool{}
	wg.Go(func() {
		for entry := range iterateOverExport(ctx, "2023-10-10T00:00:00.000Z") {
			// Backpressure: pause the producer while the retry backlog is big.
			if totalAwaiting.Size() > 5000 {
				for totalAwaiting.Size() > 1000 {
					time.Sleep(1 * time.Second)
				}
			}
			entryCh <- &entry
		}
		noMoreNewEntries.Store(true)
		close(entryCh)
	})

	wg.Go(func() {
		// awaitingByDID keeps failed operations ordered per DID, since an
		// operation can only land after its predecessor for the same DID.
		awaitingByDID := make(map[string][]*didplc.LogEntry)
		// inReprocessCh tracks CIDs currently queued for retry so we don't
		// enqueue the same operation twice.
		inReprocessCh := make(map[string]struct{})
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()

		// checkReprocess re-enqueues the head of each DID's queue (non-blocking)
		// and closes reprocessCh once everything has drained.
		// NOTE(review): if checkReprocess runs again after the drained state is
		// reached, close(reprocessCh) would be called twice and panic — it
		// looks like the exitCh return path prevents that in practice, but
		// confirm.
		checkReprocess := func() {
			toDelete := make(map[string]struct{})
			for did, q := range awaitingByDID {
				if len(q) > 0 {
					if _, ok := inReprocessCh[q[0].CID]; !ok {
						select {
						case reprocessCh <- q[0]:
							t.Logf("DID %s has %d operations awaiting", did, len(q))
							inReprocessCh[q[0].CID] = struct{}{}
						default:
						}
					}
				} else {
					toDelete[did] = struct{}{}
				}
			}
			for v := range toDelete {
				delete(awaitingByDID, v)
			}
			if len(awaitingByDID) == 0 && noMoreNewEntries.Load() {
				close(reprocessCh)
			}
		}
		for {
			select {
			case <-exitCh:
				return
			case entry := <-awaitCh:
				totalAwaiting.Store(entry.CID, struct{}{})
				delete(inReprocessCh, entry.CID)

				// Append only if not already queued for this DID.
				if !lo.ContainsBy(awaitingByDID[entry.DID], func(e *didplc.LogEntry) bool {
					return e.CID == entry.CID
				}) {
					awaitingByDID[entry.DID] = append(awaitingByDID[entry.DID], entry)
				}
			case entry := <-doneCh:
				totalAwaiting.Delete(entry.CID)
				delete(inReprocessCh, entry.CID)
				if q, ok := awaitingByDID[entry.DID]; ok {
					awaitingByDID[entry.DID] = lo.Filter(q, func(e *didplc.LogEntry, _ int) bool {
						return e.CID != entry.CID
					})
					if len(awaitingByDID[entry.DID]) == 0 {
						delete(awaitingByDID, entry.DID)
					}
					checkReprocess()
				}
			case <-ticker.C:
				checkReprocess()
			}
		}
	})

	totalSkipped := 0
	totalSubmitted := 0
	for entry := range lo.FanIn(100, entryCh, reprocessCh) {
		tx := abciapp.Transaction[abciapp.CreatePlcOpArguments]{
			Action: abciapp.TransactionActionCreatePlcOp,
			Arguments: abciapp.CreatePlcOpArguments{
				DID:       entry.DID,
				Operation: &entry.Operation,
			},
		}

		out, err := cbornode.DumpObject(tx)
		require.NoError(t, err)

		// Skip transactions that are already included in a block.
		query := fmt.Sprintf("tx.hash='%s'", bytes.HexBytes(tmhash.Sum(out)).String())
		result, err := c.TxSearch(ctx, query, false, nil, nil, "")
		require.NoError(t, err)
		if result.TotalCount > 0 {
			totalSkipped++
			if totalSkipped%2000 == 0 {
				t.Logf("Skipped %d transactions, createdAt %s", totalSkipped, entry.CreatedAt)
			}
			continue
		}

		//st := time.Now()
		broadcastResult, err := c.BroadcastTxSync(ctx, out)
		/*if d := time.Since(st); d > 50*time.Millisecond {
			t.Logf("broadcast took %v %s %s %v %v", d, entry.DID, entry.CID, err, broadcastResult)
		}*/
		if err != nil && strings.Contains(err.Error(), "tx already exists in cache") {
			totalSkipped++
			doneCh <- entry
			t.Logf("Skipped %d transactions, createdAt %s", totalSkipped, entry.CreatedAt)
			continue
		}

		if err != nil && strings.Contains(err.Error(), "mempool is full") {
			t.Logf("Mempool is full, waiting...")
			time.Sleep(20 * time.Second)
			awaitCh <- entry
			continue
		}
		require.NoError(t, err)

		if broadcastResult.Code != 0 {
			// CheckTx rejected it; it may still already be on chain under the
			// node-computed hash — skip in that case, otherwise park for retry.
			query := fmt.Sprintf("tx.hash='%s'", broadcastResult.Hash)
			result, err := c.TxSearch(ctx, query, false, nil, nil, "")
			require.NoError(t, err)
			if result.TotalCount > 0 {
				totalSkipped++
				t.Logf("Skipped %d transactions, createdAt %s", totalSkipped, entry.CreatedAt)
				continue
			}

			awaitCh <- entry

			continue
		}

		doneCh <- entry

		totalSubmitted++

		t.Logf("Submitted %d transactions, createdAt %s, %d awaiting", totalSubmitted, entry.CreatedAt, totalAwaiting.Size())
	}

	exitCh <- struct{}{}
	wg.Wait()
}
186
+
187
+
func iterateOverExport(ctx context.Context, startAt string) iter.Seq[didplc.LogEntry] {
188
+
return func(yield func(didplc.LogEntry) bool) {
189
+
const batchSize = 1000
190
+
baseURL := didplc.DefaultDirectoryURL + "/export"
191
+
client := &http.Client{Timeout: 30 * time.Second}
192
+
193
+
// The /export seems to sometimes return outright duplicated entries :weary:
194
+
seenCIDs := map[string]struct{}{}
195
+
196
+
after := startAt
197
+
for {
198
+
req, err := http.NewRequestWithContext(ctx, "GET", baseURL, nil)
199
+
if err != nil {
200
+
return // Failed to create request
201
+
}
202
+
203
+
req.Header.Set("User-Agent", "go-did-method-plc")
204
+
205
+
q := req.URL.Query()
206
+
q.Add("count", fmt.Sprint(batchSize))
207
+
if after != "" {
208
+
q.Add("after", after)
209
+
}
210
+
req.URL.RawQuery = q.Encode()
211
+
212
+
resp, err := client.Do(req)
213
+
if err != nil {
214
+
return // Failed to make request
215
+
}
216
+
defer resp.Body.Close()
217
+
218
+
if resp.StatusCode != http.StatusOK {
219
+
return // Non-200 status code
220
+
}
221
+
222
+
entries := make([]didplc.LogEntry, 0, batchSize)
223
+
224
+
// Read response body
225
+
s := bufio.NewScanner(resp.Body)
226
+
receivedEntries := 0
227
+
for s.Scan() {
228
+
var entry didplc.LogEntry
229
+
if err := json.Unmarshal(s.Bytes(), &entry); err != nil {
230
+
return // Failed to decode JSON
231
+
}
232
+
if _, present := seenCIDs[entry.CID]; !present {
233
+
entries = append(entries, entry)
234
+
seenCIDs[entry.CID] = struct{}{}
235
+
}
236
+
receivedEntries++
237
+
}
238
+
if s.Err() != nil {
239
+
return // handle scan error
240
+
}
241
+
242
+
if len(entries) == 0 {
243
+
return
244
+
}
245
+
246
+
// Process each entry
247
+
var lastCreatedAt string
248
+
for _, entry := range entries {
249
+
lastCreatedAt = entry.CreatedAt
250
+
if !yield(entry) {
251
+
return
252
+
}
253
+
}
254
+
255
+
if receivedEntries < batchSize {
256
+
return
257
+
}
258
+
259
+
after = lastCreatedAt
260
+
261
+
// Small delay to be respectful to the API
262
+
time.Sleep(100 * time.Millisecond)
263
+
}
264
+
}
265
+
}
+146
main.go
+146
main.go
···
···
1
+
package main
2
+
3
+
import (
4
+
"flag"
5
+
"fmt"
6
+
"log"
7
+
"os"
8
+
"os/signal"
9
+
"path/filepath"
10
+
"sync"
11
+
"syscall"
12
+
"time"
13
+
14
+
"github.com/cometbft/cometbft/p2p"
15
+
"github.com/cometbft/cometbft/privval"
16
+
"github.com/cometbft/cometbft/proxy"
17
+
"github.com/samber/lo"
18
+
"tangled.org/gbl08ma/didplcbft/abciapp"
19
+
20
+
bftconfig "github.com/cometbft/cometbft/config"
21
+
cmtflags "github.com/cometbft/cometbft/libs/cli/flags"
22
+
cmtlog "github.com/cometbft/cometbft/libs/log"
23
+
nm "github.com/cometbft/cometbft/node"
24
+
"github.com/dgraph-io/badger/v4"
25
+
"github.com/dgraph-io/badger/v4/options"
26
+
"github.com/spf13/viper"
27
+
)
28
+
29
+
// homeDir is the root data directory holding the CometBFT config, the badger
// database, and snapshots.
var homeDir string

func init() {
	// Register the flag here; the empty-string default is resolved to
	// ./didplcbft-data in main after flag.Parse.
	flag.StringVar(&homeDir, "data-dir", "", "Path to the CometBFT config directory (if empty, uses ./didplcbft-data)")
}
34
+
35
+
// main wires together the badger-backed ABCI application and a CometBFT node,
// then blocks until SIGINT/SIGTERM.
//
// NOTE(review): the log.Fatalf calls after resources are opened skip the
// deferred badgerDB.Close/cleanup/node.Stop — acceptable at startup, but worth
// being aware of.
func main() {
	flag.Parse()
	if homeDir == "" {
		homeDir = filepath.Join(lo.Must(os.Getwd()), "didplcbft-data")
	}

	// Load CometBFT configuration from <homeDir>/config/config.toml.
	config := bftconfig.DefaultConfig()
	config.SetRoot(homeDir)
	viper.SetConfigFile(fmt.Sprintf("%s/%s", homeDir, "config/config.toml"))

	if err := viper.ReadInConfig(); err != nil {
		log.Fatalf("Reading config: %v", err)
	}
	if err := viper.Unmarshal(config); err != nil {
		log.Fatalf("Decoding config: %v", err)
	}
	if err := config.ValidateBasic(); err != nil {
		log.Fatalf("Invalid configuration data: %v", err)
	}
	badgerDBPath := filepath.Join(homeDir, "badger")
	badgerDB, err := badger.Open(badger.
		DefaultOptions(badgerDBPath).
		WithBlockSize(8 * 1024).
		WithMemTableSize(256 << 20).
		WithCompression(options.ZSTD))
	if err != nil {
		log.Fatalf("Opening badger database: %v", err)
	}

	// Run value-log GC until badger reports nothing left to collect
	// (RunValueLogGC returns an error when no rewrite happened).
	for err == nil {
		err = badgerDB.RunValueLogGC(0.5)
	}

	var wg sync.WaitGroup
	closeGoroutinesCh := make(chan struct{})
	wg.Go(func() {
		// Periodic badger value-log GC for the lifetime of the process.
		ticker := time.NewTicker(5 * time.Minute)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				var err error
				for err == nil {
					err = badgerDB.RunValueLogGC(0.5)
				}
			case <-closeGoroutinesCh:
				return
			}
		}
	})

	defer func() {
		if err := badgerDB.Close(); err != nil {
			log.Printf("Closing badger database: %v", err)
		}
	}()

	app, plc, cleanup, err := abciapp.NewDIDPLCApplication(badgerDB, filepath.Join(homeDir, "snapshots"))
	if err != nil {
		log.Fatalf("failed to create DIDPLC application: %v", err)
	}
	defer cleanup()

	_ = plc // TODO

	pv := privval.LoadFilePV(
		config.PrivValidatorKeyFile(),
		config.PrivValidatorStateFile(),
	)

	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
	if err != nil {
		log.Fatalf("failed to load node's key: %v", err)
	}

	logger := cmtlog.NewTMLogger(cmtlog.NewSyncWriter(os.Stdout))
	logger, err = cmtflags.ParseLogLevel(config.LogLevel, logger, bftconfig.DefaultLogLevel)

	if err != nil {
		log.Fatalf("failed to parse log level: %v", err)
	}

	// The ABCI app runs in-process with the node via the local client creator.
	node, err := nm.NewNode(
		config,
		pv,
		nodeKey,
		proxy.NewLocalClientCreator(app),
		nm.DefaultGenesisDocProviderFunc(config),
		bftconfig.DefaultDBProvider,
		nm.DefaultMetricsProvider(config.Instrumentation),
		logger,
	)

	if err != nil {
		log.Fatalf("Creating node: %v", err)
	}

	err = node.Start()
	if err != nil {
		log.Fatalf("Starting node: %v", err)
	}
	defer func() {
		node.Stop()
		node.Wait()
	}()

	// Block until interrupted, then tear everything down via the defers.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
	close(closeGoroutinesCh)
	wg.Wait()
}
+238
plc/impl.go
+238
plc/impl.go
···
···
1
+
package plc
2
+
3
+
import (
4
+
"context"
5
+
"iter"
6
+
"sync"
7
+
"time"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
10
+
"github.com/cosmos/iavl"
11
+
"github.com/did-method-plc/go-didplc"
12
+
"github.com/ipfs/go-cid"
13
+
"github.com/palantir/stacktrace"
14
+
"github.com/samber/lo"
15
+
"tangled.org/gbl08ma/didplcbft/store"
16
+
)
17
+
18
+
// TreeProvider supplies the IAVL trees backing PLC state.
type TreeProvider interface {
	// MutableTree returns the tree used to apply new operations.
	MutableTree() (*iavl.MutableTree, error)
	// ImmutableTree returns a read-only view of the tree at the given version.
	ImmutableTree(version TreeVersion) (store.PossiblyMutableTree, error)
}
22
+
23
+
// plcImpl implements the PLC interface on top of a TreeProvider-backed store.
type plcImpl struct {
	mu           sync.Mutex // probably redundant, but let's keep for now
	treeProvider TreeProvider
	validator    OperationValidator // validates operations against tree state (set in NewPLC)
}

// Compile-time assertion that plcImpl satisfies the full PLC interface.
var _ PLC = (*plcImpl)(nil)
30
+
31
+
func NewPLC(treeProvider TreeProvider) *plcImpl {
32
+
p := &plcImpl{
33
+
treeProvider: treeProvider,
34
+
}
35
+
36
+
p.validator = NewV0OperationValidator(&inMemoryAuditLogFetcher{
37
+
plc: p,
38
+
})
39
+
return p
40
+
}
41
+
42
+
func (plc *plcImpl) ValidateOperation(ctx context.Context, atHeight TreeVersion, at time.Time, did string, opBytes []byte) error {
43
+
plc.mu.Lock()
44
+
defer plc.mu.Unlock()
45
+
46
+
timestamp := syntax.Datetime(at.Format(syntax.AtprotoDatetimeLayout))
47
+
48
+
// TODO set true to false only while importing old ops
49
+
_, err := plc.validator.Validate(atHeight, timestamp, did, opBytes, true)
50
+
if err != nil {
51
+
return stacktrace.Propagate(err, "operation failed validation")
52
+
}
53
+
54
+
return nil
55
+
}
56
+
57
+
// ExecuteOperation validates opBytes against the working (uncommitted) tree
// version and, on success, stores the resulting log entry in the mutable
// tree. It returns the CID of the newly stored operation.
func (plc *plcImpl) ExecuteOperation(ctx context.Context, t time.Time, did string, opBytes []byte) (cid.Cid, error) {
	plc.mu.Lock()
	defer plc.mu.Unlock()

	timestamp := syntax.Datetime(t.Format(syntax.AtprotoDatetimeLayout))

	// TODO set true to false only while importing old ops
	effects, err := plc.validator.Validate(WorkingTreeVersion, timestamp, did, opBytes, true)
	if err != nil {
		return cid.Undef, stacktrace.Propagate(err, "operation failed validation")
	}

	tree, err := plc.treeProvider.MutableTree()
	if err != nil {
		return cid.Undef, stacktrace.Propagate(err, "failed to obtain mutable tree")
	}

	// Persist the new entry along with the index bookkeeping produced by
	// validation (including where nullified entries start, if any).
	err = store.Tree.StoreOperation(tree, effects.NewLogEntry, effects.NewOperationIndex, effects.NullifiedEntriesStartingIndex)
	if err != nil {
		return cid.Undef, stacktrace.Propagate(err, "failed to commit operation")
	}

	return effects.NewOperationCID, nil
}
81
+
82
+
func (plc *plcImpl) Resolve(ctx context.Context, atHeight TreeVersion, did string) (didplc.Doc, error) {
83
+
plc.mu.Lock()
84
+
defer plc.mu.Unlock()
85
+
86
+
tree, err := plc.treeProvider.ImmutableTree(atHeight)
87
+
if err != nil {
88
+
return didplc.Doc{}, stacktrace.Propagate(err, "failed to obtain immutable tree")
89
+
}
90
+
91
+
l, _, err := store.Tree.AuditLog(tree, did, false)
92
+
if err != nil {
93
+
return didplc.Doc{}, stacktrace.Propagate(err, "")
94
+
}
95
+
96
+
if len(l) == 0 {
97
+
return didplc.Doc{}, stacktrace.Propagate(ErrDIDNotFound, "")
98
+
}
99
+
100
+
opEnum := l[len(l)-1].Operation
101
+
if opEnum.Tombstone != nil {
102
+
return didplc.Doc{}, stacktrace.Propagate(ErrDIDGone, "")
103
+
}
104
+
return opEnum.AsOperation().Doc(did)
105
+
}
106
+
107
+
// OperationLog returns the DID's operation log (just the inner operations).
func (plc *plcImpl) OperationLog(ctx context.Context, atHeight TreeVersion, did string) ([]didplc.OpEnum, error) {
	// GetPlcOpLog - /:did/log - same data as audit log but excludes nullified. just the inner operations
	// if missing -> returns ErrDIDNotFound
	// if tombstone -> returns log as normal
	// NOTE(review): the same `false` flag is passed to store.Tree.AuditLog as
	// in AuditLog below, and no visible filtering of nullified entries happens
	// here — presumably the flag itself excludes them; confirm against the
	// store.Tree.AuditLog semantics.

	plc.mu.Lock()
	defer plc.mu.Unlock()

	tree, err := plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		return nil, stacktrace.Propagate(err, "failed to obtain immutable tree")
	}

	l, _, err := store.Tree.AuditLog(tree, did, false)
	if err != nil {
		return nil, stacktrace.Propagate(err, "")
	}

	if len(l) == 0 {
		return nil, stacktrace.Propagate(ErrDIDNotFound, "")
	}

	// Strip the log-entry wrappers, keeping only the operations.
	return lo.Map(l, func(logEntry didplc.LogEntry, _ int) didplc.OpEnum {
		return logEntry.Operation
	}), nil
}
133
+
134
+
// AuditLog returns the full audit log for did (including nullified entries)
// as of the given tree version. Returns ErrDIDNotFound if the DID has no
// entries; tombstoned DIDs return their log as normal.
func (plc *plcImpl) AuditLog(ctx context.Context, atHeight TreeVersion, did string) ([]didplc.LogEntry, error) {
	// GetPlcAuditLog - /:did/log/audit - full audit log, with nullified
	// if missing -> returns ErrDIDNotFound
	// if tombstone -> returns log as normal
	plc.mu.Lock()
	defer plc.mu.Unlock()

	tree, err := plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		return nil, stacktrace.Propagate(err, "failed to obtain immutable tree")
	}

	l, _, err := store.Tree.AuditLog(tree, did, false)
	if err != nil {
		return nil, stacktrace.Propagate(err, "")
	}

	if len(l) == 0 {
		return nil, stacktrace.Propagate(ErrDIDNotFound, "")
	}

	return l, nil
}
157
+
158
+
// LastOperation returns the most recent operation in did's log as of the
// given tree version. Returns ErrDIDNotFound if the DID has no entries; a
// tombstone operation is returned as-is.
func (plc *plcImpl) LastOperation(ctx context.Context, atHeight TreeVersion, did string) (didplc.OpEnum, error) {
	// GetLastOp - /:did/log/last - latest op from audit log which isn't nullified (isn't the latest op guaranteed to not be nullified?)
	// if missing -> returns ErrDIDNotFound
	// if tombstone -> returns tombstone op
	// NOTE(review): the code returns the last entry without checking Nullified;
	// this relies on the invariant that the newest entry is never nullified — confirm
	plc.mu.Lock()
	defer plc.mu.Unlock()

	tree, err := plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		return didplc.OpEnum{}, stacktrace.Propagate(err, "failed to obtain immutable tree")
	}

	l, _, err := store.Tree.AuditLog(tree, did, false)
	if err != nil {
		return didplc.OpEnum{}, stacktrace.Propagate(err, "")
	}

	if len(l) == 0 {
		return didplc.OpEnum{}, stacktrace.Propagate(ErrDIDNotFound, "")
	}

	return l[len(l)-1].Operation, nil
}
181
+
182
+
// Data returns the current PLC state for did as a modern (regular) operation,
// normalizing legacy operations via modernizeOp. Returns ErrDIDNotFound if
// the DID has no entries and ErrDIDGone if it has been tombstoned.
func (plc *plcImpl) Data(ctx context.Context, atHeight TreeVersion, did string) (didplc.RegularOp, error) {
	// GetPlcData - /:did/data - similar to GetLastOp but applies a transformation on the op which normalizes it into a modern op
	// if missing -> returns ErrDIDNotFound
	// if tombstone -> returns ErrDIDGone
	plc.mu.Lock()
	defer plc.mu.Unlock()

	tree, err := plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		return didplc.RegularOp{}, stacktrace.Propagate(err, "failed to obtain immutable tree")
	}

	l, _, err := store.Tree.AuditLog(tree, did, false)
	if err != nil {
		return didplc.RegularOp{}, stacktrace.Propagate(err, "")
	}

	if len(l) == 0 {
		return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDNotFound, "")
	}

	opEnum := l[len(l)-1].Operation
	if opEnum.Tombstone != nil {
		return didplc.RegularOp{}, stacktrace.Propagate(ErrDIDGone, "")
	}
	if opEnum.Regular != nil {
		return *opEnum.Regular, nil
	}
	// legacy genesis op: normalize into the modern representation
	return *modernizeOp(opEnum.Legacy), nil
}
212
+
213
+
// Export returns up to count log entries created strictly after the given
// time, across all DIDs, as of the given tree version (mirrors the PLC
// directory /export endpoint).
func (plc *plcImpl) Export(ctx context.Context, atHeight TreeVersion, after time.Time, count int) ([]didplc.LogEntry, error) {
	plc.mu.Lock()
	defer plc.mu.Unlock()

	tree, err := plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		return nil, stacktrace.Propagate(err, "failed to obtain immutable tree")
	}

	entries, err := store.Tree.ExportOperations(tree, after, count)
	return entries, stacktrace.Propagate(err, "")
}
225
+
226
+
// inMemoryAuditLogFetcher adapts a plcImpl's tree provider to the
// AuditLogFetcher interface consumed by the operation validator.
type inMemoryAuditLogFetcher struct {
	plc *plcImpl
}

// AuditLogReverseIterator returns an iterator over did's audit log entries at
// the given tree version, newest first. Errors are reported through retErr;
// if the tree cannot be obtained, retErr is set and an empty iterator is
// returned so callers can range over it unconditionally.
func (a *inMemoryAuditLogFetcher) AuditLogReverseIterator(atHeight TreeVersion, did string, retErr *error) iter.Seq2[int, didplc.LogEntry] {
	tree, err := a.plc.treeProvider.ImmutableTree(atHeight)
	if err != nil {
		*retErr = stacktrace.Propagate(err, "")
		return func(yield func(int, didplc.LogEntry) bool) {}
	}

	return store.Tree.AuditLogReverseIterator(tree, did, retErr)
}
+467
plc/operation_validator.go
+467
plc/operation_validator.go
···
···
1
+
package plc
2
+
3
+
import (
4
+
"errors"
5
+
"iter"
6
+
"strings"
7
+
"time"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
10
+
"github.com/bluesky-social/indigo/atproto/syntax"
11
+
"github.com/did-method-plc/go-didplc"
12
+
"github.com/ipfs/go-cid"
13
+
"github.com/palantir/stacktrace"
14
+
"github.com/samber/mo"
15
+
)
16
+
17
+
// AuditLogFetcher abstracts access to a DID's audit log so the validator can
// walk history without loading the entire log up front.
type AuditLogFetcher interface {
	// AuditLogReverseIterator should return an iterator over the list of log entries for the specified DID, in reverse
	// (newest first). Iteration errors are reported through err after the loop.
	AuditLogReverseIterator(atHeight TreeVersion, did string, err *error) iter.Seq2[int, didplc.LogEntry]
}
21
+
22
+
// V0OperationValidator validates did:plc operations against a DID's existing
// audit log, implementing the (v0) PLC method rules.
type V0OperationValidator struct {
	auditLogFetcher AuditLogFetcher
}

// NewV0OperationValidator returns a validator that reads DID history through
// the given log fetcher.
func NewV0OperationValidator(logFetcher AuditLogFetcher) *V0OperationValidator {
	return &V0OperationValidator{
		auditLogFetcher: logFetcher,
	}
}
31
+
32
+
// OperationEffects describes the changes a validated operation would make to
// a DID's audit log when stored.
type OperationEffects struct {
	// NullifiedEntriesStartingIndex is the log index from which existing
	// entries become nullified (set only when the operation forks history).
	NullifiedEntriesStartingIndex mo.Option[int]
	// NewLogEntry is the fully-populated entry to append to the log.
	NewLogEntry didplc.LogEntry
	NewOperationCID cid.Cid // should be equivalent to the CID field inside NewLogEntry, but that's a string and we need the strongly typed Cid sometimes
	// NewOperationIndex is the index the new entry will occupy in the log.
	NewOperationIndex int
}
38
+
39
+
// Validate returns the new complete AuditLog that the DID history would assume if validation passes, and an error if it doesn't pass.
// laxChecking disables the post-September-2023 strictness rules (field
// constraints, legacy-op restrictions and rate limits), intended for
// importing historical operations.
func (v *V0OperationValidator) Validate(atHeight TreeVersion, timestamp syntax.Datetime, expectedDid string, opBytes []byte, laxChecking bool) (OperationEffects, error) {
	opEnum, op, err := unmarshalOp(opBytes)
	if err != nil {
		return OperationEffects{}, stacktrace.Propagate(errors.Join(ErrMalformedOperation, err), "")
	}

	if !op.IsSigned() {
		return OperationEffects{}, stacktrace.Propagate(ErrOperationNotSigned, "")
	}

	september29Of2023 := time.Date(2023, time.September, 28, 0, 0, 0, 0, time.UTC)
	// certain constraints appear to only have been enforced after late September 27, 2023 based on a Sep 29 announcement
	// https://github.com/did-method-plc/did-method-plc/pull/47 (introduced rate limits and 'disallows create v1s' test)
	// https://github.com/bluesky-social/atproto/discussions/1632
	// NOTE(review): this rejects legacy ops whose timestamp is NOT after the
	// cutoff (i.e. old timestamps) under strict checking; confirm the
	// condition's polarity is intentional, since PR 47 suggests legacy ops
	// were disallowed AFTER that date.
	if !laxChecking && opEnum.Legacy != nil && !timestamp.Time().After(september29Of2023) {
		return OperationEffects{}, stacktrace.Propagate(ErrLegacyOperationNotAllowed, "")
	}

	if op.IsGenesis() {
		if opEnum.Tombstone != nil {
			return OperationEffects{}, stacktrace.Propagate(ErrInvalidOperationSequence, "genesis operation cannot be tombstone")
		}
		// a genesis op determines the DID; it must match the one being claimed
		calcDid, err := op.DID()
		if err != nil {
			return OperationEffects{}, stacktrace.Propagate(errors.Join(ErrMalformedOperation, err), "")
		}
		if calcDid != expectedDid {
			return OperationEffects{}, stacktrace.Propagate(ErrDIDMismatch, "")
		}
	}

	// Validate operation structure and field constraints
	if !laxChecking && timestamp.Time().After(september29Of2023) {
		if err := v.validateOperationConstraints(timestamp.Time(), op); err != nil {
			return OperationEffects{}, stacktrace.Propagate(err, "")
		}
	}

	proposedPrev := op.PrevCIDStr()

	// walk the log newest-first, collecting entries until we find the one the
	// new op claims as its prev; partialLog holds the entries visited so far,
	// keyed by their log index
	partialLog := make(map[int]didplc.LogEntry)
	mostRecentOpIndex := -1
	indexOfPrev := -1
	var iteratorErr error
	for entryIdx, entry := range v.auditLogFetcher.AuditLogReverseIterator(atHeight, expectedDid, &iteratorErr) {
		partialLog[entryIdx] = entry
		if mostRecentOpIndex == -1 {
			// first yielded entry is the newest one
			mostRecentOpIndex = entryIdx

			if proposedPrev == "" {
				return OperationEffects{}, stacktrace.Propagate(ErrInvalidOperationSequence, "creation operation not allowed as DID already exists")
			}
		}

		if entry.CID == proposedPrev {
			indexOfPrev = entryIdx
			break
		}
	}

	if iteratorErr != nil {
		return OperationEffects{}, stacktrace.Propagate(iteratorErr, "")
	}

	nullifiedEntries := []didplc.LogEntry{}
	nullifiedEntriesStartingIndex := mo.None[int]()

	if mostRecentOpIndex < 0 {
		// we are expecting a creation op, validate it like so
		newOperationCID := op.CID()
		newEntry := didplc.LogEntry{
			DID:       expectedDid,
			Operation: opEnum,
			CID:       newOperationCID.String(),
			Nullified: false,
			CreatedAt: timestamp.String(),
		}

		err = didplc.VerifyOpLog([]didplc.LogEntry{newEntry})
		if err != nil {
			err = NewInvalidOperationError(4023, err)
			return OperationEffects{}, stacktrace.Propagate(err, "invalid operation")
		}
	} else if indexOfPrev < 0 {
		// there are entries in the audit log but none of them has a CID matching prev
		return OperationEffects{}, stacktrace.Propagate(ErrInvalidPrev, "")
	} else {
		// we've found the targeted prev operation

		// timestamps must increase monotonically
		mostRecentOp := partialLog[mostRecentOpIndex]
		mostRecentCreatedAt, err := syntax.ParseDatetime(mostRecentOp.CreatedAt)
		if err != nil {
			return OperationEffects{}, stacktrace.Propagate(err, "reached invalid internal state")
		}
		if !timestamp.Time().After(mostRecentCreatedAt.Time()) {
			return OperationEffects{}, stacktrace.Propagate(ErrInvalidOperationSequence, "")
		}

		// if we are forking history, these are the ops still in the proposed canonical history

		lastOpEntry := partialLog[indexOfPrev]
		lastOp := lastOpEntry.Operation.AsOperation()
		lastOpRotationKeys := lastOp.EquivalentRotationKeys()
		// everything after prev gets nullified by this operation
		for i := indexOfPrev + 1; i <= mostRecentOpIndex; i++ {
			nullifiedEntries = append(nullifiedEntries, partialLog[i])
		}
		if len(nullifiedEntries) > 0 {
			nullifiedEntriesStartingIndex = mo.Some(indexOfPrev + 1)

			// recovery rule: the new op must be signed by a rotation key with
			// strictly higher priority (lower index) than the key that signed
			// the first nullified op
			disputedSignerIdx, err := didplc.VerifySignatureAny(nullifiedEntries[0].Operation.AsOperation(), lastOpRotationKeys)
			if err != nil {
				return OperationEffects{}, stacktrace.Propagate(err, "reached invalid internal state")
			}
			morePowerfulKeys := lastOpRotationKeys[0:disputedSignerIdx]

			_, err = didplc.VerifySignatureAny(op, morePowerfulKeys)
			if err != nil {
				return OperationEffects{}, stacktrace.Propagate(ErrInvalidSignature, "")
			}

			// recovery key gets a 72hr window to do historical re-writes
			firstNullifiedCreatedAt, err := syntax.ParseDatetime(nullifiedEntries[0].CreatedAt)
			if err != nil {
				return OperationEffects{}, stacktrace.Propagate(err, "reached invalid internal state")
			}
			if timestamp.Time().Sub(firstNullifiedCreatedAt.Time()) > 72*time.Hour {
				return OperationEffects{}, stacktrace.Propagate(ErrRecoveryWindowExpired, "")
			}
		} else {
			// this does not involve nullification
			_, err := didplc.VerifySignatureAny(op, lastOpRotationKeys)
			if err != nil {
				return OperationEffects{}, stacktrace.Propagate(ErrInvalidSignature, "")
			}
		}
	}

	newOperationCID := op.CID()
	newEntry := didplc.LogEntry{
		DID:       expectedDid,
		Operation: opEnum,
		CID:       newOperationCID.String(),
		Nullified: false,
		CreatedAt: timestamp.String(),
	}

	// do not enforce rate limits on recovery operations to prevent DDOS by a bad actor
	if len(nullifiedEntries) == 0 {
		// (see prior note on september29Of2023)
		if !laxChecking && timestamp.Time().After(september29Of2023) {
			err = v.EnforceOpsRateLimit(atHeight, expectedDid, timestamp.Time())
			if err != nil {
				return OperationEffects{}, stacktrace.Propagate(err, "")
			}
		}
	}

	return OperationEffects{
		NullifiedEntriesStartingIndex: nullifiedEntriesStartingIndex,
		NewLogEntry:                   newEntry,
		NewOperationCID:               newOperationCID,
		NewOperationIndex:             mostRecentOpIndex + 1,
	}, nil
}
205
+
206
+
// Constants for validation limits (matching TypeScript implementation)
const (
	MaxOpBytes            = 4000 // maximum serialized size of a single operation
	MaxAkaEntries         = 10   // maximum number of alsoKnownAs entries
	MaxAkaLength          = 258  // maximum length of one alsoKnownAs entry
	MaxRotationEntries    = 10   // the spec suggests this should be 5 but the Typescript implementation uses 10
	MaxServiceEntries     = 10   // maximum number of service entries
	MaxServiceTypeLength  = 256  // maximum length of a service type string
	MaxServiceEndpointLen = 512  // maximum length of a service endpoint URL
	MaxVmEntries          = 10   // maximum number of verificationMethod entries
	MaxIdLength           = 32   // maximum length of service/verificationMethod ids
	MaxDidKeyLength       = 256  // maximum length of a verificationMethod did:key
)

// Rate limiting constants (matching TypeScript implementation)
const (
	HourLimit = 10  // maximum operations per DID within one hour
	DayLimit  = 30  // maximum operations per DID within one day
	WeekLimit = 100 // maximum operations per DID within one week
)
226
+
227
+
// EnforceOpsRateLimit is ported from the TypeScript enforceOpsRateLimit function, adapted to not require fetching the entire log.
// It walks the audit log newest-first, counting non-nullified operations in
// the hour/day/week windows ending at newOperationTimestamp, and returns
// ErrRateLimitExceeded if any window's limit is reached.
func (v *V0OperationValidator) EnforceOpsRateLimit(atHeight TreeVersion, did string, newOperationTimestamp time.Time) error {
	hourAgo := newOperationTimestamp.Add(-time.Hour)
	dayAgo := newOperationTimestamp.Add(-24 * time.Hour)
	weekAgo := newOperationTimestamp.Add(-7 * 24 * time.Hour)

	var withinHour, withinDay, withinWeek int
	// err is filled in by the iterator (via pointer) if iteration fails
	var err error
	for _, entry := range v.auditLogFetcher.AuditLogReverseIterator(atHeight, did, &err) {
		if entry.Nullified {
			// The typescript implementation operates over a `ops` array which doesn't include nullified ops
			// (With recovery ops also skipping rate limits, doesn't this leave the PLC vulnerable to the spam of constant recovery operations?)
			continue
		}
		// Parse the CreatedAt timestamp string
		// The CreatedAt field is stored as a string in ISO 8601 format
		// (note: this `:=` shadows the outer err on purpose — a parse failure
		// returns immediately below, so the outer iterator err is unaffected)
		opDatetime, err := syntax.ParseDatetime(entry.CreatedAt)
		if err != nil {
			return stacktrace.Propagate(err, "")
		}
		opTime := opDatetime.Time()

		if opTime.Before(weekAgo) {
			// operations are always ordered by timestamp, and we're iterating from newest to oldest
			// if we're already looking at operations over a week ago,
			// there's no way we'll ever increase withinHour, withinDay or withinWeek
			// NOTE(review): this early return skips the final iterator-error
			// check; acceptable only if the iterator reports errors before
			// yielding entries — confirm
			return nil
		}

		if opTime.After(weekAgo) {
			withinWeek++
			if withinWeek >= WeekLimit {
				return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last week (max %d)", WeekLimit)
			}
		}
		if opTime.After(dayAgo) {
			withinDay++
			if withinDay >= DayLimit {
				return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last day (max %d)", DayLimit)
			}
		}
		if opTime.After(hourAgo) {
			withinHour++
			if withinHour >= HourLimit {
				return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last hour (max %d)", HourLimit)
			}
		}
	}
	// propagate any error the iterator reported (nil if iteration succeeded)
	return stacktrace.Propagate(err, "")
}
277
+
278
+
// EnforceOpsRateLimit checks whether a slice of log entries exceeds rate limits
279
+
// This method is ported from the TypeScript enforceOpsRateLimit function
280
+
func EnforceOpsRateLimit(ops []didplc.LogEntry) error {
281
+
now := time.Now()
282
+
hourAgo := now.Add(-time.Hour)
283
+
dayAgo := now.Add(-24 * time.Hour)
284
+
weekAgo := now.Add(-7 * 24 * time.Hour)
285
+
286
+
var withinHour, withinDay, withinWeek int
287
+
288
+
for _, op := range ops {
289
+
// Parse the CreatedAt timestamp string
290
+
// The CreatedAt field is stored as a string in ISO 8601 format
291
+
opDatetime, err := syntax.ParseDatetime(op.CreatedAt)
292
+
if err != nil {
293
+
// If parsing fails, skip this operation for rate limiting
294
+
continue
295
+
}
296
+
opTime := opDatetime.Time()
297
+
298
+
if opTime.After(weekAgo) {
299
+
withinWeek++
300
+
if withinWeek >= WeekLimit {
301
+
return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last week (max %d)", WeekLimit)
302
+
}
303
+
}
304
+
if opTime.After(dayAgo) {
305
+
withinDay++
306
+
if withinDay >= DayLimit {
307
+
return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last day (max %d)", DayLimit)
308
+
}
309
+
}
310
+
if opTime.After(hourAgo) {
311
+
withinHour++
312
+
if withinHour >= HourLimit {
313
+
return stacktrace.Propagate(ErrRateLimitExceeded, "too many operations within last hour (max %d)", HourLimit)
314
+
}
315
+
}
316
+
}
317
+
318
+
return nil
319
+
}
320
+
321
+
// validateOperationConstraints enforces size and per-type structural limits
// on an operation, dispatching to the tombstone/regular/legacy specific
// checks. createdAt is the timestamp the operation claims, used for
// time-gated constraints.
func (v *V0OperationValidator) validateOperationConstraints(createdAt time.Time, op didplc.Operation) error {
	// Check operation size
	opBytes := op.SignedCBORBytes()
	if len(opBytes) > MaxOpBytes {
		return stacktrace.Propagate(ErrOperationTooLarge, "operation too large (%d bytes maximum, got %d)", MaxOpBytes, len(opBytes))
	}

	// Handle tombstone operations (no additional constraints needed beyond prev)
	if tombstone, ok := op.(*didplc.TombstoneOp); ok {
		if tombstone.Prev == "" {
			return stacktrace.Propagate(ErrInvalidPrev, "missing previous in tombstone operation")
		}
		return nil
	}

	// Handle regular operations
	if regular, ok := op.(*didplc.RegularOp); ok {
		return stacktrace.Propagate(v.validateRegularOpConstraints(createdAt, regular), "")
	}

	// Handle legacy operations (creation only)
	if legacy, ok := op.(*didplc.LegacyOp); ok {
		return stacktrace.Propagate(v.validateLegacyOpConstraints(createdAt, legacy), "")
	}

	return stacktrace.Propagate(ErrMalformedOperation, "unknown operation type")
}
348
+
349
+
// validateLegacyOpConstraints validates a legacy-format operation: legacy ops
// are only permitted as genesis, and are otherwise checked against the
// regular-op constraints after normalization.
func (v *V0OperationValidator) validateLegacyOpConstraints(createdAt time.Time, op *didplc.LegacyOp) error {
	if !op.IsGenesis() {
		return stacktrace.Propagate(ErrLegacyOperationNotAllowed, "legacy format only allowed for genesis operations")
	}

	// normalize the operation then use the regular validation
	return stacktrace.Propagate(v.validateRegularOpConstraints(createdAt, modernizeOp(op)), "")
}
357
+
358
+
// validateRegularOpConstraints enforces the field-level constraints for a
// regular (modern) operation: type, alsoKnownAs, rotationKeys, services and
// verificationMethods limits. Some checks are time-gated by createdAt to
// match the reference implementation's rollout history.
func (v *V0OperationValidator) validateRegularOpConstraints(createdAt time.Time, op *didplc.RegularOp) error {
	// Validate type
	if op.Type != "plc_operation" {
		return stacktrace.Propagate(ErrMalformedOperation, "invalid operation type: %s", op.Type)
	}

	// Validate alsoKnownAs
	if len(op.AlsoKnownAs) > MaxAkaEntries {
		return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "too many alsoKnownAs entries (max %d)", MaxAkaEntries)
	}

	// Check for duplicate alsoKnownAs and length limits
	seenAka := make(map[string]bool)
	for _, aka := range op.AlsoKnownAs {
		if len(aka) > MaxAkaLength {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "alsoKnownAs entry too long (max %d): %s", MaxAkaLength, aka)
		}
		if seenAka[aka] {
			return stacktrace.Propagate(ErrDuplicateFields, "duplicate alsoKnownAs entry: %s", aka)
		}
		seenAka[aka] = true
	}

	// Validate rotationKeys
	if len(op.RotationKeys) == 0 {
		// the typescript implementation doesn't appear to check this at first sight,
		// but the spec says "must include least 1 key and at most 5 keys, with no duplication"
		return stacktrace.Propagate(ErrMalformedOperationField, "missing rotation keys")
	}
	if len(op.RotationKeys) > MaxRotationEntries {
		return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "too many rotationKey entries (max %d)", MaxRotationEntries)
	}

	//seenKeys := make(map[string]struct{})
	for _, key := range op.RotationKeys {
		parsedKey, err := atcrypto.ParsePublicDIDKey(key)
		if err != nil {
			return stacktrace.Propagate(ErrMalformedOperationField, "invalid rotationKey: %s", key)
		}
		// the typescript implementation doesn't appear to check this at first sight,
		// but the spec says "must include least 1 key and at most 5 keys, with no duplication"
		_ = parsedKey
		// as somewhat expected, we had to relax this check:
		// CID bafyreigpavgvxvba3b3mkj2tfoqcmubxh2576cvipkx2stmv5bnskxjacq (did:plc:z4cu2w6uedmhnrjhkmv2ajsq) has a duplicate rotationKey
		// see also https://github.com/did-method-plc/did-method-plc/issues/26 for more examples
		/*dk := parsedKey.DIDKey()
		if _, present := seenKeys[dk]; present {
			return stacktrace.Propagate(ErrDuplicateFields, "duplicate rotationKey: %s", key)
		}
		seenKeys[dk] = struct{}{}*/
	}

	// Validate services
	if len(op.Services) > MaxServiceEntries {
		return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "too many service entries (max %d)", MaxServiceEntries)
	}

	for id, service := range op.Services {
		if len(id) > MaxIdLength {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "service id too long (max %d): %s", MaxIdLength, id)
		}
		if len(service.Type) > MaxServiceTypeLength {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "service type too long (max %d): %s", MaxServiceTypeLength, service.Type)
		}
		if len(service.Endpoint) > MaxServiceEndpointLen {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "service endpoint too long (max %d)", MaxServiceEndpointLen)
		}
	}

	// Validate verificationMethods
	// some of these constraints appear to only have been enforced after the start of June 2025, we'll be more lenient and enforce from July onwards only
	// (https://github.com/did-method-plc/did-method-plc/pull/101)
	july2025 := time.Date(2025, time.July, 1, 0, 0, 0, 0, time.UTC)
	if createdAt.After(july2025) && len(op.VerificationMethods) > MaxVmEntries {
		return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "too many verificationMethod entries (max %d)", MaxVmEntries)
	}

	for id, key := range op.VerificationMethods {
		// the id length check is enforced regardless of createdAt
		if len(id) > MaxIdLength {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "verificationMethod id too long (max %d): %s", MaxIdLength, id)
		}
		// key length/parseability checks only apply from July 2025 onwards
		if !createdAt.After(july2025) {
			continue
		}
		if len(key) > MaxDidKeyLength {
			return stacktrace.Propagate(ErrMaxFieldLengthExceeded, "verificationMethod key too long (max %d): %s", MaxDidKeyLength, key)
		}
		if _, err := atcrypto.ParsePublicDIDKey(key); err != nil {
			return stacktrace.Propagate(ErrMalformedOperationField, "invalid verificationMethod key: %s", key)
		}
	}

	return nil
}
452
+
453
+
// ensureAtprotoPrefix normalizes str to an at:// URI: an existing at://
// prefix is kept as-is, while http:// and https:// schemes are stripped
// (in that order) before prepending at://.
func ensureAtprotoPrefix(str string) string {
	if strings.HasPrefix(str, "at://") {
		return str
	}
	remainder := str
	for _, scheme := range []string{"http://", "https://"} {
		remainder = strings.TrimPrefix(remainder, scheme)
	}
	return "at://" + remainder
}
461
+
462
+
// ensureHttpPrefix normalizes str to an HTTP(S) URL: strings that already
// carry an http:// or https:// scheme are returned unchanged, everything
// else gets https:// prepended.
func ensureHttpPrefix(str string) string {
	hasScheme := strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
	if !hasScheme {
		str = "https://" + str
	}
	return str
}
+69
plc/plc.go
+69
plc/plc.go
···
···
1
+
package plc
2
+
3
+
import (
4
+
"context"
5
+
"errors"
6
+
"time"
7
+
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"github.com/did-method-plc/go-didplc"
10
+
"github.com/ipfs/go-cid"
11
+
)
12
+
13
+
// ErrDIDNotFound is returned when the requested DID has no operations recorded.
var ErrDIDNotFound = errors.New("DID not found")

// ErrDIDGone is returned when the requested DID exists but its most recent
// operation is a tombstone (the DID has been deactivated).
var ErrDIDGone = errors.New("DID deactivated")
15
+
16
+
// TreeVersion identifies which version of the state tree a query should read
// from: the mutable working tree, the latest committed tree, or one specific
// historical height. Construct values via WorkingTreeVersion,
// CommittedTreeVersion or SpecificTreeVersion.
type TreeVersion struct {
	workingHeight   bool
	committedHeight bool
	specificHeight  int64
}

// IsMutable reports whether this version refers to the mutable working tree.
func (v TreeVersion) IsMutable() bool {
	return v.workingHeight
}

// IsCommitted reports whether this version refers to the latest committed tree.
func (v TreeVersion) IsCommitted() bool {
	return v.committedHeight
}

// SpecificVersion returns the height this version designates, together with
// whether it actually refers to a specific height (as opposed to the working
// or committed tree, in which case the returned height is meaningless).
func (v TreeVersion) SpecificVersion() (int64, bool) {
	isSpecific := !v.workingHeight && !v.committedHeight
	return v.specificHeight, isSpecific
}

// WorkingTreeVersion designates the mutable working tree.
var WorkingTreeVersion = TreeVersion{workingHeight: true}

// CommittedTreeVersion designates the latest committed tree.
var CommittedTreeVersion = TreeVersion{committedHeight: true}

// SpecificTreeVersion designates the tree at the given historical height.
func SpecificTreeVersion(height int64) TreeVersion {
	return TreeVersion{specificHeight: height}
}
47
+
48
+
// OperationValidator validates a serialized PLC operation against the DID
// history at a given tree version, returning the effects storing it would have.
type OperationValidator interface {
	Validate(atHeight TreeVersion, timestamp syntax.Datetime, expectedDid string, opBytes []byte, allowLegacy bool) (OperationEffects, error)
}

// PLC combines read and write access to the PLC directory state.
type PLC interface {
	ReadPLC
	WritePLC
}

// ReadPLC provides read-only queries over the PLC directory state, each
// evaluated at a caller-chosen tree version.
type ReadPLC interface {
	// ValidateOperation checks opBytes without applying it.
	ValidateOperation(ctx context.Context, atHeight TreeVersion, at time.Time, did string, opBytes []byte) error
	// Resolve returns the DID document for did.
	Resolve(ctx context.Context, atHeight TreeVersion, did string) (didplc.Doc, error)
	// OperationLog returns did's operations without log-entry metadata.
	OperationLog(ctx context.Context, atHeight TreeVersion, did string) ([]didplc.OpEnum, error)
	// AuditLog returns did's full audit log, including nullified entries.
	AuditLog(ctx context.Context, atHeight TreeVersion, did string) ([]didplc.LogEntry, error)
	// LastOperation returns the most recent operation in did's log.
	LastOperation(ctx context.Context, atHeight TreeVersion, did string) (didplc.OpEnum, error)
	// Data returns did's current state normalized into a modern operation.
	Data(ctx context.Context, atHeight TreeVersion, did string) (didplc.RegularOp, error)
	// Export returns up to count log entries created after the given time.
	Export(ctx context.Context, atHeight TreeVersion, after time.Time, count int) ([]didplc.LogEntry, error)
}

// WritePLC mutates the PLC directory state by executing operations.
type WritePLC interface {
	ExecuteOperation(ctx context.Context, timestamp time.Time, did string, opBytes []byte) (cid.Cid, error)
}
+393
plc/plc_test.go
+393
plc/plc_test.go
···
···
1
+
package plc_test
2
+
3
+
import (
4
+
"encoding/json"
5
+
"testing"
6
+
"time"
7
+
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"github.com/did-method-plc/go-didplc"
10
+
"github.com/samber/lo"
11
+
"github.com/stretchr/testify/require"
12
+
"tangled.org/gbl08ma/didplcbft/plc"
13
+
)
14
+
15
+
// TestPLC exercises the PLC implementation end-to-end against an in-memory
// tree: rejecting an update whose prev doesn't exist yet, applying a genesis
// op plus two updates, then checking Resolve/OperationLog/Export at the
// working, committed and specific historical tree versions.
func TestPLC(t *testing.T) {
	type operationToPerform struct {
		DID           string
		Operation     string // raw JSON operation body
		ApplyAt       syntax.Datetime
		ExpectFailure bool
	}

	testDID := "did:plc:uyauirpjzk6le4ygqzatcwnq"

	var operations = []operationToPerform{
		{
			// update referencing a prev that doesn't exist yet -> must fail
			DID: testDID,
			Operation: `
			{
				"sig": "v7O8dkLUv0UVHs8BX3DXUpy60V0AjljDR14LfGN5mDsqY__JVjpzMWdIaCaRt2KLQq8uoQxHHFjyz4jS906SpQ",
				"prev": "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q",
				"type": "plc_operation",
				"services": {
					"atproto_pds": {
						"type": "AtprotoPersonalDataServer",
						"endpoint": "https://at.tny.im"
					}
				},
				"alsoKnownAs": [
					"at://pds.labeler.tny.im"
				],
				"rotationKeys": [
					"did:key:zQ3shhguVfzmkfgXHzrnSeDxzbAvw7NjiVUcu2nmkeiQUrZUM"
				],
				"verificationMethods": {
					"atproto": "did:key:zQ3shsHRci5EP9nr7dzsy5QkzTbmm8uJBXbXdghbLc8JAqogb"
				}
			}`,
			ApplyAt:       syntax.Datetime("2025-09-19T19:38:16.480Z"),
			ExpectFailure: true,
		},
		{
			// genesis operation (prev is null)
			DID: testDID,
			Operation: `
			{
				"sig": "JICl9boFJK12rmBbnuKLHdV51f_CMsrsmWUrgu17DDwtsqSiSazidWA_RZbJplYNr34bTwXyY7kkJ7oJHsbOjQ",
				"prev": null,
				"type": "plc_operation",
				"services": {
					"atproto_pds": {
						"type": "AtprotoPersonalDataServer",
						"endpoint": "https://at.tny.im"
					}
				},
				"alsoKnownAs": [
					"at://pdslabeler.at.tny.im"
				],
				"rotationKeys": [
					"did:key:zQ3shhguVfzmkfgXHzrnSeDxzbAvw7NjiVUcu2nmkeiQUrZUM"
				],
				"verificationMethods": {
					"atproto": "did:key:zQ3shsHRci5EP9nr7dzsy5QkzTbmm8uJBXbXdghbLc8JAqogb"
				}
			}`,
			ApplyAt: syntax.Datetime("2025-09-19T19:38:16.480Z"),
		},
		{
			// same update as the first entry; now its prev exists -> succeeds
			DID: testDID,
			Operation: `
			{
				"sig": "v7O8dkLUv0UVHs8BX3DXUpy60V0AjljDR14LfGN5mDsqY__JVjpzMWdIaCaRt2KLQq8uoQxHHFjyz4jS906SpQ",
				"prev": "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q",
				"type": "plc_operation",
				"services": {
					"atproto_pds": {
						"type": "AtprotoPersonalDataServer",
						"endpoint": "https://at.tny.im"
					}
				},
				"alsoKnownAs": [
					"at://pds.labeler.tny.im"
				],
				"rotationKeys": [
					"did:key:zQ3shhguVfzmkfgXHzrnSeDxzbAvw7NjiVUcu2nmkeiQUrZUM"
				],
				"verificationMethods": {
					"atproto": "did:key:zQ3shsHRci5EP9nr7dzsy5QkzTbmm8uJBXbXdghbLc8JAqogb"
				}
			}`,
			ExpectFailure: false,
			ApplyAt:       syntax.Datetime("2025-09-19T19:41:10.132Z"),
		},
		{
			// second update: adds a labeler service and verification method
			DID: testDID,
			Operation: `
			{
				"sig": "RFrHbpN5fjdTnPCaXGN5eF_bkz2fRORVisAYu1a2YTh7G4fF9UERjtetJyEXPovjmFapFu-CEoBjuCcIboTztg",
				"prev": "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m",
				"type": "plc_operation",
				"services": {
					"atproto_pds": {
						"type": "AtprotoPersonalDataServer",
						"endpoint": "https://at.tny.im"
					},
					"atproto_labeler": {
						"type": "AtprotoLabeler",
						"endpoint": "https://pds.labeler.tny.im"
					}
				},
				"alsoKnownAs": [
					"at://pds.labeler.tny.im"
				],
				"rotationKeys": [
					"did:key:zQ3shhguVfzmkfgXHzrnSeDxzbAvw7NjiVUcu2nmkeiQUrZUM"
				],
				"verificationMethods": {
					"atproto": "did:key:zQ3shsHRci5EP9nr7dzsy5QkzTbmm8uJBXbXdghbLc8JAqogb",
					"atproto_label": "did:key:zQ3shZ5WhBQGvBUg5z7a2NJ11698KQk9skaGr2YzJiVsXYzi9"
				}
			}`,
			ExpectFailure: false,
			ApplyAt:       syntax.Datetime("2025-09-19T22:32:52.503Z"),
		},
	}

	ctx := t.Context()

	treeProvider := NewTestTreeProvider()
	testPLC := plc.NewPLC(treeProvider)

	tree, err := treeProvider.MutableTree()
	require.NoError(t, err)
	// save a baseline version so historical queries below have a reference point
	_, origVersion, err := tree.SaveVersion()
	require.NoError(t, err)

	// resolving an unknown DID should return an error
	_, err = testPLC.Resolve(ctx, plc.WorkingTreeVersion, "did:plc:y5gazb6lrsk3j4riiro62zjn")
	require.ErrorIs(t, err, plc.ErrDIDNotFound)

	// apply each operation, saving a new tree version after every attempt
	for _, c := range operations {
		_, err := testPLC.ExecuteOperation(ctx, c.ApplyAt.Time(), c.DID, []byte(c.Operation))
		if c.ExpectFailure {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
		_, _, err = tree.SaveVersion()
		require.NoError(t, err)
	}

	// now try resolving the DID, should return the document with the latest state
	doc, err := testPLC.Resolve(ctx, plc.WorkingTreeVersion, testDID)
	require.NoError(t, err)
	require.Equal(t, testDID, doc.ID)
	require.Len(t, doc.Service, 2)
	require.Equal(t, []string{"at://pds.labeler.tny.im"}, doc.AlsoKnownAs)

	log, err := testPLC.OperationLog(ctx, plc.WorkingTreeVersion, testDID)
	require.NoError(t, err)
	require.Len(t, log, 3)
	require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", log[0].AsOperation().CID().String())
	require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", log[1].AsOperation().CID().String())
	require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", log[2].AsOperation().CID().String())

	// at origVersion+2 only the genesis op had been applied
	log, err = testPLC.OperationLog(ctx, plc.SpecificTreeVersion(origVersion+2), testDID)
	require.NoError(t, err)
	require.Len(t, log, 1)
	require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", log[0].AsOperation().CID().String())

	// the DID should still be not found in older versions of the tree
	_, err = testPLC.Resolve(ctx, plc.SpecificTreeVersion(origVersion), testDID)
	require.ErrorIs(t, err, plc.ErrDIDNotFound)

	doc, err = testPLC.Resolve(ctx, plc.SpecificTreeVersion(origVersion+4), testDID)
	require.NoError(t, err)

	export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, time.Time{}, 1000)
	require.NoError(t, err)
	require.Len(t, export, 3)

	require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", export[0].Operation.AsOperation().CID().String())
	require.Equal(t, "bafyreifgafcel2okxszhgbugieyvtmfig2gtf3dgqoh5fvdh3nlh6ncv6q", export[0].CID)
	require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[1].Operation.AsOperation().CID().String())
	require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[1].CID)
	require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", export[2].Operation.AsOperation().CID().String())
	require.Equal(t, "bafyreigyzl2esgnk7nvav5myvgywbshdmatzthc73iiar7tyeq3xjt47m4", export[2].CID)

	// the after parameter is exclusive, we should just get the second successful operation
	export, err = testPLC.Export(ctx, plc.CommittedTreeVersion, operations[1].ApplyAt.Time(), 1)
	require.NoError(t, err)
	require.Len(t, export, 1)
	require.Equal(t, "bafyreia6ewwkwjgly6dijfepaq2ey6zximodbtqqi5f6fyugli3cxohn5m", export[0].CID)
}
204
+
205
+
// TestPLCFromRemoteOperations replays real audit logs fetched from the live
// PLC directory (via the zero-value didplc.Client, which presumably targets
// the default directory endpoint — confirm in go-didplc) into a fresh local
// PLC instance, then checks that resolution, audit logs and exports match
// the remote data.
//
// NOTE(review): this test requires network access and is pinned to the state
// of the remote DIDs at the time it was written (e.g. the export length of 96
// below); it will break if any of the listed DIDs publishes new operations.
func TestPLCFromRemoteOperations(t *testing.T) {
	var client didplc.Client

	testDIDs := []string{
		"did:plc:uyauirpjzk6le4ygqzatcwnq",
		"did:plc:l37td5yhxl2irrzrgvei4qay",
		"did:plc:ia76kvnndjutgedggx2ibrem",
		"did:plc:mn45tewwnse5btfftvd3powc",
		"did:plc:ragtjsm2j2vknwkz3zp4oxrd",
		"did:plc:rnpkyqnmsw4ipey6eotbdnnf",
		"did:plc:syldnchxgxctqcjvkhqprrxz",
		"did:plc:yk4dd2qkboz2yv6tpubpc6co",
		//"did:plc:qeikkritllfcxmdtzet2tqcr",
		"did:plc:pkmfz5soq2swsvbhvjekb36g", // tombstone with nullifications
	}

	// Fetch the reference audit logs from the remote directory first, so any
	// network failure aborts the test before local state is built.
	remoteLogs := make([][]didplc.LogEntry, 0, len(testDIDs))
	for _, testDID := range testDIDs {
		auditLog, err := client.AuditLog(t.Context(), testDID)
		require.NoError(t, err)

		remoteLogs = append(remoteLogs, auditLog)
	}

	ctx := t.Context()

	treeProvider := NewTestTreeProvider()
	testPLC := plc.NewPLC(treeProvider)

	// Replay every remote operation into the local PLC, in log order.
	for _, auditLog := range remoteLogs {
		for _, logEntry := range auditLog {
			b, err := json.Marshal(&logEntry.Operation)
			require.NoError(t, err)

			at := syntax.Datetime(logEntry.CreatedAt).Time()

			err = testPLC.ValidateOperation(ctx, plc.WorkingTreeVersion, at, logEntry.DID, b)
			require.NoError(t, err)

			_, err = testPLC.ExecuteOperation(ctx, at, logEntry.DID, b)
			require.NoError(t, err)

			_, err = testPLC.ExecuteOperation(ctx, at, logEntry.DID, b)
			// committing the same operation twice should never work,
			// as though even in non-genesis ops the referenced prev will exist,
			// (and thus could seem like a recovery operation at first glance)
			// valid recovery operations must be signed by a key with a lower index in the rotationKeys array
			// than the one which signed the operation to be invalidated
			require.Error(t, err)
		}
	}

	// Commit the working tree so committed/specific-version reads see the data.
	_, newVersion, err := lo.Must(treeProvider.MutableTree()).SaveVersion()
	require.NoError(t, err)

	for i, testDID := range testDIDs {
		doc, err := testPLC.Resolve(ctx, plc.SpecificTreeVersion(newVersion), testDID)
		if testDID == "did:plc:pkmfz5soq2swsvbhvjekb36g" {
			// this DID is tombstoned, so resolution must report deactivation
			require.ErrorContains(t, err, "deactivated")
		} else {
			require.NoError(t, err)
			require.Equal(t, testDID, doc.ID)
		}

		// The locally rebuilt audit log must match the remote one entry by entry.
		actualLog, err := testPLC.AuditLog(ctx, plc.SpecificTreeVersion(newVersion), testDID)
		require.NoError(t, err)
		require.Len(t, actualLog, len(remoteLogs[i]))
		for j := range actualLog {
			require.Equal(t, remoteLogs[i][j].DID, actualLog[j].DID)
			require.Equal(t, remoteLogs[i][j].CID, actualLog[j].CID)
			require.Equal(t, remoteLogs[i][j].Nullified, actualLog[j].Nullified)
			require.Equal(t, syntax.Datetime(remoteLogs[i][j].CreatedAt).Time(), syntax.Datetime(actualLog[j].CreatedAt).Time())

			// compare operations via their canonical JSON encodings
			remoteBytes, err := json.Marshal(&remoteLogs[i][j].Operation)
			require.NoError(t, err)

			actualBytes, err := json.Marshal(&actualLog[j].Operation)
			require.NoError(t, err)

			require.Equal(t, remoteBytes, actualBytes)
		}
	}

	// A full export (count 0 = unlimited) should contain every stored operation.
	export, err := testPLC.Export(ctx, plc.CommittedTreeVersion, time.Time{}, 0)
	require.NoError(t, err)
	require.Len(t, export, 96)

	// ensure entries are sorted correctly
	last := time.Time{}
	for _, entry := range export {
		et, err := syntax.ParseDatetime(entry.CreatedAt)
		require.NoError(t, err)
		require.True(t, et.Time().After(last))
		last = et.Time()
	}
}
301
+
302
+
// TestEnforceOpsRateLimit exercises plc.EnforceOpsRateLimit against synthetic
// per-DID operation histories: within-limit histories must pass, and histories
// exceeding the hourly, daily or weekly caps (plc.HourLimit / plc.DayLimit /
// plc.WeekLimit) must fail with the corresponding error message.
func TestEnforceOpsRateLimit(t *testing.T) {
	// Test case 1: Operations within rate limits should pass
	t.Run("WithinLimits", func(t *testing.T) {
		now := time.Now()
		ops := []didplc.LogEntry{
			{
				DID:       "did:plc:test1",
				CreatedAt: now.Add(-30 * time.Minute).Format(time.RFC3339),
			},
			{
				DID:       "did:plc:test1",
				CreatedAt: now.Add(-45 * time.Minute).Format(time.RFC3339),
			},
		}

		err := plc.EnforceOpsRateLimit(ops)
		require.NoError(t, err)
	})

	// Test case 2: Exceeding hourly limit should fail
	t.Run("ExceedHourlyLimit", func(t *testing.T) {
		now := time.Now()
		// HourLimit+1 operations, one per minute, all inside the last hour
		// (indices 0..HourLimit; assumes HourLimit < 60 so none ages out)
		ops := make([]didplc.LogEntry, plc.HourLimit+1)
		for i := 0; i < len(ops); i++ {
			ops[i] = didplc.LogEntry{
				DID:       "did:plc:test2",
				CreatedAt: now.Add(-time.Duration(i) * time.Minute).Format(time.RFC3339),
			}
		}

		err := plc.EnforceOpsRateLimit(ops)
		require.ErrorContains(t, err, "too many operations within last hour")
	})

	// Test case 3: Exceeding daily limit should fail
	t.Run("ExceedDailyLimit", func(t *testing.T) {
		now := time.Now()
		ops := make([]didplc.LogEntry, plc.DayLimit+1)
		for i := 0; i < len(ops); i++ {
			// Create operations within the last day but over the daily limit
			ops[i] = didplc.LogEntry{
				DID:       "did:plc:test3",
				CreatedAt: now.Add(-time.Duration(i%24) * time.Hour).Format(time.RFC3339),
			}
		}

		err := plc.EnforceOpsRateLimit(ops)
		require.ErrorContains(t, err, "too many operations within last day")
	})

	// Test case 4: Exceeding weekly limit should fail
	t.Run("ExceedWeeklyLimit", func(t *testing.T) {
		now := time.Now()
		ops := make([]didplc.LogEntry, plc.WeekLimit+1)
		for i := 0; i < len(ops); i++ {
			// Create operations within the last week but over the weekly limit
			ops[i] = didplc.LogEntry{
				DID:       "did:plc:test4",
				CreatedAt: now.Add(-time.Duration(i%168) * time.Hour).Format(time.RFC3339),
			}
		}

		err := plc.EnforceOpsRateLimit(ops)
		require.ErrorContains(t, err, "too many operations within last week")
	})

	// Test case 5: Many operations within day but spread across hours should not exceed hourly limit
	t.Run("OperationsSpreadAcrossHours", func(t *testing.T) {
		now := time.Now()
		// Create 15 operations within the last day (within daily limit of 30)
		// but spread across 2+ hours so no single hour exceeds the limit of 10
		ops := make([]didplc.LogEntry, 15)
		for i := 0; i < 15; i++ {
			// First 8 operations in the last 30 minutes
			if i < 8 {
				ops[i] = didplc.LogEntry{
					DID:       "did:plc:test6",
					CreatedAt: now.Add(-time.Duration(i) * time.Minute).Format(time.RFC3339),
				}
			} else {
				// Next 7 operations spread across 2+ hours ago
				ops[i] = didplc.LogEntry{
					DID:       "did:plc:test6",
					CreatedAt: now.Add(-(2*time.Hour + time.Duration(i-8)*time.Minute)).Format(time.RFC3339),
				}
			}
		}

		err := plc.EnforceOpsRateLimit(ops)
		require.NoError(t, err)
	})
}
+46
plc/testutil_test.go
+46
plc/testutil_test.go
···
···
1
+
package plc_test
2
+
3
+
import (
4
+
"github.com/cosmos/iavl"
5
+
dbm "github.com/cosmos/iavl/db"
6
+
"github.com/palantir/stacktrace"
7
+
"tangled.org/gbl08ma/didplcbft/plc"
8
+
"tangled.org/gbl08ma/didplcbft/store"
9
+
)
10
+
11
+
// testTreeProvider is a trivial in-memory tree provider for tests: it serves
// a single IAVL tree backed by an in-memory database.
type testTreeProvider struct {
	tree *iavl.MutableTree
}
14
+
15
+
// NewTestTreeProvider creates a provider around a fresh memory-backed IAVL
// tree (cache size 128, no fast storage upgrade flag).
func NewTestTreeProvider() *testTreeProvider {
	return &testTreeProvider{
		tree: iavl.NewMutableTree(dbm.NewMemDB(), 128, false, iavl.NewNopLogger()),
	}
}
20
+
21
+
func (t *testTreeProvider) ImmutableTree(version plc.TreeVersion) (store.PossiblyMutableTree, error) {
22
+
if version.IsMutable() {
23
+
return store.AdaptMutableTree(t.tree), nil
24
+
}
25
+
var v int64
26
+
if version.IsCommitted() {
27
+
var err error
28
+
v, err = t.tree.GetLatestVersion()
29
+
if err != nil {
30
+
return nil, stacktrace.Propagate(err, "")
31
+
}
32
+
} else {
33
+
var ok bool
34
+
v, ok = version.SpecificVersion()
35
+
if !ok {
36
+
return nil, stacktrace.NewError("unsupported TreeVersion")
37
+
}
38
+
}
39
+
40
+
it, err := t.tree.GetImmutable(v)
41
+
return store.AdaptImmutableTree(it), stacktrace.Propagate(err, "")
42
+
}
43
+
44
+
// MutableTree returns the provider's single working tree; it never fails.
func (t *testTreeProvider) MutableTree() (*iavl.MutableTree, error) {
	return t.tree, nil
}
+43
plc/util.go
+43
plc/util.go
···
···
1
+
package plc
2
+
3
+
import (
4
+
"encoding/json"
5
+
6
+
"github.com/did-method-plc/go-didplc"
7
+
"github.com/palantir/stacktrace"
8
+
"github.com/samber/lo"
9
+
)
10
+
11
+
func unmarshalOp(opBytes []byte) (didplc.OpEnum, didplc.Operation, error) {
12
+
var opEnum didplc.OpEnum
13
+
14
+
err := json.Unmarshal(opBytes, &opEnum)
15
+
if err != nil {
16
+
return didplc.OpEnum{}, nil, stacktrace.Propagate(err, "")
17
+
}
18
+
19
+
op := opEnum.AsOperation()
20
+
if op == nil {
21
+
return didplc.OpEnum{}, nil, stacktrace.NewError("unknown operation type")
22
+
}
23
+
return opEnum, op, nil
24
+
}
25
+
26
+
// modernizeOp converts a legacy create operation into the equivalent modern
// "plc_operation" form:
//   - the signing key becomes the "atproto" verification method,
//   - recovery and signing keys become the rotation key list, deduplicated
//     with the recovery key kept first (higher rotation priority),
//   - the single service endpoint becomes both the alsoKnownAs entry and the
//     "atproto_pds" service endpoint,
//   - prev and sig are carried over unchanged.
//
// ensureAtprotoPrefix/ensureHttpPrefix are siblings in this package;
// presumably they normalize the "at://" handle and "https://" URL schemes —
// confirm against their definitions.
func modernizeOp(op *didplc.LegacyOp) *didplc.RegularOp {
	return &didplc.RegularOp{
		Type: "plc_operation",
		VerificationMethods: map[string]string{
			"atproto": op.SigningKey,
		},
		RotationKeys: lo.Uniq([]string{op.RecoveryKey, op.SigningKey}),
		AlsoKnownAs:  []string{ensureAtprotoPrefix(op.Service)},
		Services: map[string]didplc.OpService{
			"atproto_pds": {
				Type:     "AtprotoPersonalDataServer",
				Endpoint: ensureHttpPrefix(op.Service),
			},
		},
		Prev: op.Prev,
		Sig:  op.Sig,
	}
}
+54
plc/validation_errors.go
+54
plc/validation_errors.go
···
···
1
+
package plc
2
+
3
+
import "errors"
4
+
5
+
// invalidOperationError pairs a validation failure with the CheckTx response
// code to report for it (presumably the ABCI code surfaced to CometBFT —
// confirm at the call sites in the ABCI app).
type invalidOperationError struct {
	inner       error  // underlying cause, exposed via Unwrap
	checkTxCode uint32 // code returned for this failure class
}
9
+
10
+
// Error returns the wrapped error's message verbatim; the code is not included.
func (i *invalidOperationError) Error() string {
	return i.inner.Error()
}
13
+
14
+
// Unwrap exposes the underlying cause to errors.Is/errors.As chain walking.
func (i *invalidOperationError) Unwrap() error {
	return i.inner
}
17
+
18
+
// Is implements category matching rather than sentinel identity: it reports
// true whenever the comparison target is itself (or wraps) an
// invalidOperationError. Consequently errors.Is between ANY two of the
// sentinel values below returns true (e.g. errors.Is(ErrRateLimitExceeded,
// ErrInvalidSignature) is true). Use IsInvalidOperationError /
// InvalidOperationErrorCode to classify; do not rely on errors.Is to
// distinguish individual sentinels of this type.
func (i *invalidOperationError) Is(err error) bool {
	var e *invalidOperationError
	return errors.As(err, &e)
}
22
+
23
+
func NewInvalidOperationError(checkTxCode uint32, inner error) error {
24
+
return &invalidOperationError{
25
+
inner: inner,
26
+
checkTxCode: checkTxCode,
27
+
}
28
+
}
29
+
30
+
// IsInvalidOperationError reports whether err's chain contains an
// invalidOperationError (this relies on the type's category-matching Is
// method above).
func IsInvalidOperationError(err error) bool {
	return errors.Is(err, &invalidOperationError{})
}
33
+
34
+
func InvalidOperationErrorCode(err error) (uint32, bool) {
35
+
var e *invalidOperationError
36
+
if errors.As(err, &e) {
37
+
return e.checkTxCode, true
38
+
}
39
+
return 0, false
40
+
}
41
+
42
+
// Sentinel validation failures. Each carries a distinct code in the 40xx
// range, returned via InvalidOperationErrorCode (presumably as the ABCI
// CheckTx code — confirm at the ABCI app call sites). Note that because of
// invalidOperationError.Is, errors.Is does not distinguish between these
// sentinels — compare codes instead.
var ErrMalformedOperation = NewInvalidOperationError(4010, errors.New("malformed operation"))
var ErrMalformedOperationField = NewInvalidOperationError(4011, errors.New("malformed operation field"))
var ErrRateLimitExceeded = NewInvalidOperationError(4012, errors.New("rate limit exceeded"))
var ErrOperationNotSigned = NewInvalidOperationError(4013, errors.New("operation not signed"))
var ErrInvalidSignature = NewInvalidOperationError(4014, errors.New("invalid signature on operation"))
var ErrLegacyOperationNotAllowed = NewInvalidOperationError(4015, errors.New("legacy operation format not allowed"))
var ErrInvalidOperationSequence = NewInvalidOperationError(4016, errors.New("invalid operation sequence"))
var ErrDIDMismatch = NewInvalidOperationError(4017, errors.New("genesis operation does not match expected DID"))
var ErrInvalidPrev = NewInvalidOperationError(4018, errors.New("operation references invalid prev"))
var ErrOperationTooLarge = NewInvalidOperationError(4019, errors.New("operation too large"))
var ErrMaxFieldLengthExceeded = NewInvalidOperationError(4020, errors.New("maximum field length exceeded"))
var ErrDuplicateFields = NewInvalidOperationError(4021, errors.New("duplicate field data"))
var ErrRecoveryWindowExpired = NewInvalidOperationError(4022, errors.New("recovery window has passed"))
+114
start-testnet.sh
+114
start-testnet.sh
···
···
1
+
#!/bin/sh
2
+
3
+
# Default to 4 nodes if no argument provided
4
+
NUM_NODES="${1:-4}"
5
+
6
+
# Validate input
7
+
if ! echo "$NUM_NODES" | grep -qE '^[0-9]+$'; then
8
+
echo "Error: Number of nodes must be a positive integer"
9
+
echo "Usage: $0 [number_of_nodes]"
10
+
echo "Example: $0 7"
11
+
exit 1
12
+
fi
13
+
14
+
if [ "$NUM_NODES" -lt 1 ]; then
15
+
echo "Error: Number of nodes must be at least 1"
16
+
exit 1
17
+
fi
18
+
19
+
echo "Starting testnet with $NUM_NODES nodes (preserving existing data)..."
20
+
21
+
# Check if testnet directory exists and has the expected number of nodes
22
+
if [ ! -d "testnet/node0" ]; then
23
+
echo "Error: No existing testnet found. Run ./startfresh-testnet.sh first to create the testnet."
24
+
exit 1
25
+
fi
26
+
27
+
# Count existing nodes
28
+
existing_nodes=0
29
+
for i in $(seq 0 99); do
30
+
if [ -d "testnet/node$i" ]; then
31
+
existing_nodes=$((existing_nodes + 1))
32
+
else
33
+
break
34
+
fi
35
+
done
36
+
37
+
if [ "$NUM_NODES" -gt "$existing_nodes" ]; then
38
+
echo "Error: Requested $NUM_NODES nodes but only $existing_nodes nodes exist in testnet/"
39
+
echo "Run ./startfresh-testnet.sh $NUM_NODES to create additional nodes."
40
+
exit 1
41
+
fi
42
+
43
+
echo "Found $existing_nodes existing nodes, starting first $NUM_NODES nodes..."
44
+
45
+
# Check if binary exists, build if needed
46
+
if [ ! -f "./didplcbft" ]; then
47
+
echo "Binary not found, building didplcbft..."
48
+
go build -trimpath
49
+
fi
50
+
51
+
# Array to store background process IDs
52
+
pids=""
53
+
54
+
# Cleanup function to kill all background processes
55
+
cleanup() {
56
+
echo ""
57
+
echo "Shutting down all nodes..."
58
+
59
+
# Kill all background processes
60
+
for pid in $pids; do
61
+
if kill -0 "$pid" 2>/dev/null; then
62
+
echo " Stopping node process $pid..."
63
+
kill "$pid" 2>/dev/null
64
+
fi
65
+
done
66
+
67
+
# Clean up temporary fifo files
68
+
for i in $(seq 0 99); do
69
+
rm -f "/tmp/didplcbft-node$i-stdout" "/tmp/didplcbft-node$i-stderr" 2>/dev/null
70
+
done
71
+
72
+
# Wait for all processes to terminate
73
+
wait $pids 2>/dev/null
74
+
75
+
echo "All nodes stopped."
76
+
exit 0
77
+
}
78
+
79
+
# Set up signal traps
80
+
trap cleanup INT TERM EXIT
81
+
82
+
# Launch all nodes in parallel
83
+
echo "Launching $NUM_NODES nodes in parallel..."
84
+
85
+
for i in $(seq 0 $((NUM_NODES - 1))); do
86
+
if [ -d "testnet/node$i" ]; then
87
+
echo " Starting node$i..."
88
+
mkfifo "/tmp/didplcbft-node$i-stdout" 2>/dev/null || true
89
+
mkfifo "/tmp/didplcbft-node$i-stderr" 2>/dev/null || true
90
+
91
+
# Start sed processes to prefix output
92
+
sed "s/^/[node$i-stdout] /" < "/tmp/didplcbft-node$i-stdout" &
93
+
sed "s/^/[node$i-stderr] /" < "/tmp/didplcbft-node$i-stderr" &
94
+
95
+
# Start the didplcbft process with redirected output
96
+
./didplcbft --data-dir "testnet/node$i" > "/tmp/didplcbft-node$i-stdout" 2> "/tmp/didplcbft-node$i-stderr" &
97
+
pid=$!
98
+
pids="$pids $pid"
99
+
echo " PID: $pid"
100
+
else
101
+
echo " Warning: node$i directory not found, skipping..."
102
+
fi
103
+
done
104
+
105
+
echo ""
106
+
echo "All $NUM_NODES nodes are now running."
107
+
echo "Press Ctrl+C to stop all nodes."
108
+
echo ""
109
+
110
+
# Wait for all background processes
111
+
wait $pids
112
+
113
+
# If we reach here, all processes have terminated normally
114
+
echo "All nodes have terminated."
+135
startfresh-testnet.sh
+135
startfresh-testnet.sh
···
···
1
+
#!/bin/sh
2
+
3
+
# Default to 4 nodes if no argument provided
4
+
NUM_NODES="${1:-4}"
5
+
6
+
# Validate input
7
+
if ! echo "$NUM_NODES" | grep -qE '^[0-9]+$'; then
8
+
echo "Error: Number of nodes must be a positive integer"
9
+
echo "Usage: $0 [number_of_nodes]"
10
+
echo "Example: $0 7"
11
+
exit 1
12
+
fi
13
+
14
+
if [ "$NUM_NODES" -lt 1 ]; then
15
+
echo "Error: Number of nodes must be at least 1"
16
+
exit 1
17
+
fi
18
+
19
+
echo "Setting up testnet with $NUM_NODES nodes..."
20
+
21
+
# Build the binary
22
+
echo "Building didplcbft binary..."
23
+
go build -trimpath
24
+
25
+
# Clean up existing testnet data
26
+
echo "Cleaning up existing testnet data..."
27
+
rm -r testnet/node*
28
+
29
+
# Generate testnet with specified number of nodes
30
+
echo "Generating testnet configuration for $NUM_NODES nodes..."
31
+
go run github.com/cometbft/cometbft/cmd/cometbft@v0.38.19 testnet --v "$((NUM_NODES - 1))" --n 1 --starting-ip-address 127.67.67.1 --config ./testnet/baseconfig.toml --o ./testnet
32
+
33
+
# Adjust RPC and P2P listen addresses for each node
34
+
echo "Configuring RPC and P2P addresses for $NUM_NODES nodes..."
35
+
36
+
for i in $(seq 0 $((NUM_NODES - 1))); do
37
+
# Calculate RPC port (starting from 26100)
38
+
rpc_port=$((26100 + i))
39
+
40
+
# Calculate P2P IP address (127.67.67.1 + node_index)
41
+
p2p_ip="127.67.67.$((1 + i))"
42
+
43
+
echo " Configuring node$i (RPC: $rpc_port, P2P: $p2p_ip:26656)"
44
+
45
+
# Adjust RPC listen address
46
+
sed -i "s|^laddr = \"tcp://127.0.0.1:26657\"\$|laddr = \"tcp://127.0.0.1:$rpc_port\"|g" "testnet/node$i/config/config.toml"
47
+
48
+
# Adjust P2P listen address
49
+
sed -i "s|^laddr = \"tcp://0.0.0.0:26656\"\$|laddr = \"tcp://$p2p_ip:26656\"|g" "testnet/node$i/config/config.toml"
50
+
done
51
+
52
+
# Configure rpc_servers for the last node (the one that will be started manually)
53
+
last_node=$((NUM_NODES - 1))
54
+
echo "Configuring rpc_servers for node$last_node..."
55
+
56
+
# Build comma-separated list of RPC addresses for automatically started nodes
57
+
rpc_servers_list=""
58
+
for i in $(seq 0 $((NUM_NODES - 2))); do
59
+
if [ -n "$rpc_servers_list" ]; then
60
+
rpc_servers_list="$rpc_servers_list,"
61
+
fi
62
+
rpc_port=$((26100 + i))
63
+
rpc_servers_list="${rpc_servers_list}tcp://127.0.0.1:$rpc_port"
64
+
done
65
+
66
+
# Replace empty rpc_servers configuration in the last node's config
67
+
echo " Setting rpc_servers = \"$rpc_servers_list\" for node$last_node"
68
+
sed -i "s|^rpc_servers = \"\"\$|rpc_servers = \"$rpc_servers_list\"|g" "testnet/node$last_node/config/config.toml"
69
+
70
+
# Enable state sync for the last node (the one that will be started manually)
71
+
echo " Enabling state sync for node$last_node"
72
+
sed -i '/\[statesync\]/,/enable = false/s/enable = false/enable = true/' "testnet/node$last_node/config/config.toml"
73
+
74
+
# Array to store background process IDs
75
+
pids=""
76
+
77
+
# Cleanup function to kill all background processes
78
+
cleanup() {
79
+
echo ""
80
+
echo "Shutting down all nodes..."
81
+
82
+
# Kill all background processes
83
+
for pid in $pids; do
84
+
if kill -0 "$pid" 2>/dev/null; then
85
+
echo " Stopping node process $pid..."
86
+
kill "$pid" 2>/dev/null
87
+
fi
88
+
done
89
+
90
+
# Clean up temporary fifo files
91
+
for i in $(seq 0 99); do
92
+
rm -f "/tmp/didplcbft-node$i-stdout" "/tmp/didplcbft-node$i-stderr" 2>/dev/null
93
+
done
94
+
95
+
# Wait for all processes to terminate
96
+
wait $pids 2>/dev/null
97
+
98
+
echo "All nodes stopped."
99
+
exit 0
100
+
}
101
+
102
+
# Set up signal traps
103
+
trap cleanup INT TERM EXIT
104
+
105
+
# Launch all nodes except the last one (for testing later bringup)
106
+
nodes_to_start=$((NUM_NODES - 1))
107
+
echo "Launching $nodes_to_start nodes in parallel..."
108
+
109
+
for i in $(seq 0 $((nodes_to_start - 1))); do
110
+
echo " Starting node$i..."
111
+
mkfifo "/tmp/didplcbft-node$i-stdout" 2>/dev/null || true
112
+
mkfifo "/tmp/didplcbft-node$i-stderr" 2>/dev/null || true
113
+
114
+
# Start sed processes to prefix output
115
+
sed "s/^/[node$i-stdout] /" < "/tmp/didplcbft-node$i-stdout" &
116
+
sed "s/^/[node$i-stderr] /" < "/tmp/didplcbft-node$i-stderr" &
117
+
118
+
# Start the didplcbft process with redirected output
119
+
./didplcbft --data-dir "testnet/node$i" > "/tmp/didplcbft-node$i-stdout" 2> "/tmp/didplcbft-node$i-stderr" &
120
+
pid=$!
121
+
pids="$pids $pid"
122
+
echo " PID: $pid"
123
+
done
124
+
125
+
echo ""
126
+
echo "All $nodes_to_start nodes are now running."
127
+
echo "Note: Node $((NUM_NODES - 1)) is not started and can be launched later for testing."
128
+
echo "Press Ctrl+C to stop all running nodes."
129
+
echo ""
130
+
131
+
# Wait for all background processes
132
+
wait $pids
133
+
134
+
# If we reach here, all processes have terminated normally
135
+
echo "All nodes have terminated."
+6
startfresh.sh
+6
startfresh.sh
+508
store/tree.go
+508
store/tree.go
···
···
1
+
package store
2
+
3
+
import (
4
+
"encoding/base32"
5
+
"encoding/binary"
6
+
"iter"
7
+
"slices"
8
+
"strings"
9
+
"time"
10
+
11
+
"github.com/bluesky-social/indigo/atproto/syntax"
12
+
"github.com/cosmos/iavl"
13
+
ics23 "github.com/cosmos/ics23/go"
14
+
"github.com/did-method-plc/go-didplc"
15
+
cbornode "github.com/ipfs/go-ipld-cbor"
16
+
"github.com/palantir/stacktrace"
17
+
"github.com/polydawn/refmt/obj/atlas"
18
+
"github.com/samber/lo"
19
+
"github.com/samber/mo"
20
+
)
21
+
22
+
// Tree is the package-level PLCTreeStore implementation used by callers.
var Tree PLCTreeStore = &TreeStore{}

// PLCTreeStore describes read and write access to PLC operation data stored
// in an IAVL tree (audit logs per DID, chronological export, and persistence
// of new operations with optional nullification of earlier ones).
type PLCTreeStore interface {
	AuditLog(tree PossiblyMutableTree, did string, withProof bool) ([]didplc.LogEntry, *ics23.CommitmentProof, error)
	AuditLogReverseIterator(tree PossiblyMutableTree, did string, err *error) iter.Seq2[int, didplc.LogEntry]
	ExportOperations(tree PossiblyMutableTree, after time.Time, count int) ([]didplc.LogEntry, error) // passing a count of zero means unlimited
	StoreOperation(tree *iavl.MutableTree, entry didplc.LogEntry, newIndex int, nullifyWithIndexEqualOrGreaterThan mo.Option[int]) error
}
30
+
31
+
var _ PLCTreeStore = (*TreeStore)(nil)

// TreeStore exists just to group methods nicely; it carries no state.
type TreeStore struct{}
35
+
36
+
// PossiblyMutableTree is the common read/write surface over iavl mutable and
// immutable trees. Adapters report their nature via IsMutable; GetProof only
// works on immutable trees and Set only on mutable ones (the other side
// returns an error).
type PossiblyMutableTree interface {
	IsMutable() bool
	Has(key []byte) (bool, error)
	Get(key []byte) ([]byte, error)
	GetProof(key []byte) (*ics23.CommitmentProof, error) // won't actually work on mutable trees, but we don't need it to
	IterateRange(start, end []byte, ascending bool, fn func(key []byte, value []byte) bool) (stopped bool)
	Set(key []byte, value []byte) (bool, error)
}
44
+
45
+
// mutableToUnifiedTree adapts *iavl.MutableTree to PossiblyMutableTree.
type mutableToUnifiedTree struct {
	tree *iavl.MutableTree
}

var _ PossiblyMutableTree = (*mutableToUnifiedTree)(nil)

// AdaptMutableTree wraps a mutable IAVL tree in the unified interface.
func AdaptMutableTree(tree *iavl.MutableTree) PossiblyMutableTree {
	return &mutableToUnifiedTree{
		tree: tree,
	}
}
56
+
57
+
// IsMutable implements [PossiblyMutableTree].
func (m *mutableToUnifiedTree) IsMutable() bool {
	return true
}

// Has implements [PossiblyMutableTree].
func (m *mutableToUnifiedTree) Has(key []byte) (bool, error) {
	return m.tree.Has(key)
}

// Get implements [PossiblyMutableTree].
func (m *mutableToUnifiedTree) Get(key []byte) ([]byte, error) {
	return m.tree.Get(key)
}

// GetProof implements [PossiblyMutableTree]. Proofs can only be produced for
// committed (immutable) versions, so this always fails on the mutable adapter.
func (m *mutableToUnifiedTree) GetProof(key []byte) (*ics23.CommitmentProof, error) {
	return nil, stacktrace.NewError("proof calculation not possible over mutable tree")
}

// Set implements [PossiblyMutableTree].
func (m *mutableToUnifiedTree) Set(key []byte, value []byte) (bool, error) {
	return m.tree.Set(key, value)
}
81
+
82
+
// IterateRange implements [PossiblyMutableTree].
func (m *mutableToUnifiedTree) IterateRange(start []byte, end []byte, ascending bool, fn func(key []byte, value []byte) bool) (stopped bool) {
	// it might look like MutableTree implements IterateRange but it doesn't,
	// most iteration methods actually come from the embedded ImmutableTree we're not meant to use
	// (terrible API)
	itr, err := m.tree.Iterator(start, end, ascending)
	if err != nil {
		// NOTE(review): the interface gives us no error channel, so an
		// iterator-construction failure is indistinguishable from an empty
		// range here (reported as "not stopped").
		return false
	}

	defer itr.Close()

	for ; itr.Valid(); itr.Next() {
		if fn(itr.Key(), itr.Value()) {
			return true
		}
	}
	return false
}
101
+
102
+
// immutableToUnifiedTree adapts *iavl.ImmutableTree to PossiblyMutableTree.
type immutableToUnifiedTree struct {
	tree *iavl.ImmutableTree
}

var _ PossiblyMutableTree = (*immutableToUnifiedTree)(nil)

// AdaptImmutableTree wraps an immutable IAVL tree in the unified interface.
func AdaptImmutableTree(tree *iavl.ImmutableTree) PossiblyMutableTree {
	return &immutableToUnifiedTree{
		tree: tree,
	}
}
113
+
114
+
// IsMutable implements [PossiblyMutableTree].
115
+
func (m *immutableToUnifiedTree) IsMutable() bool {
116
+
return false
117
+
}
118
+
119
+
// Has implements [PossiblyMutableTree].
120
+
func (i *immutableToUnifiedTree) Has(key []byte) (bool, error) {
121
+
return i.tree.Has(key)
122
+
}
123
+
124
+
// Get implements [PossiblyMutableTree].
125
+
func (i *immutableToUnifiedTree) Get(key []byte) ([]byte, error) {
126
+
return i.tree.Get(key)
127
+
}
128
+
129
+
// GetProof implements [PossiblyMutableTree].
130
+
func (i *immutableToUnifiedTree) GetProof(key []byte) (*ics23.CommitmentProof, error) {
131
+
return i.tree.GetProof(key)
132
+
}
133
+
134
+
// IterateRange implements [PossiblyMutableTree].
135
+
func (i *immutableToUnifiedTree) IterateRange(start []byte, end []byte, ascending bool, fn func(key []byte, value []byte) bool) (stopped bool) {
136
+
return i.tree.IterateRange(start, end, ascending, fn)
137
+
}
138
+
139
+
// Set implements [PossiblyMutableTree].
140
+
func (i *immutableToUnifiedTree) Set(key []byte, value []byte) (bool, error) {
141
+
return false, stacktrace.NewError("set not possible over immutable tree")
142
+
}
143
+
144
+
// AuditLog reconstructs the full operation log for a DID from the tree.
// The per-DID log key stores a concatenation of 8-byte timestamp fragments
// (one per operation, in order); each fragment is expanded into the full
// operation key and fetched individually. When withProof is set, a
// commitment proof is collected for the log key and every operation key and
// the proofs are combined into one (this only works on immutable trees —
// GetProof fails on the mutable adapter).
func (t *TreeStore) AuditLog(tree PossiblyMutableTree, did string, withProof bool) ([]didplc.LogEntry, *ics23.CommitmentProof, error) {
	proofs := []*ics23.CommitmentProof{}

	didBytes, err := didToBytes(did)
	if err != nil {
		return nil, nil, stacktrace.Propagate(err, "")
	}

	logKey := marshalDIDLogKey(didBytes)

	has, err := tree.Has(logKey)
	if err != nil {
		return nil, nil, stacktrace.Propagate(err, "")
	}

	var operationKeys [][]byte
	if has {
		logOperations, err := tree.Get(logKey)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "")
		}
		// the log value is a sequence of 8-byte timestamps, one per operation
		operationKeys = make([][]byte, 0, len(logOperations)/8)
		for ts := range slices.Chunk(logOperations, 8) {
			operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
		}
	}

	if withProof {
		// proof over the log key also covers non-existence for unknown DIDs
		proof, err := tree.GetProof(logKey)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "")
		}
		proofs = append(proofs, proof)
	}

	logEntries := make([]didplc.LogEntry, 0, len(operationKeys))
	for _, opKey := range operationKeys {
		operationValue, err := tree.Get(opKey)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "")
		}

		if withProof {
			proof, err := tree.GetProof(opKey)
			if err != nil {
				return nil, nil, stacktrace.Propagate(err, "")
			}
			proofs = append(proofs, proof)
		}

		nullified, operation, err := unmarshalOperationValue(operationValue)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "")
		}

		// the timestamp and DID are recovered from the key itself
		timestamp, actualDID, err := unmarshalOperationKey(opKey)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "")
		}

		logEntries = append(logEntries, didplc.LogEntry{
			DID:       actualDID,
			Operation: operation,
			CID:       operation.AsOperation().CID().String(),
			Nullified: nullified,
			CreatedAt: timestamp.Format(syntax.AtprotoDatetimeLayout),
		})
	}

	var combinedProof *ics23.CommitmentProof
	if withProof {
		combinedProof, err = ics23.CombineProofs(proofs)
		if err != nil {
			return nil, nil, stacktrace.Propagate(err, "failed to combine proofs")
		}
	}
	return logEntries, combinedProof, nil
}
222
+
223
+
// AuditLogReverseIterator yields a DID's log entries newest-first, paired
// with their index within the (chronological) log. Because iter.Seq2 has no
// error channel, failures are reported through retErr: the caller must check
// *retErr after the loop finishes, and a set error means iteration stopped
// early. No proofs are collected on this path.
func (t *TreeStore) AuditLogReverseIterator(tree PossiblyMutableTree, did string, retErr *error) iter.Seq2[int, didplc.LogEntry] {
	return func(yield func(int, didplc.LogEntry) bool) {
		didBytes, err := didToBytes(did)
		if err != nil {
			*retErr = stacktrace.Propagate(err, "")
			return
		}

		logKey := marshalDIDLogKey(didBytes)

		has, err := tree.Has(logKey)
		if err != nil {
			*retErr = stacktrace.Propagate(err, "")
			return
		}

		var operationKeys [][]byte
		if has {
			logOperations, err := tree.Get(logKey)
			if err != nil {
				*retErr = stacktrace.Propagate(err, "")
				return
			}
			// the log value is a sequence of 8-byte timestamps, one per operation
			operationKeys = make([][]byte, 0, len(logOperations)/8)
			for ts := range slices.Chunk(logOperations, 8) {
				operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
			}
		}

		// walk the log from the most recent operation backwards
		for i := len(operationKeys) - 1; i >= 0; i-- {
			opKey := operationKeys[i]
			operationValue, err := tree.Get(opKey)
			if err != nil {
				*retErr = stacktrace.Propagate(err, "")
				return
			}

			nullified, operation, err := unmarshalOperationValue(operationValue)
			if err != nil {
				*retErr = stacktrace.Propagate(err, "")
				return
			}

			timestamp, actualDID, err := unmarshalOperationKey(opKey)
			if err != nil {
				*retErr = stacktrace.Propagate(err, "")
				return
			}

			if !yield(i, didplc.LogEntry{
				DID:       actualDID,
				Operation: operation,
				CID:       operation.AsOperation().CID().String(),
				Nullified: nullified,
				CreatedAt: timestamp.Format(syntax.AtprotoDatetimeLayout),
			}) {
				return
			}
		}
	}
}
284
+
285
+
// ExportOperations returns up to count operations with a creation time
// strictly after `after`, in key (i.e. chronological) order across all DIDs.
// A count of zero means unlimited.
func (t *TreeStore) ExportOperations(tree PossiblyMutableTree, after time.Time, count int) ([]didplc.LogEntry, error) {
	// as the name suggests, after is an exclusive lower bound, but our iterators use inclusive lower bounds
	start := after.Add(1 * time.Nanosecond)
	// all-zero DID bytes give the smallest possible key for that timestamp
	startKey := marshalOperationKey(start, make([]byte, 15))
	if after.UnixNano() < 0 {
		// our storage format doesn't deal well with negative unix timestamps,
		// but that's fine because we don't have operations created that far back. assume we just want to iterate from the start
		copy(startKey[1:8], make([]byte, 8))
	}

	entries := make([]didplc.LogEntry, 0, count)
	var iterErr error
	tree.IterateRange(startKey, nil, true, func(operationKey, operationValue []byte) bool {
		nullified, operation, err := unmarshalOperationValue(operationValue)
		if err != nil {
			iterErr = stacktrace.Propagate(err, "")
			return true
		}

		timestamp, actualDID, err := unmarshalOperationKey(operationKey)
		if err != nil {
			iterErr = stacktrace.Propagate(err, "")
			return true
		}

		entries = append(entries, didplc.LogEntry{
			DID:       actualDID,
			Operation: operation,
			CID:       operation.AsOperation().CID().String(),
			Nullified: nullified,
			CreatedAt: timestamp.Format(syntax.AtprotoDatetimeLayout),
		})
		return len(entries) == count // this condition being checked here also makes it so that a count of zero means unlimited
	})
	if iterErr != nil {
		return nil, stacktrace.Propagate(iterErr, "ran into an error while iterating")
	}
	return entries, nil
}
324
+
325
+
func (t *TreeStore) StoreOperation(tree *iavl.MutableTree, entry didplc.LogEntry, newIndex int, nullifyWithIndexEqualOrGreaterThan mo.Option[int]) error {
326
+
didBytes, err := didToBytes(entry.DID)
327
+
if err != nil {
328
+
return stacktrace.Propagate(err, "")
329
+
}
330
+
331
+
logKey := marshalDIDLogKey(didBytes)
332
+
333
+
var operationKeys [][]byte
334
+
logOperations, err := tree.Get(logKey)
335
+
logOperations = slices.Clone(logOperations)
336
+
if err != nil {
337
+
operationKeys = [][]byte{}
338
+
} else {
339
+
operationKeys = make([][]byte, 0, len(logOperations)/8)
340
+
for ts := range slices.Chunk(logOperations, 8) {
341
+
operationKeys = append(operationKeys, timestampBytesToDIDOperationKey(ts, didBytes))
342
+
}
343
+
}
344
+
345
+
if nullifyEGt, ok := nullifyWithIndexEqualOrGreaterThan.Get(); ok {
346
+
for _, opKey := range operationKeys[nullifyEGt:] {
347
+
operationValue, err := tree.Get(opKey)
348
+
if err != nil {
349
+
return stacktrace.Propagate(err, "")
350
+
}
351
+
operationValue = slices.Clone(operationValue)
352
+
operationValue[0] = 1
353
+
354
+
_, err = tree.Set(opKey, operationValue)
355
+
if err != nil {
356
+
return stacktrace.Propagate(err, "")
357
+
}
358
+
}
359
+
}
360
+
361
+
opDatetime, err := syntax.ParseDatetime(entry.CreatedAt)
362
+
if err != nil {
363
+
return stacktrace.Propagate(err, "invalid CreatedAt")
364
+
}
365
+
366
+
opKey := marshalOperationKey(opDatetime.Time().Truncate(1*time.Millisecond), didBytes)
367
+
opValue := marshalOperationValue(entry.Nullified, entry.Operation)
368
+
369
+
_, err = tree.Set(opKey, opValue)
370
+
if err != nil {
371
+
return stacktrace.Propagate(err, "")
372
+
}
373
+
374
+
logOperations = append(logOperations, opKey[1:9]...)
375
+
_, err = tree.Set(logKey, logOperations)
376
+
if err != nil {
377
+
return stacktrace.Propagate(err, "")
378
+
}
379
+
380
+
return nil
381
+
}
382
+
383
+
func didToBytes(did string) ([]byte, error) {
384
+
if !strings.HasPrefix(did, "did:plc:") {
385
+
return nil, stacktrace.NewError("invalid did:plc")
386
+
}
387
+
388
+
didBytes := make([]byte, 15)
389
+
390
+
did = strings.ToUpper(did)
391
+
392
+
numWritten, err := base32.StdEncoding.Decode(didBytes, []byte(did[8:]))
393
+
if err != nil {
394
+
return nil, stacktrace.Propagate(err, "invalid did:plc")
395
+
}
396
+
if numWritten != 15 {
397
+
return nil, stacktrace.NewError("invalid did:plc")
398
+
}
399
+
400
+
return didBytes, nil
401
+
}
402
+
403
+
func bytesToDID(didBytes []byte) (string, error) {
404
+
did := "did:plc:" + strings.ToLower(base32.StdEncoding.EncodeToString(didBytes))
405
+
if len(did) != 8+24 {
406
+
return "", stacktrace.NewError("invalid did:plc")
407
+
}
408
+
return did, nil
409
+
}
410
+
411
+
// marshalDIDLogKey builds the tree key for a DID's operation log:
// the byte 'l' followed by the 15 DID bytes.
func marshalDIDLogKey(didBytes []byte) []byte {
	out := append(make([]byte, 0, 1+15), 'l')
	return append(out, didBytes...)
}
417
+
418
+
// timestampBytesToDIDOperationKey builds an operation key from already-encoded
// timestamp bytes: 'o' + 8 timestamp bytes + 15 DID bytes.
func timestampBytesToDIDOperationKey(timestamp []byte, didBytes []byte) []byte {
	var key [1 + 8 + 15]byte
	key[0] = 'o'
	copy(key[1:9], timestamp)
	copy(key[9:], didBytes)
	return key[:]
}
425
+
426
+
func marshalOperationKey(createdAt time.Time, didBytes []byte) []byte {
427
+
key := make([]byte, 1+8+15)
428
+
key[0] = 'o'
429
+
binary.BigEndian.PutUint64(key[1:], uint64(createdAt.UTC().UnixNano()))
430
+
copy(key[9:], didBytes)
431
+
return key
432
+
}
433
+
434
+
func unmarshalOperationKey(key []byte) (time.Time, string, error) {
435
+
createdAtUnixNano := binary.BigEndian.Uint64(key[1:9])
436
+
createdAt := time.Unix(0, int64(createdAtUnixNano)).UTC()
437
+
did, err := bytesToDID(key[9:])
438
+
return createdAt, did, stacktrace.Propagate(err, "")
439
+
}
440
+
441
+
func marshalOperationValue(nullified bool, operation didplc.OpEnum) []byte {
442
+
o := []byte{lo.Ternary[byte](nullified, 1, 0)}
443
+
o = append(o, operation.AsOperation().SignedCBORBytes()...)
444
+
return o
445
+
}
446
+
447
+
func unmarshalOperationValue(value []byte) (bool, didplc.OpEnum, error) {
448
+
nullified := value[0] != 0
449
+
var opEnum didplc.OpEnum
450
+
err := cbornode.DecodeInto(value[1:], &opEnum)
451
+
if err != nil {
452
+
return false, didplc.OpEnum{}, stacktrace.Propagate(err, "")
453
+
}
454
+
return nullified, opEnum, nil
455
+
}
456
+
457
+
// init registers a custom cbor atlas entry for didplc.OpEnum so cbornode can
// round-trip the union type used for operation values in this store.
func init() {
	cbornode.RegisterCborType(atlas.BuildEntry(didplc.OpEnum{}).
		Transform().
		// marshal: collapse the union to whichever variant pointer is set;
		// a fully-unset OpEnum is a programming error
		TransformMarshal(atlas.MakeMarshalTransformFunc(
			func(o didplc.OpEnum) (interface{}, error) {
				if o.Regular != nil {
					return o.Regular, nil
				} else if o.Legacy != nil {
					return o.Legacy, nil
				} else if o.Tombstone != nil {
					return o.Tombstone, nil
				}
				return nil, stacktrace.NewError("invalid OpEnum")
			})).
		// unmarshal: dispatch on the operation's "type" field to pick the
		// concrete variant, then decode the map into it
		TransformUnmarshal(atlas.MakeUnmarshalTransformFunc(
			func(x map[string]any) (didplc.OpEnum, error) {
				typ, ok := x["type"]
				if !ok {
					return didplc.OpEnum{}, stacktrace.NewError("did not find expected operation 'type' field")
				}

				// this is so stupid but oh well - maybe one day the entire thing will be upgraded to a less stupid cbor encoder
				// (re-encode the generic map so it can be decoded into the concrete type)
				b, err := cbornode.DumpObject(x)
				if err != nil {
					return didplc.OpEnum{}, stacktrace.Propagate(err, "")
				}

				switch typ {
				case "plc_operation":
					o := &didplc.RegularOp{}
					err = cbornode.DecodeInto(b, &o)
					return didplc.OpEnum{
						Regular: o,
					}, stacktrace.Propagate(err, "")
				case "create":
					// legacy did:plc creation operation
					o := &didplc.LegacyOp{}
					err = cbornode.DecodeInto(b, &o)
					return didplc.OpEnum{
						Legacy: o,
					}, stacktrace.Propagate(err, "")
				case "plc_tombstone":
					o := &didplc.TombstoneOp{}
					err = cbornode.DecodeInto(b, &o)
					return didplc.OpEnum{
						Tombstone: o,
					}, stacktrace.Propagate(err, "")
				default:
					return didplc.OpEnum{}, stacktrace.NewError("unexpected operation type: %s", typ)
				}
			})).
		Complete())
}