+7
-3
bundle/metadata.go
+7
-3
bundle/metadata.go
···
7
7
"os"
8
8
"time"
9
9
10
-
gozstd "github.com/DataDog/zstd"
11
10
"tangled.org/atscan.net/plcbundle/internal/bundleindex"
12
11
"tangled.org/atscan.net/plcbundle/internal/plcclient"
12
+
"tangled.org/atscan.net/plcbundle/internal/storage"
13
13
)
14
14
15
15
// CalculateBundleMetadata calculates complete metadata for a bundle
···
131
131
}
132
132
defer file.Close()
133
133
134
-
reader := gozstd.NewReader(file)
135
-
defer reader.Close()
134
+
// ✅ Use abstracted reader from storage package
135
+
reader, err := storage.NewStreamingReader(file)
136
+
if err != nil {
137
+
return 0, 0, time.Time{}, time.Time{}, fmt.Errorf("failed to create reader: %w", err)
138
+
}
139
+
defer reader.Release()
136
140
137
141
scanner := bufio.NewScanner(reader)
138
142
buf := make([]byte, 64*1024)
+1
go.mod
+1
go.mod
+2
go.sum
+2
go.sum
···
12
12
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
13
13
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
14
14
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
15
+
github.com/valyala/gozstd v1.23.2 h1:S3rRsskaDvBCM2XJzQFYIDAO6txxmvTc1arA/9Wgi9o=
16
+
github.com/valyala/gozstd v1.23.2/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
15
17
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
16
18
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
17
19
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+203
-184
internal/storage/storage.go
+203
-184
internal/storage/storage.go
···
12
12
"sync"
13
13
"time"
14
14
15
-
gozstd "github.com/DataDog/zstd"
16
15
"github.com/goccy/go-json"
17
-
"tangled.org/atscan.net/plcbundle/internal/plcclient" // ONLY import plcclient, NOT bundle
16
+
"tangled.org/atscan.net/plcbundle/internal/plcclient"
18
17
)
19
18
20
19
// Operations handles low-level bundle file operations
···
84
83
}
85
84
86
85
// ========================================
87
-
// FILE OPERATIONS
86
+
// FILE OPERATIONS (using zstd abstraction)
88
87
// ========================================
89
88
90
-
// LoadBundle loads a compressed bundle
91
-
func (op *Operations) LoadBundle(path string) ([]plcclient.PLCOperation, error) {
92
-
// ✅ FIX: Use streaming reader instead of one-shot Decompress
93
-
file, err := os.Open(path)
94
-
if err != nil {
95
-
return nil, fmt.Errorf("failed to open file: %w", err)
96
-
}
97
-
defer file.Close()
98
-
99
-
// NewReader properly handles multi-frame concatenated zstd
100
-
reader := gozstd.NewReader(file)
101
-
defer reader.Close()
102
-
103
-
// Read ALL decompressed data
104
-
decompressed, err := io.ReadAll(reader)
105
-
if err != nil {
106
-
return nil, fmt.Errorf("failed to decompress: %w", err)
107
-
}
108
-
109
-
// Parse JSONL
110
-
return op.ParseJSONL(decompressed)
111
-
}
112
-
113
-
// SaveBundle saves operations to disk (compressed)
89
+
// SaveBundle saves operations to disk (compressed with multi-frame support)
114
90
func (op *Operations) SaveBundle(path string, operations []plcclient.PLCOperation) (string, string, int64, int64, error) {
115
-
// 1. Serialize all operations once to get a single, consistent content hash.
116
-
// This is critical for preserving chain hash integrity.
91
+
// 1. Serialize all operations once
117
92
jsonlData := op.SerializeJSONL(operations)
118
93
contentSize := int64(len(jsonlData))
119
94
contentHash := op.Hash(jsonlData)
120
95
121
-
// --- Correct Multi-Frame Streaming Logic ---
122
-
123
-
// 2. Create the destination file.
96
+
// 2. Create the destination file
124
97
bundleFile, err := os.Create(path)
125
98
if err != nil {
126
99
return "", "", 0, 0, fmt.Errorf("could not create bundle file: %w", err)
127
100
}
128
-
defer bundleFile.Close() // Ensure the file is closed on exit.
101
+
defer bundleFile.Close()
129
102
130
-
frameSize := 100 // Each frame will contain 100 operations.
131
-
frameOffsets := []int64{0} // The first frame always starts at offset 0.
103
+
frameOffsets := []int64{0}
132
104
133
-
// 3. Loop through operations in chunks.
134
-
for i := 0; i < len(operations); i += frameSize {
135
-
end := i + frameSize
105
+
// 3. Loop through operations in chunks
106
+
for i := 0; i < len(operations); i += FrameSize {
107
+
end := i + FrameSize
136
108
if end > len(operations) {
137
109
end = len(operations)
138
110
}
139
111
opChunk := operations[i:end]
140
112
chunkJsonlData := op.SerializeJSONL(opChunk)
141
113
142
-
// a. Create a NEW zstd writer FOR EACH CHUNK. This is the key.
143
-
zstdWriter := gozstd.NewWriter(bundleFile)
144
-
145
-
// b. Write the uncompressed chunk to the zstd writer.
146
-
_, err := zstdWriter.Write(chunkJsonlData)
114
+
// ✅ Use abstracted compression
115
+
compressedChunk, err := CompressFrame(chunkJsonlData)
147
116
if err != nil {
148
-
zstdWriter.Close() // Attempt to clean up
149
-
return "", "", 0, 0, fmt.Errorf("failed to write frame data: %w", err)
117
+
return "", "", 0, 0, fmt.Errorf("failed to compress frame: %w", err)
150
118
}
151
119
152
-
// c. Close the zstd writer. This finalizes the frame and flushes it
153
-
// to the underlying file. It does NOT close the bundleFile itself.
154
-
if err := zstdWriter.Close(); err != nil {
155
-
return "", "", 0, 0, fmt.Errorf("failed to close/finalize frame: %w", err)
120
+
// Write frame to file
121
+
_, err = bundleFile.Write(compressedChunk)
122
+
if err != nil {
123
+
return "", "", 0, 0, fmt.Errorf("failed to write frame: %w", err)
156
124
}
157
125
158
-
// d. After closing the frame, get the file's new total size.
126
+
// Get current offset for next frame
159
127
currentOffset, err := bundleFile.Seek(0, io.SeekCurrent)
160
128
if err != nil {
161
129
return "", "", 0, 0, fmt.Errorf("failed to get file offset: %w", err)
162
130
}
163
131
164
-
// e. Record this offset as the start of the next frame.
165
132
if end < len(operations) {
166
133
frameOffsets = append(frameOffsets, currentOffset)
167
134
}
168
135
}
169
136
170
-
// 4. Get the final total file size. This is the end of the last frame.
137
+
// 4. Get final file size
171
138
finalSize, _ := bundleFile.Seek(0, io.SeekCurrent)
172
139
frameOffsets = append(frameOffsets, finalSize)
173
140
174
-
// 5. Save the companion frame-offset index file.
141
+
// 5. Sync to disk
142
+
if err := bundleFile.Sync(); err != nil {
143
+
return "", "", 0, 0, fmt.Errorf("failed to sync file: %w", err)
144
+
}
145
+
146
+
// 6. Save frame index
175
147
indexPath := path + ".idx"
176
148
indexData, _ := json.Marshal(frameOffsets)
177
149
if err := os.WriteFile(indexPath, indexData, 0644); err != nil {
178
-
os.Remove(path) // Clean up to avoid inconsistent state.
150
+
os.Remove(path)
179
151
return "", "", 0, 0, fmt.Errorf("failed to write frame index: %w", err)
180
152
}
181
153
182
-
// 6. Re-read the full compressed file to get its final hash for the main index.
154
+
// 7. Calculate compressed hash
183
155
compressedData, err := os.ReadFile(path)
184
156
if err != nil {
185
157
return "", "", 0, 0, fmt.Errorf("failed to re-read bundle for hashing: %w", err)
···
189
161
return contentHash, compressedHash, contentSize, finalSize, nil
190
162
}
191
163
192
-
// Pool for scanner buffers
193
-
var scannerBufPool = sync.Pool{
194
-
New: func() interface{} {
195
-
buf := make([]byte, 64*1024)
196
-
return &buf
197
-
},
198
-
}
199
-
200
-
// LoadOperationAtPosition loads a single operation from a bundle
201
-
func (op *Operations) LoadOperationAtPosition(path string, position int) (*plcclient.PLCOperation, error) {
202
-
if position < 0 {
203
-
return nil, fmt.Errorf("invalid position: %d", position)
204
-
}
205
-
206
-
frameSize := 100 // Must match the frame size used in SaveBundle
207
-
indexPath := path + ".idx"
208
-
209
-
// 1. Load the frame offset index.
210
-
indexData, err := os.ReadFile(indexPath)
211
-
if err != nil {
212
-
// If the frame index doesn't exist, fall back to the legacy full-scan method.
213
-
// This ensures backward compatibility with your old bundle files during migration.
214
-
if os.IsNotExist(err) {
215
-
op.logger.Printf("DEBUG: Frame index not found for %s, falling back to legacy full scan.", filepath.Base(path))
216
-
return op.loadOperationAtPositionLegacy(path, position)
217
-
}
218
-
return nil, fmt.Errorf("could not read frame index %s: %w", indexPath, err)
219
-
}
220
-
221
-
var frameOffsets []int64
222
-
if err := json.Unmarshal(indexData, &frameOffsets); err != nil {
223
-
return nil, fmt.Errorf("could not parse frame index %s: %w", indexPath, err)
224
-
}
225
-
226
-
// 2. Calculate target frame and the line number within that frame.
227
-
frameIndex := position / frameSize
228
-
lineInFrame := position % frameSize
229
-
230
-
if frameIndex >= len(frameOffsets)-1 {
231
-
return nil, fmt.Errorf("position %d is out of bounds for bundle with %d frames", position, len(frameOffsets)-1)
232
-
}
233
-
234
-
// 3. Get frame boundaries from the index.
235
-
startOffset := frameOffsets[frameIndex]
236
-
endOffset := frameOffsets[frameIndex+1]
237
-
frameLength := endOffset - startOffset
238
-
239
-
if frameLength <= 0 {
240
-
return nil, fmt.Errorf("invalid frame length calculated for position %d", position)
241
-
}
242
-
243
-
// 4. Open the bundle file.
244
-
bundleFile, err := os.Open(path)
245
-
if err != nil {
246
-
return nil, err
247
-
}
248
-
defer bundleFile.Close()
249
-
250
-
// 5. Read ONLY the bytes for that single frame from the correct offset.
251
-
compressedFrame := make([]byte, frameLength)
252
-
_, err = bundleFile.ReadAt(compressedFrame, startOffset)
253
-
if err != nil {
254
-
return nil, fmt.Errorf("failed to read frame %d from bundle: %w", frameIndex, err)
255
-
}
256
-
257
-
// 6. Decompress just that small frame.
258
-
decompressed, err := gozstd.Decompress(nil, compressedFrame)
259
-
if err != nil {
260
-
return nil, fmt.Errorf("failed to decompress frame %d: %w", frameIndex, err)
261
-
}
262
-
263
-
// 7. Scan the ~100 lines to get the target operation.
264
-
scanner := bufio.NewScanner(bytes.NewReader(decompressed))
265
-
lineNum := 0
266
-
for scanner.Scan() {
267
-
if lineNum == lineInFrame {
268
-
line := scanner.Bytes()
269
-
var operation plcclient.PLCOperation
270
-
if err := json.UnmarshalNoEscape(line, &operation); err != nil {
271
-
return nil, fmt.Errorf("failed to parse operation at position %d: %w", position, err)
272
-
}
273
-
operation.RawJSON = make([]byte, len(line))
274
-
copy(operation.RawJSON, line)
275
-
return &operation, nil
276
-
}
277
-
lineNum++
278
-
}
279
-
280
-
if err := scanner.Err(); err != nil {
281
-
return nil, fmt.Errorf("scanner error on frame %d: %w", frameIndex, err)
282
-
}
283
-
284
-
return nil, fmt.Errorf("operation at position %d not found", position)
285
-
}
286
-
287
-
func (op *Operations) loadOperationAtPositionLegacy(path string, position int) (*plcclient.PLCOperation, error) {
164
+
// LoadBundle loads a compressed bundle
165
+
func (op *Operations) LoadBundle(path string) ([]plcclient.PLCOperation, error) {
288
166
file, err := os.Open(path)
289
167
if err != nil {
290
168
return nil, fmt.Errorf("failed to open file: %w", err)
291
169
}
292
170
defer file.Close()
293
171
294
-
reader := gozstd.NewReader(file)
295
-
defer reader.Close()
296
-
297
-
scanner := bufio.NewScanner(reader)
298
-
// Use a larger buffer for potentially large lines
299
-
buf := make([]byte, 512*1024)
300
-
scanner.Buffer(buf, 1024*1024)
301
-
302
-
lineNum := 0
303
-
for scanner.Scan() {
304
-
if lineNum == position {
305
-
line := scanner.Bytes()
306
-
var operation plcclient.PLCOperation
307
-
if err := json.UnmarshalNoEscape(line, &operation); err != nil {
308
-
return nil, fmt.Errorf("failed to parse legacy operation at position %d: %w", position, err)
309
-
}
310
-
operation.RawJSON = make([]byte, len(line))
311
-
copy(operation.RawJSON, line)
312
-
return &operation, nil
313
-
}
314
-
lineNum++
172
+
// ✅ Use abstracted streaming reader
173
+
reader, err := NewStreamingReader(file)
174
+
if err != nil {
175
+
return nil, fmt.Errorf("failed to create reader: %w", err)
315
176
}
177
+
defer reader.Release()
316
178
317
-
if err := scanner.Err(); err != nil {
318
-
return nil, fmt.Errorf("legacy scanner error: %w", err)
179
+
// Read all decompressed data from all frames
180
+
decompressed, err := io.ReadAll(reader)
181
+
if err != nil {
182
+
return nil, fmt.Errorf("failed to decompress: %w", err)
319
183
}
320
184
321
-
return nil, fmt.Errorf("position %d not found in legacy bundle", position)
185
+
// Parse JSONL
186
+
return op.ParseJSONL(decompressed)
322
187
}
323
188
324
189
// ========================================
···
341
206
return nil, fmt.Errorf("failed to open bundle: %w", err)
342
207
}
343
208
344
-
reader := gozstd.NewReader(file)
209
+
// ✅ Use abstracted reader
210
+
reader, err := NewStreamingReader(file)
211
+
if err != nil {
212
+
file.Close()
213
+
return nil, fmt.Errorf("failed to create reader: %w", err)
214
+
}
345
215
346
216
return &decompressedReader{
347
217
reader: reader,
···
351
221
352
222
// decompressedReader wraps a zstd decoder and underlying file
353
223
type decompressedReader struct {
354
-
reader io.ReadCloser
224
+
reader StreamReader
355
225
file *os.File
356
226
}
357
227
···
360
230
}
361
231
362
232
// Close releases the zstd decoder first, then closes the underlying file;
// only the file-close error is propagated (Release has no error result).
func (dr *decompressedReader) Close() error {
	dr.reader.Release()
	return dr.file.Close()
}
366
236
···
396
266
compressedHash = op.Hash(compressedData)
397
267
compressedSize = int64(len(compressedData))
398
268
399
-
decompressed, err := gozstd.Decompress(nil, compressedData)
269
+
// ✅ Use abstracted decompression
270
+
decompressed, err := DecompressAll(compressedData)
400
271
if err != nil {
401
272
return "", 0, "", 0, fmt.Errorf("failed to decompress: %w", err)
402
273
}
···
505
376
return operations[startIdx:]
506
377
}
507
378
379
+
// scannerBufPool recycles 64 KiB scratch buffers handed to bufio.Scanner,
// avoiding one allocation per scan on hot read paths.
var scannerBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 64*1024)
		return &b
	},
}
386
+
387
+
// ========================================
388
+
// POSITION-BASED LOADING (with frame index)
389
+
// ========================================
390
+
391
+
// LoadOperationAtPosition loads a single operation from a bundle
392
+
func (op *Operations) LoadOperationAtPosition(path string, position int) (*plcclient.PLCOperation, error) {
393
+
if position < 0 {
394
+
return nil, fmt.Errorf("invalid position: %d", position)
395
+
}
396
+
397
+
indexPath := path + ".idx"
398
+
399
+
// 1. Try to load frame index
400
+
indexData, err := os.ReadFile(indexPath)
401
+
if err != nil {
402
+
if os.IsNotExist(err) {
403
+
// Fallback to legacy full scan
404
+
if op.logger != nil {
405
+
op.logger.Printf("Frame index not found for %s, using legacy scan", filepath.Base(path))
406
+
}
407
+
return op.loadOperationAtPositionLegacy(path, position)
408
+
}
409
+
return nil, fmt.Errorf("could not read frame index: %w", err)
410
+
}
411
+
412
+
var frameOffsets []int64
413
+
if err := json.Unmarshal(indexData, &frameOffsets); err != nil {
414
+
return nil, fmt.Errorf("could not parse frame index: %w", err)
415
+
}
416
+
417
+
// 2. Calculate target frame
418
+
frameIndex := position / FrameSize
419
+
lineInFrame := position % FrameSize
420
+
421
+
if frameIndex >= len(frameOffsets)-1 {
422
+
return nil, fmt.Errorf("position %d out of bounds (frame %d, total frames %d)",
423
+
position, frameIndex, len(frameOffsets)-1)
424
+
}
425
+
426
+
// 3. Read the specific frame from file
427
+
startOffset := frameOffsets[frameIndex]
428
+
endOffset := frameOffsets[frameIndex+1]
429
+
frameLength := endOffset - startOffset
430
+
431
+
if frameLength <= 0 {
432
+
return nil, fmt.Errorf("invalid frame length: %d", frameLength)
433
+
}
434
+
435
+
bundleFile, err := os.Open(path)
436
+
if err != nil {
437
+
return nil, fmt.Errorf("failed to open bundle: %w", err)
438
+
}
439
+
defer bundleFile.Close()
440
+
441
+
compressedFrame := make([]byte, frameLength)
442
+
_, err = bundleFile.ReadAt(compressedFrame, startOffset)
443
+
if err != nil {
444
+
return nil, fmt.Errorf("failed to read frame %d: %w", frameIndex, err)
445
+
}
446
+
447
+
// 4. ✅ Decompress this single frame
448
+
decompressed, err := DecompressFrame(compressedFrame)
449
+
if err != nil {
450
+
return nil, fmt.Errorf("failed to decompress frame %d: %w", frameIndex, err)
451
+
}
452
+
453
+
// 5. Scan the decompressed data to find the target line
454
+
scanner := bufio.NewScanner(bytes.NewReader(decompressed))
455
+
lineNum := 0
456
+
457
+
for scanner.Scan() {
458
+
if lineNum == lineInFrame {
459
+
line := scanner.Bytes()
460
+
var operation plcclient.PLCOperation
461
+
if err := json.UnmarshalNoEscape(line, &operation); err != nil {
462
+
return nil, fmt.Errorf("failed to parse operation at position %d: %w", position, err)
463
+
}
464
+
operation.RawJSON = make([]byte, len(line))
465
+
copy(operation.RawJSON, line)
466
+
return &operation, nil
467
+
}
468
+
lineNum++
469
+
}
470
+
471
+
if err := scanner.Err(); err != nil {
472
+
return nil, fmt.Errorf("scanner error on frame %d: %w", frameIndex, err)
473
+
}
474
+
475
+
return nil, fmt.Errorf("position %d not found in frame %d", position, frameIndex)
476
+
}
477
+
478
+
// loadOperationAtPositionLegacy loads operation from old single-frame bundles
479
+
func (op *Operations) loadOperationAtPositionLegacy(path string, position int) (*plcclient.PLCOperation, error) {
480
+
file, err := os.Open(path)
481
+
if err != nil {
482
+
return nil, fmt.Errorf("failed to open file: %w", err)
483
+
}
484
+
defer file.Close()
485
+
486
+
// ✅ Use abstracted streaming reader
487
+
reader, err := NewStreamingReader(file)
488
+
if err != nil {
489
+
return nil, fmt.Errorf("failed to create reader: %w", err)
490
+
}
491
+
defer reader.Release()
492
+
493
+
scanner := bufio.NewScanner(reader)
494
+
buf := make([]byte, 512*1024)
495
+
scanner.Buffer(buf, 1024*1024)
496
+
497
+
lineNum := 0
498
+
for scanner.Scan() {
499
+
if lineNum == position {
500
+
line := scanner.Bytes()
501
+
var operation plcclient.PLCOperation
502
+
if err := json.UnmarshalNoEscape(line, &operation); err != nil {
503
+
return nil, fmt.Errorf("failed to parse operation at position %d: %w", position, err)
504
+
}
505
+
operation.RawJSON = make([]byte, len(line))
506
+
copy(operation.RawJSON, line)
507
+
return &operation, nil
508
+
}
509
+
lineNum++
510
+
}
511
+
512
+
if err := scanner.Err(); err != nil {
513
+
return nil, fmt.Errorf("scanner error: %w", err)
514
+
}
515
+
516
+
return nil, fmt.Errorf("position %d not found in bundle", position)
517
+
}
518
+
508
519
// LoadOperationsAtPositions loads multiple operations from a bundle in one pass
509
520
func (op *Operations) LoadOperationsAtPositions(path string, positions []int) (map[int]*plcclient.PLCOperation, error) {
510
521
if len(positions) == 0 {
···
530
541
}
531
542
defer file.Close()
532
543
533
-
reader := gozstd.NewReader(file)
534
-
defer reader.Close()
544
+
// ✅ Use abstracted streaming reader
545
+
reader, err := NewStreamingReader(file)
546
+
if err != nil {
547
+
return nil, fmt.Errorf("failed to create reader: %w", err)
548
+
}
549
+
defer reader.Release()
535
550
536
551
bufPtr := scannerBufPool.Get().(*[]byte)
537
552
defer scannerBufPool.Put(bufPtr)
···
563
578
564
579
lineNum++
565
580
566
-
// Early exit if we passed the max position we need
581
+
// Early exit if we passed the max position
567
582
if lineNum > maxPos {
568
583
break
569
584
}
···
584
599
}
585
600
defer file.Close()
586
601
587
-
reader := gozstd.NewReader(file)
588
-
defer reader.Close()
602
+
// ✅ Use abstracted reader
603
+
reader, err := NewStreamingReader(file)
604
+
if err != nil {
605
+
return 0, 0, time.Time{}, time.Time{}, fmt.Errorf("failed to create reader: %w", err)
606
+
}
607
+
defer reader.Release()
589
608
590
609
scanner := bufio.NewScanner(reader)
591
610
buf := make([]byte, 64*1024)
+118
internal/storage/zstd.go
+118
internal/storage/zstd.go
···
1
+
package storage
2
+
3
+
import (
4
+
"fmt"
5
+
"io"
6
+
7
+
"github.com/valyala/gozstd"
8
+
)
9
+
10
+
// ============================================================================
11
+
// ZSTD COMPRESSION ABSTRACTION LAYER
12
+
// ============================================================================
13
+
// This file provides a clean interface for zstd operations.
14
+
// Swap implementations by changing the functions in this file.
15
+
16
+
const (
	// CompressionLevel is the zstd level used by NewStreamingWriter and
	// CompressFrame. Note: this is NOT the library default (zstd's own
	// default is 3); 2 deliberately favors speed over compression ratio.
	CompressionLevel = 2

	// FrameSize is the number of operations stored per zstd frame.
	// Must match the frame layout assumed by LoadOperationAtPosition
	// (position / FrameSize selects the frame).
	FrameSize = 100
)
23
+
24
+
// CompressFrame compresses a single chunk of data into a zstd frame
25
+
// with proper content size headers for multi-frame concatenation
26
+
func CompressFrame(data []byte) ([]byte, error) {
27
+
// ✅ valyala/gozstd.Compress creates proper frames with content size
28
+
compressed := gozstd.Compress(nil, data)
29
+
return compressed, nil
30
+
}
31
+
32
+
// DecompressAll decompresses all frames in the compressed data
33
+
func DecompressAll(compressed []byte) ([]byte, error) {
34
+
// ✅ valyala/gozstd.Decompress handles multi-frame
35
+
decompressed, err := gozstd.Decompress(nil, compressed)
36
+
if err != nil {
37
+
return nil, fmt.Errorf("decompression failed: %w", err)
38
+
}
39
+
return decompressed, nil
40
+
}
41
+
42
+
// DecompressFrame decompresses a single frame
43
+
func DecompressFrame(compressedFrame []byte) ([]byte, error) {
44
+
return gozstd.Decompress(nil, compressedFrame)
45
+
}
46
+
47
+
// NewStreamingReader creates a streaming decompressor
48
+
// Returns a reader that must be released with Release()
49
+
func NewStreamingReader(r io.Reader) (StreamReader, error) {
50
+
reader := gozstd.NewReader(r)
51
+
return &gozstdReader{reader: reader}, nil
52
+
}
53
+
54
+
// NewStreamingWriter creates a streaming compressor at default level
55
+
// Returns a writer that must be closed with Close() then released with Release()
56
+
func NewStreamingWriter(w io.Writer) (StreamWriter, error) {
57
+
writer := gozstd.NewWriterLevel(w, CompressionLevel)
58
+
return &gozstdWriter{writer: writer}, nil
59
+
}
60
+
61
+
// ============================================================================
62
+
// INTERFACES (for abstraction)
63
+
// ============================================================================
64
+
65
+
// StreamReader is the minimal surface of a streaming decompressor: a byte
// reader that can also copy itself efficiently into a writer (io.WriterTo),
// plus an explicit Release for returning native resources.
type StreamReader interface {
	io.Reader
	io.WriterTo
	Release()
}
71
+
72
+
// StreamWriter is the minimal surface of a streaming compressor: writes are
// compressed, Close finalizes the current frame, Flush pushes pending data
// downstream, and Release returns native resources.
type StreamWriter interface {
	io.Writer
	io.Closer
	Flush() error
	Release()
}
79
+
80
+
// ============================================================================
81
+
// WRAPPER TYPES (valyala/gozstd specific)
82
+
// ============================================================================
83
+
84
+
type gozstdReader struct {
85
+
reader *gozstd.Reader
86
+
}
87
+
88
+
func (r *gozstdReader) Read(p []byte) (int, error) {
89
+
return r.reader.Read(p)
90
+
}
91
+
92
+
func (r *gozstdReader) WriteTo(w io.Writer) (int64, error) {
93
+
return r.reader.WriteTo(w)
94
+
}
95
+
96
+
func (r *gozstdReader) Release() {
97
+
r.reader.Release()
98
+
}
99
+
100
+
type gozstdWriter struct {
101
+
writer *gozstd.Writer
102
+
}
103
+
104
+
func (w *gozstdWriter) Write(p []byte) (int, error) {
105
+
return w.writer.Write(p)
106
+
}
107
+
108
+
func (w *gozstdWriter) Close() error {
109
+
return w.writer.Close()
110
+
}
111
+
112
+
func (w *gozstdWriter) Flush() error {
113
+
return w.writer.Flush()
114
+
}
115
+
116
+
func (w *gozstdWriter) Release() {
117
+
w.writer.Release()
118
+
}