internal/api/handlers.go  (+14 -7)
···
     r.w.Header().Set("X-Bundle-Start-Time", bundle.StartTime.Format(time.RFC3339Nano))
     r.w.Header().Set("X-Bundle-End-Time", bundle.EndTime.Format(time.RFC3339Nano))
     r.w.Header().Set("X-Bundle-Operation-Count", fmt.Sprintf("%d", plc.BUNDLE_SIZE))
-    r.w.Header().Set("X-Bundle-DID-Count", fmt.Sprintf("%d", len(bundle.DIDs)))
+    r.w.Header().Set("X-Bundle-DID-Count", fmt.Sprintf("%d", bundle.DIDCount))
 }

 // ===== REQUEST HELPERS =====
···
     "start_time":      bundle.StartTime,
     "end_time":        bundle.EndTime,
     "operation_count": plc.BUNDLE_SIZE,
-    "did_count":       len(bundle.DIDs),
+    "did_count":       bundle.DIDCount, // Use DIDCount instead of len(DIDs)
     "hash":            bundle.Hash,
     "compressed_hash": bundle.CompressedHash,
     "compressed_size": bundle.CompressedSize,
···
     "progress_percent":          float64(mempoolCount) / float64(plc.BUNDLE_SIZE) * 100,
     "operations_needed":         operationsNeeded,
     "did_count":                 uniqueDIDCount,
-    "start_time":                firstOp.CreatedAt, // This is FIXED once first op exists
-    "current_end_time":          lastOp.CreatedAt,  // This will change as more ops arrive
+    "start_time":                firstOp.CreatedAt,
+    "current_end_time":          lastOp.CreatedAt,
     "uncompressed_size":         uncompressedSize,
     "estimated_compressed_size": estimatedCompressedSize,
     "compression_ratio":         float64(uncompressedSize) / float64(estimatedCompressedSize),
···
         result["current_rate_per_second"] = currentRate
     }

-    // Get actual mempool operations if requested
+    // Get actual mempool operations if requested (for DIDs list)
     if r.URL.Query().Get("include_dids") == "true" {
         ops, err := s.db.GetMempoolOperations(ctx, plc.BUNDLE_SIZE)
         if err == nil {
···
         return
     }

+    // Query DIDs from dids table instead
+    dids, err := s.db.GetDIDsForBundle(r.Context(), bundleNum)
+    if err != nil {
+        resp.error(fmt.Sprintf("failed to get DIDs: %v", err), http.StatusInternalServerError)
+        return
+    }
+
     resp.json(map[string]interface{}{
         "plc_bundle_number": bundle.BundleNumber,
-        "did_count":         len(bundle.DIDs),
-        "dids":              bundle.DIDs,
+        "did_count":         bundle.DIDCount,
+        "dids":              dids,
     })
 }
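From a client's point of view the response shape is unchanged; the DID count now comes from the stored `did_count` column instead of the length of an embedded DID list, and the per-bundle `dids` array is fetched from the `dids` table on demand. A quick way to sanity-check the headers set above, as a sketch (the port and route are placeholders, not taken from this diff):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder URL: the actual route serving bundles is not shown in this diff.
	resp, err := http.Get("http://localhost:8080/plc/bundles/42")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Headers written by the handler above.
	fmt.Println("operations:", resp.Header.Get("X-Bundle-Operation-Count"))
	fmt.Println("unique DIDs:", resp.Header.Get("X-Bundle-DID-Count"))
	fmt.Println("window:", resp.Header.Get("X-Bundle-Start-Time"), "to", resp.Header.Get("X-Bundle-End-Time"))
}
```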
internal/plc/bundle.go  (+38 -18)
···
 func NewBundleManager(dir string, enabled bool, db storage.Database, indexDIDs bool) (*BundleManager, error) {
     if !enabled {
+        log.Verbose("BundleManager disabled (enabled=false)")
         return &BundleManager{enabled: false}, nil
     }

···
         return nil, err
     }

+    log.Verbose("BundleManager initialized: enabled=%v, indexDIDs=%v, dir=%s", enabled, indexDIDs, dir)
+
     return &BundleManager{
         dir:       dir,
         enabled:   enabled,
         encoder:   encoder,
         decoder:   decoder,
         db:        db,
-        indexDIDs: indexDIDs, // NEW
+        indexDIDs: indexDIDs,
     }, nil
 }
···
 // ===== BUNDLE INDEXING =====

 func (bm *BundleManager) indexBundle(ctx context.Context, bundleNum int, bf *bundleFile, cursor string) error {
+    log.Verbose("indexBundle called for bundle %06d: indexDIDs=%v", bundleNum, bm.indexDIDs)
+
     prevHash := ""
     if bundleNum > 1 {
         if prev, err := bm.db.GetBundleByNumber(ctx, bundleNum-1); err == nil {
···
     }

     dids := bm.extractUniqueDIDs(bf.operations)
+    log.Verbose("Extracted %d unique DIDs from bundle %06d", len(dids), bundleNum)
+
     compressedFileSize := bm.getFileSize(bf.path)

     // Calculate uncompressed size
     uncompressedSize := int64(0)
     for _, op := range bf.operations {
-        uncompressedSize += int64(len(op.RawJSON)) + 1 // +1 for newline
+        uncompressedSize += int64(len(op.RawJSON)) + 1
     }

     // Get time range from operations
···
         BundleNumber: bundleNum,
         StartTime:    firstSeenAt,
         EndTime:      lastSeenAt,
-        DIDs:         dids,
+        DIDCount:     len(dids),
         Hash:           bf.uncompressedHash,
         CompressedHash: bf.compressedHash,
         CompressedSize: compressedFileSize,
···
         CreatedAt: time.Now().UTC(),
     }

+    log.Verbose("About to create bundle %06d in database (DIDCount=%d)", bundleNum, bundle.DIDCount)
+
     // Create bundle first
     if err := bm.db.CreateBundle(ctx, bundle); err != nil {
+        log.Error("Failed to create bundle %06d in database: %v", bundleNum, err)
         return err
     }

+    log.Verbose("Bundle %06d created successfully in database", bundleNum)
+
-    // NEW: Only index DIDs if enabled
+    // Index DIDs if enabled
     if bm.indexDIDs {
         start := time.Now()
+        log.Verbose("Starting DID indexing for bundle %06d: %d unique DIDs", bundleNum, len(dids))

-        // Extract handle and PDS for each DID using centralized helper
+        // Extract handle and PDS for each DID
         didInfoMap := ExtractDIDInfoMap(bf.operations)
+        log.Verbose("Extracted info for %d DIDs from operations", len(didInfoMap))

-        if err := bm.db.AddBundleDIDs(ctx, bundleNum, dids); err != nil {
-            log.Error("Failed to index DIDs for bundle %06d: %v", bundleNum, err)
-            // Don't return error - bundle is already created
-        } else {
-            // Update handle and PDS for each DID
-            for did, info := range didInfoMap {
-                // Validate handle length before saving
-                validHandle := ValidateHandle(info.Handle)
-
-                if err := bm.db.UpsertDID(ctx, did, bundleNum, validHandle, info.PDS); err != nil {
-                    log.Error("Failed to update DID %s metadata: %v", did, err)
-                }
-            }
-
-            elapsed := time.Since(start)
-            log.Verbose("✓ Indexed %d unique DIDs for bundle %06d in %v", len(dids), bundleNum, elapsed)
-        }
+        successCount := 0
+        errorCount := 0
+        invalidHandleCount := 0
+
+        // Upsert each DID with handle, pds, and bundle number
+        for did, info := range didInfoMap {
+            validHandle := ValidateHandle(info.Handle)
+            if info.Handle != "" && validHandle == "" {
+                //log.Verbose("Bundle %06d: Skipping invalid handle for DID %s (length: %d)", bundleNum, did, len(info.Handle))
+                invalidHandleCount++
+            }
+
+            if err := bm.db.UpsertDID(ctx, did, bundleNum, validHandle, info.PDS); err != nil {
+                log.Error("Failed to index DID %s for bundle %06d: %v", did, bundleNum, err)
+                errorCount++
+            } else {
+                successCount++
+            }
+        }
+
+        elapsed := time.Since(start)
+        log.Info("✓ Indexed bundle %06d: %d DIDs succeeded, %d errors, %d invalid handles in %v",
+            bundleNum, successCount, errorCount, invalidHandleCount, elapsed)
     } else {
         log.Verbose("⊘ Skipped DID indexing for bundle %06d (disabled in config)", bundleNum)
     }
internal/plc/scanner.go  (+3 -1)
···
 }

 func NewScanner(db storage.Database, cfg config.PLCConfig) *Scanner {
-    bundleManager, err := NewBundleManager(cfg.BundleDir, cfg.UseCache, db, cfg.IndexDIDs) // NEW: pass IndexDIDs
+    log.Verbose("NewScanner: IndexDIDs config = %v", cfg.IndexDIDs)
+
+    bundleManager, err := NewBundleManager(cfg.BundleDir, cfg.UseCache, db, cfg.IndexDIDs)
     if err != nil {
         log.Error("Warning: failed to initialize bundle manager: %v", err)
         bundleManager = &BundleManager{enabled: false}
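The only functional change here is that `cfg.IndexDIDs` is threaded through to the bundle manager. The config type itself is not in this diff; judging from the fields NewScanner touches, `config.PLCConfig` carries at least the following (a sketch of the assumed fields, not the real definition):

```go
// Sketch: fields of config.PLCConfig referenced by NewScanner above.
// The real struct lives in the config package and likely has more fields.
type PLCConfig struct {
	BundleDir string // directory passed to NewBundleManager
	UseCache  bool   // passed as the bundle manager's "enabled" flag
	IndexDIDs bool   // toggles per-DID indexing when bundles are written
}
```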
internal/storage/db.go  (+1)
···
     GetBundleByNumber(ctx context.Context, bundleNumber int) (*PLCBundle, error)
     GetBundles(ctx context.Context, limit int) ([]*PLCBundle, error)
     GetBundlesForDID(ctx context.Context, did string) ([]*PLCBundle, error)
+    GetDIDsForBundle(ctx context.Context, bundleNum int) ([]string, error)
     GetBundleStats(ctx context.Context) (count, compressedSize, uncompressedSize, lastBundle int64, err error)
     GetLastBundleNumber(ctx context.Context) (int, error)
     GetBundleForTimestamp(ctx context.Context, afterTime time.Time) (int, error)
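Growing the `Database` interface means every implementation (and any test fake) must add `GetDIDsForBundle` before the project builds again. A compile-time assertion is a cheap way to surface that early; this line is a suggestion, not something present in the diff:

```go
// Sketch: assert at compile time that *PostgresDB still satisfies the
// storage.Database interface after GetDIDsForBundle is added.
var _ Database = (*PostgresDB)(nil)
```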
internal/storage/postgres.go  (+89 -43)
···
         records_processed BIGINT DEFAULT 0
     );

-    CREATE TABLE IF NOT EXISTS plc_bundles (
-        bundle_number INTEGER PRIMARY KEY,
-        start_time TIMESTAMP NOT NULL,
-        end_time TIMESTAMP NOT NULL,
-        dids JSONB NOT NULL,
-        hash TEXT NOT NULL,
-        compressed_hash TEXT NOT NULL,
-        compressed_size BIGINT NOT NULL,
-        uncompressed_size BIGINT NOT NULL,
-        cumulative_compressed_size BIGINT NOT NULL,
-        cumulative_uncompressed_size BIGINT NOT NULL,
-        cursor TEXT,
-        prev_bundle_hash TEXT,
-        compressed BOOLEAN DEFAULT true,
-        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-    );
-
-    CREATE INDEX IF NOT EXISTS idx_plc_bundles_time ON plc_bundles(start_time, end_time);
-    CREATE INDEX IF NOT EXISTS idx_plc_bundles_hash ON plc_bundles(hash);
-    CREATE INDEX IF NOT EXISTS idx_plc_bundles_prev ON plc_bundles(prev_bundle_hash);
-    CREATE INDEX IF NOT EXISTS idx_plc_bundles_number_desc ON plc_bundles(bundle_number DESC);
-    CREATE INDEX IF NOT EXISTS idx_plc_bundles_dids ON plc_bundles USING gin(dids);
+    CREATE TABLE IF NOT EXISTS plc_bundles (
+        bundle_number INTEGER PRIMARY KEY,
+        start_time TIMESTAMP NOT NULL,
+        end_time TIMESTAMP NOT NULL,
+        did_count INTEGER NOT NULL DEFAULT 0,
+        hash TEXT NOT NULL,
+        compressed_hash TEXT NOT NULL,
+        compressed_size BIGINT NOT NULL,
+        uncompressed_size BIGINT NOT NULL,
+        cumulative_compressed_size BIGINT NOT NULL,
+        cumulative_uncompressed_size BIGINT NOT NULL,
+        cursor TEXT,
+        prev_bundle_hash TEXT,
+        compressed BOOLEAN DEFAULT true,
+        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+    );
+
+    CREATE INDEX IF NOT EXISTS idx_plc_bundles_time ON plc_bundles(start_time, end_time);
+    CREATE INDEX IF NOT EXISTS idx_plc_bundles_hash ON plc_bundles(hash);
+    CREATE INDEX IF NOT EXISTS idx_plc_bundles_prev ON plc_bundles(prev_bundle_hash);
+    CREATE INDEX IF NOT EXISTS idx_plc_bundles_number_desc ON plc_bundles(bundle_number DESC);

     CREATE TABLE IF NOT EXISTS plc_mempool (
         id BIGSERIAL PRIMARY KEY,
···
 // ===== BUNDLE OPERATIONS =====

 func (p *PostgresDB) CreateBundle(ctx context.Context, bundle *PLCBundle) error {
-    didsJSON, err := json.Marshal(bundle.DIDs)
-    if err != nil {
-        return err
-    }
-
     // Calculate cumulative sizes from previous bundle
     if bundle.BundleNumber > 1 {
         prevBundle, err := p.GetBundleByNumber(ctx, bundle.BundleNumber-1)
···
     query := `
         INSERT INTO plc_bundles (
-            bundle_number, start_time, end_time, dids,
+            bundle_number, start_time, end_time, did_count,
             hash, compressed_hash, compressed_size, uncompressed_size,
             cumulative_compressed_size, cumulative_uncompressed_size,
             cursor, prev_bundle_hash, compressed
···
         ON CONFLICT(bundle_number) DO UPDATE SET
             start_time = EXCLUDED.start_time,
             end_time = EXCLUDED.end_time,
-            dids = EXCLUDED.dids,
+            did_count = EXCLUDED.did_count,
             hash = EXCLUDED.hash,
             compressed_hash = EXCLUDED.compressed_hash,
             compressed_size = EXCLUDED.compressed_size,
···
             prev_bundle_hash = EXCLUDED.prev_bundle_hash,
             compressed = EXCLUDED.compressed
     `
-    _, err = p.db.ExecContext(ctx, query,
+    _, err := p.db.ExecContext(ctx, query,
         bundle.BundleNumber, bundle.StartTime, bundle.EndTime,
-        didsJSON, bundle.Hash, bundle.CompressedHash,
+        bundle.DIDCount, bundle.Hash, bundle.CompressedHash,
         bundle.CompressedSize, bundle.UncompressedSize,
         bundle.CumulativeCompressedSize, bundle.CumulativeUncompressedSize,
         bundle.Cursor, bundle.PrevBundleHash, bundle.Compressed,
···
 func (p *PostgresDB) GetBundleByNumber(ctx context.Context, bundleNumber int) (*PLCBundle, error) {
     query := `
-        SELECT bundle_number, start_time, end_time, dids, hash, compressed_hash,
+        SELECT bundle_number, start_time, end_time, did_count, hash, compressed_hash,
             compressed_size, uncompressed_size, cumulative_compressed_size,
             cumulative_uncompressed_size, cursor, prev_bundle_hash, compressed, created_at
         FROM plc_bundles
···
     `

     var bundle PLCBundle
-    var didsJSON []byte
     var prevHash sql.NullString
     var cursor sql.NullString

     err := p.db.QueryRowContext(ctx, query, bundleNumber).Scan(
         &bundle.BundleNumber, &bundle.StartTime, &bundle.EndTime,
-        &didsJSON, &bundle.Hash, &bundle.CompressedHash,
+        &bundle.DIDCount, &bundle.Hash, &bundle.CompressedHash,
         &bundle.CompressedSize, &bundle.UncompressedSize,
         &bundle.CumulativeCompressedSize, &bundle.CumulativeUncompressedSize,
         &cursor, &prevHash, &bundle.Compressed, &bundle.CreatedAt,
···
         bundle.Cursor = cursor.String
     }

-    json.Unmarshal(didsJSON, &bundle.DIDs)
     return &bundle, nil
 }

 func (p *PostgresDB) GetBundles(ctx context.Context, limit int) ([]*PLCBundle, error) {
     query := `
-        SELECT bundle_number, start_time, end_time, dids, hash, compressed_hash,
+        SELECT bundle_number, start_time, end_time, did_count, hash, compressed_hash,
             compressed_size, uncompressed_size, cumulative_compressed_size,
             cumulative_uncompressed_size, cursor, prev_bundle_hash, compressed, created_at
         FROM plc_bundles
···
 }

 func (p *PostgresDB) GetBundlesForDID(ctx context.Context, did string) ([]*PLCBundle, error) {
-    query := `
-        SELECT bundle_number, start_time, end_time, dids, hash, compressed_hash,
-            compressed_size, uncompressed_size, cumulative_compressed_size,
-            cumulative_uncompressed_size, cursor, prev_bundle_hash, compressed, created_at
-        FROM plc_bundles
-        WHERE dids ? $1
-        ORDER BY bundle_number ASC
-    `
-
-    rows, err := p.db.QueryContext(ctx, query, did)
+    // Get bundle numbers from dids table
+    var bundleNumbersJSON []byte
+    err := p.db.QueryRowContext(ctx, `
+        SELECT bundle_numbers FROM dids WHERE did = $1
+    `, did).Scan(&bundleNumbersJSON)
+
+    if err == sql.ErrNoRows {
+        return []*PLCBundle{}, nil
+    }
+    if err != nil {
+        return nil, err
+    }
+
+    var bundleNumbers []int
+    if err := json.Unmarshal(bundleNumbersJSON, &bundleNumbers); err != nil {
+        return nil, err
+    }
+
+    if len(bundleNumbers) == 0 {
+        return []*PLCBundle{}, nil
+    }
+
+    // Build query with IN clause
+    placeholders := make([]string, len(bundleNumbers))
+    args := make([]interface{}, len(bundleNumbers))
+    for i, num := range bundleNumbers {
+        placeholders[i] = fmt.Sprintf("$%d", i+1)
+        args[i] = num
+    }
+
+    query := fmt.Sprintf(`
+        SELECT bundle_number, start_time, end_time, did_count, hash, compressed_hash,
+            compressed_size, uncompressed_size, cumulative_compressed_size,
+            cumulative_uncompressed_size, cursor, prev_bundle_hash, compressed, created_at
+        FROM plc_bundles
+        WHERE bundle_number IN (%s)
+        ORDER BY bundle_number ASC
+    `, strings.Join(placeholders, ","))
+
+    rows, err := p.db.QueryContext(ctx, query, args...)
     if err != nil {
         return nil, err
     }
···
     return p.scanBundles(rows)
 }

+func (p *PostgresDB) GetDIDsForBundle(ctx context.Context, bundleNum int) ([]string, error) {
+    query := `
+        SELECT did
+        FROM dids
+        WHERE bundle_numbers @> $1::jsonb
+        ORDER BY did
+    `
+
+    rows, err := p.db.QueryContext(ctx, query, fmt.Sprintf("[%d]", bundleNum))
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var dids []string
+    for rows.Next() {
+        var did string
+        if err := rows.Scan(&did); err != nil {
+            return nil, err
+        }
+        dids = append(dids, did)
+    }
+
+    return dids, rows.Err()
+}
+
 func (p *PostgresDB) scanBundles(rows *sql.Rows) ([]*PLCBundle, error) {
     var bundles []*PLCBundle

     for rows.Next() {
         var bundle PLCBundle
-        var didsJSON []byte
         var prevHash sql.NullString
         var cursor sql.NullString

···
             &bundle.BundleNumber,
             &bundle.StartTime,
             &bundle.EndTime,
-            &didsJSON,
+            &bundle.DIDCount,
             &bundle.Hash,
             &bundle.CompressedHash,
             &bundle.CompressedSize,
···
             bundle.Cursor = cursor.String
         }

-        json.Unmarshal(didsJSON, &bundle.DIDs)
         bundles = append(bundles, &bundle)
     }
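Because the schema uses `CREATE TABLE IF NOT EXISTS`, an existing database keeps its old `dids JSONB` column and never gains `did_count`, and the new queries assume a `dids` table with a JSONB `bundle_numbers` column (referenced here but not created in this diff). For deployments that already have data, a one-off migration roughly like the following would be needed; this is a hedged sketch under those assumptions, not part of the change:

```go
package migrate // illustrative package name

import (
	"context"
	"database/sql"
	"fmt"
)

// Sketch of a one-off migration for an existing database. Assumes the old
// plc_bundles.dids JSONB column is still present and that the dids table
// (with a JSONB bundle_numbers column) already exists.
func migrateBundleDIDCount(ctx context.Context, db *sql.DB) error {
	stmts := []string{
		// New counter column, backfilled from the old JSONB array.
		`ALTER TABLE plc_bundles ADD COLUMN IF NOT EXISTS did_count INTEGER NOT NULL DEFAULT 0`,
		`UPDATE plc_bundles SET did_count = jsonb_array_length(dids) WHERE dids IS NOT NULL`,
		// Drop the old column and its GIN index.
		`DROP INDEX IF EXISTS idx_plc_bundles_dids`,
		`ALTER TABLE plc_bundles DROP COLUMN IF EXISTS dids`,
		// GetDIDsForBundle filters on bundle_numbers containment (@>), so a GIN
		// index on dids.bundle_numbers keeps that lookup from scanning the table.
		`CREATE INDEX IF NOT EXISTS idx_dids_bundle_numbers ON dids USING gin(bundle_numbers)`,
	}
	for _, stmt := range stmts {
		if _, err := db.ExecContext(ctx, stmt); err != nil {
			return fmt.Errorf("migration step failed: %w", err)
		}
	}
	return nil
}
```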
internal/storage/types.go  (+1 -1)
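The types.go hunk is cut off here, but the rest of the diff pins down how `PLCBundle` is used: the `DIDs []string` field goes away and a `DIDCount int` field takes its place. A sketch of the struct as the other files use it (field set and types inferred from usage, not copied from types.go):

```go
// Sketch of storage.PLCBundle as implied by CreateBundle/scanBundles above;
// the actual types.go change is truncated in this capture, so treat field
// order, exact types, and any JSON tags as assumptions.
type PLCBundle struct {
	BundleNumber               int
	StartTime                  time.Time
	EndTime                    time.Time
	DIDCount                   int // replaces the old DIDs []string field
	Hash                       string
	CompressedHash             string
	CompressedSize             int64
	UncompressedSize           int64
	CumulativeCompressedSize   int64
	CumulativeUncompressedSize int64
	Cursor                     string
	PrevBundleHash             string
	Compressed                 bool
	CreatedAt                  time.Time
}
```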