internal/api/handlers.go (+201 -147)
···
     "github.com/klauspost/compress/zstd"
 )
 
-func (s *Server) handleGetPDSList(w http.ResponseWriter, r *http.Request) {
+// ====================
+// Endpoint Handlers (new)
+// ====================
+
+func (s *Server) handleGetEndpoints(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
 
-    filter := &storage.PDSFilter{}
+    filter := &storage.EndpointFilter{}
+
+    if typ := r.URL.Query().Get("type"); typ != "" {
+        filter.Type = typ
+    }
 
     if status := r.URL.Query().Get("status"); status != "" {
         filter.Status = status
     }
 
+    if minUserCount := r.URL.Query().Get("min_user_count"); minUserCount != "" {
+        if count, err := strconv.ParseInt(minUserCount, 10, 64); err == nil {
+            filter.MinUserCount = count
+        }
+    }
+
     if limit := r.URL.Query().Get("limit"); limit != "" {
         if l, err := strconv.Atoi(limit); err == nil {
             filter.Limit = l
···
         }
     }
 
-    servers, err := s.db.GetPDSServers(ctx, filter)
+    endpoints, err := s.db.GetEndpoints(ctx, filter)
     if err != nil {
         http.Error(w, err.Error(), http.StatusInternalServerError)
         return
     }
 
     // Convert status codes to strings for API
-    response := make([]map[string]interface{}, len(servers))
-    for i, srv := range servers {
+    response := make([]map[string]interface{}, len(endpoints))
+    for i, ep := range endpoints {
         response[i] = map[string]interface{}{
-            "id":            srv.ID,
-            "endpoint":      srv.Endpoint,
-            "discovered_at": srv.DiscoveredAt,
-            "last_checked":  srv.LastChecked,
-            "status":        statusToString(srv.Status),
-            "user_count":    srv.UserCount,
+            "id":            ep.ID,
+            "endpoint_type": ep.EndpointType,
+            "endpoint":      ep.Endpoint,
+            "discovered_at": ep.DiscoveredAt,
+            "last_checked":  ep.LastChecked,
+            "status":        statusToString(ep.Status),
+            "user_count":    ep.UserCount,
         }
     }
 
     respondJSON(w, response)
 }
 
-func (s *Server) handleGetPDS(w http.ResponseWriter, r *http.Request) {
+func (s *Server) handleGetEndpoint(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
     vars := mux.Vars(r)
     endpoint := vars["endpoint"]
 
-    pds, err := s.db.GetPDS(ctx, endpoint)
+    // Get type from query param, default to "pds" for backward compatibility
+    endpointType := r.URL.Query().Get("type")
+    if endpointType == "" {
+        endpointType = "pds"
+    }
+
+    ep, err := s.db.GetEndpoint(ctx, endpoint, endpointType)
     if err != nil {
-        http.Error(w, "PDS not found", http.StatusNotFound)
+        http.Error(w, "Endpoint not found", http.StatusNotFound)
         return
     }
 
     // Get recent scans
-    scans, _ := s.db.GetPDSScans(ctx, pds.ID, 10)
+    scans, _ := s.db.GetEndpointScans(ctx, ep.ID, 10)
 
     response := map[string]interface{}{
-        "id":            pds.ID,
-        "endpoint":      pds.Endpoint,
-        "discovered_at": pds.DiscoveredAt,
-        "last_checked":  pds.LastChecked,
-        "status":        statusToString(pds.Status),
-        "user_count":    pds.UserCount,
+        "id":            ep.ID,
+        "endpoint_type": ep.EndpointType,
+        "endpoint":      ep.Endpoint,
+        "discovered_at": ep.DiscoveredAt,
+        "last_checked":  ep.LastChecked,
+        "status":        statusToString(ep.Status),
+        "user_count":    ep.UserCount,
         "recent_scans":  scans,
     }
 
     respondJSON(w, response)
 }
 
-func (s *Server) handleGetPDSStats(w http.ResponseWriter, r *http.Request) {
+func (s *Server) handleGetEndpointStats(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
 
-    stats, err := s.db.GetPDSStats(ctx)
+    stats, err := s.db.GetEndpointStats(ctx)
     if err != nil {
         http.Error(w, err.Error(), http.StatusInternalServerError)
         return
···
 
     respondJSON(w, stats)
 }
+
+// ====================
+// DID Handlers
+// ====================
 
 func (s *Server) handleGetDID(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
···
     respondJSON(w, history)
 }
 
+// ====================
+// PLC Bundle Handlers
+// ====================
+
 func (s *Server) handleGetPLCBundle(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
     vars := mux.Vars(r)
···
         "end_time":        bundle.EndTime,
         "operation_count": plc.BUNDLE_SIZE,
         "did_count":       len(bundle.DIDs),
-        "hash":            bundle.Hash,           // Uncompressed (verifiable)
-        "compressed_hash": bundle.CompressedHash, // File integrity
+        "hash":            bundle.Hash,
+        "compressed_hash": bundle.CompressedHash,
         "compressed_size": bundle.CompressedSize,
         "prev_bundle_hash": bundle.PrevBundleHash,
         "created_at":      bundle.CreatedAt,
···
     }
 }
 
+func (s *Server) handleGetPLCBundles(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+
+    limit := 50
+    if l := r.URL.Query().Get("limit"); l != "" {
+        if parsed, err := strconv.Atoi(l); err == nil {
+            limit = parsed
+        }
+    }
+
+    bundles, err := s.db.GetBundles(ctx, limit)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    response := make([]map[string]interface{}, len(bundles))
+    for i, bundle := range bundles {
+        response[i] = map[string]interface{}{
+            "plc_bundle_number": bundle.BundleNumber,
+            "start_time":        bundle.StartTime,
+            "end_time":          bundle.EndTime,
+            "operation_count":   plc.BUNDLE_SIZE,
+            "did_count":         len(bundle.DIDs),
+            "hash":              bundle.Hash,
+            "compressed_hash":   bundle.CompressedHash,
+            "compressed_size":   bundle.CompressedSize,
+            "prev_bundle_hash":  bundle.PrevBundleHash,
+        }
+    }
+
+    respondJSON(w, response)
+}
+
+func (s *Server) handleGetPLCBundleStats(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+
+    count, size, err := s.db.GetBundleStats(ctx)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    respondJSON(w, map[string]interface{}{
+        "plc_bundle_count": count,
+        "total_size":       size,
+        "total_size_mb":    float64(size) / 1024 / 1024,
+    })
+}
+
+// ====================
+// Mempool Handlers
+// ====================
+
 func (s *Server) handleGetMempoolStats(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
 
···
     respondJSON(w, response)
 }
 
-// Helper to load bundle operations - UPDATED FOR JSONL FORMAT
-func (s *Server) loadBundleOperations(path string) ([]plc.PLCOperation, error) {
-    decoder, err := zstd.NewReader(nil)
-    if err != nil {
-        return nil, err
-    }
-    defer decoder.Close()
-
-    compressedData, err := os.ReadFile(path)
-    if err != nil {
-        return nil, err
-    }
-
-    decompressed, err := decoder.DecodeAll(compressedData, nil)
-    if err != nil {
-        return nil, err
-    }
-
-    // Parse JSONL (newline-delimited JSON)
-    var operations []plc.PLCOperation
-    scanner := bufio.NewScanner(bytes.NewReader(decompressed))
-
-    lineNum := 0
-    for scanner.Scan() {
-        lineNum++
-        line := scanner.Bytes()
-
-        // Skip empty lines
-        if len(line) == 0 {
-            continue
-        }
-
-        var op plc.PLCOperation
-        if err := json.Unmarshal(line, &op); err != nil {
-            return nil, fmt.Errorf("failed to parse operation on line %d: %w", lineNum, err)
-        }
-
-        // CRITICAL: Store the original raw JSON bytes
-        op.RawJSON = make([]byte, len(line))
-        copy(op.RawJSON, line)
-
-        operations = append(operations, op)
-    }
-
-    if err := scanner.Err(); err != nil {
-        return nil, fmt.Errorf("error reading JSONL: %w", err)
-    }
-
-    return operations, nil
-}
+// ====================
+// PLC Metrics Handlers
+// ====================
 
 func (s *Server) handleGetPLCMetrics(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
···
     respondJSON(w, metrics)
 }
 
-func (s *Server) handleGetPLCBundles(w http.ResponseWriter, r *http.Request) {
-    ctx := r.Context()
-
-    limit := 50
-    if l := r.URL.Query().Get("limit"); l != "" {
-        if parsed, err := strconv.Atoi(l); err == nil {
-            limit = parsed
-        }
-    }
-
-    bundles, err := s.db.GetBundles(ctx, limit)
-    if err != nil {
-        http.Error(w, err.Error(), http.StatusInternalServerError)
-        return
-    }
-
-    response := make([]map[string]interface{}, len(bundles))
-    for i, bundle := range bundles {
-        response[i] = map[string]interface{}{
-            "plc_bundle_number": bundle.BundleNumber,
-            "start_time":        bundle.StartTime,
-            "end_time":          bundle.EndTime,
-            "operation_count":   10000,
-            "did_count":         len(bundle.DIDs),
-            "hash":              bundle.Hash,
-            "compressed_hash":   bundle.CompressedHash,
-            "compressed_size":   bundle.CompressedSize,
-            "prev_bundle_hash":  bundle.PrevBundleHash,
-        }
-    }
-
-    respondJSON(w, response)
-}
-
-func (s *Server) handleGetPLCBundleStats(w http.ResponseWriter, r *http.Request) {
-    ctx := r.Context()
-
-    count, size, err := s.db.GetBundleStats(ctx)
-    if err != nil {
-        http.Error(w, err.Error(), http.StatusInternalServerError)
-        return
-    }
-
-    respondJSON(w, map[string]interface{}{
-        "plc_bundle_count": count,
-        "total_size":       size,
-        "total_size_mb":    float64(size) / 1024 / 1024,
-    })
-}
-
-func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
-    respondJSON(w, map[string]string{"status": "ok"})
-}
+// ====================
+// Verification Handlers
+// ====================
 
 func (s *Server) handleVerifyPLCBundle(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
···
         "verified":           verified,
         "local_hash":         bundle.Hash,
         "remote_hash":        remoteHash,
-        "local_op_count":     bundle.OperationCount,
+        "local_op_count":     plc.BUNDLE_SIZE,
         "remote_op_count":    len(allRemoteOps),
         "boundary_cids_used": len(prevBoundaryCIDs),
     })
···
         "chain_start_time": firstBundle.StartTime,
         "chain_end_time":   lastBundleData.EndTime,
         "chain_head_hash":  lastBundleData.Hash,
-        "first_prev_hash":  firstBundle.PrevBundleHash, // Should be empty
+        "first_prev_hash":  firstBundle.PrevBundleHash,
         "last_prev_hash":   lastBundleData.PrevBundleHash,
     })
 }
 
-// handlePLCExport simulates PLC directory /export endpoint using cached bundles
+// ====================
+// PLC Export Handler
+// ====================
+
 func (s *Server) handlePLCExport(w http.ResponseWriter, r *http.Request) {
     ctx := r.Context()
 
···
     if afterStr != "" {
         // Try multiple timestamp formats (from most specific to least)
         formats := []string{
-            time.RFC3339Nano,           // 2023-11-09T03:55:00.123456789Z
-            time.RFC3339,               // 2023-11-09T03:55:00Z
-            "2006-01-02T15:04:05.000Z", // 2023-11-09T03:55:00.000Z
-            "2006-01-02T15:04:05",      // 2023-11-09T03:55:00
-            "2006-01-02T15:04",         // 2023-11-09T03:55
-            "2006-01-02",               // 2023-11-09
+            time.RFC3339Nano,
+            time.RFC3339,
+            "2006-01-02T15:04:05.000Z",
+            "2006-01-02T15:04:05",
+            "2006-01-02T15:04",
+            "2006-01-02",
         }
 
         var parsed time.Time
         var parseErr error
-        parsed = time.Time{} // zero value
+        parsed = time.Time{}
 
         for _, format := range formats {
             parsed, parseErr = time.Parse(format, afterStr)
···
         }
     }
 
-// computeRemoteOperationsHash - matching format
+// ====================
+// Health Handler
+// ====================
+
+func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
+    respondJSON(w, map[string]string{"status": "ok"})
+}
+
+// ====================
+// Helper Functions
+// ====================
+
+// loadBundleOperations loads operations from a bundle file
+func (s *Server) loadBundleOperations(path string) ([]plc.PLCOperation, error) {
+    decoder, err := zstd.NewReader(nil)
+    if err != nil {
+        return nil, err
+    }
+    defer decoder.Close()
+
+    compressedData, err := os.ReadFile(path)
+    if err != nil {
+        return nil, err
+    }
+
+    decompressed, err := decoder.DecodeAll(compressedData, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    // Parse JSONL (newline-delimited JSON)
+    var operations []plc.PLCOperation
+    scanner := bufio.NewScanner(bytes.NewReader(decompressed))
+
+    lineNum := 0
+    for scanner.Scan() {
+        lineNum++
+        line := scanner.Bytes()
+
+        // Skip empty lines
+        if len(line) == 0 {
+            continue
+        }
+
+        var op plc.PLCOperation
+        if err := json.Unmarshal(line, &op); err != nil {
+            return nil, fmt.Errorf("failed to parse operation on line %d: %w", lineNum, err)
+        }
+
+        // CRITICAL: Store the original raw JSON bytes
+        op.RawJSON = make([]byte, len(line))
+        copy(op.RawJSON, line)
+
+        operations = append(operations, op)
+    }
+
+    if err := scanner.Err(); err != nil {
+        return nil, fmt.Errorf("error reading JSONL: %w", err)
+    }
+
+    return operations, nil
+}
+
+// computeRemoteOperationsHash computes hash for remote operations
 func computeRemoteOperationsHash(ops []plc.PLCOperation) (string, error) {
     var jsonlData []byte
     for i, op := range ops {
···
     return hex.EncodeToString(hash[:]), nil
 }
 
-func respondJSON(w http.ResponseWriter, data interface{}) {
-    w.Header().Set("Content-Type", "application/json")
-    json.NewEncoder(w).Encode(data)
-}
-
-// Helper function
+// statusToString converts status int to string
 func statusToString(status int) string {
     switch status {
-    case storage.PDSStatusOnline:
+    case storage.PDSStatusOnline: // Use PDSStatusOnline (alias)
         return "online"
-    case storage.PDSStatusOffline:
+    case storage.PDSStatusOffline: // Use PDSStatusOffline (alias)
         return "offline"
     default:
         return "unknown"
     }
 }
+
+// respondJSON writes JSON response
+func respondJSON(w http.ResponseWriter, data interface{}) {
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(data)
+}
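
For reference, the list payload assembled by handleGetEndpoints can be consumed like this. A minimal client sketch, assuming the API is reachable on localhost:8080 (host, port, and the endpointSummary struct are illustrative, not part of the diff):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // endpointSummary mirrors the per-endpoint map built in handleGetEndpoints.
    type endpointSummary struct {
        ID           int64  `json:"id"`
        EndpointType string `json:"endpoint_type"`
        Endpoint     string `json:"endpoint"`
        Status       string `json:"status"`
        UserCount    int64  `json:"user_count"`
    }

    func main() {
        // type, status, min_user_count, and limit are the query params the handler reads.
        resp, err := http.Get("http://localhost:8080/api/v1/endpoints?type=labeler&status=online")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var endpoints []endpointSummary
        if err := json.NewDecoder(resp.Body).Decode(&endpoints); err != nil {
            panic(err)
        }
        for _, ep := range endpoints {
            fmt.Printf("%-8s %-40s %s (%d users)\n", ep.EndpointType, ep.Endpoint, ep.Status, ep.UserCount)
        }
    }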
internal/api/server.go (+13 -12)
···
 func (s *Server) setupRoutes() {
     api := s.router.PathPrefix("/api/v1").Subrouter()
 
-    // PDS endpoints
-    api.HandleFunc("/pds", s.handleGetPDSList).Methods("GET")
-    api.HandleFunc("/pds/stats", s.handleGetPDSStats).Methods("GET")
-    api.HandleFunc("/pds/{endpoint}", s.handleGetPDS).Methods("GET")
+    // Endpoint routes (replaces PDS routes)
+    api.HandleFunc("/endpoints", s.handleGetEndpoints).Methods("GET")
+    api.HandleFunc("/endpoints/stats", s.handleGetEndpointStats).Methods("GET")
+    api.HandleFunc("/endpoints/{endpoint}", s.handleGetEndpoint).Methods("GET")
 
-    // Metrics endpoints
-    api.HandleFunc("/metrics/plc", s.handleGetPLCMetrics).Methods("GET")
-
-    // PLC Bundle endpoints
+    // PLC Bundle routes
     api.HandleFunc("/plc/bundles", s.handleGetPLCBundles).Methods("GET")
     api.HandleFunc("/plc/bundles/stats", s.handleGetPLCBundleStats).Methods("GET")
     api.HandleFunc("/plc/bundles/chain", s.handleGetChainInfo).Methods("GET")
     api.HandleFunc("/plc/bundles/verify-chain", s.handleVerifyChain).Methods("POST")
+    api.HandleFunc("/plc/bundles/{number}", s.handleGetPLCBundle).Methods("GET")
     api.HandleFunc("/plc/bundles/{number}/dids", s.handleGetPLCBundleDIDs).Methods("GET")
     api.HandleFunc("/plc/bundles/{number}/download", s.handleDownloadPLCBundle).Methods("GET")
     api.HandleFunc("/plc/bundles/{bundleNumber}/verify", s.handleVerifyPLCBundle).Methods("POST")
-    api.HandleFunc("/plc/bundles/{number}", s.handleGetPLCBundle).Methods("GET")
+
+    // PLC Export endpoint (simulates PLC directory)
     api.HandleFunc("/plc/export", s.handlePLCExport).Methods("GET")
 
-    // PLC/DID endpoints
+    // DID routes
     api.HandleFunc("/plc/did/{did}", s.handleGetDID).Methods("GET")
     api.HandleFunc("/plc/did/{did}/history", s.handleGetDIDHistory).Methods("GET")
 
-    // Mempool endpoint - NEW
+    // Mempool routes
     api.HandleFunc("/mempool/stats", s.handleGetMempoolStats).Methods("GET")
 
-    // Chain verification - NEW
+    // Metrics routes
+    api.HandleFunc("/metrics/plc", s.handleGetPLCMetrics).Methods("GET")
 
     // Health check
     s.router.HandleFunc("/health", s.handleHealth).Methods("GET")
 }
+
 func (s *Server) Start() error {
     log.Info("API server listening on %s", s.server.Addr)
     return s.server.ListenAndServe()
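
One detail worth noting about the registration order above: gorilla/mux tries routes in the order they are added, so the literal /plc/bundles/stats, /plc/bundles/chain, and /plc/bundles/verify-chain routes must stay registered before the /plc/bundles/{number} template; otherwise a request for "stats" would be captured as a bundle number. A minimal sketch of the rule (handlers and port are illustrative):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func main() {
        r := mux.NewRouter()
        // Literal route first: mux matches routes in registration order.
        r.HandleFunc("/plc/bundles/stats", func(w http.ResponseWriter, _ *http.Request) {
            fmt.Fprintln(w, "stats")
        })
        // Template route second. If it were registered first, GET /plc/bundles/stats
        // would land here with mux.Vars(req)["number"] == "stats".
        r.HandleFunc("/plc/bundles/{number}", func(w http.ResponseWriter, req *http.Request) {
            fmt.Fprintln(w, "bundle", mux.Vars(req)["number"])
        })
        http.ListenAndServe(":8080", r)
    }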
internal/pds/scanner.go (+13 -10)
···
     startTime := time.Now()
     log.Info("Starting PDS availability scan...")
 
-    servers, err := s.db.GetPDSServers(ctx, nil)
+    // Get only PDS endpoints
+    servers, err := s.db.GetEndpoints(ctx, &storage.EndpointFilter{
+        Type: "pds",
+    })
     if err != nil {
         return err
     }
···
     log.Info("Scanning %d PDS servers...", len(servers))
 
     // Worker pool
-    jobs := make(chan *storage.PDS, len(servers))
+    jobs := make(chan *storage.Endpoint, len(servers))
     results := make(chan *PDSStatus, len(servers))
 
     var wg sync.WaitGroup
···
     }
 
     // Build scan data
-    scanData := &storage.PDSScanData{
+    scanData := &storage.EndpointScanData{
         ServerInfo: status.Description,
         DIDs:       status.DIDs,
         DIDCount:   len(status.DIDs),
     }
 
-    // Update using PDS ID
-    if err := s.db.UpdatePDSStatus(ctx, status.PDSID, &storage.PDSUpdate{
+    // Update using Endpoint ID
+    if err := s.db.UpdateEndpointStatus(ctx, status.EndpointID, &storage.EndpointUpdate{
         Status:       statusCode,
         LastChecked:  status.LastChecked,
         ResponseTime: status.ResponseTime.Seconds() * 1000, // Convert to ms
         ScanData:     scanData,
     }); err != nil {
-        log.Error("Error updating PDS ID %d: %v", status.PDSID, err)
+        log.Error("Error updating endpoint ID %d: %v", status.EndpointID, err)
     }
 
     if status.Available {
···
     return nil
 }
 
-func (s *Scanner) worker(ctx context.Context, jobs <-chan *storage.PDS, results chan<- *PDSStatus) {
+func (s *Scanner) worker(ctx context.Context, jobs <-chan *storage.Endpoint, results chan<- *PDSStatus) {
     for server := range jobs {
         select {
         case <-ctx.Done():
···
         }
     }
 
-func (s *Scanner) scanPDS(ctx context.Context, pdsID int64, endpoint string) *PDSStatus {
+func (s *Scanner) scanPDS(ctx context.Context, endpointID int64, endpoint string) *PDSStatus {
     status := &PDSStatus{
-        PDSID:       pdsID, // Store ID
+        EndpointID:  endpointID, // Store Endpoint ID
         Endpoint:    endpoint,
         LastChecked: time.Now(),
     }
···
         status.Description = desc
     }
 
-    // List repos (DIDs)
+    // Optionally list repos (DIDs) - commented out by default for performance
     /*dids, err := s.client.ListRepos(ctx, endpoint)
     if err != nil {
         log.Verbose("Warning: failed to list repos for %s: %v", endpoint, err)
internal/pds/types.go (+1 -1)
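
The single-line change in this file is not shown. Judging from the scanner changes above (status.PDSID becomes status.EndpointID, and scanPDS now stores an EndpointID), it is presumably the renamed ID field on PDSStatus. A hypothetical sketch, with the field set inferred from how scanner.go uses the struct; only the EndpointID rename is actually implied by this diff:

    // PDSStatus holds the result of probing a single endpoint (sketch).
    type PDSStatus struct {
        EndpointID   int64 // was PDSID
        Endpoint     string
        Available    bool
        ResponseTime time.Duration
        LastChecked  time.Time
        Description  interface{} // server info, stored as ServerInfo in scan data
        DIDs         []string
    }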
internal/plc/scanner.go (+50 -22)
···
     "context"
     "encoding/json"
     "fmt"
+    "strings"
     "time"
 
-    "github.com/acarl005/stripansi"
     "github.com/atscan/atscanner/internal/config"
     "github.com/atscan/atscanner/internal/log"
     "github.com/atscan/atscanner/internal/storage"
···
 
 // processBatch processes operations for PDS discovery
 func (s *Scanner) processBatch(ctx context.Context, operations []PLCOperation) (int64, error) {
-    newPDSCount := int64(0)
-    seenInBatch := make(map[string]*PLCOperation)
+    newEndpointCount := int64(0)
+    seenInBatch := make(map[string]*PLCOperation) // key: "type:endpoint"
 
     for _, op := range operations {
         if op.IsNullified() {
             continue
         }
 
-        pdsEndpoint := s.extractPDSFromOperation(op)
-        if pdsEndpoint == "" {
-            continue
+        endpoints := s.extractEndpointsFromOperation(op)
+        for _, ep := range endpoints {
+            key := fmt.Sprintf("%s:%s", ep.Type, ep.Endpoint)
+            if _, seen := seenInBatch[key]; !seen {
+                seenInBatch[key] = &op
+            }
         }
+    }
 
-        if _, seen := seenInBatch[pdsEndpoint]; !seen {
-            seenInBatch[pdsEndpoint] = &op
-        }
-    }
+    for key, firstOp := range seenInBatch {
+        parts := strings.SplitN(key, ":", 2)
+        endpointType := parts[0]
+        endpoint := parts[1]
 
-    for pdsEndpoint, firstOp := range seenInBatch {
-        exists, err := s.db.PDSExists(ctx, pdsEndpoint)
+        exists, err := s.db.EndpointExists(ctx, endpoint, endpointType)
         if err != nil || exists {
             continue
         }
 
-        if err := s.db.UpsertPDS(ctx, &storage.PDS{
-            Endpoint:     pdsEndpoint,
+        if err := s.db.UpsertEndpoint(ctx, &storage.Endpoint{
+            EndpointType: endpointType,
+            Endpoint:     endpoint,
             DiscoveredAt: firstOp.CreatedAt,
             LastChecked:  time.Time{},
-            Status:       storage.PDSStatusUnknown,
+            Status:       storage.EndpointStatusUnknown,
         }); err != nil {
-            log.Error("Error storing PDS %s: %v", stripansi.Strip(pdsEndpoint), err)
+            log.Error("Error storing %s endpoint %s: %v", endpointType, endpoint, err)
             continue
         }
 
-        log.Info("✓ Discovered new PDS: %s", stripansi.Strip(pdsEndpoint))
-        newPDSCount++
+        log.Info("✓ Discovered new %s endpoint: %s", endpointType, endpoint)
+        newEndpointCount++
     }
 
-    return newPDSCount, nil
+    return newEndpointCount, nil
 }
 
-func (s *Scanner) extractPDSFromOperation(op PLCOperation) string {
+// extractEndpointsFromOperation extracts ALL service endpoints
+func (s *Scanner) extractEndpointsFromOperation(op PLCOperation) []EndpointInfo {
+    var endpoints []EndpointInfo
+
     if services, ok := op.Operation["services"].(map[string]interface{}); ok {
+        // Extract PDS
         if atprotoPDS, ok := services["atproto_pds"].(map[string]interface{}); ok {
             if endpoint, ok := atprotoPDS["endpoint"].(string); ok {
                 if svcType, ok := atprotoPDS["type"].(string); ok {
                     if svcType == "AtprotoPersonalDataServer" {
-                        return endpoint
+                        endpoints = append(endpoints, EndpointInfo{
+                            Type:     "pds",
+                            Endpoint: endpoint,
+                        })
+                    }
+                }
+            }
+        }
+
+        // Extract Labeler
+        if atprotoLabeler, ok := services["atproto_labeler"].(map[string]interface{}); ok {
+            if endpoint, ok := atprotoLabeler["endpoint"].(string); ok {
+                if svcType, ok := atprotoLabeler["type"].(string); ok {
+                    if svcType == "AtprotoLabeler" {
+                        endpoints = append(endpoints, EndpointInfo{
+                            Type:     "labeler",
+                            Endpoint: endpoint,
+                        })
                     }
                 }
             }
         }
+
+        // Add more service types as needed...
     }
-    return ""
+
+    return endpoints
 }
 
 func contains(s, substr string) bool {
internal/plc/types.go (+5)
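
The five added lines in this file are not shown either, but the scanner above constructs EndpointInfo{Type: ..., Endpoint: ...} values, so they presumably define something like this sketch (inferred from usage, not copied from the repo):

    // EndpointInfo pairs a discovered service endpoint with its type
    // ("pds", "labeler", ...).
    type EndpointInfo struct {
        Type     string
        Endpoint string
    }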
internal/storage/db.go (+10 -11)
···
     Close() error
     Migrate() error
 
-    // PDS operations
-    UpsertPDS(ctx context.Context, pds *PDS) error
-    GetPDS(ctx context.Context, endpoint string) (*PDS, error)
-    GetPDSByID(ctx context.Context, id int64) (*PDS, error)
-    GetPDSServers(ctx context.Context, filter *PDSFilter) ([]*PDS, error)
-    UpdatePDSStatus(ctx context.Context, pdsID int64, update *PDSUpdate) error
-    PDSExists(ctx context.Context, endpoint string) (bool, error)
-    GetPDSIDByEndpoint(ctx context.Context, endpoint string) (int64, error)
-    GetPDSScans(ctx context.Context, pdsID int64, limit int) ([]*PDSScan, error)
+    // Endpoint operations (renamed from PDS)
+    UpsertEndpoint(ctx context.Context, endpoint *Endpoint) error
+    GetEndpoint(ctx context.Context, endpoint string, endpointType string) (*Endpoint, error)
+    GetEndpointByID(ctx context.Context, id int64) (*Endpoint, error)
+    GetEndpoints(ctx context.Context, filter *EndpointFilter) ([]*Endpoint, error)
+    UpdateEndpointStatus(ctx context.Context, endpointID int64, update *EndpointUpdate) error
+    EndpointExists(ctx context.Context, endpoint string, endpointType string) (bool, error)
+    GetEndpointIDByEndpoint(ctx context.Context, endpoint string, endpointType string) (int64, error)
+    GetEndpointScans(ctx context.Context, endpointID int64, limit int) ([]*EndpointScan, error)
 
     // Cursor operations
     GetScanCursor(ctx context.Context, source string) (*ScanCursor, error)
···
     // Bundle operations
     CreateBundle(ctx context.Context, bundle *PLCBundle) error
     GetBundleByNumber(ctx context.Context, bundleNumber int) (*PLCBundle, error)
-    // GetBundleByID removed - bundle_number IS the ID
     GetBundles(ctx context.Context, limit int) ([]*PLCBundle, error)
     GetBundlesForDID(ctx context.Context, did string) ([]*PLCBundle, error)
     GetBundleStats(ctx context.Context) (int64, int64, error)
···
     // Metrics
     StorePLCMetrics(ctx context.Context, metrics *PLCMetrics) error
     GetPLCMetrics(ctx context.Context, limit int) ([]*PLCMetrics, error)
-    GetPDSStats(ctx context.Context) (*PDSStats, error)
+    GetEndpointStats(ctx context.Context) (*EndpointStats, error)
 }
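
Because the interface methods were renamed rather than added alongside the old names, every implementation has to be updated in the same commit. A compile-time assertion catches a missed rename at build time. A one-line sketch, assuming the interface shown here is named Database (its actual name is not visible in this hunk):

    // e.g. at the top of internal/storage/sqlite.go
    var _ Database = (*SQLiteDB)(nil) // compilation fails if SQLiteDB lacks any renamed method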
internal/storage/sqlite.go (+121 -85)
···
 
 func (s *SQLiteDB) Migrate() error {
     schema := `
-    -- PDS tables (same as before)
-    CREATE TABLE IF NOT EXISTS pds_servers (
+    -- Endpoints table (replaces pds_servers)
+    CREATE TABLE IF NOT EXISTS endpoints (
         id INTEGER PRIMARY KEY AUTOINCREMENT,
-        endpoint TEXT UNIQUE NOT NULL,
+        endpoint_type TEXT NOT NULL DEFAULT 'pds',
+        endpoint TEXT NOT NULL,
         discovered_at TIMESTAMP NOT NULL,
         last_checked TIMESTAMP,
         status INTEGER DEFAULT 0,
         user_count INTEGER DEFAULT 0,
-        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+        UNIQUE(endpoint_type, endpoint)
     );
 
-    CREATE INDEX IF NOT EXISTS idx_pds_endpoint ON pds_servers(endpoint);
-    CREATE INDEX IF NOT EXISTS idx_pds_status ON pds_servers(status);
-    CREATE INDEX IF NOT EXISTS idx_pds_user_count ON pds_servers(user_count);
+    CREATE INDEX IF NOT EXISTS idx_endpoints_type_endpoint ON endpoints(endpoint_type, endpoint);
+    CREATE INDEX IF NOT EXISTS idx_endpoints_status ON endpoints(status);
+    CREATE INDEX IF NOT EXISTS idx_endpoints_type ON endpoints(endpoint_type);
+    CREATE INDEX IF NOT EXISTS idx_endpoints_user_count ON endpoints(user_count);
 
+    -- Keep pds_scans table (or rename to endpoint_scans later)
     CREATE TABLE IF NOT EXISTS pds_scans (
         id INTEGER PRIMARY KEY AUTOINCREMENT,
         pds_id INTEGER NOT NULL,
···
         response_time REAL,
         scan_data TEXT,
         scanned_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-        FOREIGN KEY (pds_id) REFERENCES pds_servers(id) ON DELETE CASCADE
+        FOREIGN KEY (pds_id) REFERENCES endpoints(id) ON DELETE CASCADE
     );
 
     CREATE INDEX IF NOT EXISTS idx_pds_scans_pds_id ON pds_scans(pds_id);
···
     return count, totalSize, err
 }
 
-// UpsertPDS inserts or updates a PDS server
-func (s *SQLiteDB) UpsertPDS(ctx context.Context, pds *PDS) error {
+// UpsertEndpoint inserts or updates an endpoint
+func (s *SQLiteDB) UpsertEndpoint(ctx context.Context, endpoint *Endpoint) error {
     query := `
-        INSERT INTO pds_servers (endpoint, discovered_at, last_checked, status)
-        VALUES (?, ?, ?, ?)
-        ON CONFLICT(endpoint) DO UPDATE SET
+        INSERT INTO endpoints (endpoint_type, endpoint, discovered_at, last_checked, status)
+        VALUES (?, ?, ?, ?, ?)
+        ON CONFLICT(endpoint_type, endpoint) DO UPDATE SET
             last_checked = excluded.last_checked
         RETURNING id
     `
-    err := s.db.QueryRowContext(ctx, query, pds.Endpoint, pds.DiscoveredAt, pds.LastChecked, pds.Status).Scan(&pds.ID)
+    err := s.db.QueryRowContext(ctx, query,
+        endpoint.EndpointType, endpoint.Endpoint, endpoint.DiscoveredAt,
+        endpoint.LastChecked, endpoint.Status).Scan(&endpoint.ID)
     return err
 }
 
-// PDSExists checks if a PDS endpoint already exists
-func (s *SQLiteDB) PDSExists(ctx context.Context, endpoint string) (bool, error) {
-    query := "SELECT EXISTS(SELECT 1 FROM pds_servers WHERE endpoint = ?)"
+// EndpointExists checks if an endpoint already exists
+func (s *SQLiteDB) EndpointExists(ctx context.Context, endpoint string, endpointType string) (bool, error) {
+    query := "SELECT EXISTS(SELECT 1 FROM endpoints WHERE endpoint = ? AND endpoint_type = ?)"
     var exists bool
-    err := s.db.QueryRowContext(ctx, query, endpoint).Scan(&exists)
+    err := s.db.QueryRowContext(ctx, query, endpoint, endpointType).Scan(&exists)
     return exists, err
 }
 
-// GetPDSIDByEndpoint gets the ID for an endpoint
-func (s *SQLiteDB) GetPDSIDByEndpoint(ctx context.Context, endpoint string) (int64, error) {
-    query := "SELECT id FROM pds_servers WHERE endpoint = ?"
+// GetEndpointIDByEndpoint gets the ID for an endpoint
+func (s *SQLiteDB) GetEndpointIDByEndpoint(ctx context.Context, endpoint string, endpointType string) (int64, error) {
+    query := "SELECT id FROM endpoints WHERE endpoint = ? AND endpoint_type = ?"
     var id int64
-    err := s.db.QueryRowContext(ctx, query, endpoint).Scan(&id)
+    err := s.db.QueryRowContext(ctx, query, endpoint, endpointType).Scan(&id)
     return id, err
 }
 
-// GetPDS retrieves a PDS by endpoint
-func (s *SQLiteDB) GetPDS(ctx context.Context, endpoint string) (*PDS, error) {
+// GetEndpoint retrieves an endpoint by endpoint string and type
+func (s *SQLiteDB) GetEndpoint(ctx context.Context, endpoint string, endpointType string) (*Endpoint, error) {
     query := `
-        SELECT id, endpoint, discovered_at, last_checked, status, user_count, updated_at
-        FROM pds_servers
-        WHERE endpoint = ?
+        SELECT id, endpoint_type, endpoint, discovered_at, last_checked, status, user_count, updated_at
+        FROM endpoints
+        WHERE endpoint = ? AND endpoint_type = ?
     `
 
-    var pds PDS
+    var ep Endpoint
     var lastChecked sql.NullTime
 
-    err := s.db.QueryRowContext(ctx, query, endpoint).Scan(
-        &pds.ID, &pds.Endpoint, &pds.DiscoveredAt, &lastChecked,
-        &pds.Status, &pds.UserCount, &pds.UpdatedAt,
+    err := s.db.QueryRowContext(ctx, query, endpoint, endpointType).Scan(
+        &ep.ID, &ep.EndpointType, &ep.Endpoint, &ep.DiscoveredAt, &lastChecked,
+        &ep.Status, &ep.UserCount, &ep.UpdatedAt,
     )
     if err != nil {
         return nil, err
     }
 
     if lastChecked.Valid {
-        pds.LastChecked = lastChecked.Time
+        ep.LastChecked = lastChecked.Time
     }
 
-    return &pds, nil
+    return &ep, nil
 }
 
-// GetPDSByID retrieves a PDS by ID
-func (s *SQLiteDB) GetPDSByID(ctx context.Context, id int64) (*PDS, error) {
+// GetEndpointByID retrieves an endpoint by ID
+func (s *SQLiteDB) GetEndpointByID(ctx context.Context, id int64) (*Endpoint, error) {
     query := `
-        SELECT id, endpoint, discovered_at, last_checked, status, user_count, updated_at
-        FROM pds_servers
+        SELECT id, endpoint_type, endpoint, discovered_at, last_checked, status, user_count, updated_at
+        FROM endpoints
         WHERE id = ?
     `
 
-    var pds PDS
+    var ep Endpoint
     var lastChecked sql.NullTime
 
     err := s.db.QueryRowContext(ctx, query, id).Scan(
-        &pds.ID, &pds.Endpoint, &pds.DiscoveredAt, &lastChecked,
-        &pds.Status, &pds.UserCount, &pds.UpdatedAt,
+        &ep.ID, &ep.EndpointType, &ep.Endpoint, &ep.DiscoveredAt, &lastChecked,
+        &ep.Status, &ep.UserCount, &ep.UpdatedAt,
     )
     if err != nil {
         return nil, err
     }
 
     if lastChecked.Valid {
-        pds.LastChecked = lastChecked.Time
+        ep.LastChecked = lastChecked.Time
     }
 
-    return &pds, nil
+    return &ep, nil
 }
 
-// GetPDSServers retrieves multiple PDS servers
-func (s *SQLiteDB) GetPDSServers(ctx context.Context, filter *PDSFilter) ([]*PDS, error) {
+// GetEndpoints retrieves multiple endpoints
+func (s *SQLiteDB) GetEndpoints(ctx context.Context, filter *EndpointFilter) ([]*Endpoint, error) {
     query := `
-        SELECT id, endpoint, discovered_at, last_checked, status, user_count, updated_at
-        FROM pds_servers
+        SELECT id, endpoint_type, endpoint, discovered_at, last_checked, status, user_count, updated_at
+        FROM endpoints
+        WHERE 1=1
     `
     args := []interface{}{}
 
-    if filter != nil && filter.Status != "" {
-        // Map string status to int
-        statusInt := PDSStatusUnknown
-        switch filter.Status {
-        case "online":
-            statusInt = PDSStatusOnline
-        case "offline":
-            statusInt = PDSStatusOffline
+    if filter != nil {
+        if filter.Type != "" {
+            query += " AND endpoint_type = ?"
+            args = append(args, filter.Type)
         }
-        query += " WHERE status = ?"
-        args = append(args, statusInt)
+        if filter.Status != "" {
+            statusInt := EndpointStatusUnknown
+            switch filter.Status {
+            case "online":
+                statusInt = EndpointStatusOnline
+            case "offline":
+                statusInt = EndpointStatusOffline
+            }
+            query += " AND status = ?"
+            args = append(args, statusInt)
+        }
+        if filter.MinUserCount > 0 {
+            query += " AND user_count >= ?"
+            args = append(args, filter.MinUserCount)
+        }
     }
 
     query += " ORDER BY user_count DESC"
···
     }
     defer rows.Close()
 
-    var servers []*PDS
+    var endpoints []*Endpoint
     for rows.Next() {
-        var pds PDS
+        var ep Endpoint
         var lastChecked sql.NullTime
 
         err := rows.Scan(
-            &pds.ID, &pds.Endpoint, &pds.DiscoveredAt, &lastChecked,
-            &pds.Status, &pds.UserCount, &pds.UpdatedAt,
+            &ep.ID, &ep.EndpointType, &ep.Endpoint, &ep.DiscoveredAt, &lastChecked,
+            &ep.Status, &ep.UserCount, &ep.UpdatedAt,
         )
         if err != nil {
             return nil, err
         }
 
         if lastChecked.Valid {
-            pds.LastChecked = lastChecked.Time
+            ep.LastChecked = lastChecked.Time
         }
 
-        servers = append(servers, &pds)
+        endpoints = append(endpoints, &ep)
     }
 
-    return servers, rows.Err()
+    return endpoints, rows.Err()
 }
 
-// UpdatePDSStatus updates the status and creates a scan record
-func (s *SQLiteDB) UpdatePDSStatus(ctx context.Context, pdsID int64, update *PDSUpdate) error {
+// UpdateEndpointStatus updates the status and creates a scan record
+func (s *SQLiteDB) UpdateEndpointStatus(ctx context.Context, endpointID int64, update *EndpointUpdate) error {
     tx, err := s.db.BeginTx(ctx, nil)
     if err != nil {
         return err
···
         userCount = update.ScanData.DIDCount
     }
 
-    // Update main pds_servers record
+    // Update main endpoints record
     query := `
-        UPDATE pds_servers
+        UPDATE endpoints
         SET status = ?, last_checked = ?, user_count = ?, updated_at = ?
         WHERE id = ?
     `
-    _, err = tx.ExecContext(ctx, query, update.Status, update.LastChecked, userCount, time.Now(), pdsID)
+    _, err = tx.ExecContext(ctx, query, update.Status, update.LastChecked, userCount, time.Now(), endpointID)
     if err != nil {
         return err
     }
···
         scanDataJSON, _ = json.Marshal(update.ScanData)
     }
 
-    // Insert scan history
+    // Insert scan history (reuse pds_scans table or rename it to endpoint_scans)
     scanQuery := `
         INSERT INTO pds_scans (pds_id, status, response_time, scan_data)
         VALUES (?, ?, ?, ?)
     `
-    _, err = tx.ExecContext(ctx, scanQuery, pdsID, update.Status, update.ResponseTime, string(scanDataJSON))
+    _, err = tx.ExecContext(ctx, scanQuery, endpointID, update.Status, update.ResponseTime, string(scanDataJSON))
     if err != nil {
         return err
     }
···
     return tx.Commit()
 }
 
-// GetPDSScans retrieves scan history for a PDS
-func (s *SQLiteDB) GetPDSScans(ctx context.Context, pdsID int64, limit int) ([]*PDSScan, error) {
+// GetEndpointScans retrieves scan history for an endpoint
+func (s *SQLiteDB) GetEndpointScans(ctx context.Context, endpointID int64, limit int) ([]*EndpointScan, error) {
     query := `
         SELECT id, pds_id, status, response_time, scan_data, scanned_at
         FROM pds_scans
···
         LIMIT ?
     `
 
-    rows, err := s.db.QueryContext(ctx, query, pdsID, limit)
+    rows, err := s.db.QueryContext(ctx, query, endpointID, limit)
     if err != nil {
         return nil, err
     }
     defer rows.Close()
 
-    var scans []*PDSScan
+    var scans []*EndpointScan
     for rows.Next() {
-        var scan PDSScan
+        var scan EndpointScan
         var responseTime sql.NullFloat64
         var scanDataJSON sql.NullString
 
-        err := rows.Scan(&scan.ID, &scan.PDSID, &scan.Status, &responseTime, &scanDataJSON, &scan.ScannedAt)
+        err := rows.Scan(&scan.ID, &scan.EndpointID, &scan.Status, &responseTime, &scanDataJSON, &scan.ScannedAt)
         if err != nil {
             return nil, err
         }
···
         }
 
         if scanDataJSON.Valid && scanDataJSON.String != "" {
-            var scanData PDSScanData
+            var scanData EndpointScanData
             if err := json.Unmarshal([]byte(scanDataJSON.String), &scanData); err == nil {
                 scan.ScanData = &scanData
             }
···
     return scans, rows.Err()
 }
 
-// GetPDSStats returns aggregate statistics
-func (s *SQLiteDB) GetPDSStats(ctx context.Context) (*PDSStats, error) {
+// GetEndpointStats returns aggregate statistics about all endpoints
+func (s *SQLiteDB) GetEndpointStats(ctx context.Context) (*EndpointStats, error) {
     query := `
         SELECT
-            COUNT(*) as total_pds,
-            SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) as online_pds,
-            SUM(CASE WHEN status = 2 THEN 1 ELSE 0 END) as offline_pds,
+            COUNT(*) as total_endpoints,
+            SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) as online_endpoints,
+            SUM(CASE WHEN status = 2 THEN 1 ELSE 0 END) as offline_endpoints,
             (SELECT AVG(response_time) FROM pds_scans WHERE response_time > 0
              AND scanned_at > datetime('now', '-1 hour')) as avg_response_time,
             SUM(user_count) as total_dids
-        FROM pds_servers
+        FROM endpoints
     `
 
-    var stats PDSStats
+    var stats EndpointStats
     var avgResponseTime sql.NullFloat64
 
     err := s.db.QueryRowContext(ctx, query).Scan(
-        &stats.TotalPDS, &stats.OnlinePDS, &stats.OfflinePDS, &avgResponseTime, &stats.TotalDIDs,
+        &stats.TotalEndpoints, &stats.OnlineEndpoints, &stats.OfflineEndpoints,
+        &avgResponseTime, &stats.TotalDIDs,
     )
 
     if avgResponseTime.Valid {
         stats.AvgResponseTime = avgResponseTime.Float64
+    }
+
+    // Get counts by type
+    typeQuery := `
+        SELECT endpoint_type, COUNT(*)
+        FROM endpoints
+        GROUP BY endpoint_type
+    `
+    rows, err := s.db.QueryContext(ctx, typeQuery)
+    if err == nil {
+        defer rows.Close()
+        stats.ByType = make(map[string]int64)
+        for rows.Next() {
+            var typ string
+            var count int64
+            if err := rows.Scan(&typ, &count); err == nil {
+                stats.ByType[typ] = count
+            }
+        }
     }
 
     return &stats, err
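
Note that Migrate() only creates the endpoints table; nothing in this diff copies rows out of an existing pds_servers table, so an in-place upgrade would start with an empty endpoints table (and old pds_scans rows would reference ids from the legacy table). If data needs to carry over, a one-time copy along these lines could run after the schema statement. A sketch under that assumption, not part of the diff:

    // migratePDSServers copies legacy pds_servers rows into endpoints as type "pds".
    func (s *SQLiteDB) migratePDSServers(ctx context.Context) error {
        // Skip silently on fresh installs where the legacy table never existed.
        var name string
        err := s.db.QueryRowContext(ctx,
            `SELECT name FROM sqlite_master WHERE type = 'table' AND name = 'pds_servers'`).Scan(&name)
        if err == sql.ErrNoRows {
            return nil
        }
        if err != nil {
            return err
        }

        _, err = s.db.ExecContext(ctx, `
            INSERT OR IGNORE INTO endpoints
                (endpoint_type, endpoint, discovered_at, last_checked, status, user_count, updated_at)
            SELECT 'pds', endpoint, discovered_at, last_checked, status, user_count, updated_at
            FROM pds_servers
        `)
        return err
    }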
internal/storage/types.go (+53 -35)
···
     UpdatedAt time.Time
 }
 
-// PDS represents a Personal Data Server
-type PDS struct {
-    ID       int64  // NEW: Primary key
-    Endpoint string // UNIQUE but not primary key
+// Endpoint represents any AT Protocol service endpoint
+type Endpoint struct {
+    ID           int64
+    EndpointType string // "pds", "labeler", etc.
+    Endpoint     string
     DiscoveredAt time.Time
     LastChecked  time.Time
-    Status       int // 0=unknown, 1=online, 2=offline
+    Status       int
     UserCount    int64
     UpdatedAt    time.Time
 }
 
-// PDSUpdate contains fields to update for a PDS
-type PDSUpdate struct {
+// EndpointUpdate contains fields to update for an Endpoint
+type EndpointUpdate struct {
     Status       int
     LastChecked  time.Time
-    ResponseTime float64 // milliseconds as float
-    ScanData     *PDSScanData
+    ResponseTime float64
+    ScanData     *EndpointScanData
 }
 
-// PDSScanData contains data from a PDS scan
-type PDSScanData struct {
+// EndpointScanData contains data from an endpoint scan
+type EndpointScanData struct {
     ServerInfo interface{} `json:"server_info,omitempty"`
     DIDs       []string    `json:"dids,omitempty"`
     DIDCount   int         `json:"did_count"`
+    Metadata   interface{} `json:"metadata,omitempty"` // Type-specific metadata
 }
 
-// PDSScan represents a historical PDS scan
-type PDSScan struct {
+// EndpointScan represents a historical endpoint scan
+type EndpointScan struct {
     ID           int64
-    PDSID        int64
+    EndpointID   int64
     Status       int
     ResponseTime float64
-    ScanData     *PDSScanData
+    ScanData     *EndpointScanData
     ScannedAt    time.Time
 }
 
···
     PDSStatusOffline = 2
 )
 
-// PDSFilter for querying PDS servers
-type PDSFilter struct {
+// Endpoint status constants (aliases for compatibility)
+const (
+    EndpointStatusUnknown = PDSStatusUnknown
+    EndpointStatusOnline  = PDSStatusOnline
+    EndpointStatusOffline = PDSStatusOffline
+)
+
+// EndpointFilter for querying endpoints
+type EndpointFilter struct {
+    Type         string // "pds", "labeler", etc.
     Status       string
     MinUserCount int64
     Limit        int
     Offset       int
 }
 
-// PDSStats contains aggregate statistics about PDS servers
-type PDSStats struct {
-    TotalPDS        int64   `json:"total_pds"`
-    UniquePDS       int64   `json:"unique_pds"`
-    OnlinePDS       int64   `json:"online_pds"`
-    OfflinePDS      int64   `json:"offline_pds"`
-    AvgResponseTime float64 `json:"avg_response_time"`
-    TotalDIDs       int64   `json:"total_dids"`
+// EndpointStats contains aggregate statistics about endpoints
+type EndpointStats struct {
+    TotalEndpoints   int64            `json:"total_endpoints"`
+    ByType           map[string]int64 `json:"by_type"`
+    OnlineEndpoints  int64            `json:"online_endpoints"`
+    OfflineEndpoints int64            `json:"offline_endpoints"`
+    AvgResponseTime  float64          `json:"avg_response_time"`
+    TotalDIDs        int64            `json:"total_dids"` // Only for PDS
 }
 
+// Legacy type aliases for backward compatibility in code
+type PDS = Endpoint
+type PDSUpdate = EndpointUpdate
+type PDSScanData = EndpointScanData
+type PDSScan = EndpointScan
+type PDSFilter = EndpointFilter
+type PDSStats = EndpointStats
+
 // PLCMetrics contains metrics from PLC directory scans
 type PLCMetrics struct {
     TotalDIDs int64 `json:"total_dids"`
···
 
 // PLCBundle represents a cached bundle of PLC operations
 type PLCBundle struct {
-    BundleNumber int // PRIMARY KEY
+    BundleNumber int
     StartTime    time.Time
     EndTime      time.Time
     BoundaryCIDs []string
     DIDs         []string
-    Hash           string // SHA256 of uncompressed JSONL (verifiable against PLC)
-    CompressedHash string // SHA256 of compressed file on disk
-    CompressedSize int64  // Size of compressed file in bytes
-    PrevBundleHash string // Hash of previous bundle (for chain)
+    Hash           string
+    CompressedHash string
+    CompressedSize int64
+    PrevBundleHash string
     Compressed   bool
     CreatedAt    time.Time
 }
···
     return filepath.Join(bundleDir, fmt.Sprintf("%06d.jsonl.zst", b.BundleNumber))
 }
 
-// OperationCount() returns 1000 (all bundles have exactly 1000 operations)
+// OperationCount returns the number of operations in a bundle (always 10000)
 func (b *PLCBundle) OperationCount() int {
-    return 1000
+    return 10000
 }
 
 // MempoolOperation represents an operation waiting to be bundled
 type MempoolOperation struct {
     ID        int64
     DID       string
-    Operation string // JSON of the full operation
+    Operation string
     CID       string
     CreatedAt time.Time
     AddedAt   time.Time
 }
 
-// ScanCursor now stores bundle number
+// ScanCursor stores scanning progress
 type ScanCursor struct {
     Source           string
-    LastBundleNumber int // NEW: Last processed bundle number
+    LastBundleNumber int
     LastScanTime     time.Time
     RecordsProcessed int64
 }