internal/api/handlers.go  +15 -64
···
 
 // ===== GLOBAL DID HANDLER =====
 
-// extractHandle safely extracts the handle from a PLC operation
-func extractHandle(op *plc.PLCOperation) string {
-    if op == nil || op.Operation == nil {
-        return ""
-    }
-
-    // Get "alsoKnownAs"
-    aka, ok := op.Operation["alsoKnownAs"].([]interface{})
-    if !ok {
-        return ""
-    }
-
-    // Find the handle (e.g., "at://handle.bsky.social")
-    for _, item := range aka {
-        if handle, ok := item.(string); ok {
-            if strings.HasPrefix(handle, "at://") {
-                return strings.TrimPrefix(handle, "at://")
-            }
-        }
-    }
-    return ""
-}
-
-// extractPDS safely extracts the PDS endpoint from a PLC operation
-func extractPDS(op *plc.PLCOperation) string {
-    if op == nil || op.Operation == nil {
-        return ""
-    }
-
-    // Get "services"
-    services, ok := op.Operation["services"].(map[string]interface{})
-    if !ok {
-        return ""
-    }
-
-    // Get "atproto_pds"
-    pdsService, ok := services["atproto_pds"].(map[string]interface{})
-    if !ok {
-        return ""
-    }
-
-    // Get "endpoint"
-    if endpoint, ok := pdsService["endpoint"].(string); ok {
-        return endpoint
-    }
-
-    return ""
-}
-
 // handleGetGlobalDID provides a consolidated view of a DID
 func (s *Server) handleGetGlobalDID(w http.ResponseWriter, r *http.Request) {
     resp := newResponse(w)
···
     did := vars["did"]
     ctx := r.Context()
 
-    // --- 1. Get Combined DID Info (from dids and pds_repos) ---
+    // Get DID info (now includes handle and pds from database)
    didInfo, err := s.db.GetGlobalDIDInfo(ctx, did)
     if err != nil {
         if err == sql.ErrNoRows {
-            // Check if DID indexing is disabled (from config)
             if !s.plcIndexDIDs {
                 resp.error("DID not found. Note: DID indexing is disabled in configuration.", http.StatusNotFound)
             } else {
···
         return
     }
 
-    // --- 2. Get Latest PLC Operation (from plc_bundles) ---
+    // Optionally include latest operation details if requested
     var latestOperation *plc.PLCOperation
-    if len(didInfo.BundleNumbers) > 0 {
+    if r.URL.Query().Get("include_operation") == "true" && len(didInfo.BundleNumbers) > 0 {
         lastBundleNum := didInfo.BundleNumbers[len(didInfo.BundleNumbers)-1]
         ops, err := s.bundleManager.LoadBundleOperations(ctx, lastBundleNum)
         if err != nil {
···
         }
     }
 
-    // --- 3. Extract Handle and PDS from latest operation ---
-    currentHandle := extractHandle(latestOperation)
-    currentPDS := extractPDS(latestOperation)
-
-    // --- 4. Combine and Respond ---
-    resp.json(map[string]interface{}{
+    result := map[string]interface{}{
         "did":                  didInfo.DID,
-        "handle":               currentHandle, // NEW
-        "current_pds":          currentPDS,    // NEW
+        "handle":               didInfo.Handle,     // From database!
+        "current_pds":          didInfo.CurrentPDS, // From database!
         "plc_index_created_at": didInfo.CreatedAt,
         "plc_bundle_history":   didInfo.BundleNumbers,
-        "pds_hosting_on":       didInfo.HostingOn, // This is the historical list from pds_repos
-        "latest_plc_operation": latestOperation,
-    })
+        "pds_hosting_on":       didInfo.HostingOn,
+    }
+
+    // Only include operation if requested
+    if latestOperation != nil {
+        result["latest_plc_operation"] = latestOperation
+    }
+
+    resp.json(result)
 }
 
 // ===== DID HANDLERS =====
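Net effect of the handler change: handle and current_pds are read straight from the dids table instead of being re-parsed from the latest bundle on every request, and the bundle is only loaded when the caller opts in via include_operation=true. A sketch of the response shape (the route shown is assumed from the handler wiring, and all values are illustrative):

    GET /api/v1/did/{did}?include_operation=true

    {
      "did": "did:plc:example",
      "handle": "alice.example.com",
      "current_pds": "https://pds.example.com",
      "plc_index_created_at": "2024-01-01T00:00:00Z",
      "plc_bundle_history": [12, 57, 203],
      "pds_hosting_on": [],
      "latest_plc_operation": { "...": "..." }
    }

Without include_operation=true the latest_plc_operation key is omitted entirely rather than sent as null.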
internal/plc/bundle.go  +14
···
     // NEW: Only index DIDs if enabled
     if bm.indexDIDs {
         start := time.Now()
+
+        // Extract handle and PDS for each DID using centralized helper
+        didInfoMap := ExtractDIDInfoMap(bf.operations)
+
         if err := bm.db.AddBundleDIDs(ctx, bundleNum, dids); err != nil {
             log.Error("Failed to index DIDs for bundle %06d: %v", bundleNum, err)
             // Don't return error - bundle is already created
         } else {
+            // Update handle and PDS for each DID
+            for did, info := range didInfoMap {
+                // Validate handle length before saving
+                validHandle := ValidateHandle(info.Handle)
+
+                if err := bm.db.UpsertDID(ctx, did, bundleNum, validHandle, info.PDS); err != nil {
+                    log.Error("Failed to update DID %s metadata: %v", did, err)
+                }
+            }
+
             elapsed := time.Since(start)
             log.Verbose("✓ Indexed %d unique DIDs for bundle %06d in %v", len(dids), bundleNum, elapsed)
         }
internal/plc/helpers.go  +94 (new file)
···
+package plc
+
+import "strings"
+
+// MaxHandleLength is the maximum allowed handle length for database storage
+const MaxHandleLength = 500
+
+// ExtractHandle safely extracts the handle from a PLC operation
+func ExtractHandle(op *PLCOperation) string {
+    if op == nil || op.Operation == nil {
+        return ""
+    }
+
+    // Get "alsoKnownAs"
+    aka, ok := op.Operation["alsoKnownAs"].([]interface{})
+    if !ok {
+        return ""
+    }
+
+    // Find the handle (e.g., "at://handle.bsky.social")
+    for _, item := range aka {
+        if handle, ok := item.(string); ok {
+            if strings.HasPrefix(handle, "at://") {
+                return strings.TrimPrefix(handle, "at://")
+            }
+        }
+    }
+    return ""
+}
+
+// ValidateHandle checks if a handle is valid for database storage.
+// Returns an empty string if the handle is too long.
+func ValidateHandle(handle string) string {
+    if len(handle) > MaxHandleLength {
+        return ""
+    }
+    return handle
+}
+
+// ExtractPDS safely extracts the PDS endpoint from a PLC operation
+func ExtractPDS(op *PLCOperation) string {
+    if op == nil || op.Operation == nil {
+        return ""
+    }
+
+    // Get "services"
+    services, ok := op.Operation["services"].(map[string]interface{})
+    if !ok {
+        return ""
+    }
+
+    // Get "atproto_pds"
+    pdsService, ok := services["atproto_pds"].(map[string]interface{})
+    if !ok {
+        return ""
+    }
+
+    // Get "endpoint"
+    if endpoint, ok := pdsService["endpoint"].(string); ok {
+        return endpoint
+    }
+
+    return ""
+}
+
+// DIDInfo contains extracted metadata from a PLC operation
+type DIDInfo struct {
+    Handle string
+    PDS    string
+}
+
+// ExtractDIDInfo extracts both handle and PDS from an operation
+func ExtractDIDInfo(op *PLCOperation) DIDInfo {
+    return DIDInfo{
+        Handle: ExtractHandle(op),
+        PDS:    ExtractPDS(op),
+    }
+}
+
+// ExtractDIDInfoMap creates a map of DID -> info from operations.
+// Processes in reverse order to get the latest state for each DID.
+func ExtractDIDInfoMap(ops []PLCOperation) map[string]DIDInfo {
+    infoMap := make(map[string]DIDInfo)
+
+    // Process in reverse to get latest state
+    for i := len(ops) - 1; i >= 0; i-- {
+        op := ops[i]
+        if _, exists := infoMap[op.DID]; !exists {
+            infoMap[op.DID] = ExtractDIDInfo(&op)
+        }
+    }
+
+    return infoMap
+}
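Because the helpers are now exported, the last-write-wins behavior of ExtractDIDInfoMap can be pinned down in a unit test. A minimal sketch (hypothetical test file, not part of this diff; it assumes the DID and Operation fields of PLCOperation are the only ones needed here):

    package plc

    import "testing"

    func TestExtractDIDInfoMapLatestWins(t *testing.T) {
        // Two operations for the same DID; the second is the newer one.
        ops := []PLCOperation{
            {DID: "did:plc:example", Operation: map[string]interface{}{
                "alsoKnownAs": []interface{}{"at://old.example.com"},
            }},
            {DID: "did:plc:example", Operation: map[string]interface{}{
                "alsoKnownAs": []interface{}{"at://new.example.com"},
                "services": map[string]interface{}{
                    "atproto_pds": map[string]interface{}{
                        "endpoint": "https://pds.example.com",
                    },
                },
            }},
        }

        // Reverse iteration means the newest operation wins.
        info := ExtractDIDInfoMap(ops)["did:plc:example"]
        if info.Handle != "new.example.com" {
            t.Errorf("handle = %q, want new.example.com", info.Handle)
        }
        if info.PDS != "https://pds.example.com" {
            t.Errorf("pds = %q, want https://pds.example.com", info.PDS)
        }
    }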
internal/plc/scanner.go  +16
···
         return err
     }
 
+    // NEW: Create/update DID records immediately when adding to mempool
+    for _, op := range ops {
+        info := ExtractDIDInfo(&op)
+
+        // Validate handle length before saving
+        validHandle := ValidateHandle(info.Handle)
+        if info.Handle != "" && validHandle == "" {
+            log.Verbose("Skipping invalid handle for DID %s (length: %d)", op.DID, len(info.Handle))
+        }
+
+        if err := s.db.UpsertDIDFromMempool(ctx, op.DID, validHandle, info.PDS); err != nil {
+            log.Error("Failed to upsert DID %s in mempool: %v", op.DID, err)
+            // Don't fail the whole operation, just log
+        }
+    }
+
     // Process for endpoint discovery
     batchCounts, err := s.processBatch(ctx, ops)
     s.mergeCounts(counts, batchCounts)
internal/storage/db.go  +4 -3
···
     GetPLCMetrics(ctx context.Context, limit int) ([]*PLCMetrics, error)
     GetEndpointStats(ctx context.Context) (*EndpointStats, error)
 
-    // DID operations
-    UpsertDID(ctx context.Context, did string, bundleNum int) error
+    // DID operations - UPDATED SIGNATURES
+    UpsertDID(ctx context.Context, did string, bundleNum int, handle, pds string) error
+    UpsertDIDFromMempool(ctx context.Context, did string, handle, pds string) error
     GetDIDRecord(ctx context.Context, did string) (*DIDRecord, error)
     GetGlobalDIDInfo(ctx context.Context, did string) (*GlobalDIDInfo, error)
     AddBundleDIDs(ctx context.Context, bundleNum int, dids []string) error
···
 
     // PDS Repo operations
     UpsertPDSRepos(ctx context.Context, endpointID int64, repos []PDSRepoData) error
-    GetPDSRepos(ctx context.Context, endpointID int64, activeOnly bool, limit int, offset int) ([]*PDSRepo, error) // Updated
+    GetPDSRepos(ctx context.Context, endpointID int64, activeOnly bool, limit int, offset int) ([]*PDSRepo, error)
     GetReposByDID(ctx context.Context, did string) ([]*PDSRepo, error)
     GetPDSRepoStats(ctx context.Context, endpointID int64) (map[string]interface{}, error)
 
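Any other implementations of this interface (test mocks, alternative backends) need the two new signatures before the build compiles again; a compile-time assertion near the interface makes a miss obvious. A sketch (the interface name "Database" is assumed here, since the diff does not show the interface declaration itself):

    // Hypothetical compile-time check; fails to build if *PostgresDB
    // drifts from the storage interface after the signature change.
    var _ Database = (*PostgresDB)(nil)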
internal/storage/postgres.go  +62 -27
···
     CREATE INDEX IF NOT EXISTS idx_mempool_did ON plc_mempool(did);
     CREATE UNIQUE INDEX IF NOT EXISTS idx_mempool_cid ON plc_mempool(cid);
 
-    -- Minimal dids table
-    CREATE TABLE IF NOT EXISTS dids (
-        did TEXT PRIMARY KEY,
-        bundle_numbers JSONB NOT NULL DEFAULT '[]'::jsonb,
-        created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
-    );
+    -- Minimal dids table
+    CREATE TABLE IF NOT EXISTS dids (
+        did TEXT PRIMARY KEY,
+        handle TEXT,
+        pds TEXT,
+        bundle_numbers JSONB NOT NULL DEFAULT '[]'::jsonb,
+        created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+    );
 
-    CREATE INDEX IF NOT EXISTS idx_dids_bundle_numbers ON dids USING gin(bundle_numbers);
-    CREATE INDEX IF NOT EXISTS idx_dids_created_at ON dids(created_at);
+    CREATE INDEX IF NOT EXISTS idx_dids_bundle_numbers ON dids USING gin(bundle_numbers);
+    CREATE INDEX IF NOT EXISTS idx_dids_created_at ON dids(created_at);
+    CREATE INDEX IF NOT EXISTS idx_dids_handle ON dids(handle);
+    CREATE INDEX IF NOT EXISTS idx_dids_pds ON dids(pds);
 
     -- PDS Repositories table
     CREATE TABLE IF NOT EXISTS pds_repos (
···
 
 // ===== DID OPERATIONS =====
 
-func (p *PostgresDB) UpsertDID(ctx context.Context, did string, bundleNum int) error {
+func (p *PostgresDB) UpsertDID(ctx context.Context, did string, bundleNum int, handle, pds string) error {
     query := `
-        INSERT INTO dids (did, bundle_numbers, created_at)
-        VALUES ($1, jsonb_build_array($2), CURRENT_TIMESTAMP)
+        INSERT INTO dids (did, handle, pds, bundle_numbers, created_at)
+        VALUES ($1, $2, $3, jsonb_build_array($4::integer), CURRENT_TIMESTAMP)
         ON CONFLICT(did) DO UPDATE SET
+            handle = EXCLUDED.handle,
+            pds = EXCLUDED.pds,
             bundle_numbers = CASE
-                WHEN dids.bundle_numbers ? $2::text THEN dids.bundle_numbers
-                ELSE dids.bundle_numbers || jsonb_build_array($2)
-            END
+                WHEN dids.bundle_numbers @> jsonb_build_array($4::integer) THEN dids.bundle_numbers
+                ELSE dids.bundle_numbers || jsonb_build_array($4::integer)
+            END,
+            updated_at = CURRENT_TIMESTAMP
     `
-    _, err := p.db.ExecContext(ctx, query, did, bundleNum)
+    _, err := p.db.ExecContext(ctx, query, did, handle, pds, bundleNum)
+    return err
+}
+
+// UpsertDIDFromMempool creates/updates DID record without adding to bundle_numbers
+func (p *PostgresDB) UpsertDIDFromMempool(ctx context.Context, did string, handle, pds string) error {
+    query := `
+        INSERT INTO dids (did, handle, pds, bundle_numbers, created_at)
+        VALUES ($1, $2, $3, '[]'::jsonb, CURRENT_TIMESTAMP)
+        ON CONFLICT(did) DO UPDATE SET
+            handle = EXCLUDED.handle,
+            pds = EXCLUDED.pds,
+            updated_at = CURRENT_TIMESTAMP
+    `
+    _, err := p.db.ExecContext(ctx, query, did, handle, pds)
     return err
 }
 
 func (p *PostgresDB) GetDIDRecord(ctx context.Context, did string) (*DIDRecord, error) {
     query := `
-        SELECT did, bundle_numbers, created_at
+        SELECT did, handle, pds, bundle_numbers, created_at
         FROM dids
         WHERE did = $1
     `
 
     var record DIDRecord
     var bundleNumbersJSON []byte
+    var handle, pds sql.NullString
 
     err := p.db.QueryRowContext(ctx, query, did).Scan(
         &record.DID,
+        &handle,
+        &pds,
         &bundleNumbersJSON,
         &record.CreatedAt,
     )
     if err != nil {
         return nil, err
+    }
+
+    if handle.Valid {
+        record.Handle = handle.String
+    }
+    if pds.Valid {
+        record.CurrentPDS = pds.String
     }
 
     if err := json.Unmarshal(bundleNumbersJSON, &record.BundleNumbers); err != nil {
···
 
 // GetGlobalDIDInfo retrieves consolidated DID info from 'dids' and 'pds_repos'
 func (p *PostgresDB) GetGlobalDIDInfo(ctx context.Context, did string) (*GlobalDIDInfo, error) {
-    // This query now includes a CTE to find primary endpoints and filters
-    // the 'hosting_on' aggregation to only include repos from those endpoints.
     query := `
         WITH primary_endpoints AS (
-            -- First, get the ID of every "primary" PDS.
-            -- A primary PDS is the one with the earliest 'discovered_at' timestamp
-            -- for a given 'server_did'.
             SELECT DISTINCT ON (COALESCE(server_did, id::text))
                 id
             FROM endpoints
···
         )
         SELECT
             d.did,
+            d.handle,
+            d.pds,
             d.bundle_numbers,
             d.created_at,
             COALESCE(
···
                 )
                 ORDER BY pr.last_seen DESC
             ) FILTER (
-                -- This filter clause ensures we only aggregate repos
-                -- where the endpoint_id is in our list of primary endpoints.
                 WHERE pr.id IS NOT NULL AND pe.id IS NOT NULL
             ),
             '[]'::jsonb
···
         LEFT JOIN
             endpoints e ON pr.endpoint_id = e.id
         LEFT JOIN
-            -- We join the primary_endpoints CTE. 'pe.id' will be NON-NULL
-            -- only if the repo's endpoint_id (pr.endpoint_id) is a primary one.
             primary_endpoints pe ON pr.endpoint_id = pe.id
         WHERE
             d.did = $1
         GROUP BY
-            d.did, d.bundle_numbers, d.created_at
+            d.did, d.handle, d.pds, d.bundle_numbers, d.created_at
     `
 
     var info GlobalDIDInfo
     var bundleNumbersJSON []byte
     var hostingOnJSON []byte
+    var handle, pds sql.NullString
 
     err := p.db.QueryRowContext(ctx, query, did).Scan(
         &info.DID,
+        &handle,
+        &pds,
         &bundleNumbersJSON,
         &info.CreatedAt,
         &hostingOnJSON,
     )
     if err != nil {
-        return nil, err // This will correctly be sql.ErrNoRows if not in 'dids'
+        return nil, err
+    }
+
+    if handle.Valid {
+        info.Handle = handle.String
+    }
+    if pds.Valid {
+        info.CurrentPDS = pds.String
     }
 
     if err := json.Unmarshal(bundleNumbersJSON, &info.BundleNumbers); err != nil {
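The jsonb containment check (@>) makes the bundle append idempotent, and the mempool variant deliberately never touches bundle_numbers, so a mempool sighting can refresh handle/pds without faking bundle membership. A rough illustration of the intended semantics (hypothetical helper, not part of the diff; assumes context and fmt imports, and the DID and endpoints are illustrative):

    func demoUpsertSemantics(ctx context.Context, db *PostgresDB) error {
        // Same bundle twice: containment check keeps bundle_numbers at [42].
        for i := 0; i < 2; i++ {
            if err := db.UpsertDID(ctx, "did:plc:demo", 42,
                "alice.example.com", "https://pds.example.com"); err != nil {
                return err
            }
        }

        // Mempool upsert refreshes handle/pds, leaves bundle_numbers alone.
        if err := db.UpsertDIDFromMempool(ctx, "did:plc:demo",
            "alice.example.com", "https://pds.example.com"); err != nil {
            return err
        }

        rec, err := db.GetDIDRecord(ctx, "did:plc:demo")
        if err != nil {
            return err
        }
        fmt.Printf("bundles=%v handle=%s\n", rec.BundleNumbers, rec.Handle) // bundles=[42]
        return nil
    }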
utils/vuln-scanner-parallel.sh  +199 (new file)
···
+#!/bin/bash
+
+# Configuration
+API_HOST="${API_HOST:-http://localhost:8080}"
+TIMEOUT=5
+PARALLEL_JOBS=20
+OUTPUT_DIR="./pds_scan_results"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+RESULTS_FILE="${OUTPUT_DIR}/scan_${TIMESTAMP}.txt"
+FOUND_FILE="${OUTPUT_DIR}/found_${TIMESTAMP}.txt"
+
+# Paths to check
+PATHS=(
+    "/info.php"
+    "/phpinfo.php"
+    "/test.php"
+    "/admin"
+    "/admin.php"
+    "/wp-admin"
+    "/robots.txt"
+    "/.env"
+    "/.git/config"
+    "/config.php"
+    "/backup"
+    "/db.sql"
+    "/.DS_Store"
+    "/server-status"
+    "/.well-known/security.txt"
+)
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+# Check dependencies
+if ! command -v jq &> /dev/null; then
+    echo -e "${RED}Error: jq is required${NC}"
+    echo "Install: sudo apt-get install jq"
+    exit 1
+fi
+
+if ! command -v parallel &> /dev/null; then
+    echo -e "${RED}Error: GNU parallel is required${NC}"
+    echo "Install: sudo apt-get install parallel (or brew install parallel)"
+    exit 1
+fi
+
+mkdir -p "$OUTPUT_DIR"
+
+echo -e "${BLUE}╔════════════════════════════════════════╗${NC}"
+echo -e "${BLUE}║ PDS Security Scanner (Parallel) ║${NC}"
+echo -e "${BLUE}╚════════════════════════════════════════╝${NC}"
+echo ""
+echo "API Host: $API_HOST"
+echo "Timeout: ${TIMEOUT}s per request"
+echo "Parallel jobs: ${PARALLEL_JOBS}"
+echo "Paths to check: ${#PATHS[@]}"
+echo ""
+
+# Scan function - will be called by GNU parallel
+scan_endpoint() {
+    local endpoint="$1"
+    local timeout="$2"
+    shift 2
+    local paths=("$@")
+
+    for path in "${paths[@]}"; do
+        url="${endpoint}${path}"
+
+        response=$(curl -s -o /dev/null -w "%{http_code}" \
+            --max-time "$timeout" \
+            --connect-timeout "$timeout" \
+            --retry 0 \
+            -A "Mozilla/5.0 (Security Scanner)" \
+            "$url" 2>/dev/null)
+
+        if [ -n "$response" ] && [ "$response" != "404" ] && [ "$response" != "000" ]; then
+            if [ "$response" = "200" ] || [ "$response" = "301" ] || [ "$response" = "302" ]; then
+                echo "FOUND|$endpoint|$path|$response"
+            elif [ "$response" != "403" ] && [ "$response" != "401" ]; then
+                echo "MAYBE|$endpoint|$path|$response"
+            fi
+        fi
+    done
+}
+
+export -f scan_endpoint
+
+# Fetch active PDS endpoints
+echo -e "${YELLOW}Fetching active PDS endpoints...${NC}"
+ENDPOINTS=$(curl -s "${API_HOST}/api/v1/pds?status=online&limit=10000" | \
+    jq -r '.[].endpoint' 2>/dev/null)
+
+if [ -z "$ENDPOINTS" ]; then
+    echo -e "${RED}Error: Could not fetch endpoints from API${NC}"
+    echo "Check that the API is running at: $API_HOST"
+    exit 1
+fi
+
+ENDPOINT_COUNT=$(echo "$ENDPOINTS" | wc -l | tr -d ' ')
+echo -e "${GREEN}✓ Found ${ENDPOINT_COUNT} active PDS endpoints${NC}"
+echo ""
+
+# Write header to results file
+{
+    echo "PDS Security Scan Results"
+    echo "========================="
+    echo "Scan started: $(date)"
+    echo "Endpoints scanned: ${ENDPOINT_COUNT}"
+    echo "Paths checked: ${#PATHS[@]}"
+    echo "Parallel jobs: ${PARALLEL_JOBS}"
+    echo ""
+    echo "Results:"
+    echo "--------"
+} > "$RESULTS_FILE"
+
+# Run parallel scan
+echo -e "${YELLOW}Starting parallel scan...${NC}"
+echo -e "${BLUE}(This may take a few minutes depending on endpoint count)${NC}"
+echo ""
+
+echo "$ENDPOINTS" | \
+    parallel \
+        -j "$PARALLEL_JOBS" \
+        --bar \
+        --joblog "${OUTPUT_DIR}/joblog_${TIMESTAMP}.txt" \
+        scan_endpoint {} "$TIMEOUT" "${PATHS[@]}" \
+    >> "$RESULTS_FILE"
+
+echo ""
+echo -e "${YELLOW}Processing results...${NC}"
+
+# Count results (grep -c still prints 0 but exits non-zero when there are
+# no matches, so default only when output is empty instead of appending 0)
+FOUND_COUNT=$(grep -c "^FOUND|" "$RESULTS_FILE" 2>/dev/null || true)
+FOUND_COUNT=${FOUND_COUNT:-0}
+MAYBE_COUNT=$(grep -c "^MAYBE|" "$RESULTS_FILE" 2>/dev/null || true)
+MAYBE_COUNT=${MAYBE_COUNT:-0}
+
+# Extract found URLs to separate file
+{
+    echo "Found URLs (HTTP 200/301/302)"
+    echo "=============================="
+    echo "Scan: $(date)"
+    echo ""
+} > "$FOUND_FILE"
+
+grep "^FOUND|" "$RESULTS_FILE" 2>/dev/null | while IFS='|' read -r status endpoint path code; do
+    echo "$endpoint$path [$code]"
+done >> "$FOUND_FILE"
+
+# Create summary at end of results file
+{
+    echo ""
+    echo "Summary"
+    echo "======="
+    echo "Scan completed: $(date)"
+    echo "Total endpoints scanned: ${ENDPOINT_COUNT}"
+    echo "Total paths checked: $((ENDPOINT_COUNT * ${#PATHS[@]}))"
+    echo "Found (200/301/302): ${FOUND_COUNT}"
+    echo "Maybe (other codes): ${MAYBE_COUNT}"
+} >> "$RESULTS_FILE"
+
+# Display summary
+echo ""
+echo -e "${BLUE}╔════════════════════════════════════════╗${NC}"
+echo -e "${BLUE}║ Scan Complete! ║${NC}"
+echo -e "${BLUE}╚════════════════════════════════════════╝${NC}"
+echo ""
+echo -e "Endpoints scanned: ${GREEN}${ENDPOINT_COUNT}${NC}"
+echo -e "Paths checked per site: ${BLUE}${#PATHS[@]}${NC}"
+echo -e "Total requests made: ${BLUE}$((ENDPOINT_COUNT * ${#PATHS[@]}))${NC}"
+echo ""
+echo -e "Results:"
+echo -e "  ${GREEN}✓ Found (200/301/302):${NC} ${FOUND_COUNT}"
+echo -e "  ${YELLOW}? Maybe (other):${NC} ${MAYBE_COUNT}"
+echo ""
+echo "Files created:"
+echo "  Full results: $RESULTS_FILE"
+echo "  Found URLs: $FOUND_FILE"
+echo "  Job log: ${OUTPUT_DIR}/joblog_${TIMESTAMP}.txt"
+
+# Show sample of found URLs if any
+if [ "$FOUND_COUNT" -gt 0 ]; then
+    echo ""
+    echo -e "${RED}⚠ SECURITY ALERT: Found exposed paths!${NC}"
+    echo ""
+    echo "Sample findings (first 10):"
+    grep "^FOUND|" "$RESULTS_FILE" 2>/dev/null | head -10 | while IFS='|' read -r status endpoint path code; do
+        echo -e "  ${RED}✗${NC} $endpoint${RED}$path${NC} [$code]"
+    done
+
+    if [ "$FOUND_COUNT" -gt 10 ]; then
+        echo ""
+        echo "  ... and $((FOUND_COUNT - 10)) more (see $FOUND_FILE)"
+    fi
+fi
+
+echo ""
utils/vuln-scanner.sh  +117 (new file)
···
+#!/bin/bash
+
+# Configuration
+API_HOST="${API_HOST:-http://localhost:8080}"
+TIMEOUT=5
+OUTPUT_DIR="./pds_scan_results"
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+RESULTS_FILE="${OUTPUT_DIR}/scan_${TIMESTAMP}.txt"
+FOUND_FILE="${OUTPUT_DIR}/found_${TIMESTAMP}.txt"
+
+# Paths to check (one per line for easier editing)
+PATHS=(
+    "/info.php"
+    "/phpinfo.php"
+    "/test.php"
+    "/admin"
+    "/admin.php"
+    "/wp-admin"
+    "/robots.txt"
+    "/.env"
+    "/.git/config"
+    "/config.php"
+    "/backup"
+    "/db.sql"
+    "/.DS_Store"
+    "/server-status"
+    "/.well-known/security.txt"
+)
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+mkdir -p "$OUTPUT_DIR"
+
+echo -e "${BLUE}=== PDS Security Scanner ===${NC}"
+echo "API Host: $API_HOST"
+echo "Timeout: ${TIMEOUT}s"
+echo "Scanning for ${#PATHS[@]} paths"
+echo "Results: $RESULTS_FILE"
+echo ""
+
+# Fetch active PDS endpoints
+echo -e "${YELLOW}Fetching active PDS endpoints...${NC}"
+ENDPOINTS=$(curl -s "${API_HOST}/api/v1/pds?status=online&limit=10000" | \
+    jq -r '.[].endpoint' 2>/dev/null)
+
+if [ -z "$ENDPOINTS" ]; then
+    echo -e "${RED}Error: Could not fetch endpoints from API${NC}"
+    exit 1
+fi
+
+ENDPOINT_COUNT=$(echo "$ENDPOINTS" | wc -l)
+echo -e "${GREEN}Found ${ENDPOINT_COUNT} active PDS endpoints${NC}"
+echo ""
+
+# Write header
+echo "PDS Security Scan - $(date)" > "$RESULTS_FILE"
+echo "========================================" >> "$RESULTS_FILE"
+echo "" >> "$RESULTS_FILE"
+
+# Counters
+CURRENT=0
+TOTAL_FOUND=0
+TOTAL_MAYBE=0
+
+# Scan each endpoint sequentially
+while IFS= read -r endpoint; do
+    CURRENT=$((CURRENT + 1))
+
+    echo -e "${BLUE}[$CURRENT/$ENDPOINT_COUNT]${NC} Scanning: $endpoint"
+
+    # Scan each path
+    for path in "${PATHS[@]}"; do
+        url="${endpoint}${path}"
+
+        # Make request with timeout
+        response=$(curl -s -o /dev/null -w "%{http_code}" \
+            --max-time "$TIMEOUT" \
+            --connect-timeout "$TIMEOUT" \
+            -L \
+            -A "Mozilla/5.0 (Security Scanner)" \
+            "$url" 2>/dev/null)
+
+        # Check response
+        if [ -n "$response" ] && [ "$response" != "404" ] && [ "$response" != "000" ]; then
+            if [ "$response" = "200" ] || [ "$response" = "301" ] || [ "$response" = "302" ]; then
+                echo -e "  ${GREEN}✓ FOUND${NC} $path ${YELLOW}[$response]${NC}"
+                echo "FOUND: $endpoint$path [$response]" >> "$RESULTS_FILE"
+                echo "$endpoint$path" >> "$FOUND_FILE"
+                TOTAL_FOUND=$((TOTAL_FOUND + 1))
+            elif [ "$response" != "403" ]; then
+                echo -e "  ${YELLOW}? MAYBE${NC} $path ${YELLOW}[$response]${NC}"
+                echo "MAYBE: $endpoint$path [$response]" >> "$RESULTS_FILE"
+                TOTAL_MAYBE=$((TOTAL_MAYBE + 1))
+            fi
+        fi
+    done
+
+    echo "" >> "$RESULTS_FILE"
+
+done <<< "$ENDPOINTS"
+
+# Summary
+echo ""
+echo -e "${BLUE}========================================${NC}"
+echo -e "${GREEN}Scan Complete!${NC}"
+echo "Scanned: ${ENDPOINT_COUNT} endpoints"
+echo "Paths checked per endpoint: ${#PATHS[@]}"
+echo -e "${GREEN}Found (200/301/302): ${TOTAL_FOUND}${NC}"
+echo -e "${YELLOW}Maybe (other codes): ${TOTAL_MAYBE}${NC}"
+echo ""
+echo "Full results: $RESULTS_FILE"
+[ -f "$FOUND_FILE" ] && echo "Found URLs: $FOUND_FILE"