A Transparent and Verifiable Way to Sync the AT Protocol's PLC Directory

fix warnings

+238 -147
bundle/manager.go  +6 -8
···
369    }
370
371    // loadBundleFromDisk loads a bundle from disk
372 -  func (m *Manager) loadBundleFromDisk(ctx context.Context, bundleNumber int) (*Bundle, error) {
373        // Get metadata from index
374        meta, err := m.index.GetBundle(bundleNumber)
375        if err != nil {
···
1321       // Find latest non-nullified location
1322       var latestLoc *didindex.OpLocation
1323       for i := range locations {
1324 -         if locations[i].Nullified {
1325               continue
1326           }
1327 -         if latestLoc == nil ||
1328 -             locations[i].Bundle > latestLoc.Bundle ||
1329 -             (locations[i].Bundle == latestLoc.Bundle && locations[i].Position > latestLoc.Position) {
1330               latestLoc = &locations[i]
1331           }
1332       }
···
1337
1338       // STEP 3: Load operation
1339       opStart := time.Now()
1340 -     op, err := m.LoadOperation(ctx, int(latestLoc.Bundle), int(latestLoc.Position))
1341       result.LoadOpTime = time.Since(opStart)
1342
1343       if err != nil {
1344           return nil, fmt.Errorf("failed to load operation: %w", err)
1345       }
1346
1347 -     result.BundleNumber = int(latestLoc.Bundle)
1348 -     result.Position = int(latestLoc.Position)
1349
1350       // STEP 4: Resolve document
1351       doc, err := plcclient.ResolveDIDDocument(did, []plcclient.PLCOperation{*op})
···
369    }
370
371    // loadBundleFromDisk loads a bundle from disk
372 +  func (m *Manager) loadBundleFromDisk(_ context.Context, bundleNumber int) (*Bundle, error) {
373        // Get metadata from index
374        meta, err := m.index.GetBundle(bundleNumber)
375        if err != nil {
···
1321       // Find latest non-nullified location
1322       var latestLoc *didindex.OpLocation
1323       for i := range locations {
1324 +         if locations[i].Nullified() {
1325               continue
1326           }
1327 +         if latestLoc == nil || locations[i].IsAfter(*latestLoc) {
1328               latestLoc = &locations[i]
1329           }
1330       }
···
1335
1336       // STEP 3: Load operation
1337       opStart := time.Now()
1338 +     op, err := m.LoadOperation(ctx, latestLoc.BundleInt(), latestLoc.PositionInt())
1339       result.LoadOpTime = time.Since(opStart)
1340
1341       if err != nil {
1342           return nil, fmt.Errorf("failed to load operation: %w", err)
1343       }
1344
1345 +     result.BundleNumber = latestLoc.BundleInt()
1346 +     result.Position = latestLoc.PositionInt()
1347
1348       // STEP 4: Resolve document
1349       doc, err := plcclient.ResolveDIDDocument(did, []plcclient.PLCOperation{*op})
cmd/plcbundle/commands/detector.go  +1 -1
···
62   `)
63   }
64
65 - func detectorList(args []string) error {
66       registry := detector.DefaultRegistry()
67       detectors := registry.List()
68
···
62   `)
63   }
64
65 + func detectorList(_ []string) error {
66       registry := detector.DefaultRegistry()
67       detectors := registry.List()
68
cmd/plcbundle/commands/did.go  +3 -3
···
795       return nil
796   }
797
798 - func batchLookup(mgr BundleManager, dids []string, output *os.File, workers int) error {
799       progress := ui.NewProgressBar(len(dids))
800       ctx := context.Background()
801
···
855       return nil
856   }
857
858 - func batchResolve(mgr BundleManager, dids []string, output *os.File, workers int) error {
859       progress := ui.NewProgressBar(len(dids))
860       ctx := context.Background()
861
···
900       return nil
901   }
902
903 - func batchExport(mgr BundleManager, dids []string, output *os.File, workers int) error {
904       progress := ui.NewProgressBar(len(dids))
905       ctx := context.Background()
906
···
795       return nil
796   }
797
798 + func batchLookup(mgr BundleManager, dids []string, output *os.File, _ int) error {
799       progress := ui.NewProgressBar(len(dids))
800       ctx := context.Background()
801
···
855       return nil
856   }
857
858 + func batchResolve(mgr BundleManager, dids []string, output *os.File, _ int) error {
859       progress := ui.NewProgressBar(len(dids))
860       ctx := context.Background()
861
···
900       return nil
901   }
902
903 + func batchExport(mgr BundleManager, dids []string, output *os.File, _ int) error {
904       progress := ui.NewProgressBar(len(dids))
905       ctx := context.Background()
906
cmd/plcbundle/commands/export.go  +12 -8
···
24       }
25
26       if !*all && *bundles == "" {
27 -         return fmt.Errorf("usage: plcbundle export --bundles <number|range> [options]\n" +
28 -             " or: plcbundle export --all [options]\n\n" +
29 -             "Examples:\n" +
30 -             " plcbundle export --bundles 42\n" +
31 -             " plcbundle export --bundles 1-100\n" +
32 -             " plcbundle export --all\n" +
33 -             " plcbundle export --all --count 50000\n" +
34 -             " plcbundle export --bundles 42 | jq .")
35       }
36
37       mgr, _, err := getManager(&ManagerOptions{Cmd: nil})
···
24       }
25
26       if !*all && *bundles == "" {
27 +         fmt.Fprint(os.Stderr, `usage: plcbundle export --bundles <number|range> [options]
28 +    or: plcbundle export --all [options]
29 +
30 + Examples:
31 +   plcbundle export --bundles 42
32 +   plcbundle export --bundles 1-100
33 +   plcbundle export --all
34 +   plcbundle export --all --count 50000
35 +   plcbundle export --bundles 42 | jq .
36 +
37 + `)
38 +         return fmt.Errorf("missing required flag: --bundles or --all")
39       }
40
41       mgr, _, err := getManager(&ManagerOptions{Cmd: nil})
cmd/plcbundle/commands/log.go  +6 -6
···
197       // Display bundles
198       for i, meta := range displayBundles {
199           if opts.oneline {
200 -             displayBundleOneLine(w, meta, opts.showHashes, useColor, colorBundleNum, colorHash, colorDate, colorAge, colorSize, colorReset)
201           } else {
202 -             displayBundleDetailed(w, meta, opts.showHashes, useColor, colorBundleNum, colorHash, colorDate, colorAge, colorSize, colorDim, colorReset)
203
204               // Add separator between bundles (except last)
205               if i < len(displayBundles)-1 {
···
212       // Summary footer
213       if !opts.oneline && len(displayBundles) > 0 {
214           fmt.Fprintf(w, "\n")
215 -         displayLogSummary(w, allBundles, displayBundles, opts.last, useColor, colorHeader, colorReset)
216       }
217   }
218
219 - func displayBundleOneLine(w io.Writer, meta *bundleindex.BundleMetadata, showHashes bool, useColor bool, colorBundle, colorHash, colorDate, colorAge, colorSize, colorReset string) {
220       age := time.Since(meta.EndTime)
221       ageStr := formatDurationShort(age)
222
···
236           colorSize, formatBytes(meta.CompressedSize), colorReset)
237   }
238
239 - func displayBundleDetailed(w io.Writer, meta *bundleindex.BundleMetadata, showHashes bool, useColor bool, colorBundle, colorHash, colorDate, colorAge, colorSize, colorDim, colorReset string) {
240       fmt.Fprintf(w, "%sBundle %06d%s\n", colorBundle, meta.BundleNumber, colorReset)
241
242       // Timestamp and age
···
282       }
283   }
284
285 - func displayLogSummary(w io.Writer, allBundles, displayedBundles []*bundleindex.BundleMetadata, limit int, useColor bool, colorHeader, colorReset string) {
286       first := displayedBundles[0]
287       last := displayedBundles[len(displayedBundles)-1]
288
···
197       // Display bundles
198       for i, meta := range displayBundles {
199           if opts.oneline {
200 +             displayBundleOneLine(w, meta, opts.showHashes, colorBundleNum, colorHash, colorDate, colorAge, colorSize, colorReset)
201           } else {
202 +             displayBundleDetailed(w, meta, opts.showHashes, colorBundleNum, colorHash, colorDate, colorAge, colorSize, colorDim, colorReset)
203
204               // Add separator between bundles (except last)
205               if i < len(displayBundles)-1 {
···
212       // Summary footer
213       if !opts.oneline && len(displayBundles) > 0 {
214           fmt.Fprintf(w, "\n")
215 +         displayLogSummary(w, allBundles, displayBundles, opts.last, colorHeader, colorReset)
216       }
217   }
218
219 + func displayBundleOneLine(w io.Writer, meta *bundleindex.BundleMetadata, showHashes bool, colorBundle, colorHash, colorDate, colorAge, colorSize, colorReset string) {
220       age := time.Since(meta.EndTime)
221       ageStr := formatDurationShort(age)
222
···
236           colorSize, formatBytes(meta.CompressedSize), colorReset)
237   }
238
239 + func displayBundleDetailed(w io.Writer, meta *bundleindex.BundleMetadata, showHashes bool, colorBundle, colorHash, colorDate, colorAge, colorSize, colorDim, colorReset string) {
240       fmt.Fprintf(w, "%sBundle %06d%s\n", colorBundle, meta.BundleNumber, colorReset)
241
242       // Timestamp and age
···
282       }
283   }
284
285 + func displayLogSummary(w io.Writer, allBundles, displayedBundles []*bundleindex.BundleMetadata, limit int, colorHeader, colorReset string) {
286       first := displayedBundles[0]
287       last := displayedBundles[len(displayedBundles)-1]
288
cmd/plcbundle/commands/mempool.go  +1 -1
···
83       return cmd
84   }
85
86 - func mempoolStatus(cmd *cobra.Command, args []string) error {
87       verbose, _ := cmd.Flags().GetBool("verbose")
88       if cmd.Parent() != nil {
89           // Called as subcommand, check parent's verbose flag
···
83       return cmd
84   }
85
86 + func mempoolStatus(cmd *cobra.Command, _ []string) error {
87       verbose, _ := cmd.Flags().GetBool("verbose")
88       if cmd.Parent() != nil {
89           // Called as subcommand, check parent's verbose flag
cmd/plcbundle/commands/rollback.go  -17
···
527           fmt.Printf("  plcbundle index build\n\n")
528       }
529   }
530 -
531 - // Validation helpers
532 -
533 - // validateRollbackSafety performs additional safety checks
534 - func validateRollbackSafety(mgr BundleManager, plan *rollbackPlan) error {
535 -     // Check for chain integrity issues
536 -     if len(plan.toKeep) > 1 {
537 -         // Verify the target bundle exists and has valid hash
538 -         lastKeep := plan.toKeep[len(plan.toKeep)-1]
539 -         if lastKeep.Hash == "" {
540 -             return fmt.Errorf("target bundle %06d has no chain hash - may be corrupted",
541 -                 lastKeep.BundleNumber)
542 -         }
543 -     }
544 -
545 -     return nil
546 - }
···
527           fmt.Printf("  plcbundle index build\n\n")
528       }
529   }
cmd/plcbundle/commands/stream.go  +1 -2
···
230   }
231
232   type streamLogger struct {
233 -     quiet   bool
234 -     verbose bool
235   }
236
237   func (l *streamLogger) Printf(format string, v ...interface{}) {
···
230   }
231
232   type streamLogger struct {
233 +     quiet bool
234   }
235
236   func (l *streamLogger) Printf(format string, v ...interface{}) {
internal/didindex/builder.go  +31 -41
···
20   }
21
22   // add adds a location to the shard
23 - func (sb *ShardBuilder) add(identifier string, bundle uint16, position uint16, nullified bool) {
24       sb.mu.Lock()
25       defer sb.mu.Unlock()
26
27 -     sb.entries[identifier] = append(sb.entries[identifier], OpLocation{
28 -         Bundle:    bundle,
29 -         Position:  position,
30 -         Nullified: nullified,
31 -     })
32   }
33
34   // BuildIndexFromScratch builds index with controlled memory usage
···
92
93           shardNum := dim.calculateShard(identifier)
94
95 -         // Write entry: [24 bytes ID][2 bytes bundle][2 bytes pos][1 byte nullified]
96 -         entry := make([]byte, 29)
97           copy(entry[0:24], identifier)
98 -         binary.LittleEndian.PutUint16(entry[24:26], uint16(meta.BundleNumber))
99 -         binary.LittleEndian.PutUint16(entry[26:28], uint16(pos))
100
101 -         // Store nullified flag
102 -         if op.IsNullified() {
103 -             entry[28] = 1
104 -         } else {
105 -             entry[28] = 0
106 -         }
107
108           if _, err := tempShards[shardNum].Write(entry); err != nil {
109               dim.logger.Printf("Warning: failed to write to temp shard %02x: %v", shardNum, err)
···
135           totalDIDs += count
136       }
137
138 -     dim.config.TotalDIDs = totalDIDs
139 -     dim.config.LastBundle = bundles[len(bundles)-1].BundleNumber
140 -
141 -     if err := dim.saveIndexConfig(); err != nil {
142           return fmt.Errorf("failed to save config: %w", err)
143       }
144
···
165           return 0, nil
166       }
167
168 -     // Parse entries (29 bytes each)
169 -     entryCount := len(data) / 29
170 -     if len(data)%29 != 0 {
171 -         return 0, fmt.Errorf("corrupted temp shard: size not multiple of 29")
172       }
173
174       type tempEntry struct {
175           identifier string
176 -         bundle     uint16
177 -         position   uint16
178 -         nullified  bool
179       }
180
181       entries := make([]tempEntry, entryCount)
182       for i := 0; i < entryCount; i++ {
183 -         offset := i * 29
184           entries[i] = tempEntry{
185               identifier: string(data[offset : offset+24]),
186 -             bundle:     binary.LittleEndian.Uint16(data[offset+24 : offset+26]),
187 -             position:   binary.LittleEndian.Uint16(data[offset+26 : offset+28]),
188 -             nullified:  data[offset+28] != 0,
189           }
190       }
191
···
200       // Group by DID
201       builder := newShardBuilder()
202       for _, entry := range entries {
203 -         builder.add(entry.identifier, entry.bundle, entry.position, entry.nullified)
204       }
205
206       // Free entries
···
240               shardOps[shardNum] = make(map[string][]OpLocation)
241           }
242
243 -         shardOps[shardNum][identifier] = append(shardOps[shardNum][identifier], OpLocation{
244 -             Bundle:    uint16(bundle.BundleNumber),
245 -             Position:  uint16(pos),
246 -             Nullified: op.IsNullified(),
247 -         })
248       }
249
250       groupDuration := time.Since(groupStart)
···
349       // STEP 4: Update config
350       configStart := time.Now()
351
352 -     dim.config.TotalDIDs += deltaCount
353 -     dim.config.LastBundle = bundle.BundleNumber
354 -
355 -     if err := dim.saveIndexConfig(); err != nil {
356           return fmt.Errorf("failed to save config: %w", err)
357       }
358
···
20   }
21
22   // add adds a location to the shard
23 + func (sb *ShardBuilder) add(identifier string, loc OpLocation) {
24       sb.mu.Lock()
25       defer sb.mu.Unlock()
26
27 +     sb.entries[identifier] = append(sb.entries[identifier], loc)
28 + }
29 +
30 + // updateAndSaveConfig updates config with new values and saves atomically
31 + func (dim *Manager) updateAndSaveConfig(totalDIDs int64, lastBundle int) error {
32 +     dim.config.TotalDIDs = totalDIDs
33 +     dim.config.LastBundle = lastBundle
34 +     dim.config.Version = DIDINDEX_VERSION
35 +     dim.config.Format = "binary_v4"
36 +     dim.config.UpdatedAt = time.Now().UTC()
37 +
38 +     return dim.saveIndexConfig()
39   }
40
41   // BuildIndexFromScratch builds index with controlled memory usage
···
99
100          shardNum := dim.calculateShard(identifier)
101
102 +        // Write entry: [24 bytes ID][4 bytes packed OpLocation]
103 +        entry := make([]byte, 28)
104          copy(entry[0:24], identifier)
105
106 +        // Create packed OpLocation (includes nullified bit)
107 +        loc := NewOpLocation(uint16(meta.BundleNumber), uint16(pos), op.IsNullified())
108 +        binary.LittleEndian.PutUint32(entry[24:28], uint32(loc))
109
110          if _, err := tempShards[shardNum].Write(entry); err != nil {
111              dim.logger.Printf("Warning: failed to write to temp shard %02x: %v", shardNum, err)
···
137          totalDIDs += count
138      }
139
140 +    if err := dim.updateAndSaveConfig(totalDIDs, bundles[len(bundles)-1].BundleNumber); err != nil {
141          return fmt.Errorf("failed to save config: %w", err)
142      }
143
···
164          return 0, nil
165      }
166
167 +    // Parse entries (28 bytes each)
168 +    entryCount := len(data) / 28
169 +    if len(data)%28 != 0 {
170 +        return 0, fmt.Errorf("corrupted temp shard: size not multiple of 28")
171      }
172
173      type tempEntry struct {
174          identifier string
175 +        location   OpLocation // ← Single packed value
176      }
177
178      entries := make([]tempEntry, entryCount)
179      for i := 0; i < entryCount; i++ {
180 +        offset := i * 28 // ← 28 bytes
181          entries[i] = tempEntry{
182              identifier: string(data[offset : offset+24]),
183 +            location:   OpLocation(binary.LittleEndian.Uint32(data[offset+24 : offset+28])),
184          }
185      }
186
···
195      // Group by DID
196      builder := newShardBuilder()
197      for _, entry := range entries {
198 +        builder.add(entry.identifier, entry.location)
199      }
200
201      // Free entries
···
235              shardOps[shardNum] = make(map[string][]OpLocation)
236          }
237
238 +        loc := NewOpLocation(uint16(bundle.BundleNumber), uint16(pos), op.IsNullified())
239 +        shardOps[shardNum][identifier] = append(shardOps[shardNum][identifier], loc)
240      }
241
242      groupDuration := time.Since(groupStart)
···
341      // STEP 4: Update config
342      configStart := time.Now()
343
344 +    newTotal := dim.config.TotalDIDs + deltaCount
345 +    if err := dim.updateAndSaveConfig(newTotal, bundle.BundleNumber); err != nil {
346          return fmt.Errorf("failed to save config: %w", err)
347      }
348
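Each temp-shard record written above is now a fixed 28 bytes: a 24-byte DID identifier followed by the 4-byte packed OpLocation. A minimal standalone sketch of that encode/decode round trip, re-implementing the packing math inline so it runs outside the internal package (the identifier value and variable names here are illustrative only):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // 24-character did:plc identifier (illustrative value) + packed location.
        identifier := "aaaabbbbccccddddeeeeffff"
        packed := (uint32(7)*10000 + uint32(42)) << 1 // bundle 7, position 42, not nullified

        // One 28-byte temp-shard record: [24-byte identifier][4-byte packed location].
        entry := make([]byte, 28)
        copy(entry[0:24], identifier)
        binary.LittleEndian.PutUint32(entry[24:28], packed)

        // Decode the record back.
        id := string(entry[0:24])
        loc := binary.LittleEndian.Uint32(entry[24:28])
        fmt.Println(id, (loc>>1)/10000, (loc>>1)%10000, loc&1 == 1) // aaaabbbbccccddddeeeeffff 7 42 false
    }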
internal/didindex/lookup.go  +12 -12
···
34       // Filter nullified
35       var validLocations []OpLocation
36       for _, loc := range locations {
37 -         if !loc.Nullified {
38               validLocations = append(validLocations, loc)
39           }
40       }
···
46
47       if len(validLocations) == 1 {
48           loc := validLocations[0]
49 -         op, err := provider.LoadOperation(ctx, int(loc.Bundle), int(loc.Position))
50           if err != nil {
51               return nil, err
52           }
···
56       // For multiple operations: group by bundle to minimize bundle loads
57       bundleMap := make(map[uint16][]uint16)
58       for _, loc := range validLocations {
59 -         bundleMap[loc.Bundle] = append(bundleMap[loc.Bundle], loc.Position)
60       }
61
62       if dim.verbose {
···
133      // Group by bundle
134      bundleMap := make(map[uint16][]OpLocation)
135      for _, loc := range locations {
136 -        bundleMap[loc.Bundle] = append(bundleMap[loc.Bundle], loc)
137      }
138
139      if dim.verbose {
···
149          }
150
151          for _, loc := range locs {
152 -            if int(loc.Position) >= len(bundle.Operations) {
153                  continue
154              }
155
156 -            op := bundle.Operations[loc.Position]
157              results = append(results, OpLocationWithOperation{
158                  Operation: op,
159 -                Bundle:    int(loc.Bundle),
160 -                Position:  int(loc.Position),
161              })
162          }
163      }
···
196      // Find latest non-nullified location
197      var latestLoc *OpLocation
198      for i := range locations {
199 -        if locations[i].Nullified {
200              continue
201          }
202
203          if latestLoc == nil {
204              latestLoc = &locations[i]
205          } else {
206 -            if locations[i].Bundle > latestLoc.Bundle ||
207 -                (locations[i].Bundle == latestLoc.Bundle && locations[i].Position > latestLoc.Position) {
208                  latestLoc = &locations[i]
209              }
210          }
···
215      }
216
217      // Load ONLY the specific operation (efficient!)
218 -    return provider.LoadOperation(ctx, int(latestLoc.Bundle), int(latestLoc.Position))
219  }
···
34       // Filter nullified
35       var validLocations []OpLocation
36       for _, loc := range locations {
37 +         if !loc.Nullified() {
38               validLocations = append(validLocations, loc)
39           }
40       }
···
46
47       if len(validLocations) == 1 {
48           loc := validLocations[0]
49 +         op, err := provider.LoadOperation(ctx, loc.BundleInt(), loc.PositionInt())
50           if err != nil {
51               return nil, err
52           }
···
56       // For multiple operations: group by bundle to minimize bundle loads
57       bundleMap := make(map[uint16][]uint16)
58       for _, loc := range validLocations {
59 +         bundleMap[loc.Bundle()] = append(bundleMap[loc.Bundle()], loc.Position())
60       }
61
62       if dim.verbose {
···
133      // Group by bundle
134      bundleMap := make(map[uint16][]OpLocation)
135      for _, loc := range locations {
136 +        bundleMap[loc.Bundle()] = append(bundleMap[loc.Bundle()], loc)
137      }
138
139      if dim.verbose {
···
149          }
150
151          for _, loc := range locs {
152 +            if loc.PositionInt() >= len(bundle.Operations) {
153                  continue
154              }
155
156 +            op := bundle.Operations[loc.Position()]
157              results = append(results, OpLocationWithOperation{
158                  Operation: op,
159 +                Bundle:    loc.BundleInt(),
160 +                Position:  loc.PositionInt(),
161              })
162          }
163      }
···
196      // Find latest non-nullified location
197      var latestLoc *OpLocation
198      for i := range locations {
199 +        if locations[i].Nullified() {
200              continue
201          }
202
203          if latestLoc == nil {
204              latestLoc = &locations[i]
205          } else {
206 +            if locations[i].Bundle() > latestLoc.Bundle() ||
207 +                (locations[i].Bundle() == latestLoc.Bundle() && locations[i].Position() > latestLoc.Position()) {
208                  latestLoc = &locations[i]
209              }
210          }
···
215      }
216
217      // Load ONLY the specific operation (efficient!)
218 +    return provider.LoadOperation(ctx, latestLoc.BundleInt(), latestLoc.PositionInt())
219  }
internal/didindex/manager.go  +40 -40
···
25       config, _ := loadIndexConfig(configPath)
26       if config == nil {
27           config = &Config{
28 -             Version:    DIDINDEX_VERSION,
29 -             Format:     "binary_v1",
30               ShardCount: DID_SHARD_COUNT,
31               UpdatedAt:  time.Now().UTC(),
32           }
33       }
34
35       return &Manager{
···
43           config: config,
44           logger: logger,
45       }
46 - }
47 -
48 - // Add helper to ensure directories when actually writing
49 - func (dim *Manager) ensureDirectories() error {
50 -     return os.MkdirAll(dim.shardDir, 0755)
51   }
52
53   // Close unmaps all shards and cleans up
···
274
275  // searchShard performs optimized binary search using prefix index
276  func (dim *Manager) searchShard(shard *mmapShard, identifier string) []OpLocation {
277 -    if shard.data == nil || len(shard.data) < 1056 {
278          return nil
279      }
280
···
449      // Read locations
450      locations := make([]OpLocation, count)
451      for i := 0; i < int(count); i++ {
452 -        if offset+5 > len(data) {
453              return locations[:i]
454          }
455
456 -        bundle := binary.LittleEndian.Uint16(data[offset : offset+2])
457 -        position := binary.LittleEndian.Uint16(data[offset+2 : offset+4])
458 -        nullified := data[offset+4] != 0
459
460 -        locations[i] = OpLocation{
461 -            Bundle:    bundle,
462 -            Position:  position,
463 -            Nullified: nullified,
464 -        }
465 -
466 -        offset += 5
467      }
468
469      return locations
···
660      for i, id := range identifiers {
661          offsetTable[i] = uint32(currentOffset)
662          locations := builder.entries[id]
663 -        entrySize := DID_IDENTIFIER_LEN + 2 + (len(locations) * 5)
664          currentOffset += entrySize
665      }
666
···
700          offset += 2
701
702          for _, loc := range locations {
703 -            binary.LittleEndian.PutUint16(buf[offset:offset+2], loc.Bundle)
704 -            binary.LittleEndian.PutUint16(buf[offset+2:offset+4], loc.Position)
705 -
706 -            if loc.Nullified {
707 -                buf[offset+4] = 1
708 -            } else {
709 -                buf[offset+4] = 0
710 -            }
711 -
712 -            offset += 5
713          }
714      }
715
···
724
725      entryCount := binary.LittleEndian.Uint32(data[9:13])
726
727 -    var offsetTableStart int
728 -    offsetTableStart = 1056
729
730      // Start reading entries after offset table
731      offset := offsetTableStart + (int(entryCount) * 4)
···
745
746          // Read locations
747          locations := make([]OpLocation, locCount)
748          for j := 0; j < int(locCount); j++ {
749 -            if offset+5 > len(data) {
750 -                break
751 -            }
752
753 -            locations[j] = OpLocation{
754 -                Bundle:    binary.LittleEndian.Uint16(data[offset : offset+2]),
755 -                Position:  binary.LittleEndian.Uint16(data[offset+2 : offset+4]),
756 -                Nullified: data[offset+4] != 0,
757              }
758 -            offset += 5
759          }
760
761          builder.entries[identifier] = locations
···
25       config, _ := loadIndexConfig(configPath)
26       if config == nil {
27           config = &Config{
28 +             Version:    DIDINDEX_VERSION, // Will be 4
29 +             Format:     "binary_v4",      // Update format name
30               ShardCount: DID_SHARD_COUNT,
31               UpdatedAt:  time.Now().UTC(),
32           }
33 +     } else if config.Version < DIDINDEX_VERSION {
34 +         // Auto-trigger rebuild on version mismatch
35 +         logger.Printf("DID index version outdated (v%d, need v%d) - rebuild required",
36 +             config.Version, DIDINDEX_VERSION)
37       }
38
39       return &Manager{
···
47           config: config,
48           logger: logger,
49       }
50   }
51
52   // Close unmaps all shards and cleans up
···
273
274  // searchShard performs optimized binary search using prefix index
275  func (dim *Manager) searchShard(shard *mmapShard, identifier string) []OpLocation {
276 +    if len(shard.data) < 1056 {
277          return nil
278      }
279
···
448      // Read locations
449      locations := make([]OpLocation, count)
450      for i := 0; i < int(count); i++ {
451 +        if offset+4 > len(data) { // ← 4 bytes now
452              return locations[:i]
453          }
454
455 +        // Read packed uint32
456 +        packed := binary.LittleEndian.Uint32(data[offset : offset+4])
457 +        locations[i] = OpLocation(packed)
458
459 +        offset += 4 // ← 4 bytes
460      }
461
462      return locations
···
653      for i, id := range identifiers {
654          offsetTable[i] = uint32(currentOffset)
655          locations := builder.entries[id]
656 +        entrySize := DID_IDENTIFIER_LEN + 2 + (len(locations) * 4) // ← 4 bytes
657          currentOffset += entrySize
658      }
659
···
693          offset += 2
694
695          for _, loc := range locations {
696 +            // Write packed uint32 (global position + nullified bit)
697 +            binary.LittleEndian.PutUint32(buf[offset:offset+4], uint32(loc))
698 +            offset += 4 // ← 4 bytes per location
699          }
700      }
701
···
710
711      entryCount := binary.LittleEndian.Uint32(data[9:13])
712
713 +    offsetTableStart := 1056
714
715      // Start reading entries after offset table
716      offset := offsetTableStart + (int(entryCount) * 4)
···
730
731          // Read locations
732          locations := make([]OpLocation, locCount)
733 +
734 +        // Check version to determine format
735 +        version := binary.LittleEndian.Uint32(data[4:8])
736 +
737          for j := 0; j < int(locCount); j++ {
738 +            if version >= 4 {
739 +                // New format: 4-byte packed uint32
740 +                if offset+4 > len(data) {
741 +                    break
742 +                }
743 +                packed := binary.LittleEndian.Uint32(data[offset : offset+4])
744 +                locations[j] = OpLocation(packed)
745 +                offset += 4
746 +            } else {
747 +                // Old format: 5-byte separate fields (for migration)
748 +                if offset+5 > len(data) {
749 +                    break
750 +                }
751 +                bundle := binary.LittleEndian.Uint16(data[offset : offset+2])
752 +                position := binary.LittleEndian.Uint16(data[offset+2 : offset+4])
753 +                nullified := data[offset+4] != 0
754
755 +                // Convert to new format
756 +                locations[j] = NewOpLocation(bundle, position, nullified)
757 +                offset += 5
758              }
759          }
760
761          builder.entries[identifier] = locations
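For shards written before this change, the reader above falls back to the old 5-byte records and converts them on the fly. A small sketch of that conversion in isolation, using the same byte layouts as the code above (the helper name and sample values are made up for illustration):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // convertV3Location turns one old 5-byte record ([2]bundle [2]position [1]nullified,
    // little-endian) into the new 4-byte packed uint32 form.
    func convertV3Location(old []byte) [4]byte {
        bundle := binary.LittleEndian.Uint16(old[0:2])
        position := binary.LittleEndian.Uint16(old[2:4])
        nullified := old[4] != 0

        packed := (uint32(bundle)*10000 + uint32(position)) << 1
        if nullified {
            packed |= 1
        }

        var out [4]byte
        binary.LittleEndian.PutUint32(out[:], packed)
        return out
    }

    func main() {
        // v3 record: bundle 100, position 5000 (0x1388), nullified
        fmt.Printf("%x\n", convertV3Location([]byte{100, 0, 0x88, 0x13, 1})) // 91ab1e00
    }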
internal/didindex/manager_test.go  +58 (new file)
···
1  + package didindex_test
2  +
3  + import (
4  +     "testing"
5  +
6  +     "tangled.org/atscan.net/plcbundle/internal/didindex"
7  + )
8  +
9  + func TestOpLocationPacking(t *testing.T) {
10 +     tests := []struct {
11 +         bundle    uint16
12 +         position  uint16
13 +         nullified bool
14 +     }{
15 +         {1, 0, false},
16 +         {1, 9999, false},
17 +         {100, 5000, true},
18 +         {65535, 9999, true}, // Max values
19 +     }
20 +
21 +     for _, tt := range tests {
22 +         loc := didindex.NewOpLocation(tt.bundle, tt.position, tt.nullified)
23 +
24 +         // Test unpacking
25 +         if loc.Bundle() != tt.bundle {
26 +             t.Errorf("Bundle mismatch: got %d, want %d", loc.Bundle(), tt.bundle)
27 +         }
28 +         if loc.Position() != tt.position {
29 +             t.Errorf("Position mismatch: got %d, want %d", loc.Position(), tt.position)
30 +         }
31 +         if loc.Nullified() != tt.nullified {
32 +             t.Errorf("Nullified mismatch: got %v, want %v", loc.Nullified(), tt.nullified)
33 +         }
34 +
35 +         // Test global position
36 +         expectedGlobal := uint32(tt.bundle)*10000 + uint32(tt.position)
37 +         if loc.GlobalPosition() != expectedGlobal {
38 +             t.Errorf("Global position mismatch: got %d, want %d",
39 +                 loc.GlobalPosition(), expectedGlobal)
40 +         }
41 +     }
42 + }
43 +
44 + func TestOpLocationComparison(t *testing.T) {
45 +     loc1 := didindex.NewOpLocation(100, 50, false) // 1,000,050
46 +     loc2 := didindex.NewOpLocation(100, 51, false) // 1,000,051
47 +     loc3 := didindex.NewOpLocation(200, 30, false) // 2,000,030
48 +
49 +     if !loc1.Less(loc2) {
50 +         t.Error("Expected loc1 < loc2")
51 +     }
52 +     if !loc2.Less(loc3) {
53 +         t.Error("Expected loc2 < loc3")
54 +     }
55 +     if loc3.Less(loc1) {
56 +         t.Error("Expected loc3 > loc1")
57 +     }
58 + }
internal/didindex/types.go  +67 -8
···
17
18       // Binary format constants
19       DIDINDEX_MAGIC   = "PLCD"
20 -     DIDINDEX_VERSION = 3
21
22       BUILD_BATCH_SIZE = 100 // Process 100 bundles at a time
23
···
63       LastBundle int `json:"last_bundle"`
64   }
65
66 - // OpLocation represents exact location of an operation
67 - type OpLocation struct {
68 -     Bundle    uint16
69 -     Position  uint16
70 -     Nullified bool
71 - }
72 -
73   // ShardBuilder accumulates DID positions for a shard
74   type ShardBuilder struct {
75       entries map[string][]OpLocation
76       mu      sync.Mutex
77   }
78
79   // OpLocationWithOperation contains an operation with its bundle/position
80   type OpLocationWithOperation struct {
81       Operation plcclient.PLCOperation
82       Bundle    int
83       Position  int
84   }
···
17
18       // Binary format constants
19       DIDINDEX_MAGIC   = "PLCD"
20 +     DIDINDEX_VERSION = 4
21 +
22 +     // Format sizes
23 +     LOCATION_SIZE_V3 = 5 // Old: 2+2+1
24 +     LOCATION_SIZE_V4 = 4 // New: packed uint32
25
26       BUILD_BATCH_SIZE = 100 // Process 100 bundles at a time
27
···
67       LastBundle int `json:"last_bundle"`
68   }
69
70   // ShardBuilder accumulates DID positions for a shard
71   type ShardBuilder struct {
72       entries map[string][]OpLocation
73       mu      sync.Mutex
74   }
75
76 + // OpLocation represents exact location of an operation
77 + type OpLocation uint32
78 +
79   // OpLocationWithOperation contains an operation with its bundle/position
80   type OpLocationWithOperation struct {
81       Operation plcclient.PLCOperation
82       Bundle    int
83       Position  int
84   }
85 +
86 + func NewOpLocation(bundle, position uint16, nullified bool) OpLocation {
87 +     globalPos := uint32(bundle)*10000 + uint32(position)
88 +     loc := globalPos << 1
89 +     if nullified {
90 +         loc |= 1
91 +     }
92 +     return OpLocation(loc)
93 + }
94 +
95 + // Getters
96 + func (loc OpLocation) GlobalPosition() uint32 {
97 +     return uint32(loc) >> 1
98 + }
99 +
100 + func (loc OpLocation) Bundle() uint16 {
101 +     return uint16(loc.GlobalPosition() / 10000)
102 + }
103 +
104 + func (loc OpLocation) Position() uint16 {
105 +     return uint16(loc.GlobalPosition() % 10000)
106 + }
107 +
108 + func (loc OpLocation) Nullified() bool {
109 +     return (loc & 1) == 1
110 + }
111 +
112 + func (loc OpLocation) IsAfter(other OpLocation) bool {
113 +     // Compare global positions directly
114 +     return loc.GlobalPosition() > other.GlobalPosition()
115 + }
116 +
117 + func (loc OpLocation) IsBefore(other OpLocation) bool {
118 +     return loc.GlobalPosition() < other.GlobalPosition()
119 + }
120 +
121 + func (loc OpLocation) Equals(other OpLocation) bool {
122 +     // Compare entire packed value (including nullified bit)
123 +     return loc == other
124 + }
125 +
126 + func (loc OpLocation) PositionEquals(other OpLocation) bool {
127 +     // Compare only position (ignore nullified bit)
128 +     return loc.GlobalPosition() == other.GlobalPosition()
129 + }
130 +
131 + // Convenience conversions
132 + func (loc OpLocation) BundleInt() int {
133 +     return int(loc.Bundle())
134 + }
135 +
136 + func (loc OpLocation) PositionInt() int {
137 +     return int(loc.Position())
138 + }
139 +
140 + // For sorting/comparison
141 + func (loc OpLocation) Less(other OpLocation) bool {
142 +     return loc.GlobalPosition() < other.GlobalPosition()
143 + }
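Taken together, the new OpLocation packs a global operation position (bundle × 10,000 + position) into the upper 31 bits of a uint32 and keeps the nullified flag in bit 0, which is what makes the 4-byte on-disk records and the IsAfter/Less comparisons above work. A standalone round-trip sketch of the same math, outside the internal package, so the type and names below are local stand-ins rather than the library API:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // opLocation mirrors didindex.OpLocation for illustration only.
    type opLocation uint32

    func newOpLocation(bundle, position uint16, nullified bool) opLocation {
        loc := (uint32(bundle)*10000 + uint32(position)) << 1 // global position in bits 1..31
        if nullified {
            loc |= 1 // nullified flag in bit 0
        }
        return opLocation(loc)
    }

    func (l opLocation) bundle() uint16   { return uint16((uint32(l) >> 1) / 10000) }
    func (l opLocation) position() uint16 { return uint16((uint32(l) >> 1) % 10000) }
    func (l opLocation) nullified() bool  { return l&1 == 1 }

    func main() {
        loc := newOpLocation(42, 1337, true)

        // The 4-byte little-endian form written into the v4 shards.
        var buf [4]byte
        binary.LittleEndian.PutUint32(buf[:], uint32(loc))
        decoded := opLocation(binary.LittleEndian.Uint32(buf[:]))

        fmt.Println(decoded.bundle(), decoded.position(), decoded.nullified()) // 42 1337 true

        // Ordering compares global positions, so a later bundle always sorts after an earlier one.
        later := newOpLocation(43, 0, false)
        fmt.Println(uint32(later)>>1 > uint32(loc)>>1) // true
    }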