package commitgraph

import (
	"crypto"
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/hash"
	"github.com/go-git/go-git/v5/utils/binary"
)

// Encoder writes MemoryIndex structs to an output stream.
//
// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
// Use the v2 package instead.
type Encoder struct {
	io.Writer
	hash hash.Hash // running checksum of everything written so far
}

// NewEncoder returns a new stream encoder that writes to w.
//
// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
// Use the v2 package instead.
func NewEncoder(w io.Writer) *Encoder {
	h := hash.New(hash.CryptoType)
	// Tee every write into the hash so the trailing checksum can be emitted
	// by encodeChecksum once all chunks have been written.
	mw := io.MultiWriter(w, h)
	return &Encoder{mw, h}
}

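// A minimal usage sketch, not a prescribed flow: the Index here is assumed to
// be populated elsewhere (for example a MemoryIndex filled in from repository
// commits), and error handling is elided for brevity.
//
//	var idx Index // built elsewhere
//	f, _ := os.Create("commit-graph")
//	defer f.Close()
//	if err := NewEncoder(f).Encode(idx); err != nil {
//		// handle the error
//	}
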
// Encode writes an index into the commit-graph file.
//
// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
// Use the v2 package instead.
func (e *Encoder) Encode(idx Index) error {
	// Get all the hashes in the input index
	hashes := idx.Hashes()

	// Sort the input and prepare helper structures we'll need for encoding
	hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)

	// Mandatory chunks: OID fanout (256 uint32s), OID lookup (one hash per
	// commit) and commit data (tree hash, two parent indexes and packed
	// generation/time per commit). The extra edge list is only written when
	// at least one commit has more than two parents.
	chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
	chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * (hash.Size + commitDataSize)}
	if extraEdgesCount > 0 {
		chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
		chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
	}

	if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
		return err
	}
	if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
		return err
	}
	if err := e.encodeFanout(fanout); err != nil {
		return err
	}
	if err := e.encodeOidLookup(hashes); err != nil {
		return err
	}
	extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx)
	if err != nil {
		return err
	}
	if err := e.encodeExtraEdges(extraEdges); err != nil {
		return err
	}

	return e.encodeChecksum()
}

func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
	// Sort the hashes and build our index
	plumbing.HashesSort(hashes)
	hashToIndex = make(map[plumbing.Hash]uint32)
	fanout = make([]uint32, 256)
	for i, hash := range hashes {
		hashToIndex[hash] = uint32(i)
		fanout[hash[0]]++
	}

	// Convert the fanout to cumulative values
	for i := 1; i <= 0xff; i++ {
		fanout[i] += fanout[i-1]
	}

	// Count the entries needed in the extra edge list: every commit with more
	// than two parents stores all parents after the first one there.
	for i := 0; i < len(hashes); i++ {
		v, _ := idx.GetCommitDataByIndex(i)
		if len(v.ParentHashes) > 2 {
			extraEdgesCount += uint32(len(v.ParentHashes) - 1)
		}
	}

	return
}

func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
	if _, err = e.Write(commitFileSignature); err == nil {
		version := byte(1)
		if hash.CryptoType == crypto.SHA256 {
			version = byte(2)
		}
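		// Header bytes after the signature: file format version, hash version
		// (1 = SHA-1, 2 = SHA-256), number of chunks, and number of base
		// commit-graph files (always zero here).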
		_, err = e.Write([]byte{1, version, byte(chunkCount), 0})
	}
	return
}

func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
	// 8 bytes of file header, 12 bytes for each chunk header and 12 bytes for the terminator
	offset := uint64(8 + len(chunkSignatures)*12 + 12)
	for i, signature := range chunkSignatures {
		if _, err = e.Write(signature); err == nil {
			err = binary.WriteUint64(e, offset)
		}
		if err != nil {
			return
		}
		offset += chunkSizes[i]
	}
	if _, err = e.Write(lastSignature); err == nil {
		err = binary.WriteUint64(e, offset)
	}
	return
}

func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
	for i := 0; i <= 0xff; i++ {
		if err = binary.WriteUint32(e, fanout[i]); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
	for _, hash := range hashes {
		if _, err = e.Write(hash[:]); err != nil {
			return err
		}
	}
	return
}

func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
	for _, hash := range hashes {
		origIndex, _ := idx.GetIndexByHash(hash)
		commitData, _ := idx.GetCommitDataByIndex(origIndex)
		if _, err = e.Write(commitData.TreeHash[:]); err != nil {
			return
		}

		var parent1, parent2 uint32
		switch len(commitData.ParentHashes) {
		case 0:
			parent1 = parentNone
			parent2 = parentNone
		case 1:
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = parentNone
		case 2:
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = hashToIndex[commitData.ParentHashes[1]]
		default:
			// Octopus merge: store the first parent inline and make parent2
			// point into the extra edge list, which holds the remaining parents.
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = uint32(len(extraEdges)) | parentOctopusUsed
			for _, parentHash := range commitData.ParentHashes[1:] {
				extraEdges = append(extraEdges, hashToIndex[parentHash])
			}
			extraEdges[len(extraEdges)-1] |= parentLast
		}

		if err = binary.WriteUint32(e, parent1); err == nil {
			err = binary.WriteUint32(e, parent2)
		}
		if err != nil {
			return
		}

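		// The final 8 bytes pack the commit time into the low 34 bits and the
		// generation number into the bits above it. For illustration (made-up
		// values): generation 3 at Unix time 1700000000 would be stored as
		// 3<<34 | 1700000000 = 0xC6553F100.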
		unixTime := uint64(commitData.When.Unix())
		unixTime |= uint64(commitData.Generation) << 34
		if err = binary.WriteUint64(e, unixTime); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
	for _, parent := range extraEdges {
		if err = binary.WriteUint32(e, parent); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeChecksum() error {
	_, err := e.Write(e.hash.Sum(nil)[:hash.Size])
	return err
}