An experimental pub/sub client and server project.

Refactor app structure + test improvements (#10)

Authored by willdot.net, committed by GitHub · 8e8e0e1c 9ffc83c7

+5 -5
example/main.go
···
      "log/slog"
      "time"

-     "github.com/willdot/messagebroker/pubsub"
-     "github.com/willdot/messagebroker/server"
+     "github.com/willdot/messagebroker/client"
+     "github.com/willdot/messagebroker/internal/server"
  )

  var consumeOnly *bool
···
          go sendMessages()
      }

-     sub, err := pubsub.NewSubscriber(":3000")
+     sub, err := client.NewSubscriber(":3000")
      if err != nil {
          panic(err)
      }
···
  }

  func sendMessages() {
-     publisher, err := pubsub.NewPublisher("localhost:3000")
+     publisher, err := client.NewPublisher("localhost:3000")
      if err != nil {
          panic(err)
      }
···
      i := 0
      for {
          i++
-         msg := pubsub.NewMessage("topic a", []byte(fmt.Sprintf("message %d", i)))
+         msg := client.NewMessage("topic a", []byte(fmt.Sprintf("message %d", i)))

          err = publisher.PublishMessage(msg)
          if err != nil {
+1 -1
example/server/main.go
···
      "syscall"
      "time"

-     "github.com/willdot/messagebroker/server"
+     "github.com/willdot/messagebroker/internal/server"
  )

  func main() {
+47
internal/messagestore/memory_store.go
+ package messagestore
+
+ import (
+     "sync"
+
+     "github.com/willdot/messagebroker/internal"
+ )
+
+ // MemoryStore allows messages to be stored in memory
+ type MemoryStore struct {
+     mu         sync.Mutex
+     msgs       map[int]internal.Message
+     nextOffset int
+ }
+
+ // NewMemoryStore initializes a new in memory store
+ func NewMemoryStore() *MemoryStore {
+     return &MemoryStore{
+         msgs: make(map[int]internal.Message),
+     }
+ }
+
+ // Write will write the provided message to the in memory store
+ func (m *MemoryStore) Write(msg internal.Message) error {
+     m.mu.Lock()
+     defer m.mu.Unlock()
+
+     m.msgs[m.nextOffset] = msg
+
+     m.nextOffset++
+
+     return nil
+ }
+
+ // ReadFrom will read messages from (and including) the provided offset and pass them to the provided handler
+ func (m *MemoryStore) ReadFrom(offset int, handleFunc func(msg internal.Message)) {
+     if offset < 0 || offset >= m.nextOffset {
+         return
+     }
+
+     m.mu.Lock()
+     defer m.mu.Unlock()
+
+     for i := offset; i < len(m.msgs); i++ {
+         handleFunc(m.msgs[i])
+     }
+ }
+12
internal/messge.go
+ package internal
+
+ // Message represents a message that can be sent / received
+ type Message struct {
+     Data          []byte
+     DeliveryCount int
+ }
+
+ // NewMessage intializes a new message
+ func NewMessage(data []byte) Message {
+     return Message{Data: data, DeliveryCount: 1}
+ }
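Taken together, the new internal.Message and messagestore.MemoryStore compose as below. This is a small illustrative sketch, not code from the change; it just shows a couple of writes followed by a replay from offset 0:

package main

import (
    "fmt"

    "github.com/willdot/messagebroker/internal"
    "github.com/willdot/messagebroker/internal/messagestore"
)

func main() {
    store := messagestore.NewMemoryStore()

    // each write is assigned the next offset (0, 1, 2, ...)
    _ = store.Write(internal.NewMessage([]byte("first")))
    _ = store.Write(internal.NewMessage([]byte("second")))

    // replay everything from (and including) offset 0
    store.ReadFrom(0, func(msg internal.Message) {
        fmt.Printf("delivery %d: %s\n", msg.DeliveryCount, msg.Data)
    })
}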
+1 -1
pubsub/message.go → client/message.go
- package pubsub
+ package client

  // Message represents a message that can be published or consumed
  type Message struct {
+4 -4
pubsub/publisher.go → client/publisher.go
- package pubsub
+ package client

  import (
      "encoding/binary"
···
      "net"
      "sync"

-     "github.com/willdot/messagebroker/server"
+     "github.com/willdot/messagebroker/internal/server"
  )

  // Publisher allows messages to be published to a server
···
      // send topic first
      topic := fmt.Sprintf("topic:%s", message.Topic)

-     topicLenB := make([]byte, 4)
-     binary.BigEndian.PutUint32(topicLenB, uint32(len(topic)))
+     topicLenB := make([]byte, 2)
+     binary.BigEndian.PutUint16(topicLenB, uint16(len(topic)))

      headers := append(topicLenB, []byte(topic)...)
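The topic header is now framed with a 2-byte big-endian length prefix instead of 4, which caps the "topic:<name>" header at 65535 bytes. A standalone sketch of that framing; the helper names are illustrative, not part of the client API:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// encodeTopicHeader mirrors the publisher's new framing: a big-endian uint16
// length prefix followed by the "topic:<name>" bytes.
func encodeTopicHeader(topic string) []byte {
    payload := fmt.Sprintf("topic:%s", topic)
    header := make([]byte, 2)
    binary.BigEndian.PutUint16(header, uint16(len(payload)))
    return append(header, []byte(payload)...)
}

// decodeTopicHeader reads the same framing back, as the receiving side would.
func decodeTopicHeader(r io.Reader) (string, error) {
    var length uint16
    if err := binary.Read(r, binary.BigEndian, &length); err != nil {
        return "", err
    }
    buf := make([]byte, length)
    if _, err := io.ReadFull(r, buf); err != nil {
        return "", err
    }
    return string(buf), nil
}

func main() {
    frame := encodeTopicHeader("topic a")
    topic, err := decodeTopicHeader(bytes.NewReader(frame))
    fmt.Println(topic, err) // topic:topic a <nil>
}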
+6 -6
pubsub/subscriber.go → client/subscriber.go
- package pubsub
+ package client

  import (
      "context"
···
      "sync"
      "time"

-     "github.com/willdot/messagebroker/server"
+     "github.com/willdot/messagebroker/internal/server"
  )

  type connOpp func(conn net.Conn) error
···
          return nil
      }

-     var dataLen uint32
+     var dataLen uint16
      err = binary.Read(conn, binary.BigEndian, &dataLen)
      if err != nil {
          return fmt.Errorf("received status %s:", resp)
···
          return nil
      }

-     var dataLen uint32
+     var dataLen uint16
      err = binary.Read(conn, binary.BigEndian, &dataLen)
      if err != nil {
          return fmt.Errorf("received status %s:", resp)
···

  func (s *Subscriber) readMessage(ctx context.Context, msgChan chan *Message) error {
      op := func(conn net.Conn) error {
-         err := s.conn.SetReadDeadline(time.Now().Add(time.Second))
+         err := s.conn.SetReadDeadline(time.Now().Add(time.Millisecond * 300))
          if err != nil {
              return err
          }

-         var topicLen uint64
+         var topicLen uint16
          err = binary.Read(s.conn, binary.BigEndian, &topicLen)
          if err != nil {
              // TODO: check if this is needed elsewhere. I'm not sure where the read deadline resets....
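The subscriber's poll deadline also drops from 1s to 300ms, so expired read deadlines become more frequent and need to be told apart from real connection failures. A general net-package sketch of that check, not code from this change:

package main

import (
    "errors"
    "fmt"
    "net"
    "time"
)

// isTimeout reports whether err came from an expired read deadline rather than
// a real failure; deadline errors satisfy net.Error with Timeout() == true.
func isTimeout(err error) bool {
    var nerr net.Error
    return errors.As(err, &nerr) && nerr.Timeout()
}

func main() {
    client, server := net.Pipe()
    defer client.Close()
    defer server.Close()

    // nothing is ever written, so this read can only end via the deadline
    _ = client.SetReadDeadline(time.Now().Add(300 * time.Millisecond))
    _, err := client.Read(make([]byte, 1))
    fmt.Println(isTimeout(err)) // true
}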
+5 -5
pubsub/subscriber_test.go → client/subscriber_test.go
- package pubsub
+ package client

  import (
      "context"
···

      "github.com/stretchr/testify/assert"
      "github.com/stretchr/testify/require"
-
-     "github.com/willdot/messagebroker/server"
+     "github.com/willdot/messagebroker/internal/server"
  )

  const (
···
      err = publisher.PublishMessage(msg)
      require.NoError(t, err)

-     time.Sleep(time.Second)
+     // give the consumer some time to read the messages -- TODO: make better!
+     time.Sleep(time.Millisecond * 300)
      cancel()

      select {
···
      }

      // give the consumer some time to read the messages -- TODO: make better!
-     time.Sleep(time.Second)
+     time.Sleep(time.Millisecond * 300)
      cancel()

      select {
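For the "make better" TODO, one option is testify's require.Eventually, which polls a condition instead of sleeping a fixed amount. A sketch only; the channel here stands in for however the real test observes the consumer's progress:

package client

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

// TestEventuallySketch shows the polling pattern only; the buffered channel is
// a placeholder for whatever the real test uses to count received messages.
func TestEventuallySketch(t *testing.T) {
    received := make(chan struct{}, 10)
    expectedMessages := 3

    // stand-in for the consumer goroutine delivering messages
    go func() {
        for i := 0; i < expectedMessages; i++ {
            received <- struct{}{}
        }
    }()

    // poll every 10ms for up to a second instead of sleeping a fixed duration
    require.Eventually(t, func() bool {
        return len(received) == expectedMessages
    }, time.Second, 10*time.Millisecond)
}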
-45
server/message_store.go
- package server
-
- import (
-     "sync"
- )
-
- // Memory store allows messages to be stored in memory
- type MemoryStore struct {
-     mu         sync.Mutex
-     msgs       map[int]message
-     nextOffset int
- }
-
- // New memory store initializes a new in memory store
- func NewMemoryStore() *MemoryStore {
-     return &MemoryStore{
-         msgs: make(map[int]message),
-     }
- }
-
- // Write will write the provided message to the in memory store
- func (m *MemoryStore) Write(msg message) error {
-     m.mu.Lock()
-     defer m.mu.Unlock()
-
-     m.msgs[m.nextOffset] = msg
-
-     m.nextOffset++
-
-     return nil
- }
-
- // ReadFrom will read messages from (and including) the provided offset and pass them to the provided handler
- func (m *MemoryStore) ReadFrom(offset int, handleFunc func(msg message)) {
-     if offset < 0 || offset >= m.nextOffset {
-         return
-     }
-
-     m.mu.Lock()
-     defer m.mu.Unlock()
-
-     for i := offset; i < len(m.msgs); i++ {
-         handleFunc(m.msgs[i])
-     }
- }
+2 -2
server/peer/peer.go → internal/server/peer.go
- package peer
+ package server

  import (
      "net"
···
  }

  // New returns a new peer.
- func New(conn net.Conn) *Peer {
+ func NewPeer(conn net.Conn) *Peer {
      return &Peer{
          conn: conn,
      }
+38 -25
server/server.go → internal/server/server.go
···
      "syscall"
      "time"

-     "github.com/willdot/messagebroker/server/peer"
+     "github.com/willdot/messagebroker/internal"
  )

  // Action represents the type of action that a peer requests to do
···
  }

  func (s *Server) handleConn(conn net.Conn) {
-     peer := peer.New(conn)
+     peer := NewPeer(conn)

      slog.Info("handling connection", "peer", peer.Addr())
      defer slog.Info("ending connection", "peer", peer.Addr())
···
      }
  }

- func (s *Server) handleSubscribe(peer *peer.Peer) {
+ func (s *Server) handleSubscribe(peer *Peer) {
      slog.Info("handling subscriber", "peer", peer.Addr())
      // subscribe the peer to the topic
      s.subscribePeerToTopic(peer)

+     s.waitForPeerAction(peer)
+ }
+
+ func (s *Server) waitForPeerAction(peer *Peer) {
      // keep handling the peers connection, getting the action from the peer when it wishes to do something else.
      // once the peers connection ends, it will be unsubscribed from all topics and returned
      for {
···
      }
  }

- func (s *Server) subscribePeerToTopic(peer *peer.Peer) {
+ func (s *Server) subscribePeerToTopic(peer *Peer) {
      op := func(conn net.Conn) error {
          // get the topics the peer wishes to subscribe to
-         dataLen, err := dataLength(conn)
+         dataLen, err := dataLengthUint32(conn)
          if err != nil {
              slog.Error(err.Error(), "peer", peer.Addr())
              writeStatus(Error, "invalid data length of topics provided", conn)
···
      _ = peer.RunConnOperation(op)
  }

- func (s *Server) handleUnsubscribe(peer *peer.Peer) {
+ func (s *Server) handleUnsubscribe(peer *Peer) {
      slog.Info("handling unsubscriber", "peer", peer.Addr())
      op := func(conn net.Conn) error {
          // get the topics the peer wishes to unsubscribe from
-         dataLen, err := dataLength(conn)
+         dataLen, err := dataLengthUint32(conn)
          if err != nil {
              slog.Error(err.Error(), "peer", peer.Addr())
              writeStatus(Error, "invalid data length of topics provided", conn)
···
      _ = peer.RunConnOperation(op)
  }

- func (s *Server) handlePublish(peer *peer.Peer) {
+ func (s *Server) handlePublish(peer *Peer) {
      slog.Info("handling publisher", "peer", peer.Addr())
      for {
          op := func(conn net.Conn) error {
-             dataLen, err := dataLength(conn)
+             topicDataLen, err := dataLengthUint16(conn)
              if err != nil {
                  if errors.Is(err, io.EOF) {
                      return nil
···
                  writeStatus(Error, "invalid data length of data provided", conn)
                  return nil
              }
-             if dataLen == 0 {
+             if topicDataLen == 0 {
                  return nil
              }
-             topicBuf := make([]byte, dataLen)
+             topicBuf := make([]byte, topicDataLen)
              _, err = conn.Read(topicBuf)
              if err != nil {
                  slog.Error("failed to read topic from peer", "error", err, "peer", peer.Addr())
···
              }
              topicStr = strings.TrimPrefix(topicStr, "topic:")

-             dataLen, err = dataLength(conn)
+             msgDataLen, err := dataLengthUint32(conn)
              if err != nil {
                  slog.Error(err.Error(), "peer", peer.Addr())
                  writeStatus(Error, "invalid data length of data provided", conn)
                  return nil
              }
-             if dataLen == 0 {
+             if msgDataLen == 0 {
                  return nil
              }

-             dataBuf := make([]byte, dataLen)
+             dataBuf := make([]byte, msgDataLen)
              _, err = conn.Read(dataBuf)
              if err != nil {
                  slog.Error("failed to read data from peer", "error", err, "peer", peer.Addr())
···
                  s.topics[topicStr] = topic
              }

-             message := newMessage(dataBuf)
+             message := internal.NewMessage(dataBuf)

              err = topic.sendMessageToSubscribers(message)
              if err != nil {
···
      }
  }

- func (s *Server) subscribeToTopics(peer *peer.Peer, topics []string, startAt int) {
+ func (s *Server) subscribeToTopics(peer *Peer, topics []string, startAt int) {
      slog.Info("subscribing peer to topics", "topics", topics, "peer", peer.Addr())
      for _, topic := range topics {
          s.addSubsciberToTopic(topic, peer, startAt)
      }
  }

- func (s *Server) addSubsciberToTopic(topicName string, peer *peer.Peer, startAt int) {
+ func (s *Server) addSubsciberToTopic(topicName string, peer *Peer, startAt int) {
      s.mu.Lock()
      defer s.mu.Unlock()
···
      s.topics[topicName] = t
  }

- func (s *Server) unsubscribeToTopics(peer *peer.Peer, topics []string) {
+ func (s *Server) unsubscribeToTopics(peer *Peer, topics []string) {
      slog.Info("unsubscribing peer from topics", "topics", topics, "peer", peer.Addr())
      for _, topic := range topics {
          s.removeSubsciberFromTopic(topic, peer)
      }
  }

- func (s *Server) removeSubsciberFromTopic(topicName string, peer *peer.Peer) {
+ func (s *Server) removeSubsciberFromTopic(topicName string, peer *Peer) {
      s.mu.Lock()
      defer s.mu.Unlock()
···
      delete(t.subscriptions, peer.Addr())
  }

- func (s *Server) unsubscribePeerFromAllTopics(peer *peer.Peer) {
+ func (s *Server) unsubscribePeerFromAllTopics(peer *Peer) {
      s.mu.Lock()
      defer s.mu.Unlock()
···
      return nil
  }

- func readAction(peer *peer.Peer, timeout time.Duration) (Action, error) {
+ func readAction(peer *Peer, timeout time.Duration) (Action, error) {
      var action Action
      op := func(conn net.Conn) error {
          if timeout > 0 {
···
      return action, nil
  }

- func writeInvalidAction(peer *peer.Peer) {
+ func writeInvalidAction(peer *Peer) {
      op := func(conn net.Conn) error {
          writeStatus(Error, "unknown action", conn)
          return nil
···
      _ = peer.RunConnOperation(op)
  }

- func dataLength(conn net.Conn) (uint32, error) {
+ func dataLengthUint32(conn net.Conn) (uint32, error) {
      var dataLen uint32
+     err := binary.Read(conn, binary.BigEndian, &dataLen)
+     if err != nil {
+         return 0, err
+     }
+     return dataLen, nil
+ }
+
+ func dataLengthUint16(conn net.Conn) (uint16, error) {
+     var dataLen uint16
      err := binary.Read(conn, binary.BigEndian, &dataLen)
      if err != nil {
          return 0, err
···
      headers := statusB

      if len(message) > 0 {
-         sizeB := make([]byte, 4)
-         binary.BigEndian.PutUint32(sizeB, uint32(len(message)))
+         sizeB := make([]byte, 2)
+         binary.BigEndian.PutUint16(sizeB, uint16(len(message)))
          headers = append(headers, sizeB...)
      }
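With the handlePublish changes, a published message arrives as a uint16-length-prefixed "topic:<name>" header followed by a uint32-length-prefixed payload. The sketch below builds such a frame by hand purely for illustration; buildPublishFrame is not part of the client package, which does this inside PublishMessage:

package main

import (
    "encoding/binary"
    "fmt"
)

// buildPublishFrame lays out a publish frame the way the server now reads it:
//   uint16 big-endian length of "topic:<name>", then that string,
//   uint32 big-endian length of the payload, then the payload bytes.
func buildPublishFrame(topic string, payload []byte) []byte {
    topicHeader := fmt.Sprintf("topic:%s", topic)

    frame := make([]byte, 2)
    binary.BigEndian.PutUint16(frame, uint16(len(topicHeader)))
    frame = append(frame, []byte(topicHeader)...)

    dataLen := make([]byte, 4)
    binary.BigEndian.PutUint32(dataLen, uint32(len(payload)))
    frame = append(frame, dataLen...)

    return append(frame, payload...)
}

func main() {
    frame := buildPublishFrame("topic a", []byte("message 1"))
    fmt.Printf("%d byte frame: % x\n", len(frame), frame)
}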
+11 -10
server/server_test.go → internal/server/server_test.go
···

      "github.com/stretchr/testify/assert"
      "github.com/stretchr/testify/require"
+     "github.com/willdot/messagebroker/internal/messagestore"
  )

  const (
···
      srv.topics[topicName] = &topic{
          name:          topicName,
          subscriptions: make(map[net.Addr]*subscriber),
-         messageStore:  NewMemoryStore(),
+         messageStore:  messagestore.NewMemoryStore(),
      }

      return srv
···

      expectedMessage := "unknown action"

-     var dataLen uint32
+     var dataLen uint16
      err = binary.Read(conn, binary.BigEndian, &dataLen)
      require.NoError(t, err)
      assert.Equal(t, len(expectedMessage), int(dataLen))
···

      expectedMessage := "topic data does not contain 'topic:' prefix"

-     var dataLen uint32
+     var dataLen uint16
      err = binary.Read(publisherConn, binary.BigEndian, &dataLen)
      require.NoError(t, err)
      assert.Equal(t, len(expectedMessage), int(dataLen))
···

      // check the subsribers got the data
      readMessage := func(conn net.Conn, ack Action) {
-         var topicLen uint64
+         var topicLen uint16
          err = binary.Read(conn, binary.BigEndian, &topicLen)
          require.NoError(t, err)

···
      readMessage(subscriberConn, Ack)
      // reading for another message should now timeout but give enough time for the ack delay to kick in
      // should the second read of the message not have been ack'd properly
-     var topicLen uint64
+     var topicLen uint16
      _ = subscriberConn.SetReadDeadline(time.Now().Add(ackDelay + time.Millisecond*100))
      err = binary.Read(subscriberConn, binary.BigEndian, &topicLen)
      require.Error(t, err)
···

      // check the subsribers got the data
      readMessage := func(conn net.Conn, ack bool) {
-         var topicLen uint64
+         var topicLen uint16
          err = binary.Read(conn, binary.BigEndian, &topicLen)
          require.NoError(t, err)

···

      // reading for another message should now timeout but give enough time for the ack delay to kick in
      // should the second read of the message not have been ack'd properly
-     var topicLen uint64
+     var topicLen uint16
      _ = subscriberConn.SetReadDeadline(time.Now().Add(ackDelay + time.Millisecond*100))
      err = binary.Read(subscriberConn, binary.BigEndian, &topicLen)
      require.Error(t, err)
···

      // check the subsribers got the data
      readMessage := func(conn net.Conn, ack bool) {
-         var topicLen uint64
+         var topicLen uint16
          err = binary.Read(conn, binary.BigEndian, &topicLen)
          require.NoError(t, err)

···
      readMessage(subscriberConn, false)

      // reading for the message should now timeout as we have nack'd the message too many times
-     var topicLen uint64
+     var topicLen uint16
      _ = subscriberConn.SetReadDeadline(time.Now().Add(ackDelay + time.Millisecond*100))
      err = binary.Read(subscriberConn, binary.BigEndian, &topicLen)
      require.Error(t, err)
···
  }

  func readMessage(t *testing.T, subscriberConn net.Conn) []byte {
-     var topicLen uint64
+     var topicLen uint16
      err := binary.Read(subscriberConn, binary.BigEndian, &topicLen)
      require.NoError(t, err)
+15 -25
server/subscriber.go → internal/server/subscriber.go
···
      "net"
      "time"

-     "github.com/willdot/messagebroker/server/peer"
+     "github.com/willdot/messagebroker/internal"
  )

  type subscriber struct {
-     peer *peer.Peer
+     peer *Peer
      topic string
-     messages chan message
+     messages chan internal.Message
      unsubscribeCh chan struct{}

      ackDelay time.Duration
      ackTimeout time.Duration
  }

- type message struct {
-     data []byte
-     deliveryCount int
- }
-
- func newMessage(data []byte) message {
-     return message{data: data, deliveryCount: 1}
- }
-
- func newSubscriber(peer *peer.Peer, topic *topic, ackDelay, ackTimeout time.Duration, startAt int) *subscriber {
+ func newSubscriber(peer *Peer, topic *topic, ackDelay, ackTimeout time.Duration, startAt int) *subscriber {
      s := &subscriber{
          peer: peer,
          topic: topic.name,
-         messages: make(chan message),
+         messages: make(chan internal.Message),
          ackDelay: ackDelay,
          ackTimeout: ackTimeout,
          unsubscribeCh: make(chan struct{}, 1),
···
      go s.sendMessages()

      go func() {
-         topic.messageStore.ReadFrom(startAt, func(msg message) {
+         topic.messageStore.ReadFrom(startAt, func(msg internal.Message) {
              select {
              case s.messages <- msg:
                  return
···
              continue
          }

-         if msg.deliveryCount >= 5 {
+         if msg.DeliveryCount >= 5 {
              slog.Error("max delivery count for message. Dropping", "peer", s.peer.Addr())
              continue
          }

-         msg.deliveryCount++
+         msg.DeliveryCount++
          s.addMessage(msg, s.ackDelay)
      }
  }

- func (s *subscriber) addMessage(msg message, delay time.Duration) {
+ func (s *subscriber) addMessage(msg internal.Message, delay time.Duration) {
      go func() {
          timer := time.NewTimer(delay)
          defer timer.Stop()
···
      }()
  }

- func (s *subscriber) sendMessage(topic string, msg message) (bool, error) {
+ func (s *subscriber) sendMessage(topic string, msg internal.Message) (bool, error) {
      var ack bool
      op := func(conn net.Conn) error {
-         // TODO: why did I chose uint64 for topic len?
-         topicB := make([]byte, 8)
-         binary.BigEndian.PutUint64(topicB, uint64(len(topic)))
+         topicB := make([]byte, 2)
+         binary.BigEndian.PutUint16(topicB, uint16(len(topic)))

          headers := topicB
          headers = append(headers, []byte(topic)...)

          // TODO: if message is empty, return error?
          dataLenB := make([]byte, 8)
-         binary.BigEndian.PutUint64(dataLenB, uint64(len(msg.data)))
+         binary.BigEndian.PutUint64(dataLenB, uint64(len(msg.Data)))
          headers = append(headers, dataLenB...)

-         _, err := conn.Write(append(headers, msg.data...))
+         _, err := conn.Write(append(headers, msg.Data...))
          if err != nil {
              return fmt.Errorf("failed to write to peer: %w", err)
          }

-         var ackRes Action
          if err := conn.SetReadDeadline(time.Now().Add(s.ackTimeout)); err != nil {
              slog.Error("failed to set connection read deadline", "error", err, "peer", s.peer.Addr())
          }
···
                  slog.Error("failed to reset connection read deadline", "error", err, "peer", s.peer.Addr())
              }
          }()
+         var ackRes Action
          err = binary.Read(conn, binary.BigEndian, &ackRes)
          if err != nil {
              return fmt.Errorf("failed to read ack from peer: %w", err)
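On the delivery side the topic length shrinks to uint16 (the payload length stays uint64), and the server then waits up to ackTimeout for the subscriber's ack. The sketch below decodes one delivered frame; readDelivery is illustrative only and not part of the client package:

package main

import (
    "encoding/binary"
    "fmt"
    "io"
    "net"
)

// readDelivery decodes the frame written by subscriber.sendMessage:
// uint16 topic length, topic bytes, uint64 data length, data bytes.
func readDelivery(conn net.Conn) (string, []byte, error) {
    var topicLen uint16
    if err := binary.Read(conn, binary.BigEndian, &topicLen); err != nil {
        return "", nil, err
    }
    topic := make([]byte, topicLen)
    if _, err := io.ReadFull(conn, topic); err != nil {
        return "", nil, err
    }

    var dataLen uint64
    if err := binary.Read(conn, binary.BigEndian, &dataLen); err != nil {
        return "", nil, err
    }
    data := make([]byte, dataLen)
    if _, err := io.ReadFull(conn, data); err != nil {
        return "", nil, err
    }
    return string(topic), data, nil
}

func main() {
    server, subscriber := net.Pipe()

    // write one frame the way the server side does
    go func() {
        defer server.Close()
        topic, data := "topic a", []byte("hello")
        header := make([]byte, 2)
        binary.BigEndian.PutUint16(header, uint16(len(topic)))
        header = append(header, []byte(topic)...)
        dataLenB := make([]byte, 8)
        binary.BigEndian.PutUint64(dataLenB, uint64(len(data)))
        header = append(header, dataLenB...)
        _, _ = server.Write(append(header, data...))
    }()

    topic, data, err := readDelivery(subscriber)
    fmt.Println(topic, string(data), err) // topic a hello <nil>
}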
+7 -4
server/topic.go → internal/server/topic.go
···
      "fmt"
      "net"
      "sync"
+
+     "github.com/willdot/messagebroker/internal"
+     "github.com/willdot/messagebroker/internal/messagestore"
  )

  type Store interface {
-     Write(msg message) error
-     ReadFrom(offset int, handleFunc func(msg message))
+     Write(msg internal.Message) error
+     ReadFrom(offset int, handleFunc func(msg internal.Message))
  }

  type topic struct {
···
  }

  func newTopic(name string) *topic {
-     messageStore := NewMemoryStore()
+     messageStore := messagestore.NewMemoryStore()
      return &topic{
          name: name,
          subscriptions: make(map[net.Addr]*subscriber),
···
      }
  }

- func (t *topic) sendMessageToSubscribers(msg message) error {
+ func (t *topic) sendMessageToSubscribers(msg internal.Message) error {
      err := t.messageStore.Write(msg)
      if err != nil {
          return fmt.Errorf("failed to write message to store: %w", err)
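Because topic now depends on the exported Store interface rather than the concrete memory store, other backends can be swapped in. A hypothetical implementation, shown only to illustrate the shape the interface requires (DiscardStore is not part of the change):

package messagestore

import "github.com/willdot/messagebroker/internal"

// DiscardStore satisfies the server's Store interface but never retains
// messages, so ReadFrom has nothing to replay. Illustrative only.
type DiscardStore struct{}

// Write accepts and drops the message.
func (DiscardStore) Write(msg internal.Message) error { return nil }

// ReadFrom is a no-op because nothing is ever stored.
func (DiscardStore) ReadFrom(offset int, handleFunc func(msg internal.Message)) {}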