···11//! Protocol API
2233use std::{
44- collections::{BTreeMap, BTreeSet},
44+ collections::{BTreeMap, BTreeSet, HashMap},
55 time::Duration,
66};
77···5454 data: Bytes,
5555}
56565757+/// Ask a remote peer to return a stream of its known PeerInfos
5858+#[derive(Debug, Clone, Serialize, Deserialize)]
5959+struct RequestPeerInfos {
6060+ remote_id: NodeId,
6161+}
6262+6363+/// Return a stream of peer infos
6464+#[derive(Debug, Clone, Serialize, Deserialize)]
6565+struct HandlePeerInfosRequest {}
6666+6767+/// Return this peer's local state
6868+#[derive(Debug, Clone, Serialize, Deserialize)]
6969+struct MyPeerInfo {}
7070+5771/// List all peers, and the subscriptions that they're believed to have
5872/// "believed", because subscription info can be out of date
5973#[derive(Debug, Clone, Serialize, Deserialize)]
6060-struct Peers {}
7474+struct MyPeers {}
61756276/// Prune peers that haven't been seen since the given timestamp
6377#[derive(Debug, Clone, Serialize, Deserialize)]
6464-struct PrunePeers {
7878+struct PruneMyPeers {
6579 cutoff_timestamp: u64,
6680}
6781···7488 info: PeerInfo,
7589}
76907777-/// list out our local subscriptions
7878-#[derive(Debug, Clone, Serialize, Deserialize)]
7979-struct MySubscriptions {}
8080-8191/// Request that a node list out its current subscriptions
8292#[derive(Debug, Clone, Serialize, Deserialize)]
8393struct GetSubscriptions {
···8595 node_id: NodeId,
8696}
87979898+/// Tell all remote peers that we're leaving the network
9999+#[derive(Debug, Clone, Serialize, Deserialize)]
100100+struct BroadcastLeaving {}
101101+102102+/// Tell a remote node that we're leaving the network
103103+#[derive(Debug, Clone, Serialize, Deserialize)]
104104+struct HandleLeaving {
105105+ /// the node id of the peer leaving
106106+ node_id: NodeId,
107107+}
108108+88109/// details about a peer in the network
89110#[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq, Serialize, Deserialize)]
90111pub(crate) struct PeerInfo {
···100121enum Protocol {
101122 // swarm coordination
102123 #[rpc(tx=mpsc::Sender<PeerInfo>)]
103103- Peers(Peers),
124124+ MyPeers(MyPeers),
104125 #[rpc(tx=oneshot::Sender<PeerInfo>)]
105105- MyPeerInfo(MySubscriptions),
126126+ MyPeerInfo(MyPeerInfo),
127127+ #[rpc(tx=oneshot::Sender<()>)]
128128+ PruneMyPeers(PruneMyPeers),
129129+ #[rpc(tx=oneshot::Sender<()>)]
130130+ BroadcastLeaving(BroadcastLeaving),
131131+ #[rpc(tx=oneshot::Sender<()>)]
132132+ HandleLeaving(HandleLeaving),
133133+106134 #[rpc(tx=oneshot::Sender<()>)]
107107- PrunePeers(PrunePeers),
135135+ RequestPeerInfos(RequestPeerInfos),
136136+ #[rpc(tx=mpsc::Sender<PeerInfo>)]
137137+ HandlePeerInfosRequest(HandlePeerInfosRequest),
138138+108139 #[rpc(tx=oneshot::Sender<()>)]
109140 SendPeerInfo(SendPeerInfo),
110141 #[rpc(tx=oneshot::Sender<()>)]
···129160 /// peers we'll permanently broadcast to
130161 anchor_peers: Vec<NodeId>,
131162 /// set of all peers we believe to be alive in the swarm
132132- peers: BTreeSet<PeerInfo>,
163163+ peers: HashMap<NodeId, PeerInfo>,
133164 /// set of stream subscriptions we're receiving data for
134165 subscriptions: BTreeMap<String, BTreeSet<NodeId>>,
135166 /// pool of open RPC connections
···155186 endpoint: endpoint.clone(),
156187 recv: rx,
157188 anchor_peers,
158158- peers: BTreeSet::new(),
189189+ peers: HashMap::new(),
159190 subscriptions: BTreeMap::new(),
160191 connections: BTreeMap::new(),
161192 handler: Box::new(handler),
···224255 async fn handle(&mut self, msg: Message) {
225256 match msg {
226257 // swarm coordination
227227- Message::Peers(sub) => {
258258+ Message::MyPeers(sub) => {
228259 debug!("peers {:?}", sub);
229260 let WithChannels { tx, .. } = sub;
230261···232263 let mut sent = BTreeSet::new();
233264234265 // stream over the list of peers we know about
235235- for sub in &self.peers {
236236- sent.insert(&sub.node_id);
266266+ for (id, sub) in &self.peers {
267267+ sent.insert(*id);
237268 if tx.send(sub.clone()).await.is_err() {
238269 break;
239270 }
240271 }
241272242242- // send over any anchor peers we know about, but haven't already sent
243243- // these go with empty subscription sets, which isn't great.
273273+ // send over any anchor peers we know about, but haven't already
274274+ // sent from our peers list. these go with empty subscription
275275+ // sets, which isn't great.
244276 for anchor in &self.anchor_peers {
245277 if sent.contains(anchor) {
246278 continue;
···259291 let WithChannels { tx, .. } = sub;
260292 tx.send(self.my_peer_info()).await.ok();
261293 }
294294+ Message::PruneMyPeers(sub) => {
295295+ let WithChannels { tx, inner, .. } = sub;
296296+ // prune peers that haven't been seen since the given timestamp
297297+ self.peers
298298+ .retain(|_, peer| peer.timestamp >= inner.cutoff_timestamp);
299299+ tx.send(()).await.ok();
300300+ }
301301+302302+ Message::RequestPeerInfos(list) => {
303303+ let WithChannels { inner, tx, .. } = list;
304304+ let conn = self.get_conn(&inner.remote_id).await;
305305+ let mut rx = conn
306306+ .rpc
307307+ .server_streaming(HandlePeerInfosRequest {}, 1000)
308308+ .await
309309+ .unwrap();
310310+ while let Some(mut peer_info) = rx.recv().await.unwrap() {
311311+ // update our tracked state about this peer, using timestamps
312312+ // to avoid confusion from external sources
313313+ peer_info.timestamp = timestamp();
314314+ self.peers.insert(peer_info.node_id, peer_info);
315315+ }
316316+317317+ tx.send(()).await.ok();
318318+ }
319319+ Message::HandlePeerInfosRequest(list) => {
320320+ let WithChannels { tx, .. } = list;
321321+ for (_, peer) in self.peers.clone() {
322322+ if let Err(e) = tx.send(peer).await {
323323+ tracing::error!("send peer error: {:?}", e);
324324+ }
325325+ }
326326+ }
327327+262328 Message::SendPeerInfo(info) => {
263329 let WithChannels { inner, tx, .. } = info;
264330 debug!(
···282348 // update our tracked state about this peer, using timestamps
283349 // to avoid confusion from external sources
284350 inner.timestamp = timestamp();
285285- self.peers.insert(inner);
351351+ self.peers.insert(inner.node_id, inner);
286352 tx.send(()).await.ok();
287353 }
288288- Message::PrunePeers(sub) => {
289289- let WithChannels { tx, inner, .. } = sub;
290290- // prune peers that haven't been seen since the given timestamp
291291- self.peers
292292- .retain(|peer| peer.timestamp >= inner.cutoff_timestamp);
354354+ Message::BroadcastLeaving(leaving) => {
355355+ let WithChannels { tx, .. } = leaving;
356356+ let node_id = self.endpoint.node_id();
357357+ let remotes = self
358358+ .peers
359359+ .values()
360360+ .map(|peer| peer.node_id)
361361+ .collect::<Vec<_>>();
362362+ for remote_node_id in remotes {
363363+ // ensure connection
364364+ let conn = self.get_conn(&remote_node_id).await;
365365+ if let Err(err) = conn.rpc.rpc(HandleLeaving { node_id }).await {
366366+ tracing::error!("failed to handle leaving: {}", err);
367367+ }
368368+ }
369369+ tx.send(()).await.ok();
370370+ }
371371+ Message::HandleLeaving(leaving) => {
372372+ let WithChannels { tx, inner, .. } = leaving;
373373+ self.peers.remove(&inner.node_id);
293374 tx.send(()).await.ok();
294375 }
295376···379460 peer_info_broadcast_interval: Duration,
380461 peer_prune_interval: Duration,
381462 ) -> Self {
463463+ let anchors = anchor_peers.clone();
382464 let api = Actor::spawn(endpoint, anchor_peers, handler);
383465466466+ // hydrate our peers list from anchor nodes
467467+ let api2 = api.clone();
468468+ n0_future::task::spawn(async move {
469469+ for anchor in anchors {
470470+ if let Err(e) = api2.inner.rpc(RequestPeerInfos { remote_id: anchor }).await {
471471+ tracing::error!("requesting peer infos: {:?}", e);
472472+ }
473473+ }
474474+ });
475475+384476 // re-broadcast our subscriptions every interval
385477 if peer_info_broadcast_interval > Duration::from_millis(0) {
386478 let api2 = api.clone();
···401493 loop {
402494 tokio::time::sleep(peer_prune_interval).await;
403495 let cutoff_timestamp = timestamp() - peer_prune_interval.as_secs();
404404- if let Err(e) = api2.inner.rpc(PrunePeers { cutoff_timestamp }).await {
496496+ if let Err(e) = api2.inner.rpc(PruneMyPeers { cutoff_timestamp }).await {
405497 tracing::error!("pruning stale subscriptions: {:?}", e);
406498 }
407499 }
···428520429521 /// List all peers we know about, and the subscriptions they have
430522 pub(crate) async fn peers(&self) -> irpc::Result<Vec<PeerInfo>> {
431431- let mut rx = self.inner.server_streaming(Peers {}, 1000).await?;
523523+ let mut rx = self.inner.server_streaming(MyPeers {}, 1000).await?;
432524 let mut peers = Vec::new();
433525 while let Some(peer) = rx.recv().await? {
434526 peers.push(peer);
···436528 Ok(peers)
437529 }
438530439439- pub(crate) async fn my_subscriptions(&self) -> irpc::Result<PeerInfo> {
440440- self.inner.rpc(MySubscriptions {}).await
531531+ pub(crate) async fn my_peer_info(&self) -> irpc::Result<PeerInfo> {
532532+ self.inner.rpc(MyPeerInfo {}).await
533533+ }
534534+535535+ async fn broadcast_peer_info(&self) -> irpc::Result<JoinHandle<()>> {
536536+ let peers = self.peers().await?;
537537+ let subs = self.my_peer_info().await?;
538538+ let client = self.inner.clone();
539539+ let handle = n0_future::task::spawn(async move {
540540+ if let Err(e) = broadcast_peer_info_inner(client, peers, subs).await {
541541+ tracing::error!("Peer announcement task failed: {:?}", e);
542542+ }
543543+ });
544544+ Ok(handle)
545545+ }
546546+547547+ pub(crate) async fn leaving(&self) -> irpc::Result<()> {
548548+ self.inner.rpc(BroadcastLeaving {}).await
441549 }
442550443551 pub(crate) async fn subscribe(&self, key: String, self_id: NodeId) -> irpc::Result<()> {
···452560 self.broadcast_peer_info().await?;
453561454562 Ok(())
455455- }
456456-457457- async fn broadcast_peer_info(&self) -> irpc::Result<JoinHandle<()>> {
458458- let peers = self.peers().await?;
459459- let subs = self.my_subscriptions().await?;
460460- let client = self.inner.clone();
461461- let handle = n0_future::task::spawn(async move {
462462- if let Err(e) = broadcast_peer_info_inner(client, peers, subs).await {
463463- tracing::error!("Peer announcement task failed: {:?}", e);
464464- }
465465- });
466466- Ok(handle)
467563 }
468564469565 pub(crate) async fn unsubscribe(&self, key: String, self_id: NodeId) -> irpc::Result<()> {
+8
rust/iroh-streamplace/src/receiver.rs
···8585 Ok(())
8686 }
87878888+ /// Get our node address
8889 #[uniffi::method(async_runtime = "tokio")]
8990 pub async fn node_addr(&self) -> NodeAddr {
9091 self.endpoint.node_addr().await
9292+ }
9393+9494+ /// tell the network that we're leaving. This should only be called just before disconnecting.
9595+ #[uniffi::method(async_runtime = "tokio")]
9696+ pub async fn leaving(&self) -> Result<(), Error> {
9797+ self.api.leaving().await?;
9898+ Ok(())
9199 }
92100}
93101
+6-3
rust/iroh-streamplace/swarm.md
···2222* when a node unsubscribes from a feed, it broadcasts its updated `PeerInfo` to all known peers
2323* every `DEFAULT_PEER_INFO_REPUBLISH_INTERVAL`, a node broadcasts its current `PeerInfo` to all known peers
24242525-every `DEFAULT_PEER_PRUNE_INTERVAL`, nodes will examine their local list of peers, and prune any who's latest timestamp is older than the current time, minus the prune interval, this is to purge peers that die off without notice
2525+every `DEFAULT_PEER_PRUNE_INTERVAL`, nodes will examine their local list of peers, and prune any whose latest timestamp is older than the current time, minus the prune interval. This is to purge peers that die off without notice.
26262727### Anchor Peers
2828-Anchor peers are _always_ transmitted to. They're expcted to be high-availability nodes. Any broadcast message will always try to
2828+Anchor peers are _always_ transmitted to. They're expected to be high-availability nodes. Any broadcast message will always broadcast to anchor peers, regardless of whether they are online at the time, or not.
2929+3030+### Peer Listing Messages
3131+At startup, the new nodes will send a `RequestPeerInfos` request to all anchor nodes. Each anchor node will respond with their list of `PeerInfo`s to inform new nodes of their current view of the swarm. There's room to grow on maintaining swarm health, but this message type is a good primitive as a start.
29323033### FFI API
3131-The FFI API to goland is a single method on the `Receiver`: `peers`. It returns an array of `PeerInfo`, representing the nodes current view of the swarm.
3434+The FFI API to golang is 2 methods on the `Receiver`: `peers` and `leaving`. `peers` returns an array of `PeerInfo`, representing the node's current view of the swarm. `leaving` should be called just before a node shuts down to notify the network that the node is going away. It's not critical that `leaving` is called, but calling it will cut down on stale data living in the network.