Limit connectionRetry attempts to requested peers/servers

There is a bug where spurious PeerStateChange events from failed auth
attempts make their way into the contact retry plugin and result in
retry attempts that will *always* fail.

Note: this would also happen in the case of blocked peers, *however* those would also be short-circuit failed in the engine.
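
For context, the fix keeps a set of peers/servers that were explicitly requested (via QueuePeerRequest / QueueJoinServer) and ignores state-change events for anything else. Below is a minimal, standalone sketch of that gating pattern in Go; retryGate, stateEvent, and the example addresses are hypothetical names for illustration, not the plugin's real API.

package main

import (
	"fmt"
	"sync"
)

// stateEvent is a hypothetical stand-in for a PeerStateChange / ServerStateChange event.
type stateEvent struct {
	id    string // onion address of the peer or server
	state string // e.g. "Disconnected"
}

// retryGate only acts on events for peers/servers that were explicitly requested.
type retryGate struct {
	authorized sync.Map // id -> true; populated when a connection is explicitly requested
}

// authorize records that a connection to id was explicitly requested.
func (g *retryGate) authorize(id string) {
	g.authorized.Store(id, true)
}

// handle drops state changes for unknown ids (e.g. spurious events from failed
// auth attempts); retrying those connections would always fail.
func (g *retryGate) handle(ev stateEvent) {
	if _, ok := g.authorized.Load(ev.id); !ok {
		return
	}
	fmt.Println("scheduling retry for", ev.id, "after state:", ev.state)
}

func main() {
	g := &retryGate{}
	g.authorize("requestedpeeraddress")
	g.handle(stateEvent{id: "requestedpeeraddress", state: "Disconnected"}) // handled
	g.handle(stateEvent{id: "unknownpeeraddress", state: "Failed"})         // ignored
}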
Sarah Jamie Lewis 2023-08-28 13:17:55 -07:00
parent 602041d1c2
commit 0997406e51
1 changed file with 19 additions and 6 deletions


@@ -3,6 +3,7 @@ package plugins
import (
"cwtch.im/cwtch/event"
"cwtch.im/cwtch/protocol/connections"
"git.openprivacy.ca/openprivacy/connectivity/tor"
"git.openprivacy.ca/openprivacy/log"
"math"
"strconv"
@@ -123,11 +124,12 @@ type contactRetry struct {
connections sync.Map //[string]*contact
pendingQueue *connectionQueue
priorityQueue *connectionQueue
authorizedPeers sync.Map
}
// NewConnectionRetry returns a Plugin that when started will retry connecting to contacts with a failedCount timing
func NewConnectionRetry(bus event.Manager, onion string) Plugin {
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1), connections: sync.Map{}, ACNUp: false, ACNUpTime: time.Now(), protocolEngine: false, onion: onion, pendingQueue: newConnectionQueue(), priorityQueue: newConnectionQueue()}
cr := &contactRetry{bus: bus, queue: event.NewQueue(), breakChan: make(chan bool, 1), authorizedPeers: sync.Map{}, connections: sync.Map{}, ACNUp: false, ACNUpTime: time.Now(), protocolEngine: false, onion: onion, pendingQueue: newConnectionQueue(), priorityQueue: newConnectionQueue()}
return cr
}
@@ -229,13 +231,17 @@ func (cr *contactRetry) run() {
case event.PeerStateChange:
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
peer := e.Data[event.RemotePeer]
cr.handleEvent(peer, state, peerConn)
// only handle state change events from pre-authorized peers;
if _, exists := cr.authorizedPeers.Load(peer); exists {
cr.handleEvent(peer, state, peerConn)
}
case event.ServerStateChange:
state := connections.ConnectionStateToType()[e.Data[event.ConnectionState]]
server := e.Data[event.GroupServer]
cr.handleEvent(server, state, serverConn)
// only handle state change events from pre-authorized servers;
if _, exists := cr.authorizedPeers.Load(server); exists {
cr.handleEvent(server, state, serverConn)
}
case event.QueueJoinServer:
fallthrough
case event.QueuePeerRequest:
@@ -252,7 +258,8 @@ func (cr *contactRetry) run() {
id = server
cr.addConnection(server, connections.DISCONNECTED, serverConn, lastSeen)
}
// this was an authorized event, and so we store this peer.
cr.authorizedPeers.Store(id, true)
if c, ok := cr.connections.Load(id); ok {
contact := c.(*contact)
if contact.state == connections.DISCONNECTED && !contact.queued {
@@ -423,6 +430,12 @@ func (cr *contactRetry) handleEvent(id string, state connections.ConnectionState
return
}
// reject events that contain invalid hostnames...we cannot connect to them
// and they could result in spurious connection attempts...
if !tor.IsValidHostname(id) {
return
}
if _, exists := cr.connections.Load(id); !exists {
// We have an event for something we don't know about...
// The only reason this should happen is if a *new* Peer/Server connection has changed.