@@ -57,13 +57,14 @@ type HandshakeManager struct {
 	messageMetrics  *MessageMetrics
 	metricInitiated metrics.Counter
 	metricTimedOut  metrics.Counter
+	f               *Interface
 	l               *logrus.Logger
 
 	// can be used to trigger outbound handshake for the given vpnIp
 	trigger chan iputil.VpnIp
 }
 
-func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
+func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
 	return &HandshakeManager{
 		vpnIps:  map[iputil.VpnIp]*HostInfo{},
 		indexes: map[uint32]*HostInfo{},
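The new `f *Interface` field is what lets the `EncWriter` parameter disappear from the call sites below. Since the interface also needs the manager, the two presumably have to be wired in two phases; here is a minimal, self-contained sketch of that pattern with stand-in types (only the field name `f` comes from this diff):

```go
package main

import "fmt"

// Stand-ins for Interface and HandshakeManager, showing the two-phase
// wiring a mutual reference like `f *Interface` requires.
type Interface struct{ hm *HandshakeManager }

type HandshakeManager struct{ f *Interface }

func main() {
	// Construct the manager first with the field left nil...
	hm := &HandshakeManager{}
	// ...then the interface, which knows about the manager...
	ifce := &Interface{hm: hm}
	// ...and finally close the cycle before any goroutine starts
	// dereferencing hm.f.
	hm.f = ifce
	fmt.Println(hm.f == ifce) // true
}
```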
@@ -80,7 +81,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
 	}
 }
 
-func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
+func (c *HandshakeManager) Run(ctx context.Context) {
 	clockSource := time.NewTicker(c.config.tryInterval)
 	defer clockSource.Stop()
 
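`Run` now takes only a context, so callers no longer thread an `EncWriter` through. A self-contained sketch of the same ticker-plus-cancellation loop shape, using stand-in types (the string channel stands in for `chan iputil.VpnIp`):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// run mirrors the shape of HandshakeManager.Run: a ticker drives periodic
// work and a trigger channel allows immediate wakeups, until the context
// is cancelled.
func run(ctx context.Context, tryInterval time.Duration, trigger <-chan string) {
	clock := time.NewTicker(tryInterval)
	defer clock.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case ip := <-trigger:
			fmt.Println("triggered handshake for", ip)
		case now := <-clock.C:
			fmt.Println("tick at", now)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	trigger := make(chan string, 1)
	trigger <- "10.0.0.5"
	run(ctx, 100*time.Millisecond, trigger) // started with `go run(...)` in real code
}
```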
@@ -89,25 +90,25 @@ func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
 		case <-ctx.Done():
 			return
 		case vpnIP := <-c.trigger:
-			c.handleOutbound(vpnIP, f, true)
+			c.handleOutbound(vpnIP, true)
 		case now := <-clockSource.C:
-			c.NextOutboundHandshakeTimerTick(now, f)
+			c.NextOutboundHandshakeTimerTick(now)
 		}
 	}
 }
 
-func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
+func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
 	c.OutboundHandshakeTimer.Advance(now)
 	for {
 		vpnIp, has := c.OutboundHandshakeTimer.Purge()
 		if !has {
 			break
 		}
-		c.handleOutbound(vpnIp, f, false)
+		c.handleOutbound(vpnIp, false)
 	}
 }
 
-func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
+func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, lighthouseTriggered bool) {
 	hostinfo := c.QueryVpnIp(vpnIp)
 	if hostinfo == nil {
 		return
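`NextOutboundHandshakeTimerTick` advances the wheel to `now` and then drains every entry that has come due, calling `handleOutbound` for each. A self-contained toy version of that advance-then-purge drain (the map-based wheel here is a stand-in for nebula's real timer wheel):

```go
package main

import (
	"fmt"
	"time"
)

// toyWheel stands in for the OutboundHandshakeTimer: Advance moves the
// clock forward, Purge pops one expired entry at a time until none remain,
// which is exactly how NextOutboundHandshakeTimerTick drains it.
type toyWheel struct {
	now     time.Time
	entries map[string]time.Time
}

func (w *toyWheel) Advance(now time.Time) { w.now = now }

func (w *toyWheel) Purge() (string, bool) {
	for ip, due := range w.entries {
		if !due.After(w.now) {
			delete(w.entries, ip)
			return ip, true
		}
	}
	return "", false
}

func main() {
	start := time.Now()
	w := &toyWheel{entries: map[string]time.Time{
		"10.0.0.5": start.Add(50 * time.Millisecond),
		"10.0.0.6": start.Add(500 * time.Millisecond),
	}}

	w.Advance(start.Add(100 * time.Millisecond))
	for {
		ip, has := w.Purge()
		if !has {
			break // same exit condition as the loop in the diff
		}
		fmt.Println("handshake retry due for", ip) // only 10.0.0.5
	}
}
```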
@@ -122,14 +123,6 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 		return
 	}
 
-	// Check if we have a handshake packet to transmit yet
-	if !hostinfo.HandshakeReady {
-		// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
-		// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
-		return
-	}
-
 	// If we are out of time, clean up
 	if hostinfo.HandshakeCounter >= c.config.retries {
 		hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.mainHostMap.preferredRanges)).
@@ -143,6 +136,17 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 		return
 	}
 
+	// Increment the counter to increase our delay, linear backoff
+	hostinfo.HandshakeCounter++
+
+	// Check if we have a handshake packet to transmit yet
+	if !hostinfo.HandshakeReady {
+		if !ixHandshakeStage0(c.f, hostinfo) {
+			c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
+			return
+		}
+	}
+
 	// Get a remotes object if we don't already have one.
 	// This is mainly to protect us as this should never be the case
 	// NB ^ This comment doesn't jive. It's how the thing gets initialized.
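Two behavioral notes on this hunk: the counter now increments before the ready check, so even attempts that fail in `ixHandshakeStage0` back off, and the re-add delay is `tryInterval * HandshakeCounter`, i.e. linear backoff. A quick computation of the schedule that produces (the 100ms/20 figures are illustrative stand-ins for `HandshakeConfig` values, not confirmed defaults):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values; the real ones come from HandshakeConfig.
	tryInterval := 100 * time.Millisecond
	retries := 20

	var elapsed time.Duration
	for n := 1; n <= retries; n++ {
		elapsed += tryInterval * time.Duration(n) // delay grows linearly per attempt
	}
	// sum of 1..20 = 210 intervals -> 21s with these numbers
	fmt.Printf("handshake abandoned after ~%v\n", elapsed)
}
```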
@@ -170,7 +174,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 		// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
 		// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
 		// the learned public ip for them. Query again to short circuit the promotion counter
-		c.lightHouse.QueryServer(vpnIp, f)
+		c.lightHouse.QueryServer(vpnIp, c.f)
 	}
 
 	// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
@@ -214,7 +218,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 			relayHostInfo := c.mainHostMap.QueryVpnIp(*relay)
 			if relayHostInfo == nil || relayHostInfo.remote == nil {
 				hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
-				f.Handshake(*relay)
+				c.f.Handshake(*relay)
 				continue
 			}
 			// Check the relay HostInfo to see if we already established a relay through it
@@ -222,7 +226,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 			switch existingRelay.State {
 			case Established:
 				hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
-				f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
+				c.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
 			case Requested:
 				hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
 				// Re-send the CreateRelay request, in case the previous one was lost.
@@ -239,7 +243,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 					Error("Failed to marshal Control message to create relay")
 				} else {
 					// This must send over the hostinfo, not over hm.Hosts[ip]
-					f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+					c.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
 					c.l.WithFields(logrus.Fields{
 						"relayFrom": c.lightHouse.myVpnIp,
 						"relayTo": vpnIp,
@@ -274,7 +278,7 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 					WithError(err).
 					Error("Failed to marshal Control message to create relay")
 				} else {
-					f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+					c.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
 					c.l.WithFields(logrus.Fields{
 						"relayFrom": c.lightHouse.myVpnIp,
 						"relayTo": vpnIp,
@@ -287,23 +291,40 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
 		}
 	}
 
-	// Increment the counter to increase our delay, linear backoff
-	hostinfo.HandshakeCounter++
-
 	// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
 	if !lighthouseTriggered {
 		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
 	}
 }
 
-// AddVpnIp will try to handshake with the provided vpn ip and return the hostinfo for it.
-func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp) *HostInfo {
-	// A write lock is used to avoid having to recheck the map and trading a read lock for a write lock
-	c.Lock()
-	defer c.Unlock()
+// GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
+// The second return value will be true if the hostinfo is ready to transmit traffic
+func (hm *HandshakeManager) GetOrHandshake(vpnIp iputil.VpnIp, cacheCb func(*HostInfo)) (*HostInfo, bool) {
+	// Check the main hostmap and maintain a read lock if our host is not there
+	hm.mainHostMap.RLock()
+	if h, ok := hm.mainHostMap.Hosts[vpnIp]; ok {
+		hm.mainHostMap.RUnlock()
+		// Do not attempt promotion if you are a lighthouse
+		if !hm.lightHouse.amLighthouse {
+			h.TryPromoteBest(hm.mainHostMap.preferredRanges, hm.f)
+		}
+		return h, true
+	}
+
+	defer hm.mainHostMap.RUnlock()
+	return hm.StartHandshake(vpnIp, cacheCb), false
+}
 
-	if hostinfo, ok := c.vpnIps[vpnIp]; ok {
-		// We are already tracking this vpn ip
+// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
+func (hm *HandshakeManager) StartHandshake(vpnIp iputil.VpnIp, cacheCb func(*HostInfo)) *HostInfo {
+	hm.Lock()
+	defer hm.Unlock()
+
+	if hostinfo, ok := hm.vpnIps[vpnIp]; ok {
+		// We are already trying to handshake with this vpn ip
+		if cacheCb != nil {
+			cacheCb(hostinfo)
+		}
 		return hostinfo
 	}
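A hedged sketch of how a send path might consume the new API (the caller shown here is hypothetical; only `GetOrHandshake` and its callback contract come from this diff). The callback fires while the pending entry is held under the manager's lock, which is what makes it safe for the caller to attach state, e.g. cache the packet that provoked the handshake:

```go
// Hypothetical caller; not from this diff.
hostinfo, ready := hm.GetOrHandshake(vpnIp, func(h *HostInfo) {
	// Runs under the manager's lock whether the entry already existed or
	// was just created, so stashing state on h here is race-free.
	queueForLater(h) // hypothetical helper
})
if !ready {
	return // handshake in flight; queued traffic is sent on completion
}
sendNow(hostinfo) // hypothetical helper: tunnel is fully formed
```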
@@ -317,10 +338,30 @@ func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp) *HostInfo {
 		},
 	}
 
-	c.vpnIps[vpnIp] = hostinfo
-	c.metricInitiated.Inc(1)
-	c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
+	hm.vpnIps[vpnIp] = hostinfo
+	hm.metricInitiated.Inc(1)
+	hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)
+
+	if cacheCb != nil {
+		cacheCb(hostinfo)
+	}
+
+	// If this is a static host, we don't need to wait for the HostQueryReply
+	// We can trigger the handshake right now
+	_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
+	if !doTrigger {
+		// Add any calculated remotes, and trigger early handshake if one found
+		doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
+	}
+
+	if doTrigger {
+		select {
+		case hm.trigger <- vpnIp:
+		default:
+		}
+	}
 
+	hm.lightHouse.QueryServer(vpnIp, hm.f)
 	return hostinfo
 }
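The `select`/`default` around `hm.trigger <- vpnIp` is a non-blocking send: if the buffered trigger channel is full, the nudge is simply dropped, and the timer wheel entry added a few lines earlier guarantees the handshake still runs on schedule. A self-contained demonstration of that idiom:

```go
package main

import "fmt"

func main() {
	// trigger is buffered in the real code; size 1 here for the demo.
	trigger := make(chan string, 1)

	for _, ip := range []string{"10.0.0.5", "10.0.0.6"} {
		select {
		case trigger <- ip:
			fmt.Println("queued immediate handshake for", ip)
		default:
			// Buffer full: drop the nudge. The timer wheel entry added in
			// StartHandshake guarantees a retry, so nothing is lost.
			fmt.Println("trigger full, falling back to timer for", ip)
		}
	}
}
```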
@@ -342,10 +383,10 @@ var (
 // ErrLocalIndexCollision if we already have an entry in the main or pending
 // hostmap for the hostinfo.localIndexId.
 func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
-	c.Lock()
-	defer c.Unlock()
 	c.mainHostMap.Lock()
 	defer c.mainHostMap.Unlock()
+	c.Lock()
+	defer c.Unlock()
 
 	// Check if we already have a tunnel with this vpn ip
 	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
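Note the reordering here: the `mainHostMap` lock is now taken before the manager's own lock, matching `Complete` and `allocateIndex` below. Keeping a single global acquisition order is the standard defense against deadlock when two mutexes are involved; a self-contained illustration of the rule:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mainHostMapMu sync.RWMutex // always acquired first
	pendingMu     sync.Mutex   // always acquired second
)

// completeTunnel follows the ordering this diff standardizes on:
// mainHostMap lock, then the handshake manager's lock. If any code path
// took them in the opposite order, two goroutines could each hold one
// lock while waiting forever on the other.
func completeTunnel(ip string) {
	mainHostMapMu.Lock()
	defer mainHostMapMu.Unlock()
	pendingMu.Lock()
	defer pendingMu.Unlock()
	fmt.Println("moved", ip, "from pending to main")
}

func main() {
	var wg sync.WaitGroup
	for _, ip := range []string{"10.0.0.5", "10.0.0.6"} {
		wg.Add(1)
		go func(ip string) { defer wg.Done(); completeTunnel(ip) }(ip)
	}
	wg.Wait()
}
```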
@@ -396,47 +437,47 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 // Complete is a simpler version of CheckAndComplete when we already know we
 // won't have a localIndexId collision because we already have an entry in the
 // pendingHostMap. An existing hostinfo is returned if there was one.
-func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
-	c.Lock()
-	defer c.Unlock()
-	c.mainHostMap.Lock()
-	defer c.mainHostMap.Unlock()
+func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
+	hm.mainHostMap.Lock()
+	defer hm.mainHostMap.Unlock()
+	hm.Lock()
+	defer hm.Unlock()
 
-	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
+	existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
 	if found && existingRemoteIndex != nil {
 		// We have a collision, but this can happen since we can't control
 		// the remote ID. Just log about the situation as a note.
-		hostinfo.logger(c.l).
+		hostinfo.logger(hm.l).
 			WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
 			Info("New host shadows existing host remoteIndex")
 	}
 
 	// We need to remove from the pending hostmap first to avoid undoing work when adding to the main hostmap.
-	c.unlockedDeleteHostInfo(hostinfo)
-	c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
+	hm.unlockedDeleteHostInfo(hostinfo)
+	hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 }
 
-// AddIndexHostInfo generates a unique localIndexId for this HostInfo
+// allocateIndex generates a unique localIndexId for this HostInfo
 // and adds it to the pendingHostMap. Will error if we are unable to generate
 // a unique localIndexId
-func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
-	c.Lock()
-	defer c.Unlock()
-	c.mainHostMap.RLock()
-	defer c.mainHostMap.RUnlock()
+func (hm *HandshakeManager) allocateIndex(h *HostInfo) error {
+	hm.mainHostMap.RLock()
+	defer hm.mainHostMap.RUnlock()
+	hm.Lock()
+	defer hm.Unlock()
 
 	for i := 0; i < 32; i++ {
-		index, err := generateIndex(c.l)
+		index, err := generateIndex(hm.l)
 		if err != nil {
 			return err
 		}
 
-		_, inPending := c.indexes[index]
-		_, inMain := c.mainHostMap.Indexes[index]
+		_, inPending := hm.indexes[index]
+		_, inMain := hm.mainHostMap.Indexes[index]
 
 		if !inMain && !inPending {
 			h.localIndexId = index
-			c.indexes[index] = h
+			hm.indexes[index] = h
 			return nil
 		}
 	}
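The loop above is bounded rejection sampling: draw a random 32-bit index, keep it only if it collides with neither the pending nor the main map, and give up after 32 draws (with even a million live indexes, one draw collides with probability roughly 2⁻¹², so 32 attempts essentially never fail). A self-contained version of the same shape, assuming `generateIndex` reads from a CSPRNG:

```go
package main

import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
)

// allocate mirrors allocateIndex: draw a random uint32 and accept it only
// if it is unused in both maps, giving up after 32 attempts.
func allocate(pending, established map[uint32]bool) (uint32, error) {
	var b [4]byte
	for i := 0; i < 32; i++ {
		if _, err := rand.Read(b[:]); err != nil {
			return 0, err
		}
		index := binary.BigEndian.Uint32(b[:])
		if !pending[index] && !established[index] {
			return index, nil
		}
	}
	return 0, errors.New("unable to generate a unique index")
}

func main() {
	pending := map[uint32]bool{}
	established := map[uint32]bool{42: true}
	fmt.Println(allocate(pending, established))
}
```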