connection_manager.go
package nebula

import (
	"bytes"
	"context"
	"encoding/binary"
	"net/netip"
	"sync"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/header"
)
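// trafficDecision is the action doTrafficCheck takes for a tunnel after
// evaluating the traffic seen (or not seen) during the last check interval.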
type trafficDecision int

const (
	doNothing      trafficDecision = 0
	deleteTunnel   trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
	closeTunnel    trafficDecision = 2 // delete the hostinfo and notify the remote
	swapPrimary    trafficDecision = 3
	migrateRelays  trafficDecision = 4
	tryRehandshake trafficDecision = 5
	sendTestPacket trafficDecision = 6
)
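// connectionManager watches established tunnels for inbound and outbound
// traffic, probes quiet tunnels with test packets, tears down dead ones, and
// keeps NAT mappings and relays alive. It is driven by a timer wheel that the
// Run loop advances every 500ms.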
type connectionManager struct {
	in     map[uint32]struct{}
	inLock *sync.RWMutex

	out     map[uint32]struct{}
	outLock *sync.RWMutex

	// relayUsed holds which relay localIndexes are in use
	relayUsed     map[uint32]struct{}
	relayUsedLock *sync.RWMutex

	hostMap                 *HostMap
	trafficTimer            *LockingTimerWheel[uint32]
	intf                    *Interface
	pendingDeletion         map[uint32]struct{}
	punchy                  *Punchy
	checkInterval           time.Duration
	pendingDeletionInterval time.Duration

	metricsTxPunchy metrics.Counter

	l *logrus.Logger
}
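// newConnectionManager builds a connectionManager for intf and immediately
// starts its background Run loop on ctx. checkInterval controls how long a
// tunnel may sit idle before it is probed; pendingDeletionInterval is how long
// we wait for a reply to that probe before declaring the tunnel dead. The
// timer wheel is sized to the larger of the two.
//
// Illustrative call; the argument values here are examples, not defaults:
//
//	cm := newConnectionManager(ctx, l, intf, 10*time.Second, 10*time.Second, punchy)
//	cm.AddTrafficWatch(hostinfo.localIndexId)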
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
	var max time.Duration
	if checkInterval < pendingDeletionInterval {
		max = pendingDeletionInterval
	} else {
		max = checkInterval
	}

	nc := &connectionManager{
		hostMap:                 intf.hostMap,
		in:                      make(map[uint32]struct{}),
		inLock:                  &sync.RWMutex{},
		out:                     make(map[uint32]struct{}),
		outLock:                 &sync.RWMutex{},
		relayUsed:               make(map[uint32]struct{}),
		relayUsedLock:           &sync.RWMutex{},
		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
		intf:                    intf,
		pendingDeletion:         make(map[uint32]struct{}),
		checkInterval:           checkInterval,
		pendingDeletionInterval: pendingDeletionInterval,
		punchy:                  punchy,
		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
		l:                       l,
	}

	nc.Start(ctx)
	return nc
}
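// In records that inbound traffic was seen for this local index since the
// last traffic check. It takes a read lock first so the common case (already
// recorded) does not contend on the write lock.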
func (n *connectionManager) In(localIndex uint32) {
	n.inLock.RLock()
	// If this already exists, return
	if _, ok := n.in[localIndex]; ok {
		n.inLock.RUnlock()
		return
	}
	n.inLock.RUnlock()

	n.inLock.Lock()
	n.in[localIndex] = struct{}{}
	n.inLock.Unlock()
}
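// Out records that outbound traffic was sent for this local index since the
// last traffic check, using the same read-then-write locking pattern as In.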
func (n *connectionManager) Out(localIndex uint32) {
	n.outLock.RLock()
	// If this already exists, return
	if _, ok := n.out[localIndex]; ok {
		n.outLock.RUnlock()
		return
	}
	n.outLock.RUnlock()

	n.outLock.Lock()
	n.out[localIndex] = struct{}{}
	n.outLock.Unlock()
}
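// RelayUsed records that a relay with this local index carried traffic, so
// its state will be migrated if the tunnel is ever moved to a new primary.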
func (n *connectionManager) RelayUsed(localIndex uint32) {
	n.relayUsedLock.RLock()
	// If this already exists, return
	if _, ok := n.relayUsed[localIndex]; ok {
		n.relayUsedLock.RUnlock()
		return
	}
	n.relayUsedLock.RUnlock()

	n.relayUsedLock.Lock()
	n.relayUsed[localIndex] = struct{}{}
	n.relayUsedLock.Unlock()
}
// getAndResetTrafficCheck reports whether there was any inbound or outbound
// traffic for this local index within the last tick and resets the state.
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
	n.inLock.Lock()
	n.outLock.Lock()
	_, in := n.in[localIndex]
	_, out := n.out[localIndex]
	delete(n.in, localIndex)
	delete(n.out, localIndex)
	n.inLock.Unlock()
	n.outLock.Unlock()
	return in, out
}
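// AddTrafficWatch marks a new tunnel as having pending outbound traffic and
// schedules its first traffic check on the timer wheel.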
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
	n.outLock.Lock()
	if _, ok := n.out[localIndex]; ok {
		n.outLock.Unlock()
		return
	}
	n.out[localIndex] = struct{}{}
	n.trafficTimer.Add(localIndex, n.checkInterval)
	n.outLock.Unlock()
}
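// Start launches the Run loop in its own goroutine and returns immediately.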
func (n *connectionManager) Start(ctx context.Context) {
	go n.Run(ctx)
}
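// Run advances the traffic timer wheel every 500ms and performs a traffic
// check for every local index whose timer has expired, exiting when ctx is
// cancelled. The p, nb and out buffers are reused across checks to avoid
// per-tick allocations.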
func (n *connectionManager) Run(ctx context.Context) {
	//TODO: this tick should be based on the min wheel tick? Check firewall
	clockSource := time.NewTicker(500 * time.Millisecond)
	defer clockSource.Stop()

	p := []byte("")
	nb := make([]byte, 12, 12)
	out := make([]byte, mtu)

	for {
		select {
		case <-ctx.Done():
			return

		case now := <-clockSource.C:
			n.trafficTimer.Advance(now)
			for {
				localIndex, has := n.trafficTimer.Purge()
				if !has {
					break
				}

				n.doTrafficCheck(localIndex, p, nb, out, now)
			}
		}
	}
}
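// doTrafficCheck asks makeTrafficDecision what to do with this tunnel and
// then acts on the answer: delete or close dead tunnels, promote a newer
// tunnel to primary, migrate relays, kick off a rehandshake, or send a test
// packet to probe liveness. Relay usage tracking for the tunnel is reset
// afterwards.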
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)

	switch decision {
	case deleteTunnel:
		if n.hostMap.DeleteHostInfo(hostinfo) {
			// Only clear the lighthouse cache if this is the last hostinfo for this vpn addr in the hostmap
			n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
		}

	case closeTunnel:
		n.intf.sendCloseTunnel(hostinfo)
		n.intf.closeTunnel(hostinfo)

	case swapPrimary:
		n.swapPrimary(hostinfo, primary)

	case migrateRelays:
		n.migrateRelayUsed(hostinfo, primary)

	case tryRehandshake:
		n.tryRehandshake(hostinfo)

	case sendTestPacket:
		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
	}

	n.resetRelayTrafficCheck(hostinfo)
}
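// resetRelayTrafficCheck clears the "relay used" markers for every relay this
// hostinfo is forwarding for, so the next interval starts with a clean slate.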
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
	if hostinfo != nil {
		n.relayUsedLock.Lock()
		defer n.relayUsedLock.Unlock()
		// No need to migrate any relays, delete usage info now.
		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
			delete(n.relayUsed, idx)
		}
	}
}
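// migrateRelayUsed re-establishes, on newhostinfo, every relay that
// oldhostinfo was providing and that actually carried traffic. Relays already
// Established on the new tunnel are left alone, Requested ones get their
// request re-sent, and unused relays are dropped. A CreateRelayRequest control
// message is sent to the peer for each migrated relay, encoded for the peer's
// certificate version.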
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
	relayFor := oldhostinfo.relayState.CopyAllRelayFor()

	for _, r := range relayFor {
		existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerAddr)

		var index uint32
		var relayFrom netip.Addr
		var relayTo netip.Addr
		switch {
		case ok && existing.State == Established:
			// This relay already exists in newhostinfo; do nothing.
			continue
		case ok && existing.State == Requested:
			// The relay exists in a Requested state; re-send the request
			index = existing.LocalIndex
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnAddrs[0]
				relayTo = existing.PeerAddr
			case ForwardingType:
				relayFrom = existing.PeerAddr
				relayTo = newhostinfo.vpnAddrs[0]
			default:
				// should never happen
			}
		case !ok:
			n.relayUsedLock.RLock()
			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
				// The relay hasn't been used; don't migrate it.
				n.relayUsedLock.RUnlock()
				continue
			}
			n.relayUsedLock.RUnlock()
			// The relay doesn't exist at all; create some relay state and send the request.
			var err error
			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerAddr, nil, r.Type, Requested)
			if err != nil {
				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
				continue
			}
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnAddrs[0]
				relayTo = r.PeerAddr
			case ForwardingType:
				relayFrom = r.PeerAddr
				relayTo = newhostinfo.vpnAddrs[0]
			default:
				// should never happen
			}
		}

		// Send a CreateRelayRequest to the peer.
		req := NebulaControl{
			Type:                NebulaControl_CreateRelayRequest,
			InitiatorRelayIndex: index,
		}

		switch newhostinfo.GetCert().Certificate.Version() {
		case cert.Version1:
			if !relayFrom.Is4() {
				n.l.Error("cannot migrate v1 relay with a v6 network because the relay is not running a current nebula version")
				continue
			}

			if !relayTo.Is4() {
				n.l.Error("cannot migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
				continue
			}

			b := relayFrom.As4()
			req.OldRelayFromAddr = binary.BigEndian.Uint32(b[:])
			b = relayTo.As4()
			req.OldRelayToAddr = binary.BigEndian.Uint32(b[:])
		case cert.Version2:
			req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
			req.RelayToAddr = netAddrToProtoAddr(relayTo)
		default:
			newhostinfo.logger(n.l).Error("Unknown certificate version found while attempting to migrate relay")
			continue
		}

		msg, err := req.Marshal()
		if err != nil {
			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
		} else {
			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
			n.l.WithFields(logrus.Fields{
				"relayFrom":           req.RelayFromAddr,
				"relayTo":             req.RelayToAddr,
				"initiatorRelayIndex": req.InitiatorRelayIndex,
				"responderRelayIndex": req.ResponderRelayIndex,
				"vpnAddrs":            newhostinfo.vpnAddrs}).
				Info("send CreateRelayRequest")
		}
	}
}
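// makeTrafficDecision inspects the traffic seen for localIndex since its last
// check and decides what doTrafficCheck should do. Tunnels with inbound
// traffic are considered alive and are rescheduled for another check; the
// primary tunnel may be asked to rehandshake, while a non-primary one may be
// swapped in or have its relays migrated. A tunnel with outbound but no
// inbound traffic gets a test packet and lands in pendingDeletion; if it is
// still silent when the pendingDeletionInterval timer fires, it is deleted.
// Tunnels whose certificate is no longer valid are closed immediately.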
func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
	n.hostMap.RLock()
	defer n.hostMap.RUnlock()

	hostinfo := n.hostMap.Indexes[localIndex]
	if hostinfo == nil {
		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
		delete(n.pendingDeletion, localIndex)
		return doNothing, nil, nil
	}

	if n.isInvalidCertificate(now, hostinfo) {
		delete(n.pendingDeletion, hostinfo.localIndexId)
		return closeTunnel, hostinfo, nil
	}

	primary := n.hostMap.Hosts[hostinfo.vpnAddrs[0]]
	mainHostInfo := true
	if primary != nil && primary != hostinfo {
		mainHostInfo = false
	}

	// Check for traffic on this hostinfo
	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)

	// A hostinfo is determined alive if there is incoming traffic
	if inTraffic {
		decision := doNothing
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
				Debug("Tunnel status")
		}
		delete(n.pendingDeletion, hostinfo.localIndexId)

		if mainHostInfo {
			decision = tryRehandshake

		} else {
			if n.shouldSwapPrimary(hostinfo, primary) {
				decision = swapPrimary

			} else {
				// migrate the relays to the primary, if in use.
				decision = migrateRelays
			}
		}

		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)

		if !outTraffic {
			// Send a punch packet to keep the NAT state alive
			n.sendPunch(hostinfo)
		}

		return decision, hostinfo, primary
	}

	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
		// We have already sent a test packet and nothing was returned, this hostinfo is dead
		hostinfo.logger(n.l).
			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
			Info("Tunnel status")

		delete(n.pendingDeletion, hostinfo.localIndexId)
		return deleteTunnel, hostinfo, nil
	}

	decision := doNothing
	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
		if !outTraffic {
			// If we aren't sending or receiving traffic then it's an unused tunnel and we don't need to test the tunnel.
			// Just maintain NAT state if configured to do so.
			n.sendPunch(hostinfo)
			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
			return doNothing, nil, nil
		}

		if n.punchy.GetTargetEverything() {
			// This is similar to the old punchy behavior with a slight optimization.
			// We aren't receiving traffic but we are sending it, punch on all known
			// ips in case we need to re-prime NAT state
			n.sendPunch(hostinfo)
		}

		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
				Debug("Tunnel status")
		}

		// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
		decision = sendTestPacket

	} else {
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
		}
	}

	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
	return decision, hostinfo, nil
}
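// shouldSwapPrimary reports whether a non-primary tunnel that is seeing
// traffic should be promoted to primary. Only the side whose vpn addr sorts
// lower than its peer's will swap, so both sides cannot flap, and only if the
// tunnel was built with our latest certificate.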
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
	// Let's sort this out.

	// Only one side should swap because if both swap then we may never resolve to a single tunnel.
	// vpn addr is static across all tunnels for this host pair so let's
	// use that to determine if we should consider swapping.
	if current.vpnAddrs[0].Compare(n.intf.myVpnAddrs[0]) < 0 {
		// Their primary vpn addr is less than mine. Do not swap.
		return false
	}

	crt := n.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
	// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
	// settle down.
	return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
}
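// swapPrimary promotes current to be the primary hostinfo for its vpn addr,
// re-checking under the write lock that the primary has not changed since the
// decision was made.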
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
	n.hostMap.Lock()
	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
	if n.hostMap.Hosts[current.vpnAddrs[0]] == primary {
		n.hostMap.unlockedMakePrimary(current)
	}
	n.hostMap.Unlock()
}
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true.
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
	remoteCert := hostinfo.GetCert()
	if remoteCert == nil {
		return false
	}

	caPool := n.intf.pki.GetCAPool()
	err := caPool.VerifyCachedCertificate(now, remoteCert)
	if err == nil {
		return false
	}

	if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
		// Block listed certificates should always be disconnected
		return false
	}

	hostinfo.logger(n.l).WithError(err).
		WithField("fingerprint", remoteCert.Fingerprint).
		Info("Remote certificate is no longer valid, tearing down the tunnel")

	return true
}
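// sendPunch writes a single-byte packet to the remote's underlay address, or
// to every known address when punchy.GetTargetEverything() is set, purely to
// keep NAT mappings alive. It does nothing when punching is disabled.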
func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
	if !n.punchy.GetPunch() {
		// Punching is disabled
		return
	}

	if n.punchy.GetTargetEverything() {
		hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
			n.metricsTxPunchy.Inc(1)
			n.intf.outside.WriteTo([]byte{1}, addr)
		})

	} else if hostinfo.remote.IsValid() {
		n.metricsTxPunchy.Inc(1)
		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
	}
}
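// tryRehandshake starts a new handshake with the remote when the tunnel's
// local certificate or certificate version is no longer the one we would use
// for a fresh handshake; otherwise it leaves the tunnel alone.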
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
	cs := n.intf.pki.getCertState()
	curCrt := hostinfo.ConnectionState.myCert
	myCrt := cs.getCertificate(curCrt.Version())
	if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) {
		// The current tunnel is using the latest certificate and version, no need to rehandshake.
		return
	}

	n.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
		WithField("reason", "local certificate is not current").
		Info("Re-handshaking with remote")

	n.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
}