connection_manager.go

package nebula

import (
	"bytes"
	"context"
	"sync"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
)
type trafficDecision int

const (
	doNothing      trafficDecision = 0
	deleteTunnel   trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
	closeTunnel    trafficDecision = 2 // delete the hostinfo and notify the remote
	swapPrimary    trafficDecision = 3
	migrateRelays  trafficDecision = 4
	tryRehandshake trafficDecision = 5
)
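// connectionManager periodically checks each tunnel for inbound and outbound traffic
// and decides whether to test it, re-handshake, swap the primary, migrate relays, or tear it down.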
type connectionManager struct {
	in      map[uint32]struct{}
	inLock  *sync.RWMutex
	out     map[uint32]struct{}
	outLock *sync.RWMutex

	// relayUsed holds which relay local indexes are in use
	relayUsed     map[uint32]struct{}
	relayUsedLock *sync.RWMutex

	hostMap                 *HostMap
	trafficTimer            *LockingTimerWheel[uint32]
	intf                    *Interface
	pendingDeletion         map[uint32]struct{}
	punchy                  *Punchy
	checkInterval           time.Duration
	pendingDeletionInterval time.Duration

	metricsTxPunchy metrics.Counter

	l *logrus.Logger
}
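// newConnectionManager builds a connectionManager for the given interface and starts its run loop.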
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
	var max time.Duration
	if checkInterval < pendingDeletionInterval {
		max = pendingDeletionInterval
	} else {
		max = checkInterval
	}

	nc := &connectionManager{
		hostMap:                 intf.hostMap,
		in:                      make(map[uint32]struct{}),
		inLock:                  &sync.RWMutex{},
		out:                     make(map[uint32]struct{}),
		outLock:                 &sync.RWMutex{},
		relayUsed:               make(map[uint32]struct{}),
		relayUsedLock:           &sync.RWMutex{},
		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
		intf:                    intf,
		pendingDeletion:         make(map[uint32]struct{}),
		checkInterval:           checkInterval,
		pendingDeletionInterval: pendingDeletionInterval,
		punchy:                  punchy,
		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
		l:                       l,
	}

	nc.Start(ctx)
	return nc
}
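// In records that inbound traffic was seen on the tunnel with this local index.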
func (n *connectionManager) In(localIndex uint32) {
	n.inLock.RLock()
	// If this already exists, return
	if _, ok := n.in[localIndex]; ok {
		n.inLock.RUnlock()
		return
	}
	n.inLock.RUnlock()

	n.inLock.Lock()
	n.in[localIndex] = struct{}{}
	n.inLock.Unlock()
}
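// Out records that outbound traffic was seen on the tunnel with this local index.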
func (n *connectionManager) Out(localIndex uint32) {
	n.outLock.RLock()
	// If this already exists, return
	if _, ok := n.out[localIndex]; ok {
		n.outLock.RUnlock()
		return
	}
	n.outLock.RUnlock()

	n.outLock.Lock()
	n.out[localIndex] = struct{}{}
	n.outLock.Unlock()
}
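// RelayUsed records that the relay with this local index is in use.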
func (n *connectionManager) RelayUsed(localIndex uint32) {
	n.relayUsedLock.RLock()
	// If this already exists, return
	if _, ok := n.relayUsed[localIndex]; ok {
		n.relayUsedLock.RUnlock()
		return
	}
	n.relayUsedLock.RUnlock()

	n.relayUsedLock.Lock()
	n.relayUsed[localIndex] = struct{}{}
	n.relayUsedLock.Unlock()
}
// getAndResetTrafficCheck returns whether there was any inbound or outbound traffic within the last tick and
// resets the state for this local index
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
	n.inLock.Lock()
	n.outLock.Lock()
	_, in := n.in[localIndex]
	_, out := n.out[localIndex]
	delete(n.in, localIndex)
	delete(n.out, localIndex)
	n.inLock.Unlock()
	n.outLock.Unlock()
	return in, out
}
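// AddTrafficWatch marks outbound traffic for this local index and schedules its first traffic check on the timer wheel.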
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
	n.outLock.Lock()
	if _, ok := n.out[localIndex]; ok {
		n.outLock.Unlock()
		return
	}

	n.out[localIndex] = struct{}{}
	n.trafficTimer.Add(localIndex, n.checkInterval)
	n.outLock.Unlock()
}
func (n *connectionManager) Start(ctx context.Context) {
	go n.Run(ctx)
}
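// Run drives the traffic timer wheel, performing a traffic check for each local index whose timer has expired.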
func (n *connectionManager) Run(ctx context.Context) {
	//TODO: this tick should be based on the min wheel tick? Check firewall
	clockSource := time.NewTicker(500 * time.Millisecond)
	defer clockSource.Stop()

	p := []byte("")
	nb := make([]byte, 12, 12)
	out := make([]byte, mtu)

	for {
		select {
		case <-ctx.Done():
			return

		case now := <-clockSource.C:
			n.trafficTimer.Advance(now)
			for {
				localIndex, has := n.trafficTimer.Purge()
				if !has {
					break
				}

				n.doTrafficCheck(localIndex, p, nb, out, now)
			}
		}
	}
}
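// doTrafficCheck evaluates a single tunnel and applies whatever decision makeTrafficDecision returns for it.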
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)

	switch decision {
	case deleteTunnel:
		if n.hostMap.DeleteHostInfo(hostinfo) {
			// Only clear the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
			n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
		}

	case closeTunnel:
		n.intf.sendCloseTunnel(hostinfo)
		n.intf.closeTunnel(hostinfo)

	case swapPrimary:
		n.swapPrimary(hostinfo, primary)

	case migrateRelays:
		n.migrateRelayUsed(hostinfo, primary)

	case tryRehandshake:
		n.tryRehandshake(hostinfo)
	}

	n.resetRelayTrafficCheck(hostinfo)
}
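// resetRelayTrafficCheck clears relay usage tracking for the relays associated with this hostinfo.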
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
	if hostinfo != nil {
		n.relayUsedLock.Lock()
		defer n.relayUsedLock.Unlock()
		// No need to migrate any relays, delete usage info now.
		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
			delete(n.relayUsed, idx)
		}
	}
}
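// migrateRelayUsed re-establishes any in-use relays from the old hostinfo on the new primary hostinfo
// by creating relay state where needed and sending CreateRelayRequest messages to the peer.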
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
	relayFor := oldhostinfo.relayState.CopyAllRelayFor()

	for _, r := range relayFor {
		existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)

		var index uint32
		var relayFrom iputil.VpnIp
		var relayTo iputil.VpnIp
		switch {
		case ok && existing.State == Established:
			// This relay already exists in newhostinfo; do nothing.
			continue
		case ok && existing.State == Requested:
			// The relay exists in a Requested state; re-send the request
			index = existing.LocalIndex
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnIp
				relayTo = existing.PeerIp
			case ForwardingType:
				relayFrom = existing.PeerIp
				relayTo = newhostinfo.vpnIp
			default:
				// should never happen
			}
		case !ok:
			n.relayUsedLock.RLock()
			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
				// The relay hasn't been used; don't migrate it.
				n.relayUsedLock.RUnlock()
				continue
			}
			n.relayUsedLock.RUnlock()
			// The relay doesn't exist at all; create some relay state and send the request.
			var err error
			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
			if err != nil {
				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
				continue
			}
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnIp
				relayTo = r.PeerIp
			case ForwardingType:
				relayFrom = r.PeerIp
				relayTo = newhostinfo.vpnIp
			default:
				// should never happen
			}
		}

		// Send a CreateRelayRequest to the peer.
		req := NebulaControl{
			Type:                NebulaControl_CreateRelayRequest,
			InitiatorRelayIndex: index,
			RelayFromIp:         uint32(relayFrom),
			RelayToIp:           uint32(relayTo),
		}
		msg, err := req.Marshal()
		if err != nil {
			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
		} else {
			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
			n.l.WithFields(logrus.Fields{
				"relayFrom":           iputil.VpnIp(req.RelayFromIp),
				"relayTo":             iputil.VpnIp(req.RelayToIp),
				"initiatorRelayIndex": req.InitiatorRelayIndex,
				"responderRelayIndex": req.ResponderRelayIndex,
				"vpnIp":               newhostinfo.vpnIp}).
				Info("send CreateRelayRequest")
		}
	}
}
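// makeTrafficDecision inspects certificate validity and recent traffic for the tunnel with this local index
// and returns the action to take, along with the hostinfo and primary hostinfo it applies to.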
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
	n.hostMap.RLock()
	defer n.hostMap.RUnlock()

	hostinfo := n.hostMap.Indexes[localIndex]
	if hostinfo == nil {
		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
		delete(n.pendingDeletion, localIndex)
		return doNothing, nil, nil
	}

	if n.isInvalidCertificate(now, hostinfo) {
		delete(n.pendingDeletion, hostinfo.localIndexId)
		return closeTunnel, hostinfo, nil
	}

	primary := n.hostMap.Hosts[hostinfo.vpnIp]
	mainHostInfo := true
	if primary != nil && primary != hostinfo {
		mainHostInfo = false
	}

	// Check for traffic on this hostinfo
	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)

	// A hostinfo is determined alive if there is incoming traffic
	if inTraffic {
		decision := doNothing
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
				Debug("Tunnel status")
		}
		delete(n.pendingDeletion, hostinfo.localIndexId)

		if mainHostInfo {
			decision = tryRehandshake
		} else {
			if n.shouldSwapPrimary(hostinfo, primary) {
				decision = swapPrimary
			} else {
				// migrate the relays to the primary, if in use.
				decision = migrateRelays
			}
		}

		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)

		if !outTraffic {
			// Send a punch packet to keep the NAT state alive
			n.sendPunch(hostinfo)
		}

		return decision, hostinfo, primary
	}

	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
		// We have already sent a test packet and nothing was returned, this hostinfo is dead
		hostinfo.logger(n.l).
			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
			Info("Tunnel status")

		delete(n.pendingDeletion, hostinfo.localIndexId)
		return deleteTunnel, hostinfo, nil
	}

	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
		if !outTraffic {
			// If we aren't sending or receiving traffic then it's an unused tunnel and we don't need to test the tunnel.
			// Just maintain NAT state if configured to do so.
			n.sendPunch(hostinfo)
			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
			return doNothing, nil, nil
		}

		if n.punchy.GetTargetEverything() {
			// This is similar to the old punchy behavior with a slight optimization.
			// We aren't receiving traffic but we are sending it, punch on all known
			// ips in case we need to re-prime NAT state
			n.sendPunch(hostinfo)
		}

		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
				Debug("Tunnel status")
		}

		// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
	} else {
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
		}
	}

	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
	return doNothing, nil, nil
}
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
	// Let's sort this out.
	if current.vpnIp < n.intf.myVpnIp {
		// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
		// vpn ip is static across all tunnels for this host pair so let's use that to determine who is flipping.
		// The remote's vpn ip is lower than mine. I will not flip.
		return false
	}

	certState := n.intf.pki.GetCertState()
	return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
}
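// swapPrimary promotes current to be the primary hostinfo for its vpn ip.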
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
	n.hostMap.Lock()
	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
	if n.hostMap.Hosts[current.vpnIp] == primary {
		n.hostMap.unlockedMakePrimary(current)
	}
	n.hostMap.Unlock()
}
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true.
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
	remoteCert := hostinfo.GetCert()
	if remoteCert == nil {
		return false
	}

	valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
	if valid {
		return false
	}

	if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
		// Block listed certificates should always be disconnected
		return false
	}

	fingerprint, _ := remoteCert.Sha256Sum()
	hostinfo.logger(n.l).WithError(err).
		WithField("fingerprint", fingerprint).
		Info("Remote certificate is no longer valid, tearing down the tunnel")

	return true
}
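// sendPunch emits a small punch packet to maintain NAT mappings, either to the current remote
// or to all known addresses when configured to target everything.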
func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
	if !n.punchy.GetPunch() {
		// Punching is disabled
		return
	}

	if n.punchy.GetTargetEverything() {
		hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
			n.metricsTxPunchy.Inc(1)
			n.intf.outside.WriteTo([]byte{1}, addr)
		})
	} else if hostinfo.remote != nil {
		n.metricsTxPunchy.Inc(1)
		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
	}
}
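// tryRehandshake starts a new handshake with the remote if the local certificate has changed
// since this tunnel was established.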
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
	certState := n.intf.pki.GetCertState()
	if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
		return
	}

	n.l.WithField("vpnIp", hostinfo.vpnIp).
		WithField("reason", "local certificate is not current").
		Info("Re-handshaking with remote")

	n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}