package nebula

import (
	"context"
	"net"
	"os"
	"os/signal"
	"syscall"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
)

// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc.

type Control struct {
	f         *Interface
	l         *logrus.Logger
	cancel    context.CancelFunc
	sshStart  func()
	httpStart func()
	dnsStart  func()
}

type ControlHostInfo struct {
	VpnIp                  net.IP                  `json:"vpnIp"`
	LocalIndex             uint32                  `json:"localIndex"`
	RemoteIndex            uint32                  `json:"remoteIndex"`
	RemoteAddrs            []*udp.Addr             `json:"remoteAddrs"`
	CachedPackets          int                     `json:"cachedPackets"`
	Cert                   *cert.NebulaCertificate `json:"cert"`
	MessageCounter         uint64                  `json:"messageCounter"`
	CurrentRemote          *udp.Addr               `json:"currentRemote"`
	CurrentRelaysToMe      []iputil.VpnIp          `json:"currentRelaysToMe"`
	CurrentRelaysThroughMe []iputil.VpnIp          `json:"currentRelaysThroughMe"`
}

// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
func (c *Control) Start() {
	// Activate the interface
	c.f.activate()

	// Call all the delayed funcs that waited patiently for the interface to be created.
	if c.sshStart != nil {
		go c.sshStart()
	}
	if c.httpStart != nil {
		go c.httpStart()
	}
	if c.dnsStart != nil {
		go c.dnsStart()
	}

	// Start reading packets.
	c.f.run()
}

// Stop signals nebula to shut down and returns after the shutdown is complete
func (c *Control) Stop() {
	// Stop the handshakeManager (and other services), to prevent new tunnels from
	// being created while we're shutting them all down.
	c.cancel()

	c.CloseAllTunnels(false)
	if err := c.f.Close(); err != nil {
		c.l.WithError(err).Error("Close interface failed")
	}
	c.l.Info("Goodbye")
}

// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
func (c *Control) ShutdownBlock() {
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM)
	signal.Notify(sigChan, syscall.SIGINT)

	rawSig := <-sigChan
	sig := rawSig.String()

	c.l.WithField("signal", sig).Info("Caught signal, shutting down")
	c.Stop()
}
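
// exampleLifecycle is an illustrative sketch only (hypothetical, not part of
// the upstream API). It shows the lifecycle documented above: Start is a
// nonblocking call, and ShutdownBlock waits for SIGTERM/SIGINT before calling
// Stop to tear everything down.
func exampleLifecycle(ctrl *Control) {
	ctrl.Start()         // activate the interface and begin reading packets
	ctrl.ShutdownBlock() // block until a signal arrives, then shut down via Stop()
}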

// RebindUDPServer asks the UDP listener to rebind its listener.
// Mainly used on mobile clients when interfaces change.
func (c *Control) RebindUDPServer() {
	_ = c.f.outside.Rebind()

	// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
	c.f.lightHouse.SendUpdate(c.f)

	// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
	c.f.rebindCount++
}

// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap, indexed by vpn ip
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
	if pendingMap {
		return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
	} else {
		return listHostMapHosts(c.f.hostMap)
	}
}

// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap, indexed by local index id
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
	if pendingMap {
		return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
	} else {
		return listHostMapIndexes(c.f.hostMap)
	}
}

// GetHostInfoByVpnIp returns a single tunnel's hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
	var hm *HostMap
	if pending {
		hm = c.f.handshakeManager.pendingHostMap
	} else {
		hm = c.f.hostMap
	}

	h, err := hm.QueryVpnIp(vpnIp)
	if err != nil {
		return nil
	}

	ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
	return &ch
}

// SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
	hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
	if err != nil {
		return nil
	}

	hostInfo.SetRemote(addr.Copy())
	ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
	return &ch
}

// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
	hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
	if err != nil {
		return false
	}

	if !localOnly {
		c.f.send(
			header.CloseTunnel,
			0,
			hostInfo.ConnectionState,
			hostInfo,
			[]byte{},
			make([]byte, 12, 12),
			make([]byte, mtu),
		)
	}

	c.f.closeTunnel(hostInfo)
	return true
}
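
// exampleCloseByVpnIp is an illustrative sketch only (hypothetical helper, not
// part of the upstream API). It pairs GetHostInfoByVpnIp with CloseTunnel to
// close an established tunnel and, because localOnly is false, also notify the
// remote end.
func exampleCloseByVpnIp(ctrl *Control, vpnIp iputil.VpnIp) bool {
	if ctrl.GetHostInfoByVpnIp(vpnIp, false) == nil {
		// No established tunnel for this vpn ip
		return false
	}
	return ctrl.CloseTunnel(vpnIp, false)
}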

// CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down.
// Optionally, lighthouse tunnels can be left open by passing excludeLighthouses = true.
// The int returned is a count of tunnels closed.
func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
	//TODO: this is probably better as a function in ConnectionManager or HostMap directly
	lighthouses := c.f.lightHouse.GetLighthouses()

	shutdown := func(h *HostInfo) {
		if excludeLighthouses {
			if _, ok := lighthouses[h.vpnIp]; ok {
				return
			}
		}
		c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
		c.f.closeTunnel(h)

		c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
			Debug("Sending close tunnel message")
		closed++
	}

	// Learn which hosts are being used as relays, so we can shut them down last.
	relayingHosts := map[iputil.VpnIp]*HostInfo{}
	// Grab the hostMap lock to access the Relays map
	c.f.hostMap.Lock()
	for _, relayingHost := range c.f.hostMap.Relays {
		relayingHosts[relayingHost.vpnIp] = relayingHost
	}
	c.f.hostMap.Unlock()

	hostInfos := []*HostInfo{}
	// Grab the hostMap lock to access the Hosts map
	c.f.hostMap.Lock()
	for _, relayHost := range c.f.hostMap.Indexes {
		if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
			hostInfos = append(hostInfos, relayHost)
		}
	}
	c.f.hostMap.Unlock()

	for _, h := range hostInfos {
		shutdown(h)
	}
	for _, h := range relayingHosts {
		shutdown(h)
	}
	return
}

func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
	chi := ControlHostInfo{
		VpnIp:                  h.vpnIp.ToIP(),
		LocalIndex:             h.localIndexId,
		RemoteIndex:            h.remoteIndexId,
		RemoteAddrs:            h.remotes.CopyAddrs(preferredRanges),
		CachedPackets:          len(h.packetStore),
		CurrentRelaysToMe:      h.relayState.CopyRelayIps(),
		CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
	}

	if h.ConnectionState != nil {
		chi.MessageCounter = h.ConnectionState.messageCounter.Load()
	}

	if c := h.GetCert(); c != nil {
		chi.Cert = c.Copy()
	}

	if h.remote != nil {
		chi.CurrentRemote = h.remote.Copy()
	}

	return chi
}

func listHostMapHosts(hm *HostMap) []ControlHostInfo {
	hm.RLock()
	hosts := make([]ControlHostInfo, len(hm.Hosts))
	i := 0
	for _, v := range hm.Hosts {
		hosts[i] = copyHostInfo(v, hm.preferredRanges)
		i++
	}
	hm.RUnlock()
	return hosts
}

func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
	hm.RLock()
	hosts := make([]ControlHostInfo, len(hm.Indexes))
	i := 0
	for _, v := range hm.Indexes {
		hosts[i] = copyHostInfo(v, hm.preferredRanges)
		i++
	}
	hm.RUnlock()
	return hosts
}
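
// exampleDumpHostmap is an illustrative sketch only (hypothetical helper, not
// part of the upstream API). It walks the copies returned by ListHostmapHosts
// for the established hostmap and logs one line per tunnel.
func exampleDumpHostmap(ctrl *Control) {
	for _, h := range ctrl.ListHostmapHosts(false) {
		ctrl.l.WithField("vpnIp", h.VpnIp).
			WithField("currentRemote", h.CurrentRemote).
			WithField("cachedPackets", h.CachedPackets).
			Info("tunnel")
	}
}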