package migrate

import (
	"context"
	"errors"
	"fmt"
	"github.com/gravitl/netmaker/database"
	"github.com/gravitl/netmaker/db"
	"github.com/gravitl/netmaker/schema"
	"github.com/gravitl/netmaker/servercfg"
	"gorm.io/gorm"
	"os"
	"path/filepath"
)

// ToSQLSchema migrates the data from the key-value
// db to the sql db.
//
// This function archives the old data and does not
// delete it.
//
// Depending on the db server, the archival is done in
// the following way:
//
// 1. SQLite: Moves the old data to a
// netmaker_archive.db file.
//
// 2. Postgres: Moves the data to a netmaker_archive
// schema within the same database.
func ToSQLSchema() error {
	// initialize sql schema db.
	err := db.InitializeDB(schema.ListModels()...)
	if err != nil {
		return err
	}

	// migrate, if not done already.
	err = migrate()
	if err != nil {
		return err
	}

	// archive key-value schema db, if not done already.
	// ignore errors.
	_ = archive()

	return nil
}

func migrate() error {
	// begin a new transaction.
	dbctx := db.BeginTx(context.TODO())
	commit := false
	defer func() {
		if commit {
			db.FromContext(dbctx).Commit()
		} else {
			db.FromContext(dbctx).Rollback()
		}
	}()

	// check if migrated already.
	migrationJob := &schema.Job{
		ID: "migration-v1.0.0",
	}
	err := migrationJob.Get(dbctx)
	if err != nil {
		if !errors.Is(err, gorm.ErrRecordNotFound) {
			return err
		}

		// initialize key-value schema db.
		err := database.InitializeDatabase()
		if err != nil {
			return err
		}
		defer database.CloseDB()

		// migrate.
		// TODO: add migration code.

		// mark migration job completed.
		err = migrationJob.Create(dbctx)
		if err != nil {
			return err
		}

		commit = true
	}

	return nil
}

func archive() error {
	dbServer := servercfg.GetDB()
	if dbServer != "sqlite" && dbServer != "postgres" {
		return nil
	}

	// begin a new transaction.
	dbctx := db.BeginTx(context.TODO())
	commit := false
	defer func() {
		if commit {
			db.FromContext(dbctx).Commit()
		} else {
			db.FromContext(dbctx).Rollback()
		}
	}()

	// check if key-value schema db archived already.
	archivalJob := &schema.Job{
		ID: "archival-v1.0.0",
	}
	err := archivalJob.Get(dbctx)
	if err != nil {
		if !errors.Is(err, gorm.ErrRecordNotFound) {
			return err
		}

		// archive.
		switch dbServer {
		case "sqlite":
			err = sqliteArchiveOldData()
		default:
			err = pgArchiveOldData()
		}
		if err != nil {
			return err
		}

		// mark archival job completed.
		err = archivalJob.Create(dbctx)
		if err != nil {
			return err
		}

		commit = true
	} else {
		// remove the residual old key-value db file.
		if dbServer == "sqlite" {
			_ = os.Remove(filepath.Join("data", "netmaker.db"))
		}
	}

	return nil
}

func sqliteArchiveOldData() error {
	oldDBFilePath := filepath.Join("data", "netmaker.db")
	archiveDBFilePath := filepath.Join("data", "netmaker_archive.db")

	// check if netmaker_archive.db exists.
	_, err := os.Stat(archiveDBFilePath)
	if err == nil {
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}

	// rename old db file to netmaker_archive.db.
	return os.Rename(oldDBFilePath, archiveDBFilePath)
}

func pgArchiveOldData() error {
	_, err := database.PGDB.Exec("CREATE SCHEMA IF NOT EXISTS netmaker_archive")
	if err != nil {
		return err
	}

	for _, table := range database.Tables {
		_, err := database.PGDB.Exec(
			fmt.Sprintf(
				"ALTER TABLE public.%s SET SCHEMA netmaker_archive",
				table,
			),
		)
		if err != nil {
			return err
		}
	}

	return nil
}
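
// The helper below is not part of the original package; it is a minimal
// sketch of how the Postgres archival above could be reversed, assuming the
// same database.PGDB connection and database.Tables list that
// pgArchiveOldData uses. Because "ALTER TABLE ... SET SCHEMA" only
// re-parents the tables, moving them back restores the archived key-value
// data without copying any rows.
func pgRestoreArchivedData() error {
	for _, table := range database.Tables {
		// move each archived table from the netmaker_archive schema
		// back into the public schema.
		_, err := database.PGDB.Exec(
			fmt.Sprintf(
				"ALTER TABLE netmaker_archive.%s SET SCHEMA public",
				table,
			),
		)
		if err != nil {
			return err
		}
	}

	return nil
}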