From 0b7449129af3268c1d0e4d3fef4741beb2b4aa30 Mon Sep 17 00:00:00 2001 From: Anil Kumar Kammari Date: Fri, 28 Feb 2020 11:56:04 +0530 Subject: [PATCH] Merge PR #5500: Regen network/multistore upgrades --- baseapp/baseapp.go | 60 -------- baseapp/baseapp_test.go | 50 ------- simapp/app.go | 4 +- simapp/app_test.go | 6 +- simapp/sim_bench_test.go | 4 +- simapp/sim_test.go | 12 +- simapp/test_helpers.go | 4 +- store/types/store.go | 7 + x/crisis/handler_test.go | 2 +- x/crisis/internal/keeper/integration_test.go | 2 +- x/gov/genesis_test.go | 2 +- x/upgrade/abci.go | 8 ++ x/upgrade/abci_test.go | 34 ++++- x/upgrade/alias.go | 1 + x/upgrade/doc.go | 21 +++ x/upgrade/keeper/keeper.go | 70 ++++++++- x/upgrade/spec/01_concepts.md | 27 ++++ x/upgrade/types/storeloader.go | 23 +++ x/upgrade/types/storeloader_test.go | 144 +++++++++++++++++++ 19 files changed, 351 insertions(+), 130 deletions(-) create mode 100644 x/upgrade/types/storeloader.go create mode 100644 x/upgrade/types/storeloader_test.go diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 3837a34ccc9a..4fe7d0ba803b 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -1,11 +1,8 @@ package baseapp import ( - "encoding/json" "errors" "fmt" - "io/ioutil" - "os" "reflect" "runtime/debug" "strings" @@ -17,7 +14,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/store" - storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) @@ -226,62 +222,6 @@ func DefaultStoreLoader(ms sdk.CommitMultiStore) error { return ms.LoadLatestVersion() } -// StoreLoaderWithUpgrade is used to prepare baseapp with a fixed StoreLoader -// pattern. This is useful in test cases, or with custom upgrade loading logic. -func StoreLoaderWithUpgrade(upgrades *storetypes.StoreUpgrades) StoreLoader { - return func(ms sdk.CommitMultiStore) error { - return ms.LoadLatestVersionAndUpgrade(upgrades) - } -} - -// UpgradeableStoreLoader can be configured by SetStoreLoader() to check for the -// existence of a given upgrade file - json encoded StoreUpgrades data. -// -// If not file is present, it will peform the default load (no upgrades to store). -// -// If the file is present, it will parse the file and execute those upgrades -// (rename or delete stores), while loading the data. It will also delete the -// upgrade file upon successful load, so that the upgrade is only applied once, -// and not re-applied on next restart -// -// This is useful for in place migrations when a store key is renamed between -// two versions of the software. 
(TODO: this code will move to x/upgrades -// when PR #4233 is merged, here mainly to help test the design) -func UpgradeableStoreLoader(upgradeInfoPath string) StoreLoader { - return func(ms sdk.CommitMultiStore) error { - _, err := os.Stat(upgradeInfoPath) - if os.IsNotExist(err) { - return DefaultStoreLoader(ms) - } else if err != nil { - return err - } - - // there is a migration file, let's execute - data, err := ioutil.ReadFile(upgradeInfoPath) - if err != nil { - return fmt.Errorf("cannot read upgrade file %s: %v", upgradeInfoPath, err) - } - - var upgrades storetypes.StoreUpgrades - err = json.Unmarshal(data, &upgrades) - if err != nil { - return fmt.Errorf("cannot parse upgrade file: %v", err) - } - - err = ms.LoadLatestVersionAndUpgrade(&upgrades) - if err != nil { - return fmt.Errorf("load and upgrade database: %v", err) - } - - // if we have a successful load, we delete the file - err = os.Remove(upgradeInfoPath) - if err != nil { - return fmt.Errorf("deleting upgrade file %s: %v", upgradeInfoPath, err) - } - return nil - } -} - // LoadVersion loads the BaseApp application version. It will panic if called // more than once on a running baseapp. func (app *BaseApp) LoadVersion(version int64, baseKey *sdk.KVStoreKey) error { diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 6ee5aabbb0a8..840531865d31 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/binary" "fmt" - "io/ioutil" "os" "sync" "testing" @@ -137,18 +136,6 @@ func useDefaultLoader(app *BaseApp) { app.SetStoreLoader(DefaultStoreLoader) } -func useUpgradeLoader(upgrades *store.StoreUpgrades) func(*BaseApp) { - return func(app *BaseApp) { - app.SetStoreLoader(StoreLoaderWithUpgrade(upgrades)) - } -} - -func useFileUpgradeLoader(upgradeInfoPath string) func(*BaseApp) { - return func(app *BaseApp) { - app.SetStoreLoader(UpgradeableStoreLoader(upgradeInfoPath)) - } -} - func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { rs := rootmulti.NewStore(db) rs.SetPruning(store.PruneNothing) @@ -184,19 +171,6 @@ func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte // Test that we can make commits and then reload old versions. // Test that LoadLatestVersion actually does. 
func TestSetLoader(t *testing.T) { - // write a renamer to a file - f, err := ioutil.TempFile("", "upgrade-*.json") - require.NoError(t, err) - data := []byte(`{"renamed":[{"old_key": "bnk", "new_key": "banker"}]}`) - _, err = f.Write(data) - require.NoError(t, err) - configName := f.Name() - require.NoError(t, f.Close()) - - // make sure it exists before running everything - _, err = os.Stat(configName) - require.NoError(t, err) - cases := map[string]struct { setLoader func(*BaseApp) origStoreKey string @@ -211,26 +185,6 @@ func TestSetLoader(t *testing.T) { origStoreKey: "foo", loadStoreKey: "foo", }, - "rename with inline opts": { - setLoader: useUpgradeLoader(&store.StoreUpgrades{ - Renamed: []store.StoreRename{{ - OldKey: "foo", - NewKey: "bar", - }}, - }), - origStoreKey: "foo", - loadStoreKey: "bar", - }, - "file loader with missing file": { - setLoader: useFileUpgradeLoader(configName + "randomchars"), - origStoreKey: "bnk", - loadStoreKey: "bnk", - }, - "file loader with existing file": { - setLoader: useFileUpgradeLoader(configName), - origStoreKey: "bnk", - loadStoreKey: "banker", - }, } k := []byte("key") @@ -265,10 +219,6 @@ func TestSetLoader(t *testing.T) { checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) }) } - - // ensure config file was deleted - _, err = os.Stat(configName) - require.True(t, os.IsNotExist(err)) } func TestAppVersionSetterGetter(t *testing.T) { diff --git a/simapp/app.go b/simapp/app.go index e7253c9a7874..67b6d9e119f1 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -122,7 +122,7 @@ type SimApp struct { // NewSimApp returns a reference to an initialized SimApp. func NewSimApp( logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool, - invCheckPeriod uint, baseAppOptions ...func(*bam.BaseApp), + homePath string, invCheckPeriod uint, baseAppOptions ...func(*bam.BaseApp), ) *SimApp { // TODO: Remove cdc in favor of appCodec once all modules are migrated. 
@@ -189,7 +189,7 @@ func NewSimApp( app.CrisisKeeper = crisis.NewKeeper( app.subspaces[crisis.ModuleName], invCheckPeriod, app.SupplyKeeper, auth.FeeCollectorName, ) - app.UpgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], appCodec) + app.UpgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], appCodec, homePath) // create evidence keeper with router evidenceKeeper := evidence.NewKeeper( diff --git a/simapp/app_test.go b/simapp/app_test.go index 9cf211a94176..17b6b3c28644 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -15,7 +15,7 @@ import ( func TestSimAppExport(t *testing.T) { db := dbm.NewMemDB() - app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0) + app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0) genesisState := NewDefaultGenesisState() stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState) @@ -31,7 +31,7 @@ func TestSimAppExport(t *testing.T) { app.Commit() // Making a new app object with the db, so that initchain hasn't been called - app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0) + app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0) _, _, err = app2.ExportAppStateAndValidators(false, []string{}) require.NoError(t, err, "ExportAppStateAndValidators should not have an error") } @@ -39,7 +39,7 @@ func TestSimAppExport(t *testing.T) { // ensure that black listed addresses are properly set in bank keeper func TestBlackListedAddrs(t *testing.T) { db := dbm.NewMemDB() - app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0) + app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0) for acc := range maccPerms { require.Equal(t, !allowedReceivingModAcc[acc], app.BankKeeper.BlacklistedAddr(app.SupplyKeeper.GetModuleAddress(acc))) diff --git a/simapp/sim_bench_test.go b/simapp/sim_bench_test.go index b09a3a9090fb..6d8ee05dbf77 100644 --- a/simapp/sim_bench_test.go +++ b/simapp/sim_bench_test.go @@ -26,7 +26,7 @@ func BenchmarkFullAppSimulation(b *testing.B) { } }() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt()) // run randomized simulation _, simParams, simErr := simulation.SimulateFromSeed( @@ -65,7 +65,7 @@ func BenchmarkInvariants(b *testing.B) { } }() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt()) // run randomized simulation _, simParams, simErr := simulation.SimulateFromSeed( diff --git a/simapp/sim_test.go b/simapp/sim_test.go index 18cb24a89484..6053ff83cc75 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -63,7 +63,7 @@ func TestFullAppSimulation(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // run randomized simulation @@ -95,7 +95,7 @@ func TestAppImportExport(t *testing.T) { 
require.NoError(t, os.RemoveAll(dir)) }() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // Run randomized simulation @@ -129,7 +129,7 @@ func TestAppImportExport(t *testing.T) { require.NoError(t, os.RemoveAll(newDir)) }() - newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt) + newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt) require.Equal(t, "SimApp", newApp.Name()) var genesisState GenesisState @@ -181,7 +181,7 @@ func TestAppSimulationAfterImport(t *testing.T) { require.NoError(t, os.RemoveAll(dir)) }() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // Run randomized simulation @@ -220,7 +220,7 @@ func TestAppSimulationAfterImport(t *testing.T) { require.NoError(t, os.RemoveAll(newDir)) }() - newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt) + newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt) require.Equal(t, "SimApp", newApp.Name()) newApp.InitChain(abci.RequestInitChain{ @@ -266,7 +266,7 @@ func TestAppStateDeterminism(t *testing.T) { db := dbm.NewMemDB() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt()) fmt.Printf( "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", diff --git a/simapp/test_helpers.go b/simapp/test_helpers.go index abc87ed635cc..234995a19bbb 100644 --- a/simapp/test_helpers.go +++ b/simapp/test_helpers.go @@ -23,7 +23,7 @@ import ( // Setup initializes a new SimApp. A Nop logger is set in SimApp. func Setup(isCheckTx bool) *SimApp { db := dbm.NewMemDB() - app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0) + app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0) if !isCheckTx { // init chain must be called to stop deliverState from being nil genesisState := NewDefaultGenesisState() @@ -48,7 +48,7 @@ func Setup(isCheckTx bool) *SimApp { // accounts and possible balances. func SetupWithGenesisAccounts(genAccs []authexported.GenesisAccount, balances ...bank.Balance) *SimApp { db := dbm.NewMemDB() - app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0) + app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0) // initialize the chain with the passed in genesis accounts genesisState := NewDefaultGenesisState() diff --git a/store/types/store.go b/store/types/store.go index d48585f8fce2..72575baa6a89 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -44,6 +44,13 @@ type StoreUpgrades struct { Deleted []string `json:"deleted"` } +// UpgradeInfo defines height and name of the upgrade +// to ensure multistore upgrades happen only at matching height. +type UpgradeInfo struct { + Name string `json:"name"` + Height int64 `json:"height"` +} + // StoreRename defines a name change of a sub-store. 
// All data previously under a PrefixStore with OldKey will be copied // to a PrefixStore with NewKey, then deleted from OldKey store. diff --git a/x/crisis/handler_test.go b/x/crisis/handler_test.go index 2db4012d2f70..83907543f2a1 100644 --- a/x/crisis/handler_test.go +++ b/x/crisis/handler_test.go @@ -25,7 +25,7 @@ var ( func createTestApp() (*simapp.SimApp, sdk.Context, []sdk.AccAddress) { db := dbm.NewMemDB() - app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 1) + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 1) ctx := app.NewContext(true, abci.Header{}) constantFee := sdk.NewInt64Coin(sdk.DefaultBondDenom, 10) diff --git a/x/crisis/internal/keeper/integration_test.go b/x/crisis/internal/keeper/integration_test.go index a98a0b44533c..4dc123fda458 100644 --- a/x/crisis/internal/keeper/integration_test.go +++ b/x/crisis/internal/keeper/integration_test.go @@ -11,7 +11,7 @@ import ( func createTestApp() *simapp.SimApp { db := dbm.NewMemDB() - app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 5) + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5) // init chain must be called to stop deliverState from being nil genesisState := simapp.NewDefaultGenesisState() stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState) diff --git a/x/gov/genesis_test.go b/x/gov/genesis_test.go index 133c7c4f3ab3..d9f8857b168f 100644 --- a/x/gov/genesis_test.go +++ b/x/gov/genesis_test.go @@ -63,7 +63,7 @@ func TestImportExportQueues(t *testing.T) { } db := dbm.NewMemDB() - app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0) + app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 0) app2.InitChain( abci.RequestInitChain{ diff --git a/x/upgrade/abci.go b/x/upgrade/abci.go index 11edf7fbbc0c..f5c6d6a348d9 100644 --- a/x/upgrade/abci.go +++ b/x/upgrade/abci.go @@ -38,6 +38,14 @@ func BeginBlocker(k Keeper, ctx sdk.Context, _ abci.RequestBeginBlock) { upgradeMsg := fmt.Sprintf("UPGRADE \"%s\" NEEDED at %s: %s", plan.Name, plan.DueAt(), plan.Info) // We don't have an upgrade handler for this upgrade name, meaning this software is out of date so shutdown ctx.Logger().Error(upgradeMsg) + + // Write the upgrade info to disk. The UpgradeStoreLoader uses this info to perform or skip + // store migrations. 
+ err := k.DumpUpgradeInfoToDisk(ctx.BlockHeight(), plan.Name) + if err != nil { + panic(fmt.Errorf("unable to write upgrade info to filesystem: %s", err.Error())) + } + panic(upgradeMsg) } // We have an upgrade handler for this upgrade name, so apply the upgrade diff --git a/x/upgrade/abci_test.go b/x/upgrade/abci_test.go index c82fe757a5e5..aa2bf33cfbb4 100644 --- a/x/upgrade/abci_test.go +++ b/x/upgrade/abci_test.go @@ -1,10 +1,15 @@ package upgrade_test import ( + "encoding/json" "errors" + "io/ioutil" + "os" "testing" "time" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" @@ -31,7 +36,7 @@ var s TestSuite func setupTest(height int64, skip map[int64]bool) TestSuite { db := dbm.NewMemDB() - app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, skip, 0) + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, skip, simapp.DefaultNodeHome, 0) genesisState := simapp.NewDefaultGenesisState() stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState) if err != nil { @@ -393,3 +398,30 @@ func TestUpgradeWithoutSkip(t *testing.T) { VerifyDoUpgrade(t) VerifyDone(t, s.ctx, "test") } + +func TestDumpUpgradeInfoToFile(t *testing.T) { + s := setupTest(10, map[int64]bool{}) + + planHeight := s.ctx.BlockHeight() + 1 + name := "test" + t.Log("verify if upgrade height is dumped to file") + err := s.keeper.DumpUpgradeInfoToDisk(planHeight, name) + require.Nil(t, err) + + upgradeInfoFilePath, err := s.keeper.GetUpgradeInfoPath() + require.Nil(t, err) + + data, err := ioutil.ReadFile(upgradeInfoFilePath) + require.NoError(t, err) + + var upgradeInfo storetypes.UpgradeInfo + err = json.Unmarshal(data, &upgradeInfo) + require.Nil(t, err) + + t.Log("Verify upgrade height from file matches ") + require.Equal(t, upgradeInfo.Height, planHeight) + + // clear the test file + err = os.Remove(upgradeInfoFilePath) + require.Nil(t, err) +} diff --git a/x/upgrade/alias.go b/x/upgrade/alias.go index c8ae7bf63157..784eb681b1f4 100644 --- a/x/upgrade/alias.go +++ b/x/upgrade/alias.go @@ -27,6 +27,7 @@ var ( NewSoftwareUpgradeProposal = types.NewSoftwareUpgradeProposal NewCancelSoftwareUpgradeProposal = types.NewCancelSoftwareUpgradeProposal NewQueryAppliedParams = types.NewQueryAppliedParams + UpgradeStoreLoader = types.UpgradeStoreLoader NewKeeper = keeper.NewKeeper NewQuerier = keeper.NewQuerier ) diff --git a/x/upgrade/doc.go b/x/upgrade/doc.go index 160580d5c79c..79867f7a8e42 100644 --- a/x/upgrade/doc.go +++ b/x/upgrade/doc.go @@ -68,6 +68,27 @@ as well as providing the opportunity for the upgraded software to perform any ne (with the old binary) and applying the migration (with the new binary) are enforced in the state machine. Actually switching the binaries is an ops task and not handled inside the sdk / abci app. 
+Here is sample code to configure store migrations with an upgrade:
+
+	// this configures a no-op upgrade handler for the "my-fancy-upgrade" upgrade
+	app.UpgradeKeeper.SetUpgradeHandler("my-fancy-upgrade", func(ctx sdk.Context, plan upgrade.Plan) {
+		// upgrade changes here
+	})
+
+	upgradeInfo := app.UpgradeKeeper.ReadUpgradeInfoFromDisk()
+	if upgradeInfo.Name == "my-fancy-upgrade" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
+		storeUpgrades := store.StoreUpgrades{
+			Renamed: []store.StoreRename{{
+				OldKey: "foo",
+				NewKey: "bar",
+			}},
+			Deleted: []string{},
+		}
+
+		// configure store loader that checks if version == upgradeHeight and applies store upgrades
+		app.SetStoreLoader(upgrade.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
+	}
+
 Halt Behavior
 
 Before halting the ABCI state machine in the BeginBlocker method, the upgrade module will log an error
diff --git a/x/upgrade/keeper/keeper.go b/x/upgrade/keeper/keeper.go
index ba789aeb7b77..a5bed9e36015 100644
--- a/x/upgrade/keeper/keeper.go
+++ b/x/upgrade/keeper/keeper.go
@@ -2,19 +2,30 @@ package keeper
 
 import (
 	"encoding/binary"
+	"encoding/json"
 	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
 
 	"github.com/cosmos/cosmos-sdk/x/upgrade/types"
 
 	"github.com/tendermint/tendermint/libs/log"
+	tmos "github.com/tendermint/tendermint/libs/os"
 
 	"github.com/cosmos/cosmos-sdk/codec"
 	"github.com/cosmos/cosmos-sdk/store/prefix"
+	store "github.com/cosmos/cosmos-sdk/store/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
 )
 
+// UpgradeInfoFileName is the file used to store upgrade information
+const UpgradeInfoFileName string = "upgrade-info.json"
+
 type Keeper struct {
+	homePath           string
 	skipUpgradeHeights map[int64]bool
 	storeKey           sdk.StoreKey
 	cdc                codec.Marshaler
@@ -22,8 +33,9 @@ type Keeper struct {
 }
 
 // NewKeeper constructs an upgrade Keeper
-func NewKeeper(skipUpgradeHeights map[int64]bool, storeKey sdk.StoreKey, cdc codec.Marshaler) Keeper {
+func NewKeeper(skipUpgradeHeights map[int64]bool, storeKey sdk.StoreKey, cdc codec.Marshaler, homePath string) Keeper {
 	return Keeper{
+		homePath:           homePath,
 		skipUpgradeHeights: skipUpgradeHeights,
 		storeKey:           storeKey,
 		cdc:                cdc,
@@ -131,3 +143,59 @@ func (k Keeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) {
 func (k Keeper) IsSkipHeight(height int64) bool {
 	return k.skipUpgradeHeights[height]
 }
+
+// DumpUpgradeInfoToDisk writes upgrade information to UpgradeInfoFileName.
+func (k Keeper) DumpUpgradeInfoToDisk(height int64, name string) error {
+	upgradeInfoFilePath, err := k.GetUpgradeInfoPath()
+	if err != nil {
+		return err
+	}
+
+	upgradeInfo := store.UpgradeInfo{
+		Name:   name,
+		Height: height,
+	}
+	info, err := json.Marshal(upgradeInfo)
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(upgradeInfoFilePath, info, 0644)
+}
+
+// GetUpgradeInfoPath returns the upgrade info file path
+func (k Keeper) GetUpgradeInfoPath() (string, error) {
+	upgradeInfoFileDir := path.Join(k.getHomeDir(), "data")
+	err := tmos.EnsureDir(upgradeInfoFileDir, os.ModePerm)
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(upgradeInfoFileDir, UpgradeInfoFileName), nil
+}
+
+// getHomeDir returns the home directory configured for the keeper
+func (k Keeper) getHomeDir() string {
+	return k.homePath
+}
+
+// ReadUpgradeInfoFromDisk returns the name and height of the upgrade
+// that the old binary wrote to disk before panicking.
+// If the info cannot be read, it assumes that no upgrade info is
+// available and returns a zero-value UpgradeInfo.
+func (k Keeper) ReadUpgradeInfoFromDisk() (upgradeInfo store.UpgradeInfo) {
+	upgradeInfoPath, err := k.GetUpgradeInfoPath()
+	// if error in reading the path, assume there are no upgrades
+	if err != nil {
+		return upgradeInfo
+	}
+
+	data, err := ioutil.ReadFile(upgradeInfoPath)
+	// if error in reading the file, assume there are no upgrades
+	if err != nil {
+		return upgradeInfo
+	}
+
+	json.Unmarshal(data, &upgradeInfo)
+	return
+}
diff --git a/x/upgrade/spec/01_concepts.md b/x/upgrade/spec/01_concepts.md
index 19205591fc15..54147f8b5ebd 100644
--- a/x/upgrade/spec/01_concepts.md
+++ b/x/upgrade/spec/01_concepts.md
@@ -56,6 +56,33 @@ During each `EndBlock` execution, the `x/upgrade` module checks if there exists
 `Handler` is executed. If the `Plan` is expected to execute but no `Handler` is registered
 or if the binary was upgraded too early, the node will gracefully panic and exit.
 
+## StoreLoader
+
+
+The `x/upgrade` module also facilitates store migrations as part of the upgrade. The
+`StoreLoader` sets the migrations that need to occur before the new binary can
+successfully run the chain. This `StoreLoader` is also application specific and
+not defined on a per-module basis. Registering this `StoreLoader` is done via
+`app#SetStoreLoader` in the application.
+
+```go
+func UpgradeStoreLoader(upgradeHeight int64, storeUpgrades *store.StoreUpgrades) baseapp.StoreLoader
+```
+
+If there is a planned upgrade and the upgrade height is reached, the old binary writes `UpgradeInfo` to disk before panicking.
+
+```go
+type UpgradeInfo struct {
+  Name    string
+  Height  int64
+}
+```
+
+This information is critical to ensure that the `StoreUpgrades` happen smoothly at the correct height for
+the expected upgrade. It eliminates the chance of the new binary executing the `StoreUpgrades` again
+on every restart. Also, if there are multiple upgrades planned at the same height, the `Name`
+ensures that these `StoreUpgrades` take place only during the planned upgrade.
+
 ## Proposal
 
 Typically, a `Plan` is proposed and submitted through governance via a `SoftwareUpgradeProposal`.
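As an aside (not part of the patch hunks above or below), the on-disk artifact the spec refers to is just a small JSON file at `<home>/data/upgrade-info.json`. The sketch below is illustrative only: it redefines a local struct that mirrors the `json` tags of the `UpgradeInfo` type added in `store/types/store.go`, and the plan name "test" and height 100 are made-up example values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// upgradeInfo mirrors the json tags of store/types.UpgradeInfo ("name", "height")
// from this patch; it is redefined locally only to keep the example self-contained.
type upgradeInfo struct {
	Name   string `json:"name"`
	Height int64  `json:"height"`
}

func main() {
	// What DumpUpgradeInfoToDisk would write to <home>/data/upgrade-info.json
	// for a hypothetical plan named "test" that is due at height 100.
	bz, err := json.Marshal(upgradeInfo{Name: "test", Height: 100})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // {"name":"test","height":100}
}
```

`UpgradeStoreLoader` (added in `x/upgrade/types/storeloader.go` below) then runs the registered `StoreUpgrades` only when `ms.LastCommitID().Version` equals the `Height` recorded in this file and at least one rename or deletion is configured; otherwise it falls back to `DefaultStoreLoader`.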
diff --git a/x/upgrade/types/storeloader.go b/x/upgrade/types/storeloader.go new file mode 100644 index 000000000000..0ff168dcc343 --- /dev/null +++ b/x/upgrade/types/storeloader.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/baseapp" + store "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// UpgradeStoreLoader is used to prepare baseapp with a fixed StoreLoader +// pattern. This is useful for custom upgrade loading logic. +func UpgradeStoreLoader(upgradeHeight int64, storeUpgrades *store.StoreUpgrades) baseapp.StoreLoader { + return func(ms sdk.CommitMultiStore) error { + if upgradeHeight == ms.LastCommitID().Version { + // Check if the current commit version and upgrade height matches + if len(storeUpgrades.Renamed) > 0 || len(storeUpgrades.Deleted) > 0 { + return ms.LoadLatestVersionAndUpgrade(storeUpgrades) + } + } + + // Otherwise load default store loader + return baseapp.DefaultStoreLoader(ms) + } +} diff --git a/x/upgrade/types/storeloader_test.go b/x/upgrade/types/storeloader_test.go new file mode 100644 index 000000000000..955ab07cdfc3 --- /dev/null +++ b/x/upgrade/types/storeloader_test.go @@ -0,0 +1,144 @@ +package types + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + store "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/tests" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func useUpgradeLoader(height int64, upgrades *store.StoreUpgrades) func(*baseapp.BaseApp) { + return func(app *baseapp.BaseApp) { + app.SetStoreLoader(UpgradeStoreLoader(height, upgrades)) + } +} + +func defaultLogger() log.Logger { + return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app") +} + +func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db) + rs.SetPruning(store.PruneNothing) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, int64(0), rs.LastCommitID().Version) + + // write some data in substore + kv, _ := rs.GetStore(key).(store.KVStore) + require.NotNil(t, kv) + kv.Set(k, v) + commitID := rs.Commit() + require.Equal(t, int64(1), commitID.Version) +} + +func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db) + rs.SetPruning(store.PruneNothing) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, ver, rs.LastCommitID().Version) + + // query data in substore + kv, _ := rs.GetStore(key).(store.KVStore) + + require.NotNil(t, kv) + require.Equal(t, v, kv.Get(k)) +} + +// Test that we can make commits and then reload old versions. +// Test that LoadLatestVersion actually does. 
+func TestSetLoader(t *testing.T) { + // set a temporary home dir + homeDir, cleanUp := tests.NewTestCaseDir(t) + defer cleanUp() + // TODO cleanup viper + viper.Set(flags.FlagHome, homeDir) + + upgradeInfoFilePath := filepath.Join(homeDir, "upgrade-info.json") + upgradeInfo := &store.UpgradeInfo{ + Name: "test", Height: 0, + } + data, err := json.Marshal(upgradeInfo) + require.NoError(t, err) + err = ioutil.WriteFile(upgradeInfoFilePath, data, 0644) + require.NoError(t, err) + + // make sure it exists before running everything + _, err = os.Stat(upgradeInfoFilePath) + require.NoError(t, err) + + cases := map[string]struct { + setLoader func(*baseapp.BaseApp) + origStoreKey string + loadStoreKey string + }{ + "don't set loader": { + origStoreKey: "foo", + loadStoreKey: "foo", + }, + "rename with inline opts": { + setLoader: useUpgradeLoader(0, &store.StoreUpgrades{ + Renamed: []store.StoreRename{{ + OldKey: "foo", + NewKey: "bar", + }}, + }), + origStoreKey: "foo", + loadStoreKey: "bar", + }, + } + + k := []byte("key") + v := []byte("value") + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + // prepare a db with some data + db := dbm.NewMemDB() + + initStore(t, db, tc.origStoreKey, k, v) + + // load the app with the existing db + opts := []func(*baseapp.BaseApp){baseapp.SetPruning(store.PruneNothing)} + if tc.setLoader != nil { + opts = append(opts, tc.setLoader) + } + + app := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) + capKey := sdk.NewKVStoreKey(baseapp.MainStoreKey) + app.MountStores(capKey) + app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) + err := app.LoadLatestVersion(capKey) + require.Nil(t, err) + + // "execute" one block + app.BeginBlock(abci.RequestBeginBlock{Header: abci.Header{Height: 2}}) + res := app.Commit() + require.NotNil(t, res.Data) + + // check db is properly updated + checkStore(t, db, 2, tc.loadStoreKey, k, v) + checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) + }) + } +}
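Since this patch threads a new `homePath` argument through `NewSimApp` and every call site now passes `simapp.DefaultNodeHome`, callers outside the repository must be updated as well. The sketch below shows, under stated assumptions, how a node daemon's app constructor might adapt: only the `NewSimApp` signature comes from the patch, while the `newApp` function name and the `viper.GetString(flags.FlagHome)` lookup are illustrative assumptions, not part of this change.

```go
package app

import (
	"io"

	"github.com/spf13/viper"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	dbm "github.com/tendermint/tm-db"

	"github.com/cosmos/cosmos-sdk/client/flags"
	"github.com/cosmos/cosmos-sdk/simapp"
)

// newApp sketches an application constructor adapted to the new NewSimApp
// signature. The constructor name and the viper-based home lookup are
// illustrative assumptions; the keeper uses homePath to locate
// <home>/data/upgrade-info.json.
func newApp(logger log.Logger, db dbm.DB, traceStore io.Writer) abci.Application {
	return simapp.NewSimApp(
		logger, db, traceStore, true, map[int64]bool{},
		viper.GetString(flags.FlagHome), // homePath argument added by this patch
		0,                               // invCheckPeriod
	)
}
```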