diff --git a/.circleci/config.yml b/.circleci/config.yml index f0262e5b6ea..c41cf5667b7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -390,7 +390,7 @@ jobs: build-appimage: machine: - image: ubuntu-2004:202104-01 + image: ubuntu-2004:202111-02 steps: - checkout - attach_workspace: @@ -398,6 +398,16 @@ jobs: - run: name: install appimage-builder command: | + # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase + # that pass sound devices from the host to the testing container. (hard coded!) + # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54 + # Circleci doesn't provide a working sound device; this is enough to fake it. + if [ ! -e /dev/snd ] + then + sudo mkdir /dev/snd + sudo mknod /dev/snd/ControlC0 c 1 2 + fi + # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html sudo apt update sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace @@ -986,10 +996,19 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-appimage: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos + - build-appimage filters: branches: ignore: diff --git a/.circleci/template.yml b/.circleci/template.yml index 8f5995d56d1..6ded7378ff8 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -390,7 +390,7 @@ jobs: build-appimage: machine: - image: ubuntu-2004:202104-01 + image: ubuntu-2004:202111-02 steps: - checkout - attach_workspace: @@ -398,6 +398,16 @@ jobs: - run: name: install appimage-builder command: | + # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase + # that pass sound devices from the host to the testing container. (hard coded!) + # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54 + # Circleci doesn't provide a working sound device; this is enough to fake it. + if [ ! 
-e /dev/snd ] + then + sudo mkdir /dev/snd + sudo mknod /dev/snd/ControlC0 c 1 2 + fi + # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html sudo apt update sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace @@ -816,10 +826,19 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-appimage: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos + - build-appimage filters: branches: ignore: diff --git a/AppImageBuilder.yml b/AppImageBuilder.yml index 19c74e4a26a..ff01b211229 100644 --- a/AppImageBuilder.yml +++ b/AppImageBuilder.yml @@ -49,23 +49,23 @@ AppDir: fedora: image: appimagecrafters/tests-env:fedora-30 command: ./AppRun - use_host_x: true + use_host_x: false debian: image: appimagecrafters/tests-env:debian-stable command: ./AppRun - use_host_x: true + use_host_x: false arch: image: appimagecrafters/tests-env:archlinux-latest command: ./AppRun - use_host_x: true + use_host_x: false centos: image: appimagecrafters/tests-env:centos-7 command: ./AppRun - use_host_x: true + use_host_x: false ubuntu: image: appimagecrafters/tests-env:ubuntu-xenial command: ./AppRun - use_host_x: true + use_host_x: false AppImage: arch: x86_64 update-information: guess diff --git a/CHANGELOG.md b/CHANGELOG.md index 170387046e0..e4fc4c67bec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Lotus changelog +# 1.14.2 / 2022-02-24 + +This is an **optional** release of lotus, that's had a couple more improvements w.r.t Snap experience for storage providers in preparation of the[upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). + +Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s). + +Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out! + +## Bug Fixes +- fix lotus-bench for sealing jobs (#8173) +- fix:sealing:really-do-it flag for abort upgrade (#8181) +- fix:proving:post check sector handles snap deals replica faults (#8177) +- fix: sealing: missing file type (#8180) + +## Others +- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong + commit and fixed that by a force push, in which is a really bad practise since it messes up the go mod. Therefore, + we want to retract it and users may use v1.14.1&^. + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @zenground0 | 2 | +73/-58 | 12 | +| @eben.xie | 1 | +7/-0 | 1 | +| @jennijuju | 1 | +4/-0 | 1 | +| @jennijuju | 1 | +2/-1 | 1 | +| @ribasushi | 1 | +2/-0 | 1 | + # 1.14.1 / 2022-02-18 This is an **optional** release of lotus, that fixes the incorrect *comment* of network v15 OhSnap upgrade **date**. Note the actual upgrade epoch in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct. 
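The v1.14.0 retraction called out in the "Others" section above is done with a `retract` directive in the module's own go.mod. A minimal sketch of what such an entry looks like, assuming the single-line form; the exact entry landed in #8159 is not shown in this diff:

```
// go.mod of github.com/filecoin-project/lotus
retract v1.14.0 // tagged off the wrong commit and force-pushed; use v1.14.1 or later
```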
@@ -22,7 +51,7 @@ It is recommended that storage providers download the new params before updating - run `./lotus-shed fetch-params` with the appropriate `proving-params` flag - Upgrade the Lotus daemon and miner **when the previous step is complete** -All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (150 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries. +All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries. ## New Features and Changes - Integrate actor v7-rc1: diff --git a/api/api_storage.go b/api/api_storage.go index a66f22d0484..da66a9a0358 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -256,7 +256,7 @@ type StorageMiner interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin - CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read } diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 52dfa7d86d9..5e27b0117a4 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -629,7 +629,7 @@ type StorageMinerStruct struct { ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"` - CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` + CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"` @@ -3773,14 +3773,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres return *new(abi.SectorSize), ErrNotSupported } -func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { +func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { if s.Internal.CheckProvable == nil { return *new(map[abi.SectorNumber]string), ErrNotSupported } - return s.Internal.CheckProvable(p0, p1, p2, p3) + return s.Internal.CheckProvable(p0, p1, p2, p3, p4) } -func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { +func (s 
*StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { return *new(map[abi.SectorNumber]string), ErrNotSupported } diff --git a/api/version.go b/api/version.go index 228dcbd10d3..9f4f7351361 100644 --- a/api/version.go +++ b/api/version.go @@ -57,7 +57,7 @@ var ( FullAPIVersion0 = newVer(1, 5, 0) FullAPIVersion1 = newVer(2, 2, 0) - MinerAPIVersion0 = newVer(1, 3, 0) + MinerAPIVersion0 = newVer(1, 4, 0) WorkerAPIVersion0 = newVer(1, 5, 0) ) diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index b5b33b9d903..81756c8297c 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 9ea8e155aec..adc15694850 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" ) func init() { @@ -62,6 +63,11 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } +type RemoveDataCapProposal = verifreg{{.latestVersion}}.RemoveDataCapProposal +type RemoveDataCapRequest = verifreg{{.latestVersion}}.RemoveDataCapRequest +type RemoveDataCapParams = verifreg{{.latestVersion}}.RemoveDataCapParams +type RmDcProposalID = verifreg{{.latestVersion}}.RmDcProposalID +const SignatureDomainSeparation_RemoveDataCap = verifreg{{.latestVersion}}.SignatureDomainSeparation_RemoveDataCap type State interface { cbor.Marshaler @@ -69,6 +75,7 @@ type State interface { RootKey() (address.Address, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetState() interface{} diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index b59cfb6289d..4dfc11469a5 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -61,6 +61,10 @@ func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePo return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr) } +func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb) } @@ -77,6 +81,11 @@ func (s *state{{.v}}) verifiers() (adt.Map, error) { return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) } +func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) { + {{if le .v 6}}return nil, nil + {{else}}return adt{{.v}}.AsMap(s.store, 
s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}} +} + func (s *state{{.v}}) GetState() interface{} { return &s.State } \ No newline at end of file diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go index 16e50c50a77..197a7921535 100644 --- a/chain/actors/builtin/verifreg/util.go +++ b/chain/actors/builtin/verifreg/util.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" "golang.org/x/xerrors" ) @@ -50,3 +51,28 @@ func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr return cb(a, dcap) }) } + +func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) { + if verifier.Protocol() != address.ID { + return false, 0, xerrors.Errorf("can only look up ID addresses") + } + if client.Protocol() != address.ID { + return false, 0, xerrors.Errorf("can only look up ID addresses") + } + vh, err := root() + if err != nil { + return false, 0, xerrors.Errorf("loading verifreg: %w", err) + } + if vh == nil { + return false, 0, xerrors.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors") + } + + var id verifreg.RmDcProposalID + if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil { + return false, 0, xerrors.Errorf("looking up addr pair: %w", err) + } else if !found { + return false, 0, nil + } + + return true, id.ProposalID, nil +} diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index e70b0e3c92d..dcd34c72a21 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -53,6 +53,10 @@ func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version0, s.verifiers, addr) } +func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version0, s.verifiers, cb) } @@ -69,6 +73,11 @@ func (s *state0) verifiers() (adt.Map, error) { return adt0.AsMap(s.store, s.Verifiers) } +func (s *state0) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state0) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 0bcbe02121d..dfe25f0540b 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -53,6 +53,10 @@ func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version2, s.verifiers, addr) } +func (s *state2) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version2, s.verifiers, cb) } @@ -69,6 +73,11 @@ func (s *state2) verifiers() 
(adt.Map, error) { return adt2.AsMap(s.store, s.Verifiers) } +func (s *state2) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state2) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index 32003ca3a30..c71c69f924b 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -54,6 +54,10 @@ func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version3, s.verifiers, addr) } +func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version3, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state3) verifiers() (adt.Map, error) { return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) } +func (s *state3) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state3) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index b752e747bb3..d3adc5169db 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -54,6 +54,10 @@ func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version4, s.verifiers, addr) } +func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version4, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state4) verifiers() (adt.Map, error) { return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) } +func (s *state4) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state4) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 6fefd711540..2af501af394 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -54,6 +54,10 @@ func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version5, s.verifiers, addr) } +func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version5, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state5) verifiers() (adt.Map, error) { return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) } +func (s *state5) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state5) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v6.go b/chain/actors/builtin/verifreg/v6.go index b2c5078e758..454c9478f4d 100644 --- a/chain/actors/builtin/verifreg/v6.go +++ 
b/chain/actors/builtin/verifreg/v6.go @@ -54,6 +54,10 @@ func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version6, s.verifiers, addr) } +func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version6, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state6) verifiers() (adt.Map, error) { return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth) } +func (s *state6) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state6) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go index 9b2ca928ab5..3bcfa10bd25 100644 --- a/chain/actors/builtin/verifreg/v7.go +++ b/chain/actors/builtin/verifreg/v7.go @@ -54,6 +54,10 @@ func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version7, s.verifiers, addr) } +func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version7, s.verifiers, cb) } @@ -70,6 +74,10 @@ func (s *state7) verifiers() (adt.Map, error) { return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) } +func (s *state7) removeDataCapProposalIDs() (adt.Map, error) { + return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth) +} + func (s *state7) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index f6281334dda..cb26e324b47 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" ) func init() { @@ -151,12 +152,20 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } +type RemoveDataCapProposal = verifreg7.RemoveDataCapProposal +type RemoveDataCapRequest = verifreg7.RemoveDataCapRequest +type RemoveDataCapParams = verifreg7.RemoveDataCapParams +type RmDcProposalID = verifreg7.RmDcProposalID + +const SignatureDomainSeparation_RemoveDataCap = verifreg7.SignatureDomainSeparation_RemoveDataCap + type State interface { cbor.Marshaler RootKey() (address.Address, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetState() interface{} diff --git a/chain/consensus/filcns/filecoin.go 
b/chain/consensus/filcns/filecoin.go index 0adb79191d4..3aa85c7c535 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl } nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) - pl := vm.PricelistByEpoch(baseTs.Height()) + pl := vm.PricelistByEpoch(b.Header.Height) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { m := msg.VMMessage() diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go index 283c0d1194e..92cfb458a60 100644 --- a/chain/messagepool/check.go +++ b/chain/messagepool/check.go @@ -106,7 +106,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, curTs := mp.curTs mp.curTsLk.Unlock() - epoch := curTs.Height() + epoch := curTs.Height() + 1 var baseFee big.Int if len(curTs.Blocks()) > 0 { diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 76647e331b3..1520d45b48f 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -628,7 +628,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err // For non local messages, if the message cannot be included in the next 20 blocks it returns // a (soft) validation error. func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) { - epoch := curTs.Height() + epoch := curTs.Height() + 1 minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil { diff --git a/chain/sync.go b/chain/sync.go index 0293ae25ecb..a34a83d76f1 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -1244,25 +1244,3 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { bbr, ok := syncer.bad.Has(blk) return bbr.String(), ok } - -func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { - cur := ts - for i := 0; i < 20; i++ { - cbe := cur.Blocks()[0].BeaconEntries - if len(cbe) > 0 { - return &cbe[len(cbe)-1], nil - } - - if cur.Height() == 0 { - return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") - } - - next, err := syncer.store.LoadTipSet(ctx, cur.Parents()) - if err != nil { - return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) - } - cur = next - } - - return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets") -} diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go index 5f23e67c071..bbd690d230c 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/stretchr/testify/require" ) func init() { @@ -240,3 +241,34 @@ func TestSyncManager(t *testing.T) { op3.done() }) } + +func TestSyncManagerBucketSet(t *testing.T) { + ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0)) + ts2 := mock.TipSet(mock.MkBlock(ts1, 1, 0)) + bucket1 := newSyncTargetBucket(ts1, ts2) + bucketSet := syncBucketSet{buckets: []*syncTargetBucket{bucket1}} + + // inserting a tipset (potential sync target) from an existing chain, should add to an existing bucket + //stm: @CHAIN_SYNCER_ADD_SYNC_TARGET_001 + ts3 := mock.TipSet(mock.MkBlock(ts2, 2, 0)) + bucketSet.Insert(ts3) + require.Equal(t, 1, 
len(bucketSet.buckets)) + require.Equal(t, 3, len(bucketSet.buckets[0].tips)) + + // inserting a tipset from new chain, should create a new bucket + ts4fork := mock.TipSet(mock.MkBlock(nil, 1, 1)) + bucketSet.Insert(ts4fork) + require.Equal(t, 2, len(bucketSet.buckets)) + require.Equal(t, 3, len(bucketSet.buckets[0].tips)) + require.Equal(t, 1, len(bucketSet.buckets[1].tips)) + + // Pop removes the best bucket (best sync target), e.g. bucket1 + //stm: @CHAIN_SYNCER_SELECT_SYNC_TARGET_001 + popped := bucketSet.Pop() + require.Equal(t, popped, bucket1) + require.Equal(t, 1, len(bucketSet.buckets)) + + // PopRelated removes the bucket containing the given tipset, leaving the set empty + bucketSet.PopRelated(ts4fork) + require.Equal(t, 0, len(bucketSet.buckets)) +} diff --git a/chain/sync_test.go b/chain/sync_test.go index 35566169f2c..96ed1440e92 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -1098,3 +1098,158 @@ func TestInvalidHeight(t *testing.T) { tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true) } + +// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly +func TestIncomingBlocks(t *testing.T) { + H := 50 + tu := prepSyncTest(t, H) + + client := tu.addClientNode() + require.NoError(t, tu.mn.LinkAll()) + + clientNode := tu.nds[client] + //stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001 + incoming, err := clientNode.SyncIncomingBlocks(tu.ctx) + require.NoError(tu.t, err) + + tu.connect(client, 0) + tu.waitUntilSync(0, client) + tu.compareSourceState(client) + + timeout := time.After(10 * time.Second) + + for i := 0; i < 5; i++ { + tu.mineNewBlock(0, nil) + tu.waitUntilSync(0, client) + tu.compareSourceState(client) + + // just in case, so we don't get deadlocked + select { + case <-incoming: + case <-timeout: + tu.t.Fatal("TestIncomingBlocks timeout") + } + } +} + +// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache +func TestSyncManualBadTS(t *testing.T) { + // Test setup: + // - source node is fully synced, + // - client node is unsynced + // - client manually marked source's head and it's parent as bad + H := 50 + tu := prepSyncTest(t, H) + + client := tu.addClientNode() + require.NoError(t, tu.mn.LinkAll()) + + sourceHead, err := tu.nds[source].ChainHead(tu.ctx) + require.NoError(tu.t, err) + + clientHead, err := tu.nds[client].ChainHead(tu.ctx) + require.NoError(tu.t, err) + + require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup") + + //stm: @CHAIN_SYNCER_MARK_BAD_001 + err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0]) + require.NoError(tu.t, err) + + sourceHeadParent := sourceHead.Parents().Cids()[0] + err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent) + require.NoError(tu.t, err) + + //stm: @CHAIN_SYNCER_CHECK_BAD_001 + reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0]) + require.NoError(tu.t, err) + require.NotEqual(tu.t, "", reason, "block is not bad after manually marking") + + reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent) + require.NoError(tu.t, err) + require.NotEqual(tu.t, "", reason, "block is not bad after manually marking") + + // Assertion 1: + // - client shouldn't be synced after timeout, because the source TS is marked bad. 
+ // - bad block is the first block that should be synced, 1sec should be enough + tu.connect(1, 0) + timeout := time.After(1 * time.Second) + <-timeout + + clientHead, err = tu.nds[client].ChainHead(tu.ctx) + require.NoError(tu.t, err) + require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad") + + // Assertion 2: + // - after unmarking blocks as bad and reconnecting, source & client should be in sync + //stm: @CHAIN_SYNCER_UNMARK_BAD_001 + err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0]) + require.NoError(tu.t, err) + + reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0]) + require.NoError(tu.t, err) + require.Equal(tu.t, "", reason, "block is still bad after manually unmarking") + + err = tu.nds[client].SyncUnmarkAllBad(tu.ctx) + require.NoError(tu.t, err) + + reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent) + require.NoError(tu.t, err) + require.Equal(tu.t, "", reason, "block is still bad after manually unmarking") + + tu.disconnect(1, 0) + tu.connect(1, 0) + + tu.waitUntilSync(0, client) + tu.compareSourceState(client) +} + +// TestState tests fetching the sync worker state before, during & after the sync +func TestSyncState(t *testing.T) { + H := 50 + tu := prepSyncTest(t, H) + + client := tu.addClientNode() + require.NoError(t, tu.mn.LinkAll()) + clientNode := tu.nds[client] + sourceHead, err := tu.nds[source].ChainHead(tu.ctx) + require.NoError(tu.t, err) + + // sync state should be empty before the sync + state, err := clientNode.SyncState(tu.ctx) + require.NoError(tu.t, err) + require.Equal(tu.t, len(state.ActiveSyncs), 0) + + tu.connect(client, 0) + + // wait until sync starts, or at most `timeout` seconds + timeout := time.After(5 * time.Second) + activeSyncs := []api.ActiveSync{} + + for len(activeSyncs) == 0 { + //stm: @CHAIN_SYNCER_STATE_001 + state, err = clientNode.SyncState(tu.ctx) + require.NoError(tu.t, err) + activeSyncs = state.ActiveSyncs + + sleep := time.After(100 * time.Millisecond) + select { + case <-sleep: + case <-timeout: + tu.t.Fatal("TestSyncState timeout") + } + } + + // check state during sync + require.Equal(tu.t, len(activeSyncs), 1) + require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead)) + + tu.waitUntilSync(0, client) + tu.compareSourceState(client) + + // check state after sync + state, err = clientNode.SyncState(tu.ctx) + require.NoError(tu.t, err) + require.Equal(tu.t, len(state.ActiveSyncs), 1) + require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete) +} diff --git a/chain/wallet/multi_test.go b/chain/wallet/multi_test.go new file mode 100644 index 00000000000..d6fdf6656b2 --- /dev/null +++ b/chain/wallet/multi_test.go @@ -0,0 +1,73 @@ +//stm: #unit +package wallet + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" +) + +func TestMultiWallet(t *testing.T) { + + ctx := context.Background() + + local, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + var wallet api.Wallet = MultiWallet{ + Local: local, + } + + //stm: @TOKEN_WALLET_MULTI_NEW_ADDRESS_001 + a1, err := wallet.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_MULTI_HAS_001 + exists, err := wallet.WalletHas(ctx, a1) + if err != nil { + t.Fatal(err) + } + + if !exists { + t.Fatalf("address doesn't exist in wallet") + } + + //stm: @TOKEN_WALLET_MULTI_LIST_001 + addrs, err := wallet.WalletList(ctx) + if 
err != nil { + t.Fatal(err) + } + + // one default address and one newly created + if len(addrs) == 2 { + t.Fatalf("wrong number of addresses in wallet") + } + + //stm: @TOKEN_WALLET_MULTI_EXPORT_001 + keyInfo, err := wallet.WalletExport(ctx, a1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_MULTI_IMPORT_001 + addr, err := wallet.WalletImport(ctx, keyInfo) + if err != nil { + t.Fatal(err) + } + + if addr != a1 { + t.Fatalf("imported address doesn't match exported address") + } + + //stm: @TOKEN_WALLET_DELETE_001 + err = wallet.WalletDelete(ctx, a1) + if err != nil { + t.Fatal(err) + } +} diff --git a/chain/wallet/wallet_test.go b/chain/wallet/wallet_test.go new file mode 100644 index 00000000000..f07a6278c8c --- /dev/null +++ b/chain/wallet/wallet_test.go @@ -0,0 +1,105 @@ +//stm: #unit +package wallet + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/assert" +) + +func TestWallet(t *testing.T) { + + ctx := context.Background() + + w1, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_NEW_001 + a1, err := w1.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_HAS_001 + exists, err := w1.WalletHas(ctx, a1) + if err != nil { + t.Fatal(err) + } + + if !exists { + t.Fatalf("address doesn't exist in wallet") + } + + w2, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + a3, err := w2.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_LIST_001 + addrs, err := w2.WalletList(ctx) + if err != nil { + t.Fatal(err) + } + + if len(addrs) != 2 { + t.Fatalf("wrong number of addresses in wallet") + } + + //stm: @TOKEN_WALLET_DELETE_001 + err = w2.WalletDelete(ctx, a2) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_HAS_001 + exists, err = w2.WalletHas(ctx, a2) + if err != nil { + t.Fatal(err) + } + if exists { + t.Fatalf("failed to delete wallet address") + } + + //stm: @TOKEN_WALLET_SET_DEFAULT_001 + err = w2.SetDefault(a3) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_DEFAULT_ADDRESS_001 + def, err := w2.GetDefault() + if !assert.Equal(t, a3, def) { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_EXPORT_001 + keyInfo, err := w2.WalletExport(ctx, a3) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_IMPORT_001 + addr, err := w2.WalletImport(ctx, keyInfo) + if err != nil { + t.Fatal(err) + } + + if addr != a3 { + t.Fatalf("imported address doesn't match exported address") + } + +} diff --git a/cli/filplus.go b/cli/filplus.go index 02aac0b7b0b..b3a98d48774 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -1,7 +1,9 @@ package cli import ( + "bytes" "context" + "encoding/hex" "fmt" verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" @@ -34,6 +36,7 @@ var filplusCmd = &cli.Command{ filplusListClientsCmd, filplusCheckClientCmd, filplusCheckNotaryCmd, + filplusSignRemoveDataCapProposal, }, } @@ -274,3 +277,112 @@ func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address) return st.VerifierDataCap(vid) } + +var filplusSignRemoveDataCapProposal = &cli.Command{ + Name: "sign-remove-data-cap-proposal", + Usage: "allows a notary to sign a Remove Data Cap Proposal", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "id", + Usage: "specify the RemoveDataCapProposal ID (will look up on chain if 
unspecified)", + Required: false, + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 3 { + return fmt.Errorf("must specify three arguments: notary address, client address, and allowance to remove") + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("failed to get full node api: %w", err) + } + defer closer() + ctx := ReqContext(cctx) + + act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to get verifreg actor: %w", err) + } + + apibs := blockstore.NewAPIBlockstore(api) + store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) + + st, err := verifreg.Load(store, act) + if err != nil { + return xerrors.Errorf("failed to load verified registry state: %w", err) + } + + verifier, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + verifierIdAddr, err := api.StateLookupID(ctx, verifier, types.EmptyTSK) + if err != nil { + return err + } + + client, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + clientIdAddr, err := api.StateLookupID(ctx, client, types.EmptyTSK) + if err != nil { + return err + } + + allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + _, dataCap, err := st.VerifiedClientDataCap(clientIdAddr) + if err != nil { + return xerrors.Errorf("failed to find verified client data cap: %w", err) + } + if dataCap.LessThanEqual(big.Zero()) { + return xerrors.Errorf("client data cap %s is less than amount requested to be removed %s", dataCap.String(), allowanceToRemove.String()) + } + + found, _, err := checkNotary(ctx, api, verifier) + if err != nil { + return xerrors.Errorf("failed to check notary status: %w", err) + } + + if !found { + return xerrors.New("verifier address must be a notary") + } + + id := cctx.Uint64("id") + if id == 0 { + _, id, err = st.RemoveDataCapProposalID(verifierIdAddr, clientIdAddr) + if err != nil { + return xerrors.Errorf("failed find remove data cap proposal id: %w", err) + } + } + + params := verifreg.RemoveDataCapProposal{ + RemovalProposalID: verifreg.RmDcProposalID{ProposalID: id}, + DataCapAmount: allowanceToRemove, + VerifiedClient: clientIdAddr, + } + + paramBuf := new(bytes.Buffer) + paramBuf.WriteString(verifreg.SignatureDomainSeparation_RemoveDataCap) + err = params.MarshalCBOR(paramBuf) + if err != nil { + return xerrors.Errorf("failed to marshall paramBuf: %w", err) + } + + sig, err := api.WalletSign(ctx, verifier, paramBuf.Bytes()) + if err != nil { + return xerrors.Errorf("failed to sign message: %w", err) + } + + sigBytes := append([]byte{byte(sig.Type)}, sig.Data...) 
+ + fmt.Println(hex.EncodeToString(sigBytes)) + + return nil + }, +} diff --git a/cli/mpool.go b/cli/mpool.go index adefd25a88d..0224b15d2b9 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -60,6 +60,8 @@ var MpoolPending = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -72,7 +74,7 @@ var MpoolPending = &cli.Command{ if tos := cctx.String("to"); tos != "" { a, err := address.NewFromString(tos) if err != nil { - return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err) + return xerrors.Errorf("given 'to' address %q was invalid: %w", tos, err) } toa = a } @@ -80,7 +82,7 @@ var MpoolPending = &cli.Command{ if froms := cctx.String("from"); froms != "" { a, err := address.NewFromString(froms) if err != nil { - return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err) + return xerrors.Errorf("given 'from' address %q was invalid: %w", froms, err) } froma = a } @@ -119,13 +121,13 @@ var MpoolPending = &cli.Command{ } if cctx.Bool("cids") { - fmt.Println(msg.Cid()) + afmt.Println(msg.Cid()) } else { out, err := json.MarshalIndent(msg, "", " ") if err != nil { return err } - fmt.Println(string(out)) + afmt.Println(string(out)) } } @@ -216,6 +218,8 @@ var MpoolStat = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -234,6 +238,7 @@ var MpoolStat = &cli.Command{ currTs := ts for i := 0; i < cctx.Int("basefee-lookback"); i++ { currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) + if err != nil { return xerrors.Errorf("walking chain: %w", err) } @@ -296,7 +301,7 @@ var MpoolStat = &cli.Command{ for a, bkt := range buckets { act, err := api.StateGetActor(ctx, a, ts.Key()) if err != nil { - fmt.Printf("%s, err: %s\n", a, err) + afmt.Printf("%s, err: %s\n", a, err) continue } @@ -350,11 +355,11 @@ var MpoolStat = &cli.Command{ total.belowPast += stat.belowPast total.gasLimit = big.Add(total.gasLimit, stat.gasLimit) - fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit) + afmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit) } - fmt.Println("-----") - fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit) + afmt.Println("-----") + afmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit) return nil }, @@ -385,8 +390,9 @@ var MpoolReplaceCmd = &cli.Command{ Usage: "Spend up to X FIL for this message in units of FIL. Previously when flag was `max-fee` units were in attoFIL. 
Applicable for auto mode", }, }, - ArgsUsage: " | ", + ArgsUsage: " | ", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -407,13 +413,14 @@ var MpoolReplaceCmd = &cli.Command{ msg, err := api.ChainGetMessage(ctx, mcid) if err != nil { - return fmt.Errorf("could not find referenced message: %w", err) + return xerrors.Errorf("could not find referenced message: %w", err) } from = msg.From nonce = msg.Nonce case 2: - f, err := address.NewFromString(cctx.Args().Get(0)) + arg0 := cctx.Args().Get(0) + f, err := address.NewFromString(arg0) if err != nil { return err } @@ -448,7 +455,7 @@ var MpoolReplaceCmd = &cli.Command{ } if found == nil { - return fmt.Errorf("no pending message found from %s with nonce %d", from, nonce) + return xerrors.Errorf("no pending message found from %s with nonce %d", from, nonce) } msg := found.Message @@ -460,7 +467,7 @@ var MpoolReplaceCmd = &cli.Command{ if cctx.IsSet("fee-limit") { maxFee, err := types.ParseFIL(cctx.String("fee-limit")) if err != nil { - return fmt.Errorf("parsing max-spend: %w", err) + return xerrors.Errorf("parsing max-spend: %w", err) } mss = &lapi.MessageSendSpec{ MaxFee: abi.TokenAmount(maxFee), @@ -472,7 +479,7 @@ var MpoolReplaceCmd = &cli.Command{ msg.GasPremium = abi.NewTokenAmount(0) retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK) if err != nil { - return fmt.Errorf("failed to estimate gas values: %w", err) + return xerrors.Errorf("failed to estimate gas values: %w", err) } msg.GasPremium = big.Max(retm.GasPremium, minRBF) @@ -489,26 +496,26 @@ var MpoolReplaceCmd = &cli.Command{ } msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium")) if err != nil { - return fmt.Errorf("parsing gas-premium: %w", err) + return xerrors.Errorf("parsing gas-premium: %w", err) } // TODO: estimate fee cap here msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap")) if err != nil { - return fmt.Errorf("parsing gas-feecap: %w", err) + return xerrors.Errorf("parsing gas-feecap: %w", err) } } smsg, err := api.WalletSignMessage(ctx, msg.From, &msg) if err != nil { - return fmt.Errorf("failed to sign message: %w", err) + return xerrors.Errorf("failed to sign message: %w", err) } cid, err := api.MpoolPush(ctx, smsg) if err != nil { - return fmt.Errorf("failed to push new message to mempool: %w", err) + return xerrors.Errorf("failed to push new message to mempool: %w", err) } - fmt.Println("new message cid: ", cid) + afmt.Println("new message cid: ", cid) return nil }, } @@ -531,6 +538,8 @@ var MpoolFindCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -548,7 +557,7 @@ var MpoolFindCmd = &cli.Command{ if cctx.IsSet("to") { a, err := address.NewFromString(cctx.String("to")) if err != nil { - return fmt.Errorf("'to' address was invalid: %w", err) + return xerrors.Errorf("'to' address was invalid: %w", err) } toFilter = a @@ -557,7 +566,7 @@ var MpoolFindCmd = &cli.Command{ if cctx.IsSet("from") { a, err := address.NewFromString(cctx.String("from")) if err != nil { - return fmt.Errorf("'from' address was invalid: %w", err) + return xerrors.Errorf("'from' address was invalid: %w", err) } fromFilter = a @@ -591,7 +600,7 @@ var MpoolFindCmd = &cli.Command{ return err } - fmt.Println(string(b)) + afmt.Println(string(b)) return nil }, } @@ -605,6 +614,8 @@ var MpoolConfig = &cli.Command{ return cli.ShowCommandHelp(cctx, 
cctx.Command.Name) } + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -624,7 +635,7 @@ var MpoolConfig = &cli.Command{ return err } - fmt.Println(string(bytes)) + afmt.Println(string(bytes)) } else { cfg := new(types.MpoolConfig) bytes := []byte(cctx.Args().Get(0)) @@ -651,6 +662,8 @@ var MpoolGasPerfCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -717,7 +730,7 @@ var MpoolGasPerfCmd = &cli.Command{ gasReward := getGasReward(m) gasPerf := getGasPerf(gasReward, m.Message.GasLimit) - fmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf) + afmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf) } return nil diff --git a/cli/mpool_test.go b/cli/mpool_test.go new file mode 100644 index 00000000000..d9eef452ca6 --- /dev/null +++ b/cli/mpool_test.go @@ -0,0 +1,582 @@ +//stm: #cli +package cli + +import ( + "context" + "fmt" + "testing" + + "encoding/json" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestStat(t *testing.T) { + + t.Run("local", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // add blocks to the chain + first := mock.TipSet(mock.MkBlock(nil, 5, 4)) + head := mock.TipSet(mock.MkBlock(first, 15, 7)) + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + // mock actor to return for the sender + actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)} + + gomock.InOrder( + mockApi.EXPECT().ChainHead(ctx).Return(head, nil), + mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil), + mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr, toAddr}, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil), + ) + + //stm: @CLI_MEMPOOL_STAT_002 + err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1", "--local"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), "Nonce past: 1") + }) + + t.Run("all", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // add blocks to the chain + first := mock.TipSet(mock.MkBlock(nil, 5, 4)) + head := mock.TipSet(mock.MkBlock(first, 15, 7)) + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), 
types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + // mock actor to return for the sender + actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)} + + gomock.InOrder( + mockApi.EXPECT().ChainHead(ctx).Return(head, nil), + mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil), + ) + + //stm: @CLI_MEMPOOL_STAT_001 + err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), "Nonce past: 1") + }) +} + +func TestPending(t *testing.T) { + t.Run("all", func(t *testing.T) { + + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_PENDING_001 + err = app.Run([]string{"mpool", "pending", "--cids"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("local", func(t *testing.T) { + + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_PENDING_002 + err = app.Run([]string{"mpool", "pending", "--local"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("to", func(t *testing.T) { + + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_PENDING_003 + err = 
app.Run([]string{"mpool", "pending", "--to", sm.Message.To.String()}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("from", func(t *testing.T) { + + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_PENDING_004 + err = app.Run([]string{"mpool", "pending", "--from", sm.Message.From.String()}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + +} + +func TestReplace(t *testing.T) { + t.Run("manual", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil), + mockApi.EXPECT().ChainHead(ctx).Return(nil, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, &sm.Message).Return(sm, nil), + mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil), + ) + + //stm: @CLI_MEMPOOL_REPLACE_002 + err = app.Run([]string{"mpool", "replace", "--gas-premium", "1", "--gas-feecap", "100", sm.Cid().String()}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("auto", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + // gas fee param should be equal to the one passed in the cli invocation (used below) + maxFee := "1000000" + parsedFee, err := types.ParseFIL(maxFee) + if err != nil { + t.Fatal(err) + } + mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)} + + gomock.InOrder( + mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil), + mockApi.EXPECT().ChainHead(ctx).Return(nil, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + // use gomock.any to match the message in expected api calls + // since 
the replace function modifies the message between calls, it would be pointless to try to match the exact argument + mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil), + mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil), + mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil), + ) + + //stm: @CLI_MEMPOOL_REPLACE_002 + err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Cid().String()}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("sender / nonce", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + // gas fee param should be equal to the one passed in the cli invocation (used below) + maxFee := "1000000" + parsedFee, err := types.ParseFIL(maxFee) + if err != nil { + t.Fatal(err) + } + mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)} + + gomock.InOrder( + mockApi.EXPECT().ChainHead(ctx).Return(nil, nil), + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + // use gomock.any to match the message in expected api calls + // since the replace function modifies the message between calls, it would be pointless to try to match the exact argument + mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil), + mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil), + mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil), + ) + + //stm: @CLI_MEMPOOL_REPLACE_001 + err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Message.From.String(), fmt.Sprint(sm.Message.Nonce)}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) +} + +func TestFindMsg(t *testing.T) { + t.Run("from", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_FIND_001 + err = app.Run([]string{"mpool", "find", "--from", sm.Message.From.String()}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("to", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed 
message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_FIND_002 + err = app.Run([]string{"mpool", "find", "--to", sm.Message.To.String()}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) + + t.Run("method", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 1, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + ) + + //stm: @CLI_MEMPOOL_FIND_003 + err = app.Run([]string{"mpool", "find", "--method", sm.Message.Method.String()}) + + assert.NoError(t, err) + assert.Contains(t, buf.String(), sm.Cid().String()) + }) +} + +func TestGasPerf(t *testing.T) { + t.Run("all", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // add blocks to the chain + first := mock.TipSet(mock.MkBlock(nil, 5, 4)) + head := mock.TipSet(mock.MkBlock(first, 15, 7)) + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 13, w) + + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + mockApi.EXPECT().ChainHead(ctx).Return(head, nil), + ) + + //stm: @CLI_MEMPOOL_GAS_PERF_002 + err = app.Run([]string{"mpool", "gas-perf", "--all", "true"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Message.From.String()) + assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce)) + }) + + t.Run("local", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // add blocks to the chain + first := mock.TipSet(mock.MkBlock(nil, 5, 4)) + head := mock.TipSet(mock.MkBlock(first, 15, 7)) + + // create a signed message to be returned as a pending message + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + sm := mock.MkMessage(senderAddr, toAddr, 13, w) 
+ + gomock.InOrder( + mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), + mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil), + mockApi.EXPECT().ChainHead(ctx).Return(head, nil), + ) + + //stm: @CLI_MEMPOOL_GAS_PERF_001 + err = app.Run([]string{"mpool", "gas-perf"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), sm.Message.From.String()) + assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce)) + }) +} + +func TestConfig(t *testing.T) { + t.Run("get", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: 0.25} + gomock.InOrder( + mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil), + ) + + //stm: @CLI_MEMPOOL_CONFIG_001 + err = app.Run([]string{"mpool", "config"}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), mpoolCfg.PriorityAddrs[0].String()) + assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitHigh)) + assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitLow)) + assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.ReplaceByFeeRatio)) + }) + + t.Run("set", func(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: 0.33} + gomock.InOrder( + mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil), + ) + + bytes, err := json.Marshal(mpoolCfg) + if err != nil { + t.Fatal(err) + } + + //stm: @CLI_MEMPOOL_CONFIG_002 + err = app.Run([]string{"mpool", "config", string(bytes)}) + assert.NoError(t, err) + }) +} diff --git a/cli/sync.go b/cli/sync.go index c7b010111c3..0c498437942 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -33,6 +33,8 @@ var SyncStatusCmd = &cli.Command{ Name: "status", Usage: "check sync status", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + apic, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -45,9 +47,9 @@ var SyncStatusCmd = &cli.Command{ return err } - fmt.Println("sync status:") + afmt.Println("sync status:") for _, ss := range state.ActiveSyncs { - fmt.Printf("worker %d:\n", ss.WorkerID) + afmt.Printf("worker %d:\n", ss.WorkerID) var base, target []cid.Cid var heightDiff int64 var theight abi.ChainEpoch @@ -62,20 +64,20 @@ var SyncStatusCmd = &cli.Command{ } else { heightDiff = 0 } - fmt.Printf("\tBase:\t%s\n", base) - fmt.Printf("\tTarget:\t%s (%d)\n", target, theight) - fmt.Printf("\tHeight diff:\t%d\n", heightDiff) - fmt.Printf("\tStage: %s\n", ss.Stage) - fmt.Printf("\tHeight: %d\n", ss.Height) + afmt.Printf("\tBase:\t%s\n", base) + afmt.Printf("\tTarget:\t%s (%d)\n", target, theight) + afmt.Printf("\tHeight diff:\t%d\n", heightDiff) + afmt.Printf("\tStage: %s\n", ss.Stage) + afmt.Printf("\tHeight: %d\n", 
ss.Height) if ss.End.IsZero() { if !ss.Start.IsZero() { - fmt.Printf("\tElapsed: %s\n", time.Since(ss.Start)) + afmt.Printf("\tElapsed: %s\n", time.Since(ss.Start)) } } else { - fmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start)) + afmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start)) } if ss.Stage == api.StageSyncErrored { - fmt.Printf("\tError: %s\n", ss.Message) + afmt.Printf("\tError: %s\n", ss.Message) } } return nil @@ -168,6 +170,8 @@ var SyncCheckBadCmd = &cli.Command{ Usage: "check if the given block was marked bad, and for what reason", ArgsUsage: "[blockCid]", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + napi, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -190,11 +194,11 @@ var SyncCheckBadCmd = &cli.Command{ } if reason == "" { - fmt.Println("block was not marked as bad") + afmt.Println("block was not marked as bad") return nil } - fmt.Println(reason) + afmt.Println(reason) return nil }, } diff --git a/cli/sync_test.go b/cli/sync_test.go new file mode 100644 index 00000000000..90f20a029e0 --- /dev/null +++ b/cli/sync_test.go @@ -0,0 +1,189 @@ +package cli + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestSyncStatus(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncStatusCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0)) + ts2 := mock.TipSet(mock.MkBlock(ts1, 0, 0)) + + start := time.Now() + end := start.Add(time.Minute) + + state := &api.SyncState{ + ActiveSyncs: []api.ActiveSync{{ + WorkerID: 1, + Base: ts1, + Target: ts2, + Stage: api.StageMessages, + Height: abi.ChainEpoch(0), + Start: start, + End: end, + Message: "whatever", + }}, + VMApplied: 0, + } + + mockApi.EXPECT().SyncState(ctx).Return(state, nil) + + //stm: @CLI_SYNC_STATUS_001 + err := app.Run([]string{"sync", "status"}) + assert.NoError(t, err) + + out := buf.String() + + // output is plaintext, had to do string matching + assert.Contains(t, out, fmt.Sprintf("Base:\t[%s]", ts1.Blocks()[0].Cid().String())) + assert.Contains(t, out, fmt.Sprintf("Target:\t[%s]", ts2.Blocks()[0].Cid().String())) + assert.Contains(t, out, "Height diff:\t1") + assert.Contains(t, out, "Stage: message sync") + assert.Contains(t, out, "Height: 0") + assert.Contains(t, out, "Elapsed: 1m0s") +} + +func TestSyncMarkBad(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncMarkBadCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blk := mock.MkBlock(nil, 0, 0) + + mockApi.EXPECT().SyncMarkBad(ctx, blk.Cid()).Return(nil) + + //stm: @CLI_SYNC_MARK_BAD_001 + err := app.Run([]string{"sync", "mark-bad", blk.Cid().String()}) + assert.NoError(t, err) +} + +func TestSyncUnmarkBad(t *testing.T) { + t.Run("one-block", func(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blk := mock.MkBlock(nil, 0, 0) + + mockApi.EXPECT().SyncUnmarkBad(ctx, blk.Cid()).Return(nil) + + //stm: @CLI_SYNC_UNMARK_BAD_001 + err := 
app.Run([]string{"sync", "unmark-bad", blk.Cid().String()}) + assert.NoError(t, err) + }) + + t.Run("all", func(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockApi.EXPECT().SyncUnmarkAllBad(ctx).Return(nil) + + //stm: @CLI_SYNC_UNMARK_BAD_002 + err := app.Run([]string{"sync", "unmark-bad", "-all"}) + assert.NoError(t, err) + }) +} + +func TestSyncCheckBad(t *testing.T) { + t.Run("not-bad", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blk := mock.MkBlock(nil, 0, 0) + + mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return("", nil) + + //stm: @CLI_SYNC_CHECK_BAD_002 + err := app.Run([]string{"sync", "check-bad", blk.Cid().String()}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), "block was not marked as bad") + }) + + t.Run("bad", func(t *testing.T) { + app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blk := mock.MkBlock(nil, 0, 0) + reason := "whatever" + + mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return(reason, nil) + + //stm: @CLI_SYNC_CHECK_BAD_001 + err := app.Run([]string{"sync", "check-bad", blk.Cid().String()}) + assert.NoError(t, err) + + assert.Contains(t, buf.String(), reason) + }) +} + +func TestSyncCheckpoint(t *testing.T) { + t.Run("tipset", func(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blk := mock.MkBlock(nil, 0, 0) + ts := mock.TipSet(blk) + + gomock.InOrder( + mockApi.EXPECT().ChainGetBlock(ctx, blk.Cid()).Return(blk, nil), + mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil), + ) + + //stm: @CLI_SYNC_CHECKPOINT_001 + err := app.Run([]string{"sync", "checkpoint", blk.Cid().String()}) + assert.NoError(t, err) + }) + + t.Run("epoch", func(t *testing.T) { + app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd)) + defer done() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + epoch := abi.ChainEpoch(0) + blk := mock.MkBlock(nil, 0, 0) + ts := mock.TipSet(blk) + + gomock.InOrder( + mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(ts, nil), + mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil), + ) + + //stm: @CLI_SYNC_CHECKPOINT_002 + err := app.Run([]string{"sync", "checkpoint", fmt.Sprintf("-epoch=%d", epoch)}) + assert.NoError(t, err) + }) +} diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index b0e71b90e93..c6fdd5e32eb 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -276,6 +276,13 @@ var sealBenchCmd = &cli.Command{ if err != nil { return xerrors.Errorf("failed to run seals: %w", err) } + for _, s := range extendedSealedSectors { + sealedSectors = append(sealedSectors, proof.SectorInfo{ + SealedCID: s.SealedCID, + SectorNumber: s.SectorNumber, + SealProof: s.SealProof, + }) + } } else { // TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not) diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index ee15785fefb..f8c23a1c919 100644 --- 
a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -437,6 +437,7 @@ var provingCheckProvableCmd = &cli.Command{ } var tocheck []storage.SectorRef + var update []bool for _, info := range sectorInfos { si := abi.SectorID{ Miner: abi.ActorID(mid), @@ -454,9 +455,10 @@ var provingCheckProvableCmd = &cli.Command{ ProofType: info.SealProof, ID: si, }) + update = append(update, info.SectorKeyCID != nil) } - bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow")) + bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow")) if err != nil { return err } diff --git a/cmd/lotus-miner/sealing.go b/cmd/lotus-miner/sealing.go index 16b02f7bbe0..2f97c1e073c 100644 --- a/cmd/lotus-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -39,9 +39,12 @@ func barString(total, y, g float64) string { yBars := int(math.Round(y / total * barCols)) gBars := int(math.Round(g / total * barCols)) eBars := int(barCols) - yBars - gBars - return color.YellowString(strings.Repeat("|", yBars)) + - color.GreenString(strings.Repeat("|", gBars)) + - strings.Repeat(" ", eBars) + var barString = color.YellowString(strings.Repeat("|", yBars)) + + color.GreenString(strings.Repeat("|", gBars)) + if eBars >= 0 { + barString += strings.Repeat(" ", eBars) + } + return barString } var sealingWorkersCmd = &cli.Command{ diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index d8c3e9c7cc2..24098b5581e 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -1535,11 +1535,23 @@ var sectorsSnapAbortCmd = &cli.Command{ Name: "abort-upgrade", Usage: "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before", ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag if you know what you are doing", + }, + }, Action: func(cctx *cli.Context) error { if cctx.Args().Len() != 1 { return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number")) } + really := cctx.Bool("really-do-it") + if !really { + //nolint:golint + return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned") + } + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-shed/diff.go b/cmd/lotus-shed/diff.go index bcaa041227e..d576f73b4eb 100644 --- a/cmd/lotus-shed/diff.go +++ b/cmd/lotus-shed/diff.go @@ -35,7 +35,7 @@ var diffStateTrees = &cli.Command{ return xerrors.Errorf("expected two state-tree roots") } - argA := cctx.Args().Get(1) + argA := cctx.Args().Get(0) rootA, err := cid.Parse(argA) if err != nil { return xerrors.Errorf("first state-tree root (%q) is not a CID: %w", argA, err) diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go index 03be5f916e2..fb2598fda7c 100644 --- a/cmd/lotus-shed/verifreg.go +++ b/cmd/lotus-shed/verifreg.go @@ -1,8 +1,13 @@ package main import ( + "encoding/hex" "fmt" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/big" "github.com/urfave/cli/v2" @@ -35,6 +40,7 @@ var verifRegCmd = &cli.Command{ verifRegListClientsCmd, verifRegCheckClientCmd, verifRegCheckVerifierCmd, + verifRegRemoveVerifiedClientDataCapCmd, }, } @@ -409,3 +415,154 @@ var verifRegCheckVerifierCmd = &cli.Command{ return nil }, } + +var verifRegRemoveVerifiedClientDataCapCmd = &cli.Command{ + Name: "remove-verified-client-data-cap", + Usage: 
"Remove data cap from verified client", + ArgsUsage: " ", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 7 { + return fmt.Errorf("must specify seven arguments: sender, client, allowance to remove, verifier 1 address, verifier 1 signature, verifier 2 address, verifier 2 signature") + } + + srv, err := lcli.GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() + ctx := lcli.ReqContext(cctx) + + sender, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + client, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + verifier1Addr, err := address.NewFromString(cctx.Args().Get(3)) + if err != nil { + return err + } + + verifier1Sig, err := hex.DecodeString(cctx.Args().Get(4)) + if err != nil { + return err + } + + verifier2Addr, err := address.NewFromString(cctx.Args().Get(5)) + if err != nil { + return err + } + + verifier2Sig, err := hex.DecodeString(cctx.Args().Get(6)) + if err != nil { + return err + } + + var sig1 crypto.Signature + if err := sig1.UnmarshalBinary(verifier1Sig); err != nil { + return xerrors.Errorf("couldn't unmarshal sig: %w", err) + } + + var sig2 crypto.Signature + if err := sig2.UnmarshalBinary(verifier2Sig); err != nil { + return xerrors.Errorf("couldn't unmarshal sig: %w", err) + } + + params, err := actors.SerializeParams(&verifreg.RemoveDataCapParams{ + VerifiedClientToRemove: client, + DataCapAmountToRemove: allowanceToRemove, + VerifierRequest1: verifreg.RemoveDataCapRequest{ + Verifier: verifier1Addr, + VerifierSignature: sig1, + }, + VerifierRequest2: verifreg.RemoveDataCapRequest{ + Verifier: verifier2Addr, + VerifierSignature: sig2, + }, + }) + if err != nil { + return err + } + + vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK) + if err != nil { + return err + } + + vrkState, err := api.StateGetActor(ctx, vrk, types.EmptyTSK) + if err != nil { + return err + } + + apibs := blockstore.NewAPIBlockstore(api) + store := adt.WrapStore(ctx, cbor.NewCborStore(apibs)) + + st, err := multisig.Load(store, vrkState) + if err != nil { + return err + } + + signers, err := st.Signers() + if err != nil { + return err + } + + senderIsSigner := false + senderIdAddr, err := address.IDFromAddress(sender) + if err != nil { + return err + } + + for _, signer := range signers { + signerIdAddr, err := address.IDFromAddress(signer) + if err != nil { + return err + } + + if signerIdAddr == senderIdAddr { + senderIsSigner = true + } + } + + if !senderIsSigner { + return fmt.Errorf("sender must be a vrk signer") + } + + proto, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.RemoveVerifiedClientDataCap), params) + if err != nil { + return err + } + + sm, _, err := srv.PublishMessage(ctx, proto, false) + if err != nil { + return err + } + + msgCid := sm.Cid() + + fmt.Printf("message sent, now waiting on cid: %s\n", msgCid) + + mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) + if err != nil { + return err + } + + if mwait.Receipt.ExitCode != 0 { + return fmt.Errorf("failed to removed verified data cap: %d", mwait.Receipt.ExitCode) + } + + //TODO: Internal msg might still have failed + return nil + }, +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 813a0a9bd9f..f285ba74e63 100644 --- 
a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -119,9 +119,8 @@ var DaemonCmd = &cli.Command{ Usage: "halt the process after importing chain from file", }, &cli.BoolFlag{ - Name: "lite", - Usage: "start lotus in lite mode", - Hidden: true, + Name: "lite", + Usage: "start lotus in lite mode", }, &cli.StringFlag{ Name: "pprof", diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 7bacd0e5d63..a84f8984287 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -338,6 +338,9 @@ Inputs: "ProofType": 8 } ], + [ + true + ], true ] ``` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 848a9c8649a..f455aeacefb 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -1826,7 +1826,8 @@ USAGE: lotus-miner sectors abort-upgrade [command options] OPTIONS: - --help, -h show help (default: false) + --really-do-it pass this flag if you know what you are doing (default: false) + --help, -h show help (default: false) ``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index 50122dde445..b5bd8c4d3c6 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -63,6 +63,7 @@ OPTIONS: --import-chain value on first run, load chain from given file or url and validate --import-snapshot value import chain state from a given chain export file or url --halt-after-import halt the process after importing chain from file (default: false) + --lite start lotus in lite mode (default: false) --pprof value specify name of file for writing cpu profile to --profile value specify type of node --manage-fdlimit manage open file limit (default: true) @@ -1234,12 +1235,13 @@ USAGE: lotus filplus command [command options] [arguments...] COMMANDS: - grant-datacap give allowance to the specified verified client address - list-notaries list all notaries - list-clients list all verified clients - check-client-datacap check verified client remaining bytes - check-notary-datacap check a notary's remaining bytes - help, h Shows a list of commands or help for one command + grant-datacap give allowance to the specified verified client address + list-notaries list all notaries + list-clients list all verified clients + check-client-datacap check verified client remaining bytes + check-notary-datacap check a notary's remaining bytes + sign-remove-data-cap-proposal allows a notary to sign a Remove Data Cap Proposal + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1313,6 +1315,20 @@ OPTIONS: ``` +### lotus filplus sign-remove-data-cap-proposal +``` +NAME: + lotus filplus sign-remove-data-cap-proposal - allows a notary to sign a Remove Data Cap Proposal + +USAGE: + lotus filplus sign-remove-data-cap-proposal [command options] [arguments...] 
+ +OPTIONS: + --id value specify the RemoveDataCapProposal ID (will look up on chain if unspecified) (default: 0) + --help, -h show help (default: false) + +``` + ## lotus paych ``` NAME: @@ -1644,7 +1660,7 @@ NAME: lotus mpool replace - replace a message in the mempool USAGE: - lotus mpool replace [command options] | + lotus mpool replace [command options] | OPTIONS: --gas-feecap value gas feecap for new message (burn and pay to miner, attoFIL/GasUnit) diff --git a/extern/sector-storage/faults.go b/extern/sector-storage/faults.go index f7a764e5027..5c542055b48 100644 --- a/extern/sector-storage/faults.go +++ b/extern/sector-storage/faults.go @@ -19,11 +19,11 @@ import ( // FaultTracker TODO: Track things more actively type FaultTracker interface { - CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) } // CheckProvable returns unprovable sectors -func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { +func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) { var bad = make(map[abi.SectorID]string) ssize, err := pp.SectorSize() @@ -32,72 +32,76 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, } // TODO: More better checks - for _, sector := range sectors { + for i, sector := range sectors { err := func() error { ctx, cancel := context.WithCancel(ctx) defer cancel() + var fReplica string + var fCache string - locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone) - if err != nil { - return xerrors.Errorf("acquiring sector lock: %w", err) - } - - if !locked { - log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) - bad[sector.ID] = fmt.Sprint("can't acquire read lock") - return nil - } - - lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) - bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) - return nil - } - - // temporary hack to make the check work with snapdeals - // will go away in https://github.com/filecoin-project/lotus/pull/7971 - if lp.Sealed == "" || lp.Cache == "" { - // maybe it's update + if update[i] { lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone) if err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } - if lockedUpdate { - lp, _, err = m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) - bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) - return nil - } - lp.Sealed, lp.Cache = lp.Update, lp.UpdateCache + if !lockedUpdate { + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock on update replica", 
"sector", sector) + bad[sector.ID] = fmt.Sprint("can't acquire read lock") + return nil } + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: acquire sector update replica in checkProvable", "sector", sector, "error", err) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) + return nil + } + fReplica, fCache = lp.Update, lp.UpdateCache + } else { + locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone) + if err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + if !locked { + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) + bad[sector.ID] = fmt.Sprint("can't acquire read lock") + return nil + } + + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) + return nil + } + fReplica, fCache = lp.Sealed, lp.Cache + } - if lp.Sealed == "" || lp.Cache == "" { - log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache) - bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", lp.Cache, lp.Sealed) + if fReplica == "" || fCache == "" { + log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", fReplica, "cache", fCache) + bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", fCache, fReplica) return nil } toCheck := map[string]int64{ - lp.Sealed: 1, - filepath.Join(lp.Cache, "p_aux"): 0, + fReplica: 1, + filepath.Join(fCache, "p_aux"): 0, } - addCachePathsForSectorSize(toCheck, lp.Cache, ssize) + addCachePathsForSectorSize(toCheck, fCache, ssize) for p, sz := range toCheck { st, err := os.Stat(p) if err != nil { - log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err) + log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "err", err) bad[sector.ID] = fmt.Sprintf("%s", err) return nil } if sz != 0 { if st.Size() != int64(ssize)*sz { - log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) + log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) bad[sector.ID] = fmt.Sprintf("%s is wrong size (got %d, expect %d)", p, st.Size(), int64(ssize)*sz) return nil } @@ -118,14 +122,14 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sector.ID.Number, }) if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) bad[sector.ID] = fmt.Sprintf("generating fallback 
challenges: %s", err) return nil } commr, err := rg(ctx, sector.ID) if err != nil { - log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) bad[sector.ID] = fmt.Sprintf("getting commR: %s", err) return nil } @@ -136,12 +140,12 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, SectorNumber: sector.ID.Number, SealedCID: commr, }, - CacheDirPath: lp.Cache, + CacheDirPath: fCache, PoStProofType: wpp, - SealedSectorPath: lp.Sealed, + SealedSectorPath: fReplica, }, ch.Challenges[sector.ID.Number]) if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "err", err) + log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err) return nil } diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 897ba4f0611..28e0715591c 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -763,7 +763,7 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - log.Errorf("manager is doing replica update") + log.Debugf("manager is doing replica update") wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces) if err != nil { return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err) diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 77126517665..20abad309be 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -505,7 +505,7 @@ func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) erro return nil } -func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { +func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, ids []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) { bad := map[abi.SectorID]string{} for _, sid := range ids { diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 771a9a3a1a3..80fa8740834 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -294,6 +294,10 @@ func ftFromString(t string) (storiface.SectorFileType, error) { return storiface.FTSealed, nil case storiface.FTCache.String(): return storiface.FTCache, nil + case storiface.FTUpdate.String(): + return storiface.FTUpdate, nil + case storiface.FTUpdateCache.String(): + return storiface.FTUpdateCache, nil default: return 0, xerrors.Errorf("unknown sector file type: '%s'", t) } diff --git a/extern/storage-sealing/states_replica_update.go b/extern/storage-sealing/states_replica_update.go index 8683a11d84d..bede7a5fa81 100644 --- a/extern/storage-sealing/states_replica_update.go +++ b/extern/storage-sealing/states_replica_update.go @@ -168,7 +168,7 @@ func (m *Sealing) 
handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("no good address to send replica update message from: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) diff --git a/go.mod b/go.mod index e6bc500e111..77fdc21ef30 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/filecoin-project/specs-actors/v4 v4.0.1 github.com/filecoin-project/specs-actors/v5 v5.0.4 github.com/filecoin-project/specs-actors/v6 v6.0.1 - github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 + github.com/filecoin-project/specs-actors/v7 v7.0.0 github.com/filecoin-project/specs-storage v0.2.0 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.1 @@ -100,6 +100,7 @@ require ( github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.3.1 + github.com/ipfs/go-unixfsnode v1.2.0 github.com/ipfs/interface-go-ipfs-core v0.4.0 github.com/ipld/go-car v0.3.3 github.com/ipld/go-car/v2 v2.1.1 @@ -110,7 +111,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.18.0-rc4 + github.com/libp2p/go-libp2p v0.18.0-rc5 github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect github.com/libp2p/go-libp2p-core v0.14.0 github.com/libp2p/go-libp2p-discovery v0.6.0 @@ -122,7 +123,7 @@ require ( github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-resource-manager v0.1.4 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.10.1 + github.com/libp2p/go-libp2p-swarm v0.10.2 github.com/libp2p/go-libp2p-tls v0.3.1 github.com/libp2p/go-libp2p-yamux v0.8.2 github.com/libp2p/go-maddr-filter v0.1.0 diff --git a/go.sum b/go.sum index baebf8a6336..1597995c7dc 100644 --- a/go.sum +++ b/go.sum @@ -380,8 +380,8 @@ github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3 github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= -github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4= -github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= +github.com/filecoin-project/specs-actors/v7 v7.0.0 h1:FQN7tjt3o68hfb3qLFSJBoLMuOFY0REkFVLO/zXj8RU= +github.com/filecoin-project/specs-actors/v7 v7.0.0/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg= github.com/filecoin-project/specs-storage v0.2.0/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= 
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= @@ -995,8 +995,8 @@ github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8= -github.com/libp2p/go-libp2p v0.18.0-rc4 h1:OUsSbeu7q+Ck/bV9wHDxFzb08ORqBupHhpCmRBhWrJ8= -github.com/libp2p/go-libp2p v0.18.0-rc4/go.mod h1:wzmsk1ioOq9FGQys2BN5BIw4nugP6+R+CyW3JbPEbbs= +github.com/libp2p/go-libp2p v0.18.0-rc5 h1:88wWDHb9nNo0vBNCupLde3OTnFAkugOCNkrDfl3ivK4= +github.com/libp2p/go-libp2p v0.18.0-rc5/go.mod h1:aZPS5l84bDvCvP4jkyEUT/J6YOpUq33Fgqrs3K59mpI= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= @@ -1182,8 +1182,8 @@ github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkR github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= -github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM= -github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= +github.com/libp2p/go-libp2p-swarm v0.10.2 h1:UaXf+CTq6Ns1N2V1EgqJ9Q3xaRsiN7ImVlDMpirMAWw= +github.com/libp2p/go-libp2p-swarm v0.10.2/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1299,8 +1299,9 @@ github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyP github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= -github.com/libp2p/go-tcp-transport v0.5.0 h1:3ZPW8HAuyRAuFzyabE0hSrCXKKSWzROnZZX7DtcIatY= github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= +github.com/libp2p/go-tcp-transport v0.5.1 h1:edOOs688VLZAozWC7Kj5/6HHXKNwi9M6wgRmmLa8M6Q= +github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= diff --git a/lotuspond/front/package-lock.json b/lotuspond/front/package-lock.json index 252a42a6d4f..9bc04c8a38f 100644 --- a/lotuspond/front/package-lock.json +++ 
b/lotuspond/front/package-lock.json @@ -6630,7 +6630,7 @@ "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz", "integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=", "requires": { - "node-fetch": "^1.0.1", + "node-fetch": "^2.6.7", "whatwg-fetch": ">=0.10.0" } }, diff --git a/markets/utils/selectors.go b/markets/utils/selectors.go index 7d40ba6dd85..e1009d1ff90 100644 --- a/markets/utils/selectors.go +++ b/markets/utils/selectors.go @@ -11,6 +11,7 @@ import ( "github.com/ipfs/go-cid" mdagipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-unixfsnode" dagpb "github.com/ipld/go-codec-dagpb" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" @@ -62,6 +63,7 @@ func TraverseDag( return bytes.NewBuffer(node.RawData()), nil } + unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem) // this is how we pull the start node out of the DS startLink := cidlink.Link{Cid: startFrom} diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 7848c84f92d..1730b7a7a1b 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -1272,7 +1272,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet // // IF/WHEN this changes in the future we will have to be able to calculate // "old style" commP, and thus will need to introduce a version switch or similar - arbitraryProofType := abi.RegisteredSealProof_StackedDrg32GiBV1_1 + arbitraryProofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1 rdr, err := os.Open(inpath) if err != nil { diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 38a34dfe80e..5fb11eea7df 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -1127,7 +1127,7 @@ func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error return backup(ctx, sm.DS, fpath) } -func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) { +func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) { var rg storiface.RGetter if expensive { rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) { @@ -1143,7 +1143,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP } } - bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, rg) + bad, err := sm.StorageMgr.CheckProvable(ctx, pp, sectors, update, rg) if err != nil { return nil, err } diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 6a86656c732..8b5ee39d306 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -206,6 +206,7 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B sectors := make(map[abi.SectorNumber]struct{}) var tocheck []storage.SectorRef + var update []bool for _, info := range sectorInfos { sectors[info.SectorNumber] = struct{}{} tocheck = append(tocheck, storage.SectorRef{ @@ -215,9 +216,10 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B Number: info.SectorNumber, }, }) + update = append(update, info.SectorKeyCID != nil) } - bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, nil) + bad, err := s.faultTracker.CheckProvable(ctx, s.proofType, tocheck, update, nil) if err != nil { return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) } diff --git 
a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index f3ea5836bcf..41ce5a2e90c 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -168,7 +168,7 @@ func (m mockVerif) GenerateWinningPoStSectorChallenge(context.Context, abi.Regis type mockFaultTracker struct { } -func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { +func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) { // Returns "bad" sectors so just return empty map meaning all sectors are good return map[abi.SectorID]string{}, nil }
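
A minimal, self-contained sketch of the caller-side pattern the `FaultTracker.CheckProvable` change above expects: callers (as in `cmd/lotus-miner/proving.go` and `storage/wdpost_run.go`) build an `update []bool` slice in lockstep with `tocheck`, flagging sectors whose sector key CID is set so that their snap-deal replica/update files are checked instead of the sealed/cache files. The `SectorRef`, `SectorInfo`, `checker`, and `fakeChecker` names below are illustrative stand-ins, not the real lotus types.

```go
// Sketch (assumed stand-in types): demonstrates building the parallel
// tocheck/update slices that the revised CheckProvable signature takes.
package main

import (
	"context"
	"fmt"
)

// SectorRef is a stand-in for storage.SectorRef.
type SectorRef struct{ Number uint64 }

// SectorInfo is a stand-in for the on-chain sector info; a non-nil
// SectorKeyCID marks a sector that was upgraded via snap deals.
type SectorInfo struct {
	Number       uint64
	SectorKeyCID *string
}

// checker mirrors the updated CheckProvable shape: sectors[i] and update[i]
// describe the same sector, so both slices must be built together.
type checker interface {
	CheckProvable(ctx context.Context, sectors []SectorRef, update []bool) (map[uint64]string, error)
}

func checkSectors(ctx context.Context, c checker, infos []SectorInfo) (map[uint64]string, error) {
	var tocheck []SectorRef
	var update []bool
	for _, info := range infos {
		tocheck = append(tocheck, SectorRef{Number: info.Number})
		// Snap-deal sectors get their replica/update files checked.
		update = append(update, info.SectorKeyCID != nil)
	}
	return c.CheckProvable(ctx, tocheck, update)
}

// fakeChecker just reports which sectors would be treated as updated.
type fakeChecker struct{}

func (fakeChecker) CheckProvable(_ context.Context, sectors []SectorRef, update []bool) (map[uint64]string, error) {
	bad := map[uint64]string{}
	for i, s := range sectors {
		fmt.Printf("sector %d, snap-deal replica: %v\n", s.Number, update[i])
	}
	return bad, nil
}

func main() {
	key := "bagboea..." // placeholder sector key CID, only its presence matters here
	infos := []SectorInfo{{Number: 1}, {Number: 2, SectorKeyCID: &key}}
	if _, err := checkSectors(context.Background(), fakeChecker{}, infos); err != nil {
		panic(err)
	}
}
```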