diff --git a/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md b/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md new file mode 100644 index 0000000000..18bf27ad01 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/algorand-engineering-team-issue-template.md @@ -0,0 +1,18 @@ +--- +name: Algorand Engineering Team Issue Template +about: This is the template that Algorand internal team members use in conjunction + with their task management process. Feel free to use if you're an external contributor. +title: '' +labels: '' +assignees: '' + +--- + +## Summary +*Describe the problem identified or the general goal of this issue* + +## Scope/Requirements +*What's involved in this issue? What's required to achieve the goal?* + +## Urgency/Relative Priority +*How urgent is this issue? What are the timing considerations to take into account?* diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index c709eff1c4..6ed88b6ac6 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,9 +1,12 @@ --- -name: '🐜 Bug report' -about: 'Report a reproducible bug.' +name: "\U0001F41C Bug report" +about: Report a reproducible bug. 
title: '' -labels: 'new-bug' +labels: new-bug +assignees: '' + --- + (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// CompactOneTimeSignature +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// Participant +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// Reveal +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// coinChoice +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// +// sigslotCommit +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// + +// MarshalMsg implements msgp.Marshaler +func (z *Cert) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0005Len := uint32(5) + var zb0005Mask uint8 /* 6 bits */ + if len((*z).PartProofs) == 0 { + zb0005Len-- + zb0005Mask |= 0x1 + } + if len((*z).SigProofs) == 0 { + zb0005Len-- + zb0005Mask |= 0x2 + } + if (*z).SigCommit.MsgIsZero() { + zb0005Len-- + zb0005Mask |= 0x8 + } + if len((*z).Reveals) == 0 { + zb0005Len-- + zb0005Mask |= 0x10 + } + if (*z).SignedWeight == 0 { + zb0005Len-- + zb0005Mask |= 0x20 + } + // variable map header, size zb0005Len + o = append(o, 0x80|uint8(zb0005Len)) + if zb0005Len != 0 { + if (zb0005Mask & 0x1) == 0 { // if not empty + // string "P" + o = append(o, 0xa1, 0x50) + if (*z).PartProofs == nil { + o = 
msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).PartProofs))) + } + for zb0002 := range (*z).PartProofs { + o, err = (*z).PartProofs[zb0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PartProofs", zb0002) + return + } + } + } + if (zb0005Mask & 0x2) == 0 { // if not empty + // string "S" + o = append(o, 0xa1, 0x53) + if (*z).SigProofs == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).SigProofs))) + } + for zb0001 := range (*z).SigProofs { + o, err = (*z).SigProofs[zb0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SigProofs", zb0001) + return + } + } + } + if (zb0005Mask & 0x8) == 0 { // if not empty + // string "c" + o = append(o, 0xa1, 0x63) + o, err = (*z).SigCommit.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SigCommit") + return + } + } + if (zb0005Mask & 0x10) == 0 { // if not empty + // string "r" + o = append(o, 0xa1, 0x72) + if (*z).Reveals == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendMapHeader(o, uint32(len((*z).Reveals))) + } + zb0003_keys := make([]uint64, 0, len((*z).Reveals)) + for zb0003 := range (*z).Reveals { + zb0003_keys = append(zb0003_keys, zb0003) + } + sort.Sort(SortUint64(zb0003_keys)) + for _, zb0003 := range zb0003_keys { + zb0004 := (*z).Reveals[zb0003] + _ = zb0004 + o = msgp.AppendUint64(o, zb0003) + o, err = zb0004.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Reveals", zb0003) + return + } + } + } + if (zb0005Mask & 0x20) == 0 { // if not empty + // string "w" + o = append(o, 0xa1, 0x77) + o = msgp.AppendUint64(o, (*z).SignedWeight) + } + } + return +} + +func (_ *Cert) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*Cert) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Cert) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0005 int + var zb0006 bool + zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, 
ok := err.(msgp.TypeError); ok { + zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).SigCommit.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigCommit") + return + } + } + if zb0005 > 0 { + zb0005-- + (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SignedWeight") + return + } + } + if zb0005 > 0 { + zb0005-- + var zb0007 int + var zb0008 bool + zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigProofs") + return + } + if zb0007 > maxProofDigests { + err = msgp.ErrOverflow(uint64(zb0007), uint64(maxProofDigests)) + err = msgp.WrapError(err, "struct-from-array", "SigProofs") + return + } + if zb0008 { + (*z).SigProofs = nil + } else if (*z).SigProofs != nil && cap((*z).SigProofs) >= zb0007 { + (*z).SigProofs = ((*z).SigProofs)[:zb0007] + } else { + (*z).SigProofs = make([]crypto.Digest, zb0007) + } + for zb0001 := range (*z).SigProofs { + bts, err = (*z).SigProofs[zb0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigProofs", zb0001) + return + } + } + } + if zb0005 > 0 { + zb0005-- + var zb0009 int + var zb0010 bool + zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PartProofs") + return + } + if zb0009 > maxProofDigests { + err = msgp.ErrOverflow(uint64(zb0009), uint64(maxProofDigests)) + err = msgp.WrapError(err, "struct-from-array", "PartProofs") + return + } + if zb0010 { + (*z).PartProofs = nil + } else if (*z).PartProofs != nil && cap((*z).PartProofs) >= zb0009 { + (*z).PartProofs = ((*z).PartProofs)[:zb0009] + } else { + (*z).PartProofs = make([]crypto.Digest, zb0009) + } + for zb0002 := range (*z).PartProofs { + bts, err = 
(*z).PartProofs[zb0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PartProofs", zb0002) + return + } + } + } + if zb0005 > 0 { + zb0005-- + var zb0011 int + var zb0012 bool + zb0011, zb0012, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Reveals") + return + } + if zb0011 > maxReveals { + err = msgp.ErrOverflow(uint64(zb0011), uint64(maxReveals)) + err = msgp.WrapError(err, "struct-from-array", "Reveals") + return + } + if zb0012 { + (*z).Reveals = nil + } else if (*z).Reveals == nil { + (*z).Reveals = make(map[uint64]Reveal, zb0011) + } + for zb0011 > 0 { + var zb0003 uint64 + var zb0004 Reveal + zb0011-- + zb0003, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Reveals") + return + } + bts, err = zb0004.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Reveals", zb0003) + return + } + (*z).Reveals[zb0003] = zb0004 + } + } + if zb0005 > 0 { + err = msgp.ErrTooManyArrayFields(zb0005) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0006 { + (*z) = Cert{} + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "c": + bts, err = (*z).SigCommit.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SigCommit") + return + } + case "w": + (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SignedWeight") + return + } + case "S": + var zb0013 int + var zb0014 bool + zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SigProofs") + return + } + if zb0013 > maxProofDigests { + err = msgp.ErrOverflow(uint64(zb0013), uint64(maxProofDigests)) + err = 
msgp.WrapError(err, "SigProofs") + return + } + if zb0014 { + (*z).SigProofs = nil + } else if (*z).SigProofs != nil && cap((*z).SigProofs) >= zb0013 { + (*z).SigProofs = ((*z).SigProofs)[:zb0013] + } else { + (*z).SigProofs = make([]crypto.Digest, zb0013) + } + for zb0001 := range (*z).SigProofs { + bts, err = (*z).SigProofs[zb0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SigProofs", zb0001) + return + } + } + case "P": + var zb0015 int + var zb0016 bool + zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartProofs") + return + } + if zb0015 > maxProofDigests { + err = msgp.ErrOverflow(uint64(zb0015), uint64(maxProofDigests)) + err = msgp.WrapError(err, "PartProofs") + return + } + if zb0016 { + (*z).PartProofs = nil + } else if (*z).PartProofs != nil && cap((*z).PartProofs) >= zb0015 { + (*z).PartProofs = ((*z).PartProofs)[:zb0015] + } else { + (*z).PartProofs = make([]crypto.Digest, zb0015) + } + for zb0002 := range (*z).PartProofs { + bts, err = (*z).PartProofs[zb0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PartProofs", zb0002) + return + } + } + case "r": + var zb0017 int + var zb0018 bool + zb0017, zb0018, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Reveals") + return + } + if zb0017 > maxReveals { + err = msgp.ErrOverflow(uint64(zb0017), uint64(maxReveals)) + err = msgp.WrapError(err, "Reveals") + return + } + if zb0018 { + (*z).Reveals = nil + } else if (*z).Reveals == nil { + (*z).Reveals = make(map[uint64]Reveal, zb0017) + } + for zb0017 > 0 { + var zb0003 uint64 + var zb0004 Reveal + zb0017-- + zb0003, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Reveals") + return + } + bts, err = zb0004.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Reveals", zb0003) + return + } + (*z).Reveals[zb0003] = zb0004 + } + default: + err = msgp.ErrNoField(string(field)) + 
if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *Cert) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*Cert) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Cert) Msgsize() (s int) { + s = 1 + 2 + (*z).SigCommit.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.ArrayHeaderSize + for zb0001 := range (*z).SigProofs { + s += (*z).SigProofs[zb0001].Msgsize() + } + s += 2 + msgp.ArrayHeaderSize + for zb0002 := range (*z).PartProofs { + s += (*z).PartProofs[zb0002].Msgsize() + } + s += 2 + msgp.MapHeaderSize + if (*z).Reveals != nil { + for zb0003, zb0004 := range (*z).Reveals { + _ = zb0003 + _ = zb0004 + s += 0 + msgp.Uint64Size + zb0004.Msgsize() + } + } + return +} + +// MsgIsZero returns whether this is a zero value +func (z *Cert) MsgIsZero() bool { + return ((*z).SigCommit.MsgIsZero()) && ((*z).SignedWeight == 0) && (len((*z).SigProofs) == 0) && (len((*z).PartProofs) == 0) && (len((*z).Reveals) == 0) +} + +// MarshalMsg implements msgp.Marshaler +func (z *CompactOneTimeSignature) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 8 bits */ + if (*z).OneTimeSignature.PK.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).OneTimeSignature.PK1Sig.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).OneTimeSignature.PK2.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).OneTimeSignature.PK2Sig.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + if (*z).OneTimeSignature.PKSigOld.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x40 + } + if (*z).OneTimeSignature.Sig.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x80 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "p" + o = 
append(o, 0xa1, 0x70) + o, err = (*z).OneTimeSignature.PK.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "p1s" + o = append(o, 0xa3, 0x70, 0x31, 0x73) + o, err = (*z).OneTimeSignature.PK1Sig.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PK1Sig") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "p2" + o = append(o, 0xa2, 0x70, 0x32) + o, err = (*z).OneTimeSignature.PK2.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PK2") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "p2s" + o = append(o, 0xa3, 0x70, 0x32, 0x73) + o, err = (*z).OneTimeSignature.PK2Sig.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PK2Sig") + return + } + } + if (zb0001Mask & 0x40) == 0 { // if not empty + // string "ps" + o = append(o, 0xa2, 0x70, 0x73) + o, err = (*z).OneTimeSignature.PKSigOld.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PKSigOld") + return + } + } + if (zb0001Mask & 0x80) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + o, err = (*z).OneTimeSignature.Sig.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + } + } + return +} + +func (_ *CompactOneTimeSignature) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*CompactOneTimeSignature) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *CompactOneTimeSignature) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sig") + return 
+ } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.PK.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.PKSigOld.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PKSigOld") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.PK2.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.PK1Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK1Sig") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OneTimeSignature.PK2Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK2Sig") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = CompactOneTimeSignature{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "s": + bts, err = (*z).OneTimeSignature.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + case "p": + bts, err = (*z).OneTimeSignature.PK.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + case "ps": + bts, err = (*z).OneTimeSignature.PKSigOld.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PKSigOld") + return + } + case "p2": + bts, err = (*z).OneTimeSignature.PK2.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PK2") + return + } + case "p1s": + bts, err = (*z).OneTimeSignature.PK1Sig.UnmarshalMsg(bts) + if err != nil { + err = 
msgp.WrapError(err, "PK1Sig") + return + } + case "p2s": + bts, err = (*z).OneTimeSignature.PK2Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PK2Sig") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *CompactOneTimeSignature) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*CompactOneTimeSignature) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CompactOneTimeSignature) Msgsize() (s int) { + s = 1 + 2 + (*z).OneTimeSignature.Sig.Msgsize() + 2 + (*z).OneTimeSignature.PK.Msgsize() + 3 + (*z).OneTimeSignature.PKSigOld.Msgsize() + 3 + (*z).OneTimeSignature.PK2.Msgsize() + 4 + (*z).OneTimeSignature.PK1Sig.Msgsize() + 4 + (*z).OneTimeSignature.PK2Sig.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *CompactOneTimeSignature) MsgIsZero() bool { + return ((*z).OneTimeSignature.Sig.MsgIsZero()) && ((*z).OneTimeSignature.PK.MsgIsZero()) && ((*z).OneTimeSignature.PKSigOld.MsgIsZero()) && ((*z).OneTimeSignature.PK2.MsgIsZero()) && ((*z).OneTimeSignature.PK1Sig.MsgIsZero()) && ((*z).OneTimeSignature.PK2Sig.MsgIsZero()) +} + +// MarshalMsg implements msgp.Marshaler +func (z *Participant) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(3) + var zb0001Mask uint8 /* 4 bits */ + if (*z).KeyDilution == 0 { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).PK.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).Weight == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "d" + o = append(o, 0xa1, 0x64) + o = msgp.AppendUint64(o, (*z).KeyDilution) + } + if (zb0001Mask & 0x4) == 0 { 
// if not empty + // string "p" + o = append(o, 0xa1, 0x70) + o, err = (*z).PK.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "w" + o = append(o, 0xa1, 0x77) + o = msgp.AppendUint64(o, (*z).Weight) + } + } + return +} + +func (_ *Participant) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*Participant) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Participant) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).PK.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "PK") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Weight") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "KeyDilution") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = Participant{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "p": + bts, err = (*z).PK.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PK") + return + } + case "w": + (*z).Weight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Weight") + return + 
} + case "d": + (*z).KeyDilution, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "KeyDilution") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *Participant) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*Participant) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Participant) Msgsize() (s int) { + s = 1 + 2 + (*z).PK.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Uint64Size + return +} + +// MsgIsZero returns whether this is a zero value +func (z *Participant) MsgIsZero() bool { + return ((*z).PK.MsgIsZero()) && ((*z).Weight == 0) && ((*z).KeyDilution == 0) +} + +// MarshalMsg implements msgp.Marshaler +func (z *Reveal) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).Part.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if ((*z).SigSlot.Sig.MsgIsZero()) && ((*z).SigSlot.L == 0) { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "p" + o = append(o, 0xa1, 0x70) + o, err = (*z).Part.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Part") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + // omitempty: check for empty values + zb0002Len := uint32(2) + var zb0002Mask uint8 /* 3 bits */ + if (*z).SigSlot.L == 0 { + zb0002Len-- + zb0002Mask |= 0x2 + } + if (*z).SigSlot.Sig.MsgIsZero() { + zb0002Len-- + zb0002Mask |= 0x4 + } + // variable map header, size zb0002Len + o = append(o, 0x80|uint8(zb0002Len)) + if (zb0002Mask & 0x2) == 0 { // if not empty + // string "l" + o = 
append(o, 0xa1, 0x6c) + o = msgp.AppendUint64(o, (*z).SigSlot.L) + } + if (zb0002Mask & 0x4) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + o, err = (*z).SigSlot.Sig.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "Sig") + return + } + } + } + } + return +} + +func (_ *Reveal) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*Reveal) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Reveal) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + var zb0003 int + var zb0004 bool + zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot") + return + } + if zb0003 > 0 { + zb0003-- + bts, err = (*z).SigSlot.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot", "struct-from-array", "Sig") + return + } + } + if zb0003 > 0 { + zb0003-- + (*z).SigSlot.L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot", "struct-from-array", "L") + return + } + } + if zb0003 > 0 { + err = msgp.ErrTooManyArrayFields(zb0003) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot", "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot") + return + } + if zb0004 { + (*z).SigSlot = sigslotCommit{} + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot") + 
return + } + switch string(field) { + case "s": + bts, err = (*z).SigSlot.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot", "Sig") + return + } + case "l": + (*z).SigSlot.L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot", "L") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SigSlot") + return + } + } + } + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Part.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Part") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = Reveal{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "s": + var zb0005 int + var zb0006 bool + zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot") + return + } + if zb0005 > 0 { + zb0005-- + bts, err = (*z).SigSlot.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "struct-from-array", "Sig") + return + } + } + if zb0005 > 0 { + zb0005-- + (*z).SigSlot.L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "struct-from-array", "L") + return + } + } + if zb0005 > 0 { + err = msgp.ErrTooManyArrayFields(zb0005) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err, "SigSlot") + return + } + if zb0006 { + (*z).SigSlot = 
sigslotCommit{} + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot") + return + } + switch string(field) { + case "s": + bts, err = (*z).SigSlot.Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "Sig") + return + } + case "l": + (*z).SigSlot.L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SigSlot", "L") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err, "SigSlot") + return + } + } + } + } + case "p": + bts, err = (*z).Part.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Part") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *Reveal) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*Reveal) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Reveal) Msgsize() (s int) { + s = 1 + 2 + 1 + 2 + (*z).SigSlot.Sig.Msgsize() + 2 + msgp.Uint64Size + 2 + (*z).Part.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *Reveal) MsgIsZero() bool { + return (((*z).SigSlot.Sig.MsgIsZero()) && ((*z).SigSlot.L == 0)) && ((*z).Part.MsgIsZero()) +} + +// MarshalMsg implements msgp.Marshaler +func (z *coinChoice) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(6) + var zb0001Mask uint8 /* 7 bits */ + if (*z).J == 0 { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).MsgHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).Partcom.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).ProvenWeight == 0 { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).Sigcom.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + if 
(*z).SignedWeight == 0 { + zb0001Len-- + zb0001Mask |= 0x40 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "j" + o = append(o, 0xa1, 0x6a) + o = msgp.AppendUint64(o, (*z).J) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "msghash" + o = append(o, 0xa7, 0x6d, 0x73, 0x67, 0x68, 0x61, 0x73, 0x68) + o, err = (*z).MsgHash.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "MsgHash") + return + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "partcom" + o = append(o, 0xa7, 0x70, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6d) + o, err = (*z).Partcom.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Partcom") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "provenweight" + o = append(o, 0xac, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x6e, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74) + o = msgp.AppendUint64(o, (*z).ProvenWeight) + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "sigcom" + o = append(o, 0xa6, 0x73, 0x69, 0x67, 0x63, 0x6f, 0x6d) + o, err = (*z).Sigcom.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Sigcom") + return + } + } + if (zb0001Mask & 0x40) == 0 { // if not empty + // string "sigweight" + o = append(o, 0xa9, 0x73, 0x69, 0x67, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74) + o = msgp.AppendUint64(o, (*z).SignedWeight) + } + } + return +} + +func (_ *coinChoice) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*coinChoice) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *coinChoice) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + 
(*z).J, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "J") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "SignedWeight") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "ProvenWeight") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Sigcom.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sigcom") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Partcom.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Partcom") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).MsgHash.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "MsgHash") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = coinChoice{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "j": + (*z).J, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "J") + return + } + case "sigweight": + (*z).SignedWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SignedWeight") + return + } + case "provenweight": + (*z).ProvenWeight, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ProvenWeight") + return + } + case "sigcom": + bts, err = (*z).Sigcom.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Sigcom") + return + } + case 
"partcom": + bts, err = (*z).Partcom.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Partcom") + return + } + case "msghash": + bts, err = (*z).MsgHash.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "MsgHash") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *coinChoice) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*coinChoice) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *coinChoice) Msgsize() (s int) { + s = 1 + 2 + msgp.Uint64Size + 10 + msgp.Uint64Size + 13 + msgp.Uint64Size + 7 + (*z).Sigcom.Msgsize() + 8 + (*z).Partcom.Msgsize() + 8 + (*z).MsgHash.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *coinChoice) MsgIsZero() bool { + return ((*z).J == 0) && ((*z).SignedWeight == 0) && ((*z).ProvenWeight == 0) && ((*z).Sigcom.MsgIsZero()) && ((*z).Partcom.MsgIsZero()) && ((*z).MsgHash.MsgIsZero()) +} + +// MarshalMsg implements msgp.Marshaler +func (z *sigslotCommit) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).L == 0 { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Sig.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "l" + o = append(o, 0xa1, 0x6c) + o = msgp.AppendUint64(o, (*z).L) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "s" + o = append(o, 0xa1, 0x73) + o, err = (*z).Sig.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + } + } + return +} + +func (_ *sigslotCommit) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*sigslotCommit) 
+ return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *sigslotCommit) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Sig") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "L") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = sigslotCommit{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "s": + bts, err = (*z).Sig.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Sig") + return + } + case "l": + (*z).L, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "L") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *sigslotCommit) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*sigslotCommit) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *sigslotCommit) Msgsize() (s int) { + s = 1 + 2 + (*z).Sig.Msgsize() + 2 + msgp.Uint64Size + return +} + +// MsgIsZero returns whether this is a zero value +func (z *sigslotCommit) MsgIsZero() bool { + return 
((*z).Sig.MsgIsZero()) && ((*z).L == 0) +} diff --git a/crypto/compactcert/msgp_gen_test.go b/crypto/compactcert/msgp_gen_test.go new file mode 100644 index 0000000000..44a6976024 --- /dev/null +++ b/crypto/compactcert/msgp_gen_test.go @@ -0,0 +1,384 @@ +// +build !skip_msgp_testing + +package compactcert + +// Code generated by github.com/algorand/msgp DO NOT EDIT. + +import ( + "testing" + + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/msgp/msgp" +) + +func TestMarshalUnmarshalCert(t *testing.T) { + v := Cert{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingCert(t *testing.T) { + protocol.RunEncodingTest(t, &Cert{}) +} + +func BenchmarkMarshalMsgCert(b *testing.B) { + v := Cert{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgCert(b *testing.B) { + v := Cert{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalCert(b *testing.B) { + v := Cert{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalCompactOneTimeSignature(t *testing.T) { + v := CompactOneTimeSignature{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + 
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingCompactOneTimeSignature(t *testing.T) { + protocol.RunEncodingTest(t, &CompactOneTimeSignature{}) +} + +func BenchmarkMarshalMsgCompactOneTimeSignature(b *testing.B) { + v := CompactOneTimeSignature{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgCompactOneTimeSignature(b *testing.B) { + v := CompactOneTimeSignature{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalCompactOneTimeSignature(b *testing.B) { + v := CompactOneTimeSignature{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalParticipant(t *testing.T) { + v := Participant{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingParticipant(t *testing.T) { + protocol.RunEncodingTest(t, &Participant{}) +} + +func BenchmarkMarshalMsgParticipant(b *testing.B) { + v := Participant{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgParticipant(b *testing.B) { + v := 
Participant{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalParticipant(b *testing.B) { + v := Participant{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalReveal(t *testing.T) { + v := Reveal{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingReveal(t *testing.T) { + protocol.RunEncodingTest(t, &Reveal{}) +} + +func BenchmarkMarshalMsgReveal(b *testing.B) { + v := Reveal{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgReveal(b *testing.B) { + v := Reveal{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalReveal(b *testing.B) { + v := Reveal{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalcoinChoice(t *testing.T) { + v := coinChoice{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { 
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingcoinChoice(t *testing.T) { + protocol.RunEncodingTest(t, &coinChoice{}) +} + +func BenchmarkMarshalMsgcoinChoice(b *testing.B) { + v := coinChoice{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgcoinChoice(b *testing.B) { + v := coinChoice{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalcoinChoice(b *testing.B) { + v := coinChoice{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalsigslotCommit(t *testing.T) { + v := sigslotCommit{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingsigslotCommit(t *testing.T) { + protocol.RunEncodingTest(t, &sigslotCommit{}) +} + +func BenchmarkMarshalMsgsigslotCommit(b *testing.B) { + v := sigslotCommit{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgsigslotCommit(b *testing.B) { + v := sigslotCommit{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + 
b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalsigslotCommit(b *testing.B) { + v := sigslotCommit{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/crypto/compactcert/structs.go b/crypto/compactcert/structs.go new file mode 100644 index 0000000000..37f756073b --- /dev/null +++ b/crypto/compactcert/structs.go @@ -0,0 +1,115 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package compactcert + +import ( + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" +) + +// Params defines common parameters for the verifier and builder. 
+type Params struct { + Msg crypto.Hashable // Message to be cerified + ProvenWeight uint64 // Weight threshold proven by the certificate + SigRound basics.Round // Ephemeral signature round to expect + SecKQ uint64 // Security parameter (k+q) from analysis document +} + +// A Participant corresponds to an account whose AccountData.Status +// is Online, and for which the expected sigRound satisfies +// AccountData.VoteFirstValid <= sigRound <= AccountData.VoteLastValid. +// +// In the Algorand ledger, it is possible for multiple accounts to have +// the same PK. Thus, the PK is not necessarily unique among Participants. +// However, each account will produce a unique Participant struct, to avoid +// potential DoS attacks where one account claims to have the same VoteID PK +// as another account. +type Participant struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // PK is AccountData.VoteID. + PK crypto.OneTimeSignatureVerifier `codec:"p"` + + // Weight is AccountData.MicroAlgos. + Weight uint64 `codec:"w"` + + // KeyDilution is AccountData.KeyDilution() with the protocol for sigRound + // as expected by the Builder. + KeyDilution uint64 `codec:"d"` +} + +// ToBeHashed implements the crypto.Hashable interface. +func (p Participant) ToBeHashed() (protocol.HashID, []byte) { + return protocol.CompactCertPart, protocol.Encode(&p) +} + +// CompactOneTimeSignature is crypto.OneTimeSignature with omitempty +type CompactOneTimeSignature struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + crypto.OneTimeSignature +} + +// A sigslotCommit is a single slot in the sigs array that forms the certificate. +type sigslotCommit struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + // Sig is a signature by the participant on the expected message. + Sig CompactOneTimeSignature `codec:"s"` + + // L is the total weight of signatures in lower-numbered slots. 
+ // This is initialized once the builder has collected a sufficient + // number of signatures. + L uint64 `codec:"l"` +} + +func (ssc sigslotCommit) ToBeHashed() (protocol.HashID, []byte) { + return protocol.CompactCertSig, protocol.Encode(&ssc) +} + +// Reveal is a single array position revealed as part of a compact +// certificate. It reveals an element of the signature array and +// the corresponding element of the participants array. +type Reveal struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + SigSlot sigslotCommit `codec:"s"` + Part Participant `codec:"p"` +} + +// maxReveals is a bound on allocation and on numReveals to limit log computation +const maxReveals = 1024 +const maxProofDigests = 20 * maxReveals + +// Cert represents a compact certificate. +type Cert struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + SigCommit crypto.Digest `codec:"c"` + SignedWeight uint64 `codec:"w"` + SigProofs []crypto.Digest `codec:"S,allocbound=maxProofDigests"` + PartProofs []crypto.Digest `codec:"P,allocbound=maxProofDigests"` + + // Reveals is a sparse map from the position being revealed + // to the corresponding elements from the sigs and participants + // arrays. + Reveals map[uint64]Reveal `codec:"r,allocbound=maxReveals"` +} + +// SortUint64 implements sorting by uint64 keys for +// canonical encoding of maps in msgpack format. +type SortUint64 = basics.SortUint64 diff --git a/crypto/compactcert/verifier.go b/crypto/compactcert/verifier.go new file mode 100644 index 0000000000..a153ca5ed0 --- /dev/null +++ b/crypto/compactcert/verifier.go @@ -0,0 +1,107 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package compactcert + +import ( + "fmt" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/merklearray" + "github.com/algorand/go-algorand/data/basics" +) + +// Verifier is used to verify a compact certificate. +type Verifier struct { + Params + + partcom crypto.Digest +} + +// MkVerifier constructs a verifier to check the compact certificate +// on the message specified in p, with partcom specifying the Merkle +// root of the participants that must sign the message. +func MkVerifier(p Params, partcom crypto.Digest) *Verifier { + return &Verifier{ + Params: p, + partcom: partcom, + } +} + +// Verify checks if c is a valid compact certificate for the message +// and participants that were used to construct the Verifier. 
+func (v *Verifier) Verify(c *Cert) error { + if c.SignedWeight <= v.ProvenWeight { + return fmt.Errorf("cert signed weight %d <= proven weight %d", c.SignedWeight, v.ProvenWeight) + } + + // Verify all of the reveals + sigs := make(map[uint64]crypto.Hashable) + parts := make(map[uint64]crypto.Hashable) + for pos, r := range c.Reveals { + sigs[pos] = r.SigSlot + parts[pos] = r.Part + + ephID := basics.OneTimeIDForRound(v.SigRound, r.Part.KeyDilution) + if !r.Part.PK.Verify(ephID, v.Msg, r.SigSlot.Sig.OneTimeSignature) { + return fmt.Errorf("signature in reveal pos %d does not verify", pos) + } + } + + err := merklearray.Verify(c.SigCommit, sigs, c.SigProofs) + if err != nil { + return err + } + + err = merklearray.Verify(v.partcom, parts, c.PartProofs) + if err != nil { + return err + } + + // Verify that the reveals contain the right coins + nr, err := v.numReveals(c.SignedWeight) + if err != nil { + return err + } + + msgHash := crypto.HashObj(v.Msg) + + for j := uint64(0); j < nr; j++ { + choice := coinChoice{ + J: j, + SignedWeight: c.SignedWeight, + ProvenWeight: v.ProvenWeight, + Sigcom: c.SigCommit, + Partcom: v.partcom, + MsgHash: msgHash, + } + + coin := hashCoin(choice) + matchingReveal := false + for _, r := range c.Reveals { + if r.SigSlot.L <= coin && coin < r.SigSlot.L+r.Part.Weight { + matchingReveal = true + break + } + } + + if !matchingReveal { + return fmt.Errorf("no reveal for coin %d at %d", j, coin) + } + } + + return nil +} diff --git a/crypto/curve25519.go b/crypto/curve25519.go index 3f721c4328..1908976869 100644 --- a/crypto/curve25519.go +++ b/crypto/curve25519.go @@ -25,6 +25,8 @@ package crypto // #cgo linux,arm64 LDFLAGS: ${SRCDIR}/libs/linux/arm64/lib/libsodium.a // #cgo linux,arm CFLAGS: -I${SRCDIR}/libs/linux/arm/include // #cgo linux,arm LDFLAGS: ${SRCDIR}/libs/linux/arm/lib/libsodium.a +// #cgo windows,amd64 CFLAGS: -I${SRCDIR}/libs/windows/amd64/include +// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/libs/windows/amd64/lib/libsodium.a 
// #include // #include "sodium.h" import "C" diff --git a/crypto/libsodium-fork/configure.ac b/crypto/libsodium-fork/configure.ac index 02619d4a13..8b53d33fe0 100644 --- a/crypto/libsodium-fork/configure.ac +++ b/crypto/libsodium-fork/configure.ac @@ -216,7 +216,7 @@ AC_CHECK_DEFINE([_FORTIFY_SOURCE], [], [ AX_CHECK_COMPILE_FLAG([-fvisibility=hidden], [CFLAGS="$CFLAGS -fvisibility=hidden"]) -AS_CASE([$host_os], [cygwin*|mingw*|msys|pw32*|cegcc*], [ ], [ +AS_CASE([$host_os], [cygwin*|pw32*|mingw*|msys|cegcc*], [ ], [ AX_CHECK_COMPILE_FLAG([-fPIC], [CFLAGS="$CFLAGS -fPIC"]) ]) @@ -262,14 +262,14 @@ AC_ARG_ENABLE(soname-versions, ) AS_CASE([$host_os], - [cygwin*|mingw*|msys|pw32*|cegcc*], [ + [cygwin*|pw32*|mingw*|msys|cegcc*], [ AX_CHECK_LINK_FLAG([-Wl,--dynamicbase], [LDFLAGS="$LDFLAGS -Wl,--dynamicbase"]) AX_CHECK_LINK_FLAG([-Wl,--high-entropy-va], [LDFLAGS="$LDFLAGS -Wl,--high-entropy-va"]) AX_CHECK_LINK_FLAG([-Wl,--nxcompat], [LDFLAGS="$LDFLAGS -Wl,--nxcompat"]) ]) AS_CASE([$host_os], - [cygwin*|mingw*|msys|pw32*|cegcc*], [ + [cygwin*|pw32*|mingw*|msys|cegcc*], [ AX_CHECK_COMPILE_FLAG([-fno-asynchronous-unwind-tables], [ [CFLAGS="$CFLAGS -fno-asynchronous-unwind-tables"] ]) @@ -278,7 +278,7 @@ AS_CASE([$host_os], AS_IF([test "x$enable_ssp" != "xno"],[ AS_CASE([$host_os], - [cygwin*|mingw*|msys|pw32*|cegcc*|haiku], [ ], + [cygwin*|pw32*|mingw*|msys|cegcc*|haiku], [ ], [*], [ AX_CHECK_COMPILE_FLAG([-fstack-protector], [ AX_CHECK_LINK_FLAG([-fstack-protector], diff --git a/crypto/memcpy_chk_windows.c b/crypto/memcpy_chk_windows.c new file mode 100644 index 0000000000..cc7c67c45f --- /dev/null +++ b/crypto/memcpy_chk_windows.c @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2020 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// +build gc + +#include "_cgo_export.h" +#include + +extern void crosscall2(void (*fn)(void *, int), void *, int); +extern void _cgo_panic(void *, int); + +void * __memcpy_chk (void *dstpp, const void *srcpp, size_t len, size_t dstlen) +{ + if (dstlen < len) { + struct { const char *p; } a; + + a.p = "panic from __memcpy_chk"; + crosscall2(_cgo_panic, &a, sizeof a); + } + return memcpy (dstpp, srcpp, len); +} diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go index 2547a21e72..03fa54aaf9 100644 --- a/crypto/merkletrie/cache.go +++ b/crypto/merkletrie/cache.go @@ -67,7 +67,7 @@ type merkleTrieCache struct { // pendingCreatedNID contains a list of the node ids that has been created since the last commit and need to be stored. pendingCreatedNID map[storedNodeIdentifier]bool - // pendingDeletionPage contains a map of pages to delete once committed. + // pendingDeletionPage contains a map of pages that had at least one node removed from. This require these pages to be either deleted or updated. pendingDeletionPages map[uint64]bool // a list of the pages priorities. 
The item in the front has higher priority and would not get evicted as quickly as the item on the back @@ -76,10 +76,24 @@ type merkleTrieCache struct { pagesPrioritizationMap map[uint64]*list.Element // the page to load before the nextNodeID at init time. If zero, then nothing is being reloaded. deferedPageLoad uint64 + + // pages reallocation map, used during the commit() execution to identify pages and nodes that would get remapped to ensure the + // stored pages are sufficiently "packed" + reallocatedPages map[uint64]map[storedNodeIdentifier]*node + + // targetPageFillFactor is the desired threshold for page fill factor. Newly created pages would follow this fill factor. + targetPageFillFactor float32 + + // maxChildrenPagesThreshold is used during the commit(), evaluating the number of children pages each updated node is referring to. If the number + // exceed this number, the node children would be reallocated. + maxChildrenPagesThreshold uint64 + + // hashAccumulationBuffer is a shared buffer used for the node.calculateHash function. It avoids memory reallocation. 
+ hashAccumulationBuffer [64 * 256]byte } // initialize perform the initialization for the cache -func (mtc *merkleTrieCache) initialize(mt *Trie, committer Committer, cachedNodeCountTarget int) { +func (mtc *merkleTrieCache) initialize(mt *Trie, committer Committer, memoryConfig MemoryConfig) { mtc.mt = mt mtc.pageToNIDsPtr = make(map[uint64]map[storedNodeIdentifier]*node) mtc.txNextNodeID = storedNodeIdentifierNull @@ -89,9 +103,11 @@ func (mtc *merkleTrieCache) initialize(mt *Trie, committer Committer, cachedNode mtc.pendingDeletionPages = make(map[uint64]bool) mtc.pagesPrioritizationList = list.New() mtc.pagesPrioritizationMap = make(map[uint64]*list.Element) - mtc.cachedNodeCountTarget = cachedNodeCountTarget + mtc.cachedNodeCountTarget = memoryConfig.CachedNodesCount mtc.deferedPageLoad = storedNodeIdentifierNull - mtc.nodesPerPage = committer.GetNodesCountPerPage() + mtc.nodesPerPage = memoryConfig.NodesCountPerPage + mtc.targetPageFillFactor = memoryConfig.PageFillFactor + mtc.maxChildrenPagesThreshold = memoryConfig.MaxChildrenPagesThreshold if mt.nextNodeID != storedNodeIdentifierBase { // if the next node is going to be on a new page, no need to reload the last page. 
if (int64(mtc.mt.nextNodeID) / mtc.nodesPerPage) == (int64(mtc.mt.nextNodeID-1) / mtc.nodesPerPage) { @@ -154,11 +170,14 @@ func (mtc *merkleTrieCache) getNode(nid storedNodeIdentifier) (pnode *node, err if pageNodes != nil { pnode = pageNodes[nid] if pnode != nil { - mtc.prioritizeNode(nid) + if mtc.reallocatedPages == nil { + mtc.prioritizeNodeFront(nid) + } return } } + // if we don't have it in memory, try to load it from disk err = mtc.loadPage(nodePage) if err != nil { return @@ -168,27 +187,54 @@ func (mtc *merkleTrieCache) getNode(nid storedNodeIdentifier) (pnode *node, err if pnode, have = pageNodes[nid]; !have { err = ErrLoadedPageMissingNode } else { - mtc.prioritizeNode(nid) + // if we're current reallocating pages, the mtc.reallocatedPages would be non-nil, and + // the newly prioritized pages should be placed on the back. Otherwise, we're on the + // "normal" path, adding/deleting elements from the trie, in which case new pages should + // always be placed on the front. + if mtc.reallocatedPages == nil { + mtc.prioritizeNodeFront(nid) + } else { + mtc.prioritizeNodeBack(nid) + } + } return } -// prioritizeNode make sure to adjust the priority of the given node id. +// prioritizeNodeFront make sure to adjust the priority of the given node id. // nodes are prioritized based on the page the belong to. -// a new page would be placed on front, and an older page would get moved +// a new page would be placed on front, and an existing page would get moved // to the front. -func (mtc *merkleTrieCache) prioritizeNode(nid storedNodeIdentifier) { +func (mtc *merkleTrieCache) prioritizeNodeFront(nid storedNodeIdentifier) { page := uint64(nid) / uint64(mtc.nodesPerPage) element := mtc.pagesPrioritizationMap[page] + if element != nil { // if we already have this page as an element, move it to the front. mtc.pagesPrioritizationList.MoveToFront(element) return } // add it at the front. 
- element = mtc.pagesPrioritizationList.PushFront(page) - mtc.pagesPrioritizationMap[page] = element + mtc.pagesPrioritizationMap[page] = mtc.pagesPrioritizationList.PushFront(page) +} + +// prioritizeNodeBack make sure to adjust the priority of the given node id. +// nodes are prioritized based on the page the belong to. +// a new page would be placed on front, and an existing page would get moved +// to the front. +func (mtc *merkleTrieCache) prioritizeNodeBack(nid storedNodeIdentifier) { + page := uint64(nid) / uint64(mtc.nodesPerPage) + + element := mtc.pagesPrioritizationMap[page] + + if element != nil { + // if we already have this page as an element, move it to the back. + mtc.pagesPrioritizationList.MoveToBack(element) + return + } + // add it at the back. + mtc.pagesPrioritizationMap[page] = mtc.pagesPrioritizationList.PushBack(page) } // loadPage loads a give page id into memory. @@ -251,7 +297,7 @@ func (mtc *merkleTrieCache) commitTransaction() { // the created nodes are already on the list. for nodeID := range mtc.txCreatedNodeIDs { mtc.pendingCreatedNID[nodeID] = true - mtc.prioritizeNode(nodeID) + mtc.prioritizeNodeFront(nodeID) } mtc.txCreatedNodeIDs = nil @@ -295,30 +341,100 @@ func (mtc *merkleTrieCache) rollbackTransaction() { mtc.txNextNodeID = storedNodeIdentifierNull } -// Uint64Slice attaches the methods of Interface to []uint64, sorting in increasing order. -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// SortUint64 sorts a slice of uint64s in increasing order. 
-func SortUint64(a []uint64) { - sort.Sort(Uint64Slice(a)) +// CommitStats provides statistics about the operation of the commit() function +type CommitStats struct { + NewPageCount int + NewNodeCount int + UpdatedPageCount int + UpdatedNodeCount int + DeletedPageCount int + FanoutReallocatedNodeCount int + PackingReallocatedNodeCount int + LoadedPages int } // commit - used as part of the Trie Commit functionality -func (mtc *merkleTrieCache) commit() error { +func (mtc *merkleTrieCache) commit() (CommitStats, error) { + var stats CommitStats + // if we have a pending page load, do that now. if mtc.deferedPageLoad != storedNodeIdentifierNull { err := mtc.loadPage(mtc.deferedPageLoad) if err != nil { - return err + return CommitStats{}, err } mtc.deferedPageLoad = storedNodeIdentifierNull } + pagesToCreate, pagesToDelete, pagesToUpdate, err := mtc.reallocatePendingPages(&stats) + if err != nil { + return CommitStats{}, err + } + + // allocate a staging area for the page encoder. buffer should be big enough so + // we won't need any reallocation to take place. + encodeBuffer := make([]byte, maxNodeSerializedSize*256+32) + + // store all the new pages ( which have a sequential ordering ) + for _, page := range pagesToCreate { + nodeIDs := mtc.pageToNIDsPtr[page] + pageContent := mtc.encodePage(nodeIDs, encodeBuffer) + err := mtc.committer.StorePage(uint64(page), pageContent) + if err != nil { + return CommitStats{}, err + } + stats.NewPageCount++ + stats.NewNodeCount += len(nodeIDs) + } + + // delete the pages that we don't need anymore. + for page := range pagesToDelete { + err := mtc.committer.StorePage(uint64(page), nil) + if err != nil { + return CommitStats{}, err + } + + // since the entire page was removed from memory, we can also remove it from the priority list. 
+ element := mtc.pagesPrioritizationMap[uint64(page)] + if element != nil { + mtc.pagesPrioritizationList.Remove(element) + delete(mtc.pagesPrioritizationMap, uint64(page)) + } + stats.DeletedPageCount++ + + mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[uint64(page)]) + delete(mtc.pageToNIDsPtr, uint64(page)) + } + + // updated pages + for page, nodeIDs := range pagesToUpdate { + pageContent := mtc.encodePage(nodeIDs, encodeBuffer) + err := mtc.committer.StorePage(uint64(page), pageContent) + if err != nil { + return CommitStats{}, err + } + stats.UpdatedPageCount++ + stats.UpdatedNodeCount += len(nodeIDs) + } + + mtc.pendingCreatedNID = make(map[storedNodeIdentifier]bool) + mtc.pendingDeletionPages = make(map[uint64]bool) + mtc.modified = false + return stats, nil +} + +// reallocatePendingPages is called by the commit() function, and is reponsible for performing two tasks - +// 1. calculate the hashes of all the newly created nodes +// 2. reornigize the pending flush nodes into an optimal page list, and construct a list of pages that need to be created, deleted and updated. +func (mtc *merkleTrieCache) reallocatePendingPages(stats *CommitStats) (pagesToCreate []uint64, pagesToDelete map[uint64]bool, pagesToUpdate map[uint64]map[storedNodeIdentifier]*node, err error) { + // newPageThreshold is the threshold at which all the pages are newly created pages that were never commited. 
+ newPageThreshold := uint64(mtc.mt.lastCommittedNodeID) / uint64(mtc.nodesPerPage) + if int64(mtc.mt.lastCommittedNodeID)%mtc.nodesPerPage > 0 { + newPageThreshold++ + } + createdPages := make(map[uint64]map[storedNodeIdentifier]*node) + toUpdatePages := make(map[uint64]map[storedNodeIdentifier]*node) // create a list of all the pages that need to be created/updated for nodeID := range mtc.pendingCreatedNID { @@ -333,28 +449,77 @@ func (mtc *merkleTrieCache) commit() error { for page := range createdPages { sortedCreatedPages = append(sortedCreatedPages, page) } - SortUint64(sortedCreatedPages) + sort.SliceStable(sortedCreatedPages, func(i, j int) bool { return sortedCreatedPages[i] < sortedCreatedPages[j] }) + + mtc.reallocatedPages = make(map[uint64]map[storedNodeIdentifier]*node) + + // move the next node id to the next page, so that all reallocated nodes would be packed on new pages. + mtc.mt.nextNodeID = storedNodeIdentifier(((uint64(mtc.mt.nextNodeID) + uint64(mtc.nodesPerPage-1)) / uint64(mtc.nodesPerPage)) * uint64(mtc.nodesPerPage)) + reallocatedNodesBasePage := uint64(mtc.mt.nextNodeID) / uint64(mtc.nodesPerPage) + + beforeHashCalculationPageCount := len(mtc.pageToNIDsPtr) + beforeHashCalculationPendingDeletionPages := len(mtc.pendingDeletionPages) + // updated the hashes of these pages. this works correctly // since all trie modification are done with ids that are bottom-up for _, page := range sortedCreatedPages { - err := mtc.calculatePageHashes(int64(page)) + relocatedNodes, err := mtc.calculatePageHashes(int64(page), page >= newPageThreshold) if err != nil { - return err + return nil, nil, nil, err } + stats.FanoutReallocatedNodeCount += int(relocatedNodes) } - // store the pages. 
- for page, nodeIDs := range createdPages { - pageContent := mtc.encodePage(nodeIDs) - err := mtc.committer.StorePage(uint64(page), pageContent) - if err != nil { - return err + stats.LoadedPages = len(mtc.pendingDeletionPages) - beforeHashCalculationPendingDeletionPages + len(mtc.pageToNIDsPtr) - beforeHashCalculationPageCount + // reallocate each of the new page content, if not meeting the desired fill factor. + reallocationMap := make(map[storedNodeIdentifier]storedNodeIdentifier) + for _, page := range sortedCreatedPages { + if page < newPageThreshold { + continue } + if mtc.getPageFillFactor(page) >= mtc.targetPageFillFactor { + if len(createdPages[page]) > 0 { + pagesToCreate = append(pagesToCreate, page) + } + continue + } + + stats.PackingReallocatedNodeCount += mtc.reallocatePage(page, reallocationMap) + delete(createdPages, page) + } + + for pageID, page := range mtc.reallocatedPages { + createdPages[pageID] = page } + for _, nodeIDs := range createdPages { + for _, node := range nodeIDs { + node.remapChildren(reallocationMap) + } + } + + if newRootID, has := reallocationMap[mtc.mt.root]; has { + delete(reallocationMap, mtc.mt.root) + mtc.mt.root = newRootID + } + mtc.reallocatedPages = nil + // pages that contains elemets that were removed. toRemovePages := mtc.pendingDeletionPages - toUpdatePages := make(map[uint64]map[storedNodeIdentifier]*node) + + // The initial page is moved to the "update" step. 
+ if len(sortedCreatedPages) > 0 && mtc.pageToNIDsPtr[sortedCreatedPages[0]] != nil { + toRemovePages[sortedCreatedPages[0]] = true + } + + for page := reallocatedNodesBasePage; len(createdPages[page]) > 0; page++ { + nodeIDs := createdPages[page] + delete(createdPages, page) + if len(nodeIDs) == 0 { + continue + } + pagesToCreate = append(pagesToCreate, page) + } // iterate over the existing list and ensure we don't delete any page that has active elements for pageRemovalCandidate := range toRemovePages { @@ -368,60 +533,137 @@ func (mtc *merkleTrieCache) commit() error { delete(toRemovePages, pageRemovalCandidate) } - // delete the pages that we don't need anymore. - for page := range toRemovePages { - err := mtc.committer.StorePage(uint64(page), nil) - if err != nil { - return err - } - - // since the entire page was removed from memory, we can also remove it from the priority list. - element := mtc.pagesPrioritizationMap[uint64(page)] - if element != nil { - mtc.pagesPrioritizationList.Remove(element) - delete(mtc.pagesPrioritizationMap, uint64(page)) - } - mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[uint64(page)]) - delete(mtc.pageToNIDsPtr, uint64(page)) - } - - // updated pages - for page, nodeIDs := range toUpdatePages { - if createdPages[page] != nil { - continue - } - pageContent := mtc.encodePage(nodeIDs) - err := mtc.committer.StorePage(uint64(page), pageContent) - if err != nil { - return err - } - } - - mtc.pendingCreatedNID = make(map[storedNodeIdentifier]bool) - mtc.pendingDeletionPages = make(map[uint64]bool) - mtc.modified = false - return nil + return pagesToCreate, toRemovePages, toUpdatePages, nil } // calculatePageHashes calculate hashes of a specific page // It is vital that the hashes for all the preceding page would have // already been calculated for this function to work correctly. 
-func (mtc *merkleTrieCache) calculatePageHashes(page int64) (err error) { +func (mtc *merkleTrieCache) calculatePageHashes(page int64, newPage bool) (fanoutRelocatedNodes int64, err error) { nodes := mtc.pageToNIDsPtr[uint64(page)] - for i := page * mtc.nodesPerPage; i < (page+1)*mtc.nodesPerPage; i++ { - if mtc.pendingCreatedNID[storedNodeIdentifier(i)] == false { + for i := storedNodeIdentifier(page * mtc.nodesPerPage); i < storedNodeIdentifier((page+1)*mtc.nodesPerPage); i++ { + if !newPage && mtc.pendingCreatedNID[i] == false { continue } - node := nodes[storedNodeIdentifier(i)] - if node != nil { - if err = node.calculateHash(mtc); err != nil { - return + node := nodes[i] + if node == nil { + continue + } + + if err = node.calculateHash(mtc); err != nil { + return + } + + nodeChildCount := node.getChildCount() + if nodeChildCount > mtc.maxChildrenPagesThreshold { + nodeUniqueChildPages := node.getUniqueChildPageCount(mtc.nodesPerPage) + if nodeUniqueChildPages > mtc.maxChildrenPagesThreshold { + // see if we can fit all the child nodes into the existing page or not. If not, we might want to start + // a new page as long as there is a chance that all the children would be able to fit into that page. + if nodeChildCount < uint64(mtc.nodesPerPage) && + mtc.getPageFillFactor(uint64(mtc.mt.nextNodeID)/uint64(mtc.nodesPerPage)) > mtc.targetPageFillFactor { + // adjust the next node id to align with the next page. + mtc.mt.nextNodeID = storedNodeIdentifier((1 + uint64(mtc.mt.nextNodeID)/uint64(mtc.nodesPerPage)) * uint64(mtc.nodesPerPage)) + } + node.reallocateChildren(mtc) + fanoutRelocatedNodes++ } } } return } +// getPageFillFactor calculates the fill factor for a given page, or return 0 if the page is not in memory. 
+func (mtc *merkleTrieCache) getPageFillFactor(page uint64) float32 { + if pageMap := mtc.pageToNIDsPtr[page]; pageMap != nil { + return float32(len(pageMap)) / float32(mtc.nodesPerPage) + } + return 0.0 +} + +// reallocatePage reallocates an entire page into the latest page(s). It also update the reallocationMap for all the nodes that have been moved, +// so that we could update the needed node dependencies. +func (mtc *merkleTrieCache) reallocatePage(page uint64, reallocationMap map[storedNodeIdentifier]storedNodeIdentifier) (reallocatedNodes int) { + nextID := mtc.mt.nextNodeID + reallocatedNodes = len(mtc.pageToNIDsPtr[page]) + nextPage := uint64(nextID) / uint64(mtc.nodesPerPage) + if reallocatedNodes == 0 { + // if we aren't going to reallocate any nodes, no need to allocate (maybe) + // new pages for these. + goto skipContentDeletion + } + + if _, has := mtc.pageToNIDsPtr[nextPage]; has { + // see if we will need another allocated page: + lastID := mtc.mt.nextNodeID + storedNodeIdentifier(reallocatedNodes) - 1 + lastPage := uint64(lastID) / uint64(mtc.nodesPerPage) + if _, has := mtc.pageToNIDsPtr[lastPage]; !has { + nextPage = lastPage + } else { + nextPage = storedNodeIdentifierNull + } + } + + if nextPage > storedNodeIdentifierNull { + pageMap := make(map[storedNodeIdentifier]*node, mtc.nodesPerPage) + mtc.reallocatedPages[nextPage] = pageMap + mtc.pageToNIDsPtr[nextPage] = pageMap + mtc.pagesPrioritizationMap[nextPage] = mtc.pagesPrioritizationList.PushFront(nextPage) + } + + mtc.mt.nextNodeID += storedNodeIdentifier(reallocatedNodes) + for nid, node := range mtc.pageToNIDsPtr[page] { + reallocationMap[nid] = nextID + mtc.pageToNIDsPtr[uint64(nextID)/uint64(mtc.nodesPerPage)][nextID] = node + delete(mtc.pageToNIDsPtr[page], nid) + nextID++ + } +skipContentDeletion: + delete(mtc.pageToNIDsPtr, page) + delete(mtc.reallocatedPages, page) + if element, has := mtc.pagesPrioritizationMap[page]; has { + mtc.pagesPrioritizationList.Remove(element) + 
delete(mtc.pagesPrioritizationMap, page) + } + return +} + +// reallocateNode reallocates a given node into the latest page. Unlike refurbishNode, it's not expected to be called +// from within the context of a transaction. +func (mtc *merkleTrieCache) reallocateNode(nid storedNodeIdentifier) storedNodeIdentifier { + nextID := mtc.mt.nextNodeID + nextPage := uint64(nextID) / uint64(mtc.nodesPerPage) + currentPage := uint64(nid) / uint64(mtc.nodesPerPage) + if currentPage == nextPage { + return nid + } + mtc.mt.nextNodeID++ + + pnode := mtc.pageToNIDsPtr[currentPage][nid] + + delete(mtc.pageToNIDsPtr[currentPage], nid) + if len(mtc.pageToNIDsPtr[currentPage]) == 0 { + delete(mtc.pageToNIDsPtr, currentPage) + delete(mtc.reallocatedPages, currentPage) // if there is one. + if element, has := mtc.pagesPrioritizationMap[currentPage]; has && element != nil { + // since the page was just deleted, we can delete it from the prioritization map as well. + mtc.pagesPrioritizationList.Remove(element) + delete(mtc.pagesPrioritizationMap, currentPage) + } + } + mtc.pendingDeletionPages[currentPage] = true + + if mtc.pageToNIDsPtr[nextPage] == nil { + pageMap := make(map[storedNodeIdentifier]*node, mtc.nodesPerPage) + mtc.reallocatedPages[nextPage] = pageMap + mtc.pageToNIDsPtr[nextPage] = pageMap + mtc.pagesPrioritizationMap[nextPage] = mtc.pagesPrioritizationList.PushFront(nextPage) + } + mtc.pageToNIDsPtr[nextPage][nextID] = pnode + + return nextID +} + // decodePage decodes a byte array into a page content func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err error) { version, versionLength := binary.Uvarint(bytes[:]) @@ -455,8 +697,7 @@ func decodePage(bytes []byte) (nodesMap map[storedNodeIdentifier]*node, err erro } // decodePage encodes a page contents into a byte array -func (mtc *merkleTrieCache) encodePage(nodeIDs map[storedNodeIdentifier]*node) []byte { - serializedBuffer := make([]byte, maxNodeSerializedSize*len(nodeIDs)+32) +func (mtc 
*merkleTrieCache) encodePage(nodeIDs map[storedNodeIdentifier]*node, serializedBuffer []byte) []byte { version := binary.PutUvarint(serializedBuffer[:], NodePageVersion) length := binary.PutVarint(serializedBuffer[version:], int64(len(nodeIDs))) walk := version + length diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go index 710ff81328..47938800eb 100644 --- a/crypto/merkletrie/cache_test.go +++ b/crypto/merkletrie/cache_test.go @@ -25,6 +25,13 @@ import ( "github.com/algorand/go-algorand/crypto" ) +var defaultTestMemoryConfig = MemoryConfig{ + NodesCountPerPage: inMemoryCommitterPageSize, + CachedNodesCount: defaultTestEvictSize, + PageFillFactor: 0.90, + MaxChildrenPagesThreshold: 32, +} + func verifyCacheNodeCount(t *testing.T, trie *Trie) { count := 0 for _, pageNodes := range trie.cache.pageToNIDsPtr { @@ -44,7 +51,7 @@ func verifyCacheNodeCount(t *testing.T, trie *Trie) { func TestCacheEviction1(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) // create 13000 hashes. leafsCount := 13000 hashes := make([]crypto.Digest, leafsCount) @@ -66,7 +73,7 @@ func TestCacheEviction1(t *testing.T) { func TestCacheEviction2(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) // create 20000 hashes. leafsCount := 20000 hashes := make([]crypto.Digest, leafsCount) @@ -93,7 +100,7 @@ func TestCacheEviction2(t *testing.T) { func TestCacheEviction3(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) // create 200000 hashes. 
leafsCount := 200000 hashes := make([]crypto.Digest, leafsCount) @@ -120,16 +127,10 @@ func TestCacheEviction3(t *testing.T) { // smallPageMemoryCommitter is an InMemoryCommitter, which has a custom page size, and knows how to "fail" per request. type smallPageMemoryCommitter struct { InMemoryCommitter - pageSize int64 failStore int failLoad int } -// GetNodesCountPerPage returns the page size ( number of nodes per page ) -func (spmc *smallPageMemoryCommitter) GetNodesCountPerPage() (pageSize int64) { - return spmc.pageSize -} - // StorePage stores a single page in an in-memory persistence. func (spmc *smallPageMemoryCommitter) StorePage(page uint64, content []byte) error { if spmc.failStore > 0 { @@ -150,8 +151,10 @@ func (spmc *smallPageMemoryCommitter) LoadPage(page uint64) (content []byte, err func cacheEvictionFuzzer(t *testing.T, hashes []crypto.Digest, pageSize int64, evictSize int) { var memoryCommitter smallPageMemoryCommitter - memoryCommitter.pageSize = pageSize - mt1, _ := MakeTrie(&memoryCommitter, evictSize) + memoryConfig := defaultTestMemoryConfig + memoryConfig.CachedNodesCount = evictSize + memoryConfig.NodesCountPerPage = pageSize + mt1, _ := MakeTrie(&memoryCommitter, memoryConfig) // add the first 10 hashes. for i := 0; i < 10; i++ { @@ -221,8 +224,9 @@ func TestCacheEvictionFuzzer2(t *testing.T) { // it's being deleted correctly. func TestCacheMidTransactionPageDeletion(t *testing.T) { var memoryCommitter smallPageMemoryCommitter - memoryCommitter.pageSize = 2 - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + memoryConfig := defaultTestMemoryConfig + memoryConfig.NodesCountPerPage = 2 + mt1, _ := MakeTrie(&memoryCommitter, memoryConfig) // create 10000 hashes. leafsCount := 10000 @@ -241,10 +245,11 @@ func TestCacheMidTransactionPageDeletion(t *testing.T) { require.NoError(t, err) require.True(t, deleted) } - mt1.Commit() - // compare committed pages to the in-memory pages. 
+ stats, err := mt1.Commit() + require.NoError(t, err) + // compare committed pages to the in-memory pages. for page, pageContent := range memoryCommitter.memStore { if page == storedNodeIdentifierNull { continue @@ -254,16 +259,15 @@ func TestCacheMidTransactionPageDeletion(t *testing.T) { require.NoError(t, err) // stored page should have more than a single node. - require.Greater(t, len(decodedPage), 0) + require.Greaterf(t, len(decodedPage), 0, "page %d has no nodes", page) } for page, pageContent := range mt1.cache.pageToNIDsPtr { - // memory page should have more than a single node. require.NotZerof(t, len(pageContent), "Memory page %d has zero nodes", page) // memory page should also be available on disk: - require.NotNil(t, memoryCommitter.memStore[page]) + require.NotNilf(t, memoryCommitter.memStore[page], "committed page %d is empty while memory node has %d items\nStats : %#v", page, len(pageContent), stats) } } @@ -305,8 +309,10 @@ func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) { // it's being deleted correctly. func TestCacheTransactionRollbackPageDeletion(t *testing.T) { var memoryCommitter smallPageMemoryCommitter - memoryCommitter.pageSize = 2 - mt1, _ := MakeTrie(&memoryCommitter, 5) + memConfig := defaultTestMemoryConfig + memConfig.CachedNodesCount = 5 + memConfig.NodesCountPerPage = 2 + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 1000 hashes. leafsCount := 1000 @@ -344,8 +350,10 @@ func TestCacheTransactionRollbackPageDeletion(t *testing.T) { // it's being deleted correctly. func TestCacheDeleteNodeMidTransaction(t *testing.T) { var memoryCommitter smallPageMemoryCommitter - memoryCommitter.pageSize = 1 - mt1, _ := MakeTrie(&memoryCommitter, 5) + memConfig := defaultTestMemoryConfig + memConfig.CachedNodesCount = 5 + memConfig.NodesCountPerPage = 1 + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 1000 hashes. 
leafsCount := 10000 @@ -375,7 +383,7 @@ func TestCacheDeleteNodeMidTransaction(t *testing.T) { // increased if the page was already loaded previously into memory. func TestCachePageReloading(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) // create 10 hashes. leafsCount := 10 hashes := make([]crypto.Digest, leafsCount) @@ -391,7 +399,7 @@ func TestCachePageReloading(t *testing.T) { earlyCachedNodeCount := mt1.cache.cachedNodeCount // reloading existing cached page multiple time should not cause increase cached node count. - page := uint64(mt1.nextNodeID) / uint64(memoryCommitter.GetNodesCountPerPage()) + page := uint64(mt1.nextNodeID-1) / uint64(defaultTestMemoryConfig.NodesCountPerPage) err = mt1.cache.loadPage(page) require.NoError(t, err) lateCachedNodeCount := mt1.cache.cachedNodeCount @@ -415,7 +423,9 @@ func TestCachePageReloading(t *testing.T) { // evicting other pages before evicting the top page. func TestCachePagedOutTip(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, 600) + memConfig := defaultTestMemoryConfig + memConfig.CachedNodesCount = 600 + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 2048 hashes. 
leafsCount := 2048 hashes := make([]crypto.Digest, leafsCount) @@ -426,7 +436,7 @@ func TestCachePagedOutTip(t *testing.T) { for i := 0; i < len(hashes)/2; i++ { mt1.Add(hashes[i][:]) } - err := mt1.Commit() + _, err := mt1.Commit() require.NoError(t, err) for i := 0; i < len(hashes)/2; i++ { @@ -434,12 +444,13 @@ func TestCachePagedOutTip(t *testing.T) { } // check the tip page before evicting - page := uint64(mt1.nextNodeID) / uint64(memoryCommitter.GetNodesCountPerPage()) + page := uint64(mt1.root) / uint64(memConfig.NodesCountPerPage) require.NotNil(t, mt1.cache.pageToNIDsPtr[page]) _, err = mt1.Evict(true) require.NoError(t, err) - // ensures that the tip page was not flushed out. + // ensures that the tip page was not flushed out. ( the root might have been reallocated, so recheck is needed ) + page = uint64(mt1.root) / uint64(memConfig.NodesCountPerPage) require.NotNil(t, mt1.cache.pageToNIDsPtr[page]) } diff --git a/crypto/merkletrie/committer.go b/crypto/merkletrie/committer.go index 91625cadee..79331cef7c 100644 --- a/crypto/merkletrie/committer.go +++ b/crypto/merkletrie/committer.go @@ -20,7 +20,6 @@ package merkletrie type Committer interface { StorePage(page uint64, content []byte) error LoadPage(page uint64) (content []byte, err error) - GetNodesCountPerPage() int64 } const ( @@ -41,7 +40,9 @@ func (mc *InMemoryCommitter) StorePage(page uint64, content []byte) error { if content == nil { delete(mc.memStore, page) } else { - mc.memStore[page] = content + storedContent := make([]byte, len(content)) + copy(storedContent, content) + mc.memStore[page] = storedContent } return nil } @@ -54,8 +55,3 @@ func (mc *InMemoryCommitter) LoadPage(page uint64) (content []byte, err error) { content = mc.memStore[page] return content, nil } - -// GetNodesCountPerPage returns the page size ( number of nodes per page ) -func (mc *InMemoryCommitter) GetNodesCountPerPage() (pageSize int64) { - return inMemoryCommitterPageSize -} diff --git 
a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go index 8ff7cd6ea9..f3006347cf 100644 --- a/crypto/merkletrie/committer_test.go +++ b/crypto/merkletrie/committer_test.go @@ -17,6 +17,7 @@ package merkletrie import ( + "encoding/binary" "testing" "github.com/stretchr/testify/require" @@ -35,7 +36,7 @@ func (mc *InMemoryCommitter) Duplicate() (out *InMemoryCommitter) { func TestInMemoryCommitter(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) // create 50000 hashes. leafsCount := 50000 hashes := make([]crypto.Digest, leafsCount) @@ -53,14 +54,14 @@ func TestInMemoryCommitter(t *testing.T) { releasedNodes, err := mt1.Evict(true) require.NoError(t, err) savedMemoryCommitter := memoryCommitter.Duplicate() - require.Equal(t, 18957, releasedNodes) + require.Equal(t, 19282, releasedNodes) for i := len(hashes) / 2; i < len(hashes); i++ { mt1.Add(hashes[i][:]) } mt1Hash, _ := mt1.RootHash() - mt2, _ := MakeTrie(savedMemoryCommitter, defaultTestEvictSize) + mt2, _ := MakeTrie(savedMemoryCommitter, defaultTestMemoryConfig) for i := len(hashes) / 2; i < len(hashes); i++ { mt2.Add(hashes[i][:]) @@ -69,13 +70,13 @@ func TestInMemoryCommitter(t *testing.T) { mt2Hash, _ := mt2.RootHash() require.Equal(t, mt1Hash, mt2Hash) - require.Equal(t, 347, len(memoryCommitter.memStore)) // 347 pages. + require.Equal(t, 137, len(memoryCommitter.memStore)) // 137 pages. // find the size of all the storage. storageSize := 0 for _, bytes := range memoryCommitter.memStore { storageSize += len(bytes) } - require.Equal(t, 2427986, storageSize) // 2,427,986 / 50,000 ~= 48 bytes/leaf. + require.Equal(t, 2425675, storageSize) // 2,425,575 / 50,000 ~= 48 bytes/leaf. 
stats, _ := mt1.GetStats() require.Equal(t, leafsCount, int(stats.leafCount)) require.Equal(t, 61926, int(stats.nodesCount)) @@ -95,7 +96,7 @@ func (n *node) getChildren() (list []storedNodeIdentifier) { func TestNoRedundentPages(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter, defaultTestMemoryConfig) testSize := 20000 // create 20000 hashes. @@ -111,7 +112,7 @@ func TestNoRedundentPages(t *testing.T) { trieNodes := make(map[storedNodeIdentifier]bool) for page, bytes := range memoryCommitter.memStore { if page == 0 { - mt2, _ := MakeTrie(nil, defaultTestEvictSize) + mt2, _ := MakeTrie(nil, defaultTestMemoryConfig) _, err := mt2.deserialize(bytes) require.NoError(t, err) } else { @@ -128,8 +129,23 @@ func TestNoRedundentPages(t *testing.T) { require.Equal(t, nodesCount, mt1.cache.cachedNodeCount) } -func TestMultipleCommits(t *testing.T) { +// decodePage decodes a byte array into a page content +func decodePageHeaderSize(bytes []byte) (headerSize int, err error) { + version, versionLength := binary.Uvarint(bytes[:]) + if versionLength <= 0 { + return 0, ErrPageDecodingFailuire + } + if version != NodePageVersion { + return 0, ErrPageDecodingFailuire + } + _, nodesCountLength := binary.Varint(bytes[versionLength:]) + if nodesCountLength <= 0 { + return 0, ErrPageDecodingFailuire + } + return nodesCountLength + versionLength, nil +} +func TestMultipleCommits(t *testing.T) { testSize := 5000 commitsCount := 5 @@ -139,7 +155,7 @@ func TestMultipleCommits(t *testing.T) { } var memoryCommitter1 InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter1, defaultTestEvictSize) + mt1, _ := MakeTrie(&memoryCommitter1, defaultTestMemoryConfig) for i := 0; i < len(hashes); i++ { mt1.Add(hashes[i][:]) @@ -150,22 +166,24 @@ func TestMultipleCommits(t *testing.T) { mt1.Commit() var memoryCommitter2 InMemoryCommitter - mt2, _ := MakeTrie(&memoryCommitter2, defaultTestEvictSize) + 
mt2, _ := MakeTrie(&memoryCommitter2, defaultTestMemoryConfig) for i := 0; i < len(hashes); i++ { mt2.Add(hashes[i][:]) } mt2.Commit() - require.Equal(t, len(memoryCommitter1.memStore), len(memoryCommitter2.memStore)) - storageSize1 := 0 for _, bytes := range memoryCommitter1.memStore { - storageSize1 += len(bytes) + headerSize, err := decodePageHeaderSize(bytes) + require.NoError(t, err) + storageSize1 += len(bytes) - headerSize } storageSize2 := 0 - for _, bytes := range memoryCommitter1.memStore { - storageSize2 += len(bytes) + for _, bytes := range memoryCommitter2.memStore { + headerSize, err := decodePageHeaderSize(bytes) + require.NoError(t, err) + storageSize2 += len(bytes) - headerSize } require.Equal(t, storageSize1, storageSize2) } diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go index b1a1a2300e..81e99ee3e9 100644 --- a/crypto/merkletrie/node.go +++ b/crypto/merkletrie/node.go @@ -210,7 +210,11 @@ func (n *node) add(cache *merkleTrieCache, d []byte, path []byte) (nodeID stored pnode, nodeID = childNode, cache.refurbishNode(curNodeID) pnode.childrenMask = n.childrenMask - pnode.children = make([]childEntry, len(n.children), len(n.children)) + if len(pnode.children) < len(n.children) { + pnode.children = make([]childEntry, len(n.children), len(n.children)) + } else { + pnode.children = pnode.children[:len(n.children)] + } copy(pnode.children, n.children) pnode.children[curNodeIndex].id = updatedChild } @@ -228,7 +232,7 @@ func (n *node) calculateHash(cache *merkleTrieCache) error { return nil } path := n.hash - hashAccumulator := make([]byte, 0, 64*256) // we can have up to 256 elements, so preallocate sufficient storage; append would expand the storage if it won't be enough. + hashAccumulator := cache.hashAccumulationBuffer[:0] // use a preallocated storage and reuse the storage to avoid reallocation. 
hashAccumulator = append(hashAccumulator, byte(len(path))) // we add this string length before the actual string so it could get "decoded"; in practice, it makes a good domain separator. hashAccumulator = append(hashAccumulator, path...) for _, child := range n.children { @@ -278,7 +282,11 @@ func (n *node) remove(cache *merkleTrieCache, key []byte, path []byte) (nodeID s pnode, nodeID = childNode, cache.refurbishNode(childNodeID) pnode.childrenMask = n.childrenMask - pnode.children = make([]childEntry, len(n.children), len(n.children)) + if len(pnode.children) < len(n.children) { + pnode.children = make([]childEntry, len(n.children), len(n.children)) + } else { + pnode.children = pnode.children[:len(n.children)] + } copy(pnode.children, n.children) pnode.children[childIndex].id = updatedChildNodeID } @@ -323,8 +331,7 @@ func (n *node) serialize(buf []byte) int { w += x } buf[w] = n.children[len(n.children)-1].hashIndex - w++ - return w + return w + 1 } // deserializeNode deserializes the node from a byte array @@ -365,6 +372,37 @@ func deserializeNode(buf []byte) (n *node, s int) { i++ } n.children = make([]childEntry, i, i) - copy(n.children[:], childEntries[:i]) + copy(n.children, childEntries[:i]) return } + +func (n *node) getUniqueChildPageCount(nodesPerPage int64) uint64 { + uniquePages := make(map[int64]struct{}, len(n.children)) + for _, child := range n.children { + uniquePages[int64(child.id)/nodesPerPage] = struct{}{} + } + return uint64(len(uniquePages)) +} + +func (n *node) reallocateChildren(cache *merkleTrieCache) { + for i := range n.children { + n.children[i].id = cache.reallocateNode(n.children[i].id) + } +} + +func (n *node) getChildCount() uint64 { + return uint64(len(n.children)) +} + +func (n *node) remapChildren(reallocationMap map[storedNodeIdentifier]storedNodeIdentifier) { + for i := range n.children { + for { + if newID, has := reallocationMap[n.children[i].id]; has { + delete(reallocationMap, n.children[i].id) + n.children[i].id = newID 
+ continue + } + break + } + } +} diff --git a/crypto/merkletrie/node_test.go b/crypto/merkletrie/node_test.go index 1fbd02ab0e..13b4668ead 100644 --- a/crypto/merkletrie/node_test.go +++ b/crypto/merkletrie/node_test.go @@ -27,7 +27,9 @@ import ( // TestNodeSerialization tests the serialization and deserialization of nodes. func TestNodeSerialization(t *testing.T) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, 1000) + memConfig := defaultTestMemoryConfig + memConfig.CachedNodesCount = 1000 + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 1024 hashes. leafsCount := 1024 hashes := make([]crypto.Digest, leafsCount) @@ -65,7 +67,8 @@ func (n *node) leafUsingChildrenLength() bool { func BenchmarkNodeLeafImplementation(b *testing.B) { b.Run("leaf-ChildrenMask", func(b *testing.B) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + memConfig := defaultTestMemoryConfig + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 100000 hashes. leafsCount := 100000 hashes := make([]crypto.Digest, leafsCount) @@ -92,7 +95,8 @@ func BenchmarkNodeLeafImplementation(b *testing.B) { }) b.Run("leaf-ChildrenLength", func(b *testing.B) { var memoryCommitter InMemoryCommitter - mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize) + memConfig := defaultTestMemoryConfig + mt1, _ := MakeTrie(&memoryCommitter, memConfig) // create 100000 hashes. 
leafsCount := 100000 hashes := make([]crypto.Digest, leafsCount) diff --git a/crypto/merkletrie/trie.go b/crypto/merkletrie/trie.go index b373a99f6b..cdc5ace27f 100644 --- a/crypto/merkletrie/trie.go +++ b/crypto/merkletrie/trie.go @@ -44,13 +44,27 @@ var ErrMismatchingPageSize = errors.New("mismatching page size") // ErrUnableToEvictPendingCommits is returned if the tree was modified and Evict was called with commit=false var ErrUnableToEvictPendingCommits = errors.New("unable to evict as pending commits available") +// MemoryConfig used to define the Trie object memory configuration. +type MemoryConfig struct { + // NodesCountPerPage defines how many nodes each page would contain + NodesCountPerPage int64 + // CachedNodesCount defines the number of nodes we want to retain in memory between consecutive Evict calls. + CachedNodesCount int + // PageFillFactor defines the desired fill ratio of a created page. + PageFillFactor float32 + // MaxChildrenPagesThreshold define the maximum number of different pages that would be used for a single node's children. + // it's being evaluated during Commit, for all the updated nodes. 
+ MaxChildrenPagesThreshold uint64 +} + // Trie is a merkle trie intended to efficiently calculate the merkle root of // unordered elements type Trie struct { - root storedNodeIdentifier - nextNodeID storedNodeIdentifier - cache *merkleTrieCache - elementLength int + root storedNodeIdentifier + nextNodeID storedNodeIdentifier + lastCommittedNodeID storedNodeIdentifier + cache *merkleTrieCache + elementLength int } // Stats structure is a helper for finding underlaying statistics about the trie @@ -62,11 +76,12 @@ type Stats struct { } // MakeTrie creates a merkle trie -func MakeTrie(committer Committer, cachedNodesCount int) (*Trie, error) { +func MakeTrie(committer Committer, memoryConfig MemoryConfig) (*Trie, error) { mt := &Trie{ - root: storedNodeIdentifierNull, - cache: &merkleTrieCache{}, - nextNodeID: storedNodeIdentifierBase, + root: storedNodeIdentifierNull, + cache: &merkleTrieCache{}, + nextNodeID: storedNodeIdentifierBase, + lastCommittedNodeID: storedNodeIdentifierBase, } if committer == nil { committer = &InMemoryCommitter{} @@ -79,7 +94,7 @@ func MakeTrie(committer Committer, cachedNodesCount int) (*Trie, error) { if err != nil { return nil, err } - if pageSize != committer.GetNodesCountPerPage() { + if pageSize != memoryConfig.NodesCountPerPage { return nil, ErrMismatchingPageSize } } @@ -87,16 +102,12 @@ func MakeTrie(committer Committer, cachedNodesCount int) (*Trie, error) { return nil, err } } - - mt.cache.initialize(mt, committer, cachedNodesCount) + mt.cache.initialize(mt, committer, memoryConfig) return mt, nil } // SetCommitter set the provided committter as the current committer, and return the old one. 
func (mt *Trie) SetCommitter(committer Committer) (prevCommitter Committer) { - if committer.GetNodesCountPerPage() != mt.cache.committer.GetNodesCountPerPage() { - panic("committer has to retain the name page size") - } prevCommitter = mt.cache.committer mt.cache.committer = committer return @@ -108,7 +119,7 @@ func (mt *Trie) RootHash() (crypto.Digest, error) { return crypto.Digest{}, nil } if mt.cache.modified { - if err := mt.Commit(); err != nil { + if _, err := mt.Commit(); err != nil { return crypto.Digest{}, err } } @@ -212,17 +223,21 @@ func (mt *Trie) GetStats() (stats Stats, err error) { } // Commit stores the existings trie using the committer. -func (mt *Trie) Commit() error { - bytes := mt.serialize() - mt.cache.committer.StorePage(storedNodeIdentifierNull, bytes) - return mt.cache.commit() +func (mt *Trie) Commit() (stats CommitStats, err error) { + stats, err = mt.cache.commit() + if err == nil { + mt.lastCommittedNodeID = mt.nextNodeID + bytes := mt.serialize() + err = mt.cache.committer.StorePage(storedNodeIdentifierNull, bytes) + } + return } // Evict removes elements from the cache that are no longer needed. 
func (mt *Trie) Evict(commit bool) (int, error) { if commit { if mt.cache.modified { - if err := mt.Commit(); err != nil { + if _, err := mt.Commit(); err != nil { return 0, err } } @@ -241,7 +256,7 @@ func (mt *Trie) serialize() []byte { root := binary.PutUvarint(serializedBuffer[version:], uint64(mt.root)) next := binary.PutUvarint(serializedBuffer[version+root:], uint64(mt.nextNodeID)) elementLength := binary.PutUvarint(serializedBuffer[version+root+next:], uint64(mt.elementLength)) - pageSizeLength := binary.PutUvarint(serializedBuffer[version+root+next+elementLength:], uint64(mt.cache.committer.GetNodesCountPerPage())) + pageSizeLength := binary.PutUvarint(serializedBuffer[version+root+next+elementLength:], uint64(mt.cache.nodesPerPage)) return serializedBuffer[:version+root+next+elementLength+pageSizeLength] } @@ -272,14 +287,7 @@ func (mt *Trie) deserialize(bytes []byte) (int64, error) { } mt.root = storedNodeIdentifier(root) mt.nextNodeID = storedNodeIdentifier(nextNodeID) + mt.lastCommittedNodeID = storedNodeIdentifier(nextNodeID) mt.elementLength = int(elemLength) return int64(pageSize), nil } - -// reset is used to reset the trie to a given root & nextID. It's used exclusively as part of the -// transaction rollback recovery in case no persistence could be established. -func (mt *Trie) reset(root, nextID storedNodeIdentifier) { - mt.root = root - mt.nextNodeID = nextID - mt.cache.initialize(mt, mt.cache.committer, mt.cache.cachedNodeCount) -} diff --git a/crypto/merkletrie/trie_test.go b/crypto/merkletrie/trie_test.go index a7146816b2..c59419d1ea 100644 --- a/crypto/merkletrie/trie_test.go +++ b/crypto/merkletrie/trie_test.go @@ -30,7 +30,7 @@ const ( ) func TestAddingAndRemoving(t *testing.T) { - mt, _ := MakeTrie(nil, defaultTestEvictSize) + mt, _ := MakeTrie(nil, defaultTestMemoryConfig) // create 10000 hashes. 
hashes := make([]crypto.Digest, 10000) for i := 0; i < len(hashes); i++ { @@ -82,7 +82,7 @@ func TestAddingAndRemoving(t *testing.T) { } func TestRandomAddingAndRemoving(t *testing.T) { - mt, err := MakeTrie(nil, defaultTestEvictSize) + mt, err := MakeTrie(nil, defaultTestMemoryConfig) require.NoError(t, err) // create 10000 hashes. @@ -131,7 +131,7 @@ func TestRandomAddingAndRemoving(t *testing.T) { nextOperation = 1 } if (i % (1 + int(processesHash[0]))) == 42 { - err := mt.Commit() + _, err := mt.Commit() require.NoError(t, err) verifyCacheNodeCount(t, mt) } diff --git a/crypto/vrf.go b/crypto/vrf.go index 25de87e7cb..4e1f549e1f 100644 --- a/crypto/vrf.go +++ b/crypto/vrf.go @@ -25,6 +25,8 @@ package crypto // #cgo linux,arm64 LDFLAGS: ${SRCDIR}/libs/linux/arm64/lib/libsodium.a // #cgo linux,arm CFLAGS: -I${SRCDIR}/libs/linux/arm/include // #cgo linux,arm LDFLAGS: ${SRCDIR}/libs/linux/arm/lib/libsodium.a +// #cgo windows,amd64 CFLAGS: -I${SRCDIR}/libs/windows/amd64/include +// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/libs/windows/amd64/lib/libsodium.a // #include // #include "sodium.h" import "C" diff --git a/daemon/algod/api/algod.oas2.json b/daemon/algod/api/algod.oas2.json index 8210a5d66b..9612ca71c7 100644 --- a/daemon/algod/api/algod.oas2.json +++ b/daemon/algod/api/algod.oas2.json @@ -69,6 +69,33 @@ } } }, + "/genesis": { + "get": { + "description": "Returns the entire genesis file in json.", + "tags": [ + "common" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "http" + ], + "summary": "Gets the genesis information.", + "operationId": "GetGenesis", + "responses": { + "200": { + "description": "The genesis file in json.", + "schema": { + "type": "string" + } + }, + "default": { + "description": "Unknown Error" + } + } + } + }, "/swagger.json": { "get": { "description": "Returns the entire swagger spec in json.", @@ -96,6 +123,26 @@ } } }, + "/versions": { + "get": { + "description": "Retrieves the supported API versions, binary build 
versions, and genesis information.", + "tags": [ + "common" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "http" + ], + "operationId": "GetVersion", + "responses": { + "200": { + "$ref": "#/responses/VersionsResponse" + } + } + } + }, "/v2/accounts/{address}": { "get": { "description": "Given a specific account public key, this call returns the accounts status, balance and spendable amounts", @@ -412,7 +459,6 @@ ], "responses": { "200": { - "description": "(empty)", "schema": { "type": "object" } @@ -662,7 +708,7 @@ }, "/v2/transactions/pending/{txid}": { "get": { - "description": "Given a transaction id of a recently submitted transaction, it returns information about it. There are several cases when this might succeed:\n- transaction committed (committed round \u003e 0) - transaction still in the pool (committed round = 0, pool error = \"\") - transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n", + "description": "Given a transaction id of a recently submitted transaction, it returns information about it. 
There are several cases when this might succeed:\n- transaction committed (committed round \u003e 0)\n- transaction still in the pool (committed round = 0, pool error = \"\")\n- transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n", "produces": [ "application/json", "application/msgpack" @@ -926,6 +972,10 @@ "description": "OK", "$ref": "#/responses/CatchpointStartResponse" }, + "201": { + "description": "OK", + "$ref": "#/responses/CatchpointStartResponse" + }, "400": { "description": "Bad Request", "schema": { @@ -1605,68 +1655,6 @@ } } }, - "Version": { - "description": "Note that we annotate this as a model so that legacy clients\ncan directly import a swagger generated Version model.", - "type": "object", - "required": [ - "build", - "genesis-hash", - "genesis-id", - "versions" - ], - "properties": { - "build": { - "$ref": "#/definitions/VersionBuild" - }, - "genesis-hash": { - "type": "string", - "format": "byte" - }, - "genesis-id": { - "type": "string" - }, - "versions": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/common" - }, - "VersionBuild": { - "description": "the current algod build version information.", - "type": "object", - "required": [ - "branch", - "build-number", - "channel", - "commit-hash", - "major", - "minor" - ], - "properties": { - "branch": { - "type": "string" - }, - "build-number": { - "type": "integer" - }, - "channel": { - "type": "string" - }, - "commit-hash": { - "type": "string", - "format": "byte" - }, - "major": { - "type": "integer" - }, - "minor": { - "type": "integer" - } - } - }, "DryrunRequest": { "description": "Request data type for dryrun endpoint. 
Given the Transactions and simulated ledger state upload, run TEAL scripts and return debugging information.", "type": "object", @@ -1748,6 +1736,73 @@ "x-algorand-format": "uint64" } } + }, + "Version": { + "description": "algod version information.", + "type": "object", + "title": "Version contains the current algod version.", + "required": [ + "versions", + "genesis_id", + "genesis_hash_b64", + "build" + ], + "properties": { + "build": { + "$ref": "#/definitions/BuildVersion" + }, + "genesis_hash_b64": { + "type": "string", + "format": "byte" + }, + "genesis_id": { + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "BuildVersion": { + "tags": [ + "common" + ], + "type": "object", + "title": "BuildVersion contains the current algod build version information.", + "required": [ + "major", + "minor", + "build_number", + "commit_hash", + "branch", + "channel" + ], + "properties": { + "branch": { + "type": "string" + }, + "build_number": { + "type": "integer", + "format": "int64" + }, + "channel": { + "type": "string" + }, + "commit_hash": { + "type": "string" + }, + "major": { + "type": "integer", + "format": "int64" + }, + "minor": { + "type": "integer", + "format": "int64" + } + } } }, "parameters": { @@ -1922,7 +1977,6 @@ }, "responses": { "AccountResponse": { - "description": "(empty)", "schema": { "$ref": "#/definitions/Account" } @@ -1952,7 +2006,6 @@ "tags": [ "private" ], - "description": "(empty)", "schema": { "description": "An catchpoint start response.", "type": "object", @@ -1971,7 +2024,6 @@ "tags": [ "private" ], - "description": "(empty)", "schema": { "description": "An catchpoint abort response.", "type": "object", @@ -1987,7 +2039,6 @@ } }, "NodeStatusResponse": { - "description": "(empty)", "schema": { "description": "NodeStatus contains the information about a node status", "type": "object", @@ -2288,6 +2339,12 @@ } } } + }, + "VersionsResponse": { + "description": "VersionsResponse is the 
response to 'GET /versions'", + "schema": { + "$ref": "#/definitions/Version" + } } }, "securityDefinitions": { diff --git a/daemon/algod/api/algod.oas3.yml b/daemon/algod/api/algod.oas3.yml index 688ee9c26d..2f286218c4 100644 --- a/daemon/algod/api/algod.oas3.yml +++ b/daemon/algod/api/algod.oas3.yml @@ -230,8 +230,7 @@ "$ref": "#/components/schemas/Account" } } - }, - "description": "(empty)" + } }, "ApplicationResponse": { "content": { @@ -297,8 +296,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "CatchpointStartResponse": { "content": { @@ -317,8 +315,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "CompileResponse": { "content": { @@ -450,8 +447,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "PendingTransactionResponse": { "content": { @@ -644,6 +640,16 @@ } }, "description": "TransactionParams contains the parameters that help a client construct a new transaction." + }, + "VersionsResponse": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Version" + } + } + }, + "description": "VersionsResponse is the response to 'GET /versions'" } }, "schemas": { @@ -1000,6 +1006,41 @@ ], "type": "object" }, + "BuildVersion": { + "properties": { + "branch": { + "type": "string" + }, + "build_number": { + "format": "int64", + "type": "integer" + }, + "channel": { + "type": "string" + }, + "commit_hash": { + "type": "string" + }, + "major": { + "format": "int64", + "type": "integer" + }, + "minor": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "branch", + "build_number", + "channel", + "commit_hash", + "major", + "minor" + ], + "title": "BuildVersion contains the current algod build version information.", + "type": "object" + }, "DryrunRequest": { "description": "Request data type for dryrun endpoint. 
Given the Transactions and simulated ledger state upload, run TEAL scripts and return debugging information.", "properties": { @@ -1274,17 +1315,17 @@ "type": "object" }, "Version": { - "description": "Note that we annotate this as a model so that legacy clients\ncan directly import a swagger generated Version model.", + "description": "algod version information.", "properties": { "build": { - "$ref": "#/components/schemas/VersionBuild" + "$ref": "#/components/schemas/BuildVersion" }, - "genesis-hash": { + "genesis_hash_b64": { "format": "byte", "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", "type": "string" }, - "genesis-id": { + "genesis_id": { "type": "string" }, "versions": { @@ -1296,45 +1337,11 @@ }, "required": [ "build", - "genesis-hash", - "genesis-id", + "genesis_hash_b64", + "genesis_id", "versions" ], - "type": "object", - "x-go-package": "github.com/algorand/go-algorand/daemon/algod/api/spec/common" - }, - "VersionBuild": { - "description": "the current algod build version information.", - "properties": { - "branch": { - "type": "string" - }, - "build-number": { - "type": "integer" - }, - "channel": { - "type": "string" - }, - "commit-hash": { - "format": "byte", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$", - "type": "string" - }, - "major": { - "type": "integer" - }, - "minor": { - "type": "integer" - } - }, - "required": [ - "branch", - "build-number", - "channel", - "commit-hash", - "major", - "minor" - ], + "title": "Version contains the current algod version.", "type": "object" } }, @@ -1359,6 +1366,32 @@ }, "openapi": "3.0.1", "paths": { + "/genesis": { + "get": { + "description": "Returns the entire genesis file in json.", + "operationId": "GetGenesis", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "string" + } + } + }, + "description": "The genesis file in json." 
+ }, + "default": { + "content": {}, + "description": "Unknown Error" + } + }, + "summary": "Gets the genesis information.", + "tags": [ + "common" + ] + } + }, "/health": { "get": { "operationId": "HealthCheck", @@ -1464,8 +1497,7 @@ "$ref": "#/components/schemas/Account" } } - }, - "description": "(empty)" + } }, "400": { "content": { @@ -2010,8 +2042,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "400": { "content": { @@ -2089,8 +2120,26 @@ "type": "object" } } - }, - "description": "(empty)" + } + }, + "201": { + "content": { + "application/json": { + "schema": { + "description": "An catchpoint start response.", + "properties": { + "catchup-message": { + "description": "Catchup start response string", + "type": "string" + } + }, + "required": [ + "catchup-message" + ], + "type": "object" + } + } + } }, "400": { "content": { @@ -2281,8 +2330,7 @@ "type": "object" } } - }, - "description": "(empty)" + } } }, "tags": [ @@ -2370,8 +2418,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "401": { "content": { @@ -2494,8 +2541,7 @@ "type": "object" } } - }, - "description": "(empty)" + } }, "400": { "content": { @@ -3026,7 +3072,7 @@ }, "/v2/transactions/pending/{txid}": { "get": { - "description": "Given a transaction id of a recently submitted transaction, it returns information about it. There are several cases when this might succeed:\n- transaction committed (committed round > 0) - transaction still in the pool (committed round = 0, pool error = \"\") - transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n", + "description": "Given a transaction id of a recently submitted transaction, it returns information about it. 
There are several cases when this might succeed:\n- transaction committed (committed round > 0)\n- transaction still in the pool (committed round = 0, pool error = \"\")\n- transaction removed from pool due to error (committed round = 0, pool error != \"\")\nOr the transaction may have happened sufficiently long ago that the node no longer remembers it, and this will return an error.\n", "operationId": "PendingTransactionInformation", "parameters": [ { @@ -3230,6 +3276,27 @@ }, "summary": "Get a specific pending transaction." } + }, + "/versions": { + "get": { + "description": "Retrieves the supported API versions, binary build versions, and genesis information.", + "operationId": "GetVersion", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Version" + } + } + }, + "description": "VersionsResponse is the response to 'GET /versions'" + } + }, + "tags": [ + "common" + ] + } } }, "security": [ diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go index 9db7b17d51..f7d8449d8e 100644 --- a/daemon/algod/api/client/restClient.go +++ b/daemon/algod/api/client/restClient.go @@ -87,11 +87,11 @@ func (client *RestClient) SetAPIVersionAffinity(affinity APIVersion) (previousAf return } -// extractError checks if the response signifies an error (for now, StatusCode != 200). +// extractError checks if the response signifies an error (for now, StatusCode != 200 or StatusCode != 201). // If so, it returns the error. // Otherwise, it returns nil. 
func extractError(resp *http.Response) error { - if resp.StatusCode == 200 { + if resp.StatusCode == 200 || resp.StatusCode == 201 { return nil } diff --git a/daemon/algod/api/server/common/handlers.go b/daemon/algod/api/server/common/handlers.go index 64d8986a92..6bacca9632 100644 --- a/daemon/algod/api/server/common/handlers.go +++ b/daemon/algod/api/server/common/handlers.go @@ -27,6 +27,27 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/spec/common" ) +// GenesisJSON is an httpHandler for route GET /genesis +func GenesisJSON(ctx lib.ReqContext, context echo.Context) { + // swagger:operation GET /genesis GenesisJSON + //--- + // Summary: Gets the genesis information + // Description: Returns the entire genesis file in json. + // Produces: + // - application/json + // Schemes: + // - http + // Responses: + // 200: + // description: The current genesis information + // schema: {type: string} + // default: { description: Unknown Error } + w := context.Response().Writer + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte(lib.GenesisJSONText)) +} + // SwaggerJSON is an httpHandler for route GET /swagger.json func SwaggerJSON(ctx lib.ReqContext, context echo.Context) { // swagger:operation GET /swagger.json SwaggerJSON @@ -62,6 +83,7 @@ func HealthCheck(ctx lib.ReqContext, context echo.Context) { // description: OK. 
// default: { description: Unknown Error } w := context.Response().Writer + w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(nil) } diff --git a/daemon/algod/api/server/common/routes.go b/daemon/algod/api/server/common/routes.go index 9e0229271a..927a6546c7 100644 --- a/daemon/algod/api/server/common/routes.go +++ b/daemon/algod/api/server/common/routes.go @@ -47,4 +47,11 @@ var Routes = lib.Routes{ Path: "/swagger.json", HandlerFunc: SwaggerJSON, }, + + lib.Route{ + Name: "genesis", + Method: "GET", + Path: "/genesis", + HandlerFunc: GenesisJSON, + }, } diff --git a/daemon/algod/api/server/lib/common.go b/daemon/algod/api/server/lib/common.go index 3562564763..5c5d1a1286 100644 --- a/daemon/algod/api/server/lib/common.go +++ b/daemon/algod/api/server/lib/common.go @@ -28,6 +28,9 @@ import ( // SwaggerSpecJSON is autogenerated from swagger.json, and bundled in with a script on build. var SwaggerSpecJSON string +// GenesisJSONText is initialized when the node starts. +var GenesisJSONText string + // HandlerFunc defines a wrapper for http.HandlerFunc that includes a context type HandlerFunc func(ReqContext, echo.Context) diff --git a/daemon/algod/api/server/lib/middlewares/auth.go b/daemon/algod/api/server/lib/middlewares/auth.go index e168f00a1f..cd5a38ed08 100644 --- a/daemon/algod/api/server/lib/middlewares/auth.go +++ b/daemon/algod/api/server/lib/middlewares/auth.go @@ -25,8 +25,16 @@ import ( "github.com/labstack/echo/v4" ) +// TokenPathParam is the name of the path parameter used by URLAuthPrefix +const TokenPathParam = "token" + +// URLAuthPrefix is the echo formatted url/path param which can be used for supplying an API token. +const URLAuthPrefix = "/urlAuth/:" + TokenPathParam const urlAuthFormatter = "/urlAuth/%s" +// InvalidTokenMessage is the message set when an invalid / missing token is found. 
+const InvalidTokenMessage = "Invalid API Token" + // AuthMiddleware provides some data to the handler. type AuthMiddleware struct { // Header is the token header which needs to be provided. For example 'X-Algod-API-Token'. @@ -71,7 +79,7 @@ func (auth *AuthMiddleware) handler(next echo.HandlerFunc) echo.HandlerFunc { } // Handle debug routes with /urlAuth/:token prefix. - if ctx.Param("token") != "" { + if ctx.Param(TokenPathParam) != "" { // For debug routes, we place the apiToken in the path itself providedToken = []byte(ctx.Param("token")) @@ -93,6 +101,6 @@ func (auth *AuthMiddleware) handler(next echo.HandlerFunc) echo.HandlerFunc { } } - return echo.NewHTTPError(http.StatusUnauthorized, "Invalid API Token") + return echo.NewHTTPError(http.StatusUnauthorized, InvalidTokenMessage) } } diff --git a/daemon/algod/api/server/lib/middlewares/auth_test.go b/daemon/algod/api/server/lib/middlewares/auth_test.go new file mode 100644 index 0000000000..30e3bbd13c --- /dev/null +++ b/daemon/algod/api/server/lib/middlewares/auth_test.go @@ -0,0 +1,188 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package middlewares + +import ( + "errors" + "net/http" + "testing" + + "github.com/labstack/echo/v4" + "github.com/stretchr/testify/require" +) + +var errSuccess = errors.New("unexpected success") +var invalidTokenError = echo.NewHTTPError(http.StatusUnauthorized, InvalidTokenMessage) +var e = echo.New() +var testAPIHeader = "API-Header-Whatever" + +// success is the "next" handler, it is only called when auth allows the request to continue +func success(ctx echo.Context) error { + return errSuccess +} + +func TestAuth(t *testing.T) { + tokens := []string{"token1", "token2"} + + tests := []struct { + name string + url string + header string + token string + method string + expectResponse error + finalPath string + }{ + { + "Valid token (1)", + "N/A", + testAPIHeader, + tokens[0], + "GET", + errSuccess, + "", + }, + { + "Valid token (2)", + "N/A", + testAPIHeader, + tokens[1], + "GET", + errSuccess, + "", + }, + { + "Valid token Bearer Format (1)", + "N/A", + "Authorization", + "Bearer " + tokens[0], + "GET", + errSuccess, + "", + }, + { + "Valid token Bearer Format (2)", + "N/A", + "Authorization", + "Bearer " + tokens[1], + "GET", + errSuccess, + "", + }, + { + "Invalid token", + "N/A", + testAPIHeader, + "invalid_token", + "GET", + invalidTokenError, + "", + }, + { + "Invalid token Bearer Format", + "N/A", + "Authorization", + "Bearer invalid_token", + "GET", + invalidTokenError, + "", + }, + { + "Missing token", + "N/A", + "", + "", + "GET", + invalidTokenError, + "", + }, + { + "Invalid token + OPTIONS", + "N/A", + testAPIHeader, + "invalid_token", + "OPTIONS", + errSuccess, + "", + }, + { + "Invalid bearer token + OPTIONS", + "N/A", + "Authorization", + "Bearer invalid_token", + "OPTIONS", + errSuccess, + "", + }, + { + "Token in url (1)", + "http://my-node.com:80/urlAuth/" + tokens[0] + "/v2/status", + "", + tokens[0], + "GET", + errSuccess, + "/v2/status", + }, + { + "Token in url (2)", + "http://my-node.com:80/urlAuth/" + tokens[1] + "/v2/status", 
+ "", + tokens[1], + "GET", + errSuccess, + "/v2/status", + }, + { + "Invalid token in url", + "http://my-node.com:80/urlAuth/invalid_token/v2/status", + "", + "invalid_token", + "GET", + invalidTokenError, + "", + }, + } + + authFn := MakeAuth(testAPIHeader, tokens) + handler := authFn(success) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, _ := http.NewRequest(test.method, test.url, nil) + if test.header != "" { + req.Header.Set(test.header, test.token) + } + ctx := e.NewContext(req, nil) + + // There is no router to update the context based on the url, so do it manually. + if test.header == "" && test.token != "" { + ctx.SetParamNames(TokenPathParam) + ctx.SetParamValues(test.token) + } + ctx.SetPath("") + + err := handler(ctx) + require.Equal(t, test.expectResponse, err, test.name) + + // In some cases the auth rewrites the path, make sure the path has been rewritten + if test.finalPath != "" { + require.Equal(t, test.finalPath, ctx.Path()) + require.Equal(t, test.finalPath, ctx.Request().URL.Path) + } + }) + } +} diff --git a/daemon/algod/api/server/router.go b/daemon/algod/api/server/router.go index 6067b21687..9e48a893f4 100644 --- a/daemon/algod/api/server/router.go +++ b/daemon/algod/api/server/router.go @@ -56,10 +56,11 @@ // loader.Config.Import(), and that breaks the vendor directory if the source is symlinked from elsewhere) //go:generate swagger generate spec -o="../swagger.json" //go:generate swagger validate ../swagger.json --stop-on-error -//go:generate ./lib/bundle_swagger_json.sh +//go:generate sh ./lib/bundle_swagger_json.sh package server import ( + "fmt" "net" "net/http" @@ -130,7 +131,7 @@ func NewRouter(logger logging.Logger, node *node.AlgorandFullNode, shutdown <-ch // The auth middleware removes /urlAuth/:token so that it can be routed correctly. 
if node.Config().EnableProfiler { e.GET("/debug/pprof/*", echo.WrapHandler(http.DefaultServeMux), adminAuthenticator) - e.GET("/urlAuth/:token/debug/pprof/*", echo.WrapHandler(http.DefaultServeMux), adminAuthenticator) + e.GET(fmt.Sprintf("%s/debug/pprof/*", middlewares.URLAuthPrefix), echo.WrapHandler(http.DefaultServeMux), adminAuthenticator) } // Registering common routes (no auth) registerHandlers(e, "", common.Routes, ctx) diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go index 313d8eb20f..d8706556a7 100644 --- a/daemon/algod/api/server/v1/handlers/handlers.go +++ b/daemon/algod/api/server/v1/handlers/handlers.go @@ -17,6 +17,7 @@ package handlers import ( + "database/sql" "encoding/base64" "errors" "fmt" @@ -737,7 +738,7 @@ func AccountInformation(ctx lib.ReqContext, context echo.Context) { lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) return } - recordWithoutPendingRewards, err := ledger.LookupWithoutRewards(lastRound, basics.Address(addr)) + recordWithoutPendingRewards, _, err := ledger.LookupWithoutRewards(lastRound, basics.Address(addr)) if err != nil { lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) return @@ -1321,20 +1322,28 @@ func Assets(ctx lib.ReqContext, context echo.Context) { const maxAssetsToList = 100 + var err error + var max int64 = maxAssetsToList + var assetIdx int64 = 0 + // Parse max assets to fetch from db - max, err := strconv.ParseInt(r.FormValue("max"), 10, 64) - if err != nil || max < 0 || max > maxAssetsToList { - err := fmt.Errorf(errFailedParsingMaxAssetsToList, 0, maxAssetsToList) - lib.ErrorResponse(w, http.StatusBadRequest, err, err.Error(), ctx.Log) - return + if r.PostFormValue("max") != "" { + max, err = strconv.ParseInt(r.FormValue("max"), 10, 64) + if err != nil || max < 0 || max > maxAssetsToList { + err := fmt.Errorf(errFailedParsingMaxAssetsToList, 0, maxAssetsToList) 
+ lib.ErrorResponse(w, http.StatusBadRequest, err, err.Error(), ctx.Log) + return + } } // Parse maximum asset idx - assetIdx, err := strconv.ParseInt(r.FormValue("assetIdx"), 10, 64) - if err != nil || assetIdx < 0 { - errs := errFailedParsingAssetIdx - lib.ErrorResponse(w, http.StatusBadRequest, errors.New(errs), errs, ctx.Log) - return + if r.PostFormValue("assetIdx") != "" { + assetIdx, err = strconv.ParseInt(r.FormValue("assetIdx"), 10, 64) + if err != nil || assetIdx < 0 { + errs := errFailedParsingAssetIdx + lib.ErrorResponse(w, http.StatusBadRequest, errors.New(errs), errs, ctx.Log) + return + } } // If assetIdx is 0, we want the most recent assets, so make it intmax @@ -1607,7 +1616,7 @@ func Transactions(ctx lib.ReqContext, context echo.Context) { // swagger:operation GET /v1/account/{address}/transactions Transactions // --- // Summary: Get a list of confirmed transactions. - // Description: Returns the list of confirmed transactions between within a date range. This call is available only when the indexer is running. + // Description: Returns the list of confirmed transactions between within a date range. When indexer is disabled this call requires firstRound and lastRound and returns an error if firstRound is not available to the node. The transaction results start from the oldest round. 
// Produces: // - application/json // Schemes: @@ -1838,6 +1847,10 @@ func GetTransactionByID(ctx lib.ReqContext, context echo.Context) { } rnd, err := indexer.GetRoundByTXID(queryTxID) + if err == sql.ErrNoRows { + lib.ErrorResponse(w, http.StatusNotFound, err, errTransactionNotFound, ctx.Log) + return + } if err != nil { lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedGettingInformationFromIndexer, ctx.Log) return diff --git a/daemon/algod/api/server/v2/dryrun.go b/daemon/algod/api/server/v2/dryrun.go index a2c62a921b..c1b0b5f414 100644 --- a/daemon/algod/api/server/v2/dryrun.go +++ b/daemon/algod/api/server/v2/dryrun.go @@ -411,12 +411,11 @@ func doDryrunRequest(dr *DryrunRequest, proto *config.ConsensusParams, response GroupIndex: ti, //Logger: nil, // TODO: capture logs, send them back } - var result *generated.DryrunTxnResult + var result generated.DryrunTxnResult if len(stxn.Lsig.Logic) > 0 { var debug dryrunDebugReceiver ep.Debugger = &debug pass, err := logic.Eval(stxn.Lsig.Logic, ep) - result = new(generated.DryrunTxnResult) var messages []string result.Disassembly = debug.lines result.LogicSigTrace = &debug.history @@ -460,9 +459,6 @@ func doDryrunRequest(dr *DryrunRequest, proto *config.ConsensusParams, response } } var messages []string - if result == nil { - result = new(generated.DryrunTxnResult) - } if !ok { messages = make([]string, 1) messages[0] = fmt.Sprintf("uploaded state did not include app id %d referenced in txn[%d]", appIdx, ti) @@ -507,7 +503,7 @@ func doDryrunRequest(dr *DryrunRequest, proto *config.ConsensusParams, response } result.AppCallMessages = &messages } - response.Txns[ti] = *result + response.Txns[ti] = result } } diff --git a/daemon/algod/api/server/v2/generated/private/routes.go b/daemon/algod/api/server/v2/generated/private/routes.go index 5cfcc95a5a..14c53e1f99 100644 --- a/daemon/algod/api/server/v2/generated/private/routes.go +++ b/daemon/algod/api/server/v2/generated/private/routes.go @@ -235,131 
+235,132 @@ func RegisterHandlers(router interface { // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9f3fbNhLgV8Fp970mOVFyfnU3fq9vz036w9c0zYvdvbuNc1uIHEmoSYAlQMtqzt/9", - "3gwAEiRBSXa82eu7/SuxAAwGg5nBzGAw/DhJVVEqCdLoyfHHSckrXoCBiv7iaapqaRKR4V8Z6LQSpRFK", - "To59G9OmEnI1mU4E/lpys55MJ5IX0PbB8dNJBb/VooJscmyqGqYTna6h4AjYbEvs3UC6TlYqcSBOLIjT", - "V5ObHQ08yyrQeojlTzLfMiHTvM6AmYpLzVNs0mwjzJqZtdDMDWZCMiWBqSUz605nthSQZ3rmF/lbDdU2", - "WKWbfHxJNy2KSaVyGOL5UhULIcFjBQ1SzYYwo1gGS+q05obhDIir72gU08CrdM2WqtqDqkUixBdkXUyO", - "3080yAwq2q0UxBX9d1kB/A6J4dUKzOTDNLa4pYEqMaKILO3UUb8CXedGM+pLa1yJK5AMR83Yj7U2bAGM", - "S/bu25fs6dOnL3AhBTcGMsdko6tqZw/XZIdPjicZN+Cbh7zG85WquMySpv+7b1/S/GdugYf24lpDXFhO", - "sIWdvhpbgB8YYSEhDaxoHzrcjyMiQtH+vIClquDAPbGd73VTwvn/rbuScpOuSyWkiewLo1Zmm6M6LBi+", - "S4c1CHT6l0ipCoG+P0pefPj4ePr46OZP70+Sf7g/nz+9OXD5Lxu4eygQ7ZjWVQUy3SarCjhJy5rLIT3e", - "OX7Qa1XnGVvzK9p8XpCqd2MZjrWq84rnNfKJSCt1kq+UZtyxUQZLXueG+YlZLXNUUwjNcTsTmpWVuhIZ", - "ZFPUvpu1SNcs5dqCoH5sI/IcebDWkI3xWnx1O4TpJiQJ4nUnetCC/t8lRruuPZSAa9IGSZorDYlRe44n", - "f+JwmbHwQGnPKn27w4qdr4HR5NhgD1uinUSezvMtM7SvGeOaceaPpikTS7ZVNdvQ5uTiksa71SDVCoZE", - "o83pnKMovGPkGxAjQryFUjlwScTzcjckmVyKVV2BZps1mLU78yrQpZIamFr8CqnBbf/vZz+9YapiP4LW", - "fAVveXrJQKYqG99jN2nsBP9VK9zwQq9Knl7Gj+tcFCKC8o/8WhR1wWRdLKDC/fLng1GsAlNXcgwhC3EP", - "nxX8ejjpeVXLlDa3nbZjqCErCV3mfDtjp0tW8OuvjqYOHc14nrMSZCbkiplrOWqk4dz70UsqVcvsABvG", - "4IYFp6YuIRVLARlroOzAxE2zDx8hb4dPa1kF6Hggo+g0s+xBR8J1hGdQdLGFlXwFAcvM2M9Oc1GrUZcg", - "GwXHFltqKiu4EqrWzaARHGnq3ea1VAaSsoKliPDYmSMHag/bx6nXwhk4qZKGCwkZal5CWhmwmmgUp2DC", - "3c7M8IhecA1fPhs7wNvWA3d/qfq7vnPHD9pt6pRYkYyci9jqBDZuNnXGH+D8hXNrsUrsz4ONFKtzPEqW", - "Iqdj5lfcP0+GWpMS6BDCHzxarCQ3dQXHF/IR/sUSdma4zHiV4S+F/enHOjfiTKzwp9z+9FqtRHomViPE", - "bHCNelM0rLD/ILy4OjbXUafhtVKXdRkuKO14pYstO301tskW5m0Z86RxZUOv4vzaexq3HWGum40cQXKU", - "diXHjpewrQCx5emS/rleEj/xZfU7/lOWeYymyMDuoKWggAsWvHO/4U8o8mB9AoQiUo5EndPxefwxQOjP", - 
"FSwnx5M/zdtIydy26rmDa2fs7t4DKEqzfYhUOGnh3z8G7cgYFkEzE9LuGnWdWl/x/vFBqFFMyIDt4fB1", - "rtLLO+FQVqqEygi7vwuEM5QgAs/WwDOoWMYNn7XOlrW/RuSABn5P48h7gipy9P1E/+E5w2aUTm68WYcm", - "rdBo3KkgAJWhJWjPFzsTdiALVbHCGn8MjbZbYfmyndwq7kbTvndk+dCHFtmdb6y9yWiEXwQuvfUmTxaq", - "uhu/9BhBstZHZhyhNlYxrry7s9S1LhNHn4idbTv0ALVhyaG6DSnUB38IrQLJbqlzZvi/gDoaod4HdbqA", - "Phd1VFGKHO5Bvtdcr4eLQ0Pp6RN29v3J88dP/vnk+Zd40peVWlW8YIutAc0euPOJabPN4eFwxXRQ1LmJ", - "Q//ymffEunD3Uo4QbmAfQrdzQE1iKcZs3AGxe1Vtq1reAwmhqlQVsZ2JpYxKVZ5cQaWFioRB3roezPVA", - "vWXt997vFlu24Zrh3OTW1TKDahajPPprZBoYKPS+g8WCPr+WLW0cQF5VfDvYAbveyOrcvIfsSZf43kvQ", - "rIQqMdeSZbCoV+GZxpaVKhhnGQ0kBfpGZXBmuKn1PWiHFliLDG5EiAJfqNowzqTKUNCxc1xvjMREKRhD", - "MSQTqiKztufVAtDKTnm9WhuG5qmKbW07MOGp3ZSEzhY94kI2vr/tZaez8ba8Ap5t2QJAMrVwfprzIGmR", - "nMI7xt/cOK3VotX4Fh28ykqloDVkibum2ouav/KiTTY7yER4E77NJEwrtuTVHXE1yvB8D57UZ4itbq0P", - "59sOsT5s+l3715883EVeoatqmQBNHRTuHAyMkXAvTepy5FrDnXbnokCRYJJLpSFVMtNRYDnXJtknCtip", - "cyTjtgbcF+N+AjzivL/m2lj3WciMzDYrwjQPjaEpxhEe1dII+e9eQQ9hp6h7pK51o611XZaqMpDF1iDh", - "esdcb+C6mUstA9jNkWAUqzXsgzxGpQC+I5ZdiSUQNy5+08SXhoujUDnq1m2UlB0kWkLsQuTM9wqoG4Z2", - "RxBBG78ZSYwjdI9zmnjydKKNKkvUSSapZTNujExntveJ+bntO2QublpdmSnA2Y3HyWG+sZS1Qf01R3uJ", - "ILOCX6K+J+vH+vlDnFEYEy1kCskuzkexPMNeoQjsEdIRg9RdGwaz9YSjx79Rphtlgj27MLbgW1rHb23U", - "+ryN6NyDgfAKDBe5boyAJjTezkJR9H6GA1psFaQgTb5FHl6KqrAXUXR2aP+bNTEyN4u9cmnFUmasgg2v", - "Mt9j6LEEi0mEzOA6rnV5J26RwTUTcaSXzczCsNRfE8kQwCyqANzF2w4UXMDiLpPj0Pi09lrJUknHLhyp", - "AQWjEGmluL1HxMXYw9M0V2UVFByxoxstd9iPzynkKrHXlpFj07b7a00fTg55Jg7X88moxDessVkD3ZSg", - "Gu8RMeQ2dN9Aw9hCVrla8DxBoxaSDHKzNxyFxjK8op54fqp0OLyL8sXF+zy7uPjAXmNfsp+BXcJ2Tre7", - "LF1zuYI25B7yqbWM4RrSOlT1PTIe5Oy4uGIX+667M52USuVJ49b1rwgG6r9P90uRXkLGUE+QMepOpS+6", - "O4STsAfI4rq5RNmst97OLUuQkD2cMXYiGek2F1voWSC9yeUXZtf81zRrVtN9LpeMFjm7kHH33d4Gf6JM", - "eTC7JcmmR33iVBbI7onMtRwRJ76hywwEF5XPnRHDMxoZHDmDEzZgKovFIafad5QzxDu7LDJyQtpTRdeL", - "QlDiUNBtiprT3+UOvVhhZoydk+5AL0LDFVQ8p6wI7YOpQrNCoDOq6zQFyI4vZNLBJFWFm/hB+1+rli7q", - 
"o6OnwI4e9sdog+ajc5isDPTHfsWOpraJyMW+YheTi8kAUgWFuoLMOo0hX9tRe8H+lwbuhfxpoJhZwbfW", - "3fSyyHS9XIpUWKLnCvX6SvWsQKmoBSpED9Bp00yYKR1lRFGynu2+tAIYt1ruI64RgYp2Mx6lqO38DV6X", - "dzSDa57iKjkpmS3bIKM0fDY0PowqkxBANPy6Y0YXGNcdPX5HuRvqc+tl78bvvOdnd8gRsOtsvy09IEYU", - "g0PE/4SVCndduFwdn9CRC20GSDqHn25FGoaMHDoz9r9UzVJO8lvWBhpfS1XkwJBjizPQGevndJZaSyHI", - "oQAbBqGWR4/6C3/0yO250GwJG5/ghh375Hj0yAqB0uaTJaDHmtenEQOKgs94mkaSktdcr2d7A9EE96D4", - "cwD69JWfkIRJazpibqYTdIHz7T0IvAXEKnD2nu4Eg7RtVcswmc7tn95qA8UwommH/nPEEn3nPbfBSatk", - "LiQkhZKwjeaPCwk/UmP0nCYWGRlMwjo2tu/ZdvDvodWd55Dd/FT60m4HLPG2Se27h83vw+0Fs8M0QrIy", - "IS8ZZ2kuKFCopDZVnZoLySlw0TODemzhwzHjoayXvks8dhYJbTlQF5JrpGETzoheciwhEqj8FsBHtHS9", - "WoHumUVsCXAhXS8hWS2FobnIqkzshpVQ0W3UzPZES2DJc4q8/Q6VYovadFUvZTtZy8ZG1nEappYXkhuW", - "A9eG/Sjk+TWB8x6O5xkJZqOqy4YKIx4aSNBCJ/ELu+9s6/dcr/3ysaNXNm6wDR4j/DYlamugk079vx/8", - "7fj9SfIPnvx+lLz4r/MPH5/dPHw0+PHJzVdf/Z/uT09vvnr4tz/HdsrjHsvFcZifvnJmyekrOnvaoPoA", - "988WFC6ETKJMhu5CISSldPZ4iz3AE9Qz0MM2PO92/UKaa4mMdMVzkaELfBd26Ku4gSxa6ehxTWcjejE+", - "v9YPMXdnpZKSp5d0Dz5ZCbOuF7NUFXNvjs1XqjHN5hmHQklqy+a8FHN0b+dXj/ccjZ+gr1hEXVG2m/X5", - "gzSliFnqbp46HhJCtK81bLofegivYCmkwPbjC5lxw+cLrkWq57WG6muec5nCbKXYMXMgX3HDybHuhenG", - "HlRR0MNhU9aLXKTsMjzfWn4fizZdXLxHql9cfBjcGg1PIzdVPIJHEyQbYdaqNokLdY47520AgyDbYNeu", - "WafMwbbb7EKpDv5IVLEsdRKEmeLLL8sclx+cmZrRIEpSYtqoymsWVDcuUID7+0a5e7OKb3wKeY3O8C8F", - "L98LaT6wxDm1J2VJMSwKIv3iBBi17raEwwNRLYotsJjzQgu3VsqtE9cI6Jkd5SOzOk45bCLSUR8UtTbQ", - "dlc6IajvVY6be2cyBTCi1KnNOkGZiq5KI2uRPAQP//gKFYy/6EJfFJnPPURZAEvXkF5CRtF8CrxNO8P9", - "/bJT115khbZvR2x+GiU4k4+1AFaXGXcHGpfbfqapBmN8eu07uITtuWrzo2+TWnoznbhIeYI8MyYgJdIj", - "0Kxq2RUXH23vbb67sKBodlkyGzC2qX+eLY4bvvBjxgXIqvt7EJ4YUzRk2MHvJa8ihLDMP0KCOywU4X0S", - "60fD07wyIhWlXf9hAe+3nTEIZJ9Sj6pxtexr64EyjWpv2zlZcB1X3IAtuB8oQ/1UDj+TDVfYmydG748d", - "4y5yCK5qtJNsXpEF4ZdtH1SOoRbnEqhke5p6NLoUCY/ttbvrE1ftDR/d8R5ywO296UEu8pfzohvTFThv", - "Dld8NLw+mvh/Gty4B+/JmrR+r9j6wjBtnnjYp90+/d/n/PtE/8n0Vkn704lLrIpth5J0umeQw4q7aDKl", - 
"bDlGcah9oYMNQjx+Wi7R52dJ7PKea61SYS8YW13u5gA0/h4xZqMV7GAIMTYO0KYwHAFmb1Qom3J1GyQl", - "CIrbcQ+bAnjB37A/jNW+sXdm5V7zb6g7WiGatm9g7DYOQyrTSVQljVnmnV7MdlnAwD+IsSiqpmGQYRjK", - "0JADHcdJR7Mml7HQE1oVQGx45ocF5jp7IJZ4yD8MorEVrNChbZ1AlFYf1fi8jviVMpAsRaVNQv5ndHnY", - "6VtNxuC32DWufjqkYvaRrsji2oemvYRtkom8ju+2m/eHVzjtm8Zv0fXiErZ0yABP12xBj8rxFOpMj312", - "TG0TWHYu+LVd8Gt+b+s9jJewK05cKWV6c/xBuKqnT3YJU4QBY8wx3LVRku5QL8EV/1C3BMkFNhGBkhZm", - "u7z1gTDdOk1iVPNaSNG1BIbuzlXYbBqbMBO8yR4mKI/IAC9LkV33fGcLNc7jNMVtDHVr8Q+oQLvrgO2h", - "QOAnx/L1KvC+vt3S4My0r+sHuUv7KdPPmAoUQjiV0L42zJBQyNqU4rKPVufA8x9g+3fsS8uZ3Ewnn+by", - "x2jtIO6h9dtme6N0psCsdQE7kbNbkpyXZaWueJ64NyBjrFmpK8ea1N0/GfnMqi7ufp9/c/L6rUOfUsKA", - "Vy4TateqqF/5h1kVesSxdKjzIDJC1qr3na0hFmx+83AvDKb47LWOLYdazDGXFa/mgAtF0QVXlvH7ob2h", - "kjDj7U6S2UmZ+9TIXJg/d68iP5CwOIe2O7xHL4Rz7agGUNiCF5op2c8aQDOOvExil4JvcRdtYHaoIGRd", - "JCgCic5FGg8dyIVGKZJ1Qc8jtgYYdR4xCBFiLUbC57IWASzspg+4fukhGcwRJSaFdXbQbqFcpbJait9q", - "YCIDabCpcllEHWFB2fCJscMjLZ6E6wC7PNwG/Kec8whq7IQnJHYf8mGUN5J67Z0+v9AmPI0/BMG5W1zS", - "hDMOjqUdFyyOPxw32+vjdTdaGxYWG+ogZAxbhGJ/VTMfOlhbREfmiFYpG9XYJ+PampKrD9fTrVomdEOF", - "bBPeeK5VBEwtN1zaokM4ztLQjdZg/XYctVEVvRDSEL32FTpZVup3iHuTS9yoSGKTIyWZbDR6Fnl50Vei", - "TWSkLSfn6RviMcraY9ZU0Mi6l2gjEk5cHoSvKVPTB5m4tGxtCyR17kPjwhHmMMwt/FY4HM6DvI+cbxY8", - "VhMAjRrE6aS9KOmEw4xifrDfBd0kKDveC+5cmr7CPqspoWqzD4fPIu9ooPyxWD6DVBQ8j0dHM6J+92Fl", - "JlbCVpmqNQRljBwgW57PcpErBWWvolrSnC7Z0TQolOZ2IxNXQotFDtTjse2x4JpOrSbk2QzB5YE0a03d", - "nxzQfV3LrILMrLUlrFasMSLtiwEff16A2QBIdkT9Hr9gDyjyrsUVPEQqOltkcvz4BeU52D+OYoedKye3", - "S69kpFj+h1MscT6mqwcLAw8pB3UWfeJla4COq7Ad0mSHHiJL1NNpvf2yVHDJVxC/US324GTH0m5S4K5H", - "F5nZAnbaVGrLhInPD4ajfhrJdUL1Z9FwCegFCpBRTKsC+amtUWQn9eBsNTxXH8Tj5RvpmqP0Dwl6Tuvn", - "DdLaszy2arqMesML6JJ1yrh9CUlvIdwLWqcQZyOFGaC6ik9SjWywPzfdWPZAKpkUKDvZwzaLLuC/aF0C", - "ZXgendZ43dXPXNkN+lBTC6Eko4StO4TlgU66M4nrKr5OXuNUP7977Q6GQlWxIgOtNnSHRAWmEnAVldh+", - "NlhjmTTHhad8zEDxpRh+q0Gb2MMbarD5M+S34RloyzAwkBmdIDNmH6og2p2nBqS5RVHnNm0dshVUzqmv", - 
"y1zxbMoQzvk3J6+ZnVW7x470QILKQKzso6eGRJEwUvB8/zavwMbSbQ6HszsPAVetDb2p1YYXZSw9EXuc", - "+w6UA3nFRe6vtEmlhdSZsVf2NNFeV9lJ2sd+rJnO8W++UvTKmxvD0zWp6Y5Ss0IS9f0Orl/iM3x1UA+w", - "Ka3WvIq379eM8iVMbAWTKVN4lm6EtjVN4Qq6GZFNerAzE3yGZHd5VS2l5ZS4ztuRvn4Xsnvk7GWRD3NE", - "MesR/paqS6u6SuG25VzOaFT0MUy/NsygEKCE7PxaNgW3fK3qlEslRUpPUYIqqg3Krj7qIXG4A17t9F0w", - "L+JOQiPCFa1I01xHOyqO1qjxitARbhiECFpxUy132D8NFeJE52IFRjvNBtnUVx1yvoGQGlyVAyqVG+hJ", - "dPH6d1LRcHn7rvqWbEQpZSNH4LfYRsefcGkgl0LSK0NHNpdxYq13Kt9o0GUQhq0UaLee7isa/R7HzM6v", - "5Sli/GHmyz0SDBuWxGXbOPgQ1ImPirsoNPZ9iX0ZhSDbnzvpa3bSk7J0k8Y0gW52OFY3aZTAkchq4kNb", - "AXEb+CG0Hey28zqLzlNkNLiiYDiUdA4PGGPkrfI36ChZjrJPHu01cjSHXsgIGq+FhLYYaeSASKNHAm0M", - "yevIOJ1W3KTrg3XaOfCcou8xhaaNC0d8KqjeBhNJaI1+jvFtbKtnjSiOpkOb4c7ltqmBitwdGBMvqfiy", - "I+SwFhZZVc6IyihRqFcdK6Y4UHH7enPdA2AoBkObyA43FbeSc5uTaCyxORMaTdxikUdSI141jUGFOMrB", - "Wmzp39hL0fEVuMuaO1c2oIG3ti93VxnIce8TLVZ33JV2/D1uS08Gwj2Kcf83qFbCh2uDR79W8TT1Eela", - "WPn6nuRUNMnOXZ4lRRejQ1CScbcjNF5ccUqqcSQ55F37tI9b7WvjTWMpIuloRhM3Ll3RcLar3IetfBiD", - "YO+2bMVF+xWEqLM5dp9lr7OweTD6MLthYIUR7J0E9RelQ4R+8JkQrOTCBVNbERlS1uVMDbPYDsmmaDe4", - "vwiXiURAYiu5Y+LQQbI3pFJEsMPr5j3sedkhqX1h0LMkVQX3TNrgCL0laYcX6Ycuj9ZBHFNrGK7z4A3o", - "0HaE9ocQvtULQ+KOi7NZHCLO8URtHE76xBLEPyUYapPPpg06BVvdvLFd//torTv7logbtgHGpVQkUS7q", - "xjgrVAY5067GRg4rnm7d6z99IVMuWSYqoEIVoqCaa5zpDV+toKJno7ZMqo9NELTIbtUiz/axjYPxNfWN", - "vMb9d76nHQqxRfZW5kR/a2mhu9+PNtP8q96MpqoobGigQ/7oy8nmORYFXQj9tk7grtjhouLSeiIDChGU", - "4EsNkTpday4l5NHR9m7i38QhBf9VjeBcCBlv6rOAJUyPDO2auyv0U3r4kVIK04mGtK6E2VL+kPdMxD+j", - "udHfNfLrqsw3t7DuEtB++MSFx1tpb79V8Z2ydZ8LdJfIdTBU/eSba16UOTg9+tUXi7/A078+y46ePv7L", - "4q9Hz49SePb8xdERf/GMP37x9DE8+evzZ0fwePnli8WT7MmzJ4tnT559+fxF+vTZ48WzL1/85Qv/oQiL", - "aPsRhv9J5QSSk7enyTki224UL8UPsLUvopE7fckHnpLmhoKLfHLsf/pvXk5QgIJv27lfJ+62YbI2ptTH", - "8/lms5mFQ+YrqseXGFWn67mfZ1hs5u1pE9C3SQckSzZWi4JO54UwOWWaUNu7b87O2cnb01mrDibHk6PZ", - "0ewxVQApQfJSTI4nT+kn4vo17ft8DTw3KBk308m8AFOJVLu/nAqfuWoX+NPVk7mPAM4/uqv1m11t3dwG", - 
"92AlGBC8eJx/7JROzEK49B5w/tHnfQRNtg7v/CMFGIPfXSHN+ce2su2N5e4cYpEeX+Gr7U6Vu6jovra/", - "IkP7u0mhu9WFm905zXBXcNTLpspv+M3R9/+ffqHvQ++DJU+Ojv7ziQUqk/rslpTY6dd04gCReb/mGfN3", - "jDT3488396mkVySoqJhVxDfTyfPPufpTiaLAc0Y9g0yTIUv8LC+l2kjfE0/Nuih4tfXirTvKwtf0Jt3M", - "V5oqDVbiihuYfKBSlrFL3RGlQ9+yuLXSoQ90/EfpfC6l88f+csl/lM4fTemcWaVwuNJxhpBN9pjbimit", - "feTfLQ4f83UtuzHN5Qx99oCiyhI2D13CiAUbeRjaXM6rzEaQfHEfn9rkZp0NNNs7B7TzBvkH2Op9au58", - "DeyX9hvtv1ACJl3VTJmq2C88z4Pf6FOb3oSdjXzwvXkseOjX3m9upjG0lgA+HZTSPl1RT1T3l+CflVoa", - "dK5zhxkQbX21JYx+9NWWoQo1m2PBx0dHR7GXFX2cXbTLYkzptxuV5HAF+XCrx5DovS7d9YnE0Q9VDB8F", - "h15nhOv8F4Wbd8KjX4zsvnS9DXavlPzCsA0XrrZ4UFnGfg6kEMZ/TNWmVLkUvubsiH+AM0GQu7/P+6lH", - "3B+vSOfNDmWn17XJ1EaOKy5638NzlyBLKauNs20U8wAaTTVj/it4+dZ/3pVxSu5Stel+ddkXjOjVIm5K", - "Gq2EpAlIymkWmwnOgzxL96WIoRI8c5i9sR/W6Om96McnLY5xuY8J/afy0uEGyM499IVHOn/PURTQ2LNf", - "6UmIckO33wDP5y7dp/ervZQPfuzWIY78Om8eXUUb+8GMWOv8o7l28Yog8EZb1oTc3n9AylM6r9vNNo50", - "PJ/TzfdaaTOfoObpxpjCxg8NUT96FvDEvflw838DAAD//9ssXxsJhQAA", + "H4sIAAAAAAAC/+x9e3PcNpL4V8Fvdqv8+A1n5Fd2rarUnmI7iS6O47KU3N1avgRD9swgIgGGACVNfPru", + "V90ASJAEZ0ayznep3b9sDYBGo9EvNBrNj5NUFaWSII2eHH6clLziBRio6C+epqqWJhEZ/pWBTitRGqHk", + "5NC3MW0qIVeT6UTgryU368l0InkBbR8cP51U8FstKsgmh6aqYTrR6RoKjoDNpsTeDaSrZKUSB+LIgjh+", + "Obne0sCzrAKth1j+IPMNEzLN6wyYqbjUPMUmzS6FWTOzFpq5wUxIpiQwtWRm3enMlgLyTM/8In+rodoE", + "q3STjy/pukUxqVQOQzxfqGIhJHisoEGq2RBmFMtgSZ3W3DCcAXH1HY1iGniVrtlSVTtQtUiE+IKsi8nh", + "+4kGmUFFu5WCuKD/LiuA3yExvFqBmXyYxha3NFAlRhSRpR076leg69xoRn1pjStxAZLhqBn7vtaGLYBx", + "yd59/YI9efLkOS6k4MZA5phsdFXt7OGa7PDJ4STjBnzzkNd4vlIVl1nS9H/39Qua/8QtcN9eXGuIC8sR", + "trDjl2ML8AMjLCSkgRXtQ4f7cUREKNqfF7BUFey5J7bznW5KOP//6q6k3KTrUglpIvvCqJXZ5qgOC4Zv", + "02ENAp3+JVKqQqDvD5LnHz4+mj46uP7T+6Pk7+7PZ0+u91z+iwbuDgpEO6Z1VYFMN8mqAk7SsuZySI93", + "jh/0WtV5xtb8gjafF6Tq3ViGY63qvOB5jXwi0kod5SulGXdslMGS17lhfmJWyxzVFEJz3M6EZmWlLkQG", + "2RS17+VapGuWcm1BUD92KfIcebDWkI3xWnx1W4TpOiQJ4nUretCC/u8So13XDkrAFWmDJM2VhsSoHebJ", + 
"WxwuMxYalNZW6ZsZK3a6BkaTY4M1tkQ7iTyd5xtmaF8zxjXjzJumKRNLtlE1u6TNycU5jXerQaoVDIlG", + "m9Oxoyi8Y+QbECNCvIVSOXBJxPNyNySZXIpVXYFml2swa2fzKtClkhqYWvwKqcFt/9eTH94wVbHvQWu+", + "grc8PWcgU5WN77GbNGbBf9UKN7zQq5Kn53FznYtCRFD+nl+Joi6YrIsFVLhf3j4YxSowdSXHELIQd/BZ", + "wa+Gk55WtUxpc9tpO44aspLQZc43M3a8ZAW/+vJg6tDRjOc5K0FmQq6YuZKjThrOvRu9pFK1zPbwYQxu", + "WGA1dQmpWArIWANlCyZuml34CHkzfFrPKkDHAxlFp5llBzoSriI8g6KLLazkKwhYZsZ+dJqLWo06B9ko", + "OLbYUFNZwYVQtW4GjeBIU293r6UykJQVLEWEx04cOVB72D5OvRbOwUmVNFxIyFDzEtLKgNVEozgFE24/", + "zAxN9IJr+OLpmAFvW/fc/aXq7/rWHd9rt6lTYkUyYhex1Qls3G3qjN/j8BfOrcUqsT8PNlKsTtGULEVO", + "ZuZX3D9PhlqTEugQwhseLVaSm7qCwzP5EP9iCTsxXGa8yvCXwv70fZ0bcSJW+FNuf3qtViI9EasRYja4", + "Rk9TNKyw/yC8uDo2V9FDw2ulzusyXFDaOZUuNuz45dgmW5g3Zcyj5igbnipOr/xJ46YjzFWzkSNIjtKu", + "5NjxHDYVILY8XdI/V0viJ76sfsd/yjKP0RQZ2BlaCgq4YME79xv+hCIP9kyAUETKkahzMp+HHwOE/lzB", + "cnI4+dO8jZTMbaueO7g44/V0ctTCufuZ2pF2fb2DTNvMhLS7Q12n9kx49/gg1Cgm5Kj2cPgqV+n5rXAo", + "K1VCZYTdxwXCGUoKgWdr4BlULOOGz9pDlfWzRvidBn5L4+iUBFXExP1A/+E5w2aUQm68+4auq9DoxKkg", + "0JShx2ftiJ0JO5AnqlhhnTyGztmNsHzRTm4VdKNR3zuyfOhDi+zOK+tXMhrhF4FLb0+NRwtV3Y5feowg", + "WXsWZhyhNt4vrry7s9S1LhNHn4g/bTv0ALXhx6FaDSnUBx+jVYcKJ4b/D1BBI9S7oEIX0F1TQRWlyOEO", + "5HXN9Xq4CHRwnjxmJ98ePXv0+OfHz75AC11WalXxgi02BjS77+wK02aTw4PhykjB17mJQ//iqT9BdeHu", + "pBAh3MDeR6JOATWDpRiz8QLE7mW1qWp5BySEqlJVxOcl1jEqVXlyAZUWKhK+eOt6MNcD9ZD1u3u/W2zZ", + "JdcM56bjWC0zqGYxyuM5i0y6gULvMhQW9OmVbGnjAPKq4pvBDtj1Rlbn5t1nT7rE9969ZiVUibmSLINF", + "vQptFFtWqmCcZTSQFOIblcGJ4abWd6AFWmAtMrgRIQp8oWrDOJMqQ4HGznH9MBLLpCAKxX5MqHLM2tqf", + "BaB3nPJ6tTYM3UoV29p2YMJTuykJ2Qo9cvRrzuy2l53OxsnyCni2YQsAydTCna/cyY8WySksY/yNi9NO", + "LVrNmaCDV1mpFLSGLHHXSztR81dVtMlmC5kIb8K3mYRpxZa8uiWuRhme78CT+gyx1a034c6kQ6z3m37b", + "/vUnD3eRV3jEtEyArgsKdw4Gxki4kyZ1OXId4azaqShQJJjkUmlIlcx0FFjOtUl2iQJ26phe3NaA+2Lc", + "T4BHDt2vuTb22CtkRm6YFWGah8bQFOMIj2pphPyTV9BD2CnqHqlr3WhrXZelqgxksTVIuNoy1xu4auZS", + "ywB2YxKMYrWGXZDHqBTAd8SyK7EE4sbFXZq40HBxFOJG3bqJkrKDREuIbYic+F4BdcOQ7Agi6LM3I4lx", + 
"hO5xThMHnk60UWWJOskktWzGjZHpxPY+Mj+2fYfMxU2rKzMFOLvxODnMLy1lbTB+zdFfIsis4Oeo78n7", + "sefzIc4ojIkWMoVkG+ejWJ5gr1AEdgjpiOPprvuC2XrC0ePfKNONMsGOXRhb8IgX/NZGlU/biMsdOAIv", + "wXCR68bYN6HrdhaKcvczENAzqyAFafIN8upSVIW9KCIbof1v1pXI3Cz2SqQVP5mxCi55lfkewxNIsJhE", + "yAyu4tqVd+INGVwxEUd62cwsDEv9NY4MAcyigu4uxrag4AINt5kch8antdc+lko6diFIDSgAhUgrxe09", + "Hy7GGknTXGVVUHDEjm6cnFEfn1PIVWKvFSPm0bb7a0cf7g15Jg7X88moZDescbkGuslAdd0jYshteEwD", + "DWMLWeVqwfMEnVdIMsjNzjASOsXwknqinVTpcHgX5bOz93l2dvaBvca+5CcDO4fNnG5fWbrmcgVtSDzk", + "U+sBwxWkdajSe2Tc61Dj4n5d7LvHmumkVCpPmuNbP4Q/UPN9up+L9BwyhnqCnE5nfe51dwgnYfeRxXVz", + "yXG53nh/tixBQvZgxtiRZFCUZuNiBT1Poze5vGe2zX9Fs2Y13bdyyWiRszMZP6bb29pPlCkPZrsk2fSl", + "T5zKAtk+kbmSI+LEL+myAcFF5XNrpO+ERgYmZ2BJA6ayWOxzHv6Gcnp4Z5dFRoeN1qroelEISuwJuk1R", + "c/q71uFpVZgZY6ekO/C0oOECKp5T1oL2QVChWSHw0KnrNAXIDs9k0sEkVYWb+H77X6uWzuqDgyfADh70", + "x2iDbqI7GFkZ6I/9kh1MbRORi33JziZnkwGkCgp1AZk9HIZ8bUftBPv/Grhn8oeBYmYF39hjpZdFpuvl", + "UqTCEj1XqNdXquftSUUtUCF6gIczzYSZkikjipKXbPelFcBJ1Gu5i/hFBCr6x2hKUdv5G7Yu72gGVzzF", + "VXJSMht2iYzS8NnQ+TCqTEIA0XDqlhldQFt39Pgt5W6oz+1pejt+p73zdIccAbvOdvvMA2JEMdhH/I9Y", + "qXDXhcul8QkXudBmgKQ72NNtRsOQEaMzY/+hapZykt+yNtCcqVRFBxU6wOIMZGP9nM5TaykEORRgwx3U", + "8vBhf+EPH7o9F5ot4dInoGHHPjkePrRCoLT5ZAnosebVccSBoiAzWtNI0vCa6/VsZ8CZ4O4VZw5AH7/0", + "E5IwaU0m5no6waNuvrkDgbeAWAXO39OdoI+2rWoZJru5/dMbbaAYRi7t0J9HPNF3/oQ2sLRK5kJCUigJ", + "m2h+t5DwPTVG7TSxyMhgEtaxsf0TbAf/HlrdefbZzU+lL+12wBJvm9S7O9j8Ptxe0DpM8yMvE/KScZbm", + "ggKCSmpT1ak5k5wCFD03qMcWPuwyHrJ64bvEY2SREJYDdSa5Rho2YYvoZcYSIgHJrwF85ErXqxXonlvE", + "lgBn0vUSktVSGJqLvMrEblgJFd06zWxP9ASWPKcI2+9QKbaoTVf1UjaS9WxsBB2nYWp5JrlhOXBt2PdC", + "nl4ROH/C8TwjwVyq6ryhwsgJDSRooZP4xdw3tvVbrtd++djRKxs32AaJEX6bsrQx0El3/s/7fzt8f5T8", + "nSe/HyTP///8w8en1w8eDn58fP3ll//V/enJ9ZcP/vbn2E553GO5Mg7z45fOLTl+SbanDZ4PcP9swd9C", + "yCTKZHhcKISklMseb7H7aEE9Az1ow/Bu18+kuZLISBc8FxkegW/DDn0VN5BFKx09rulsRC+W59f6IXbc", + "Wamk5Ok53WtPVsKs68UsVcXcu2PzlWpcs3nGoVCS2rI5L8Ucj7fzi0c7TOMn6CsWUVfX04nTOvrOM2gc", + 
"4NiC+nM2UXT/t1Hs3jevTtnc7ZS+ZxPnLOgg4yniQbvLsM5hDhdvH37YzEE8zLyEpZAC2w/PZMYNny+4", + "Fqme1xqqr3jOZQqzlWKHzIF8yQ2nGEAvojj2NoviMw6bsl7kImXnoSluRXMsMHZ29h4Z5Ozsw+Aia2g4", + "3VTxYCNNkFwKs1a1SVxUdjyO0MZaCLKNy22bdcocbMuRLurr4I8EQMtSJ0FELL78ssxx+QEbakaDKA+K", + "aaMqrwRRM7qYBu7vG+Wu8ip+6bPRazy3/1Lw8r2Q5gNL3Pn7qCwp3Ebxrl+crkGe3JSwf8ysRbEFFjtn", + "0cKtQ3Xj3DgCemJH+SCyjlMOm4h01Ae1QhsTvC2dENS3KsfNvTWZAhhR6tRmnaBMRVelkbVIHoI3hHyF", + "utDfveGxGZnPvWlZAEvXkJ5DRhcPFCOcdob7K29nWbzICm2fodgUOMqVpuPgAlhdZtzZXi43/aRVDcb4", + "TN13cA6bU9WmWt8kS/V6OnFB/QR5ZkxASqRHYATUsisu/mKgt/nuboUC72XJbGzbZhd6tjhs+MKPGRcg", + "a5nuQHhiTNGQYQu/l7yKEMIy/wgJbrFQhPdJrB+NpPPKiFSUdv37xebfdsYgkF1KParG1bKvrQfKNKq9", + "bedkwXVccQO24H6gDPWzS/xMNrJiL8kYPWV2jLvIIbhV0k6yeUXOjl+2fZs5hlqcS6CSrTX1aHQpEprt", + "tbuWFBftZSRdO+9j4HZeSiEX+XwB0Q0/C5w3hws+ehMw+obgOEgCCJ6mNS8EvGLrC8O0eS1iX4n7lwT+", + "+YB/MzCZ3ij/fzpxuV6x7VCSrHsGOay4C3xTFpljFIfaPR1sEOLxw3KZCwksieUTcK1VKuxdaKvL3RyA", + "zt9Dxmxghe0NIcbGAdoUMSTA7I0KZVOuboKkBEEhRu5hU6wx+Bt2R9za5/rOrdzp/g11RytE0/Y5jd3G", + "YfRnOomqpDHPvNOL2S4LGBxlYiyKqmkYDxlGXTTkQOY46WjW5DwWJUOvAogNT/ywwF1n98USjfyDIHBc", + "wQrP3u15FaXVB2A+b8zgQhlIlqLSJqGjcnR52OlrTc7g19g1rn46pGL2va/I4tqHpj2HTZKJvI7vtpv3", + "u5c47Zvm3KLrxTlsyMgAT9dsQe/T0Qp1psc+W6a2OTVbF/zaLvg1v7P17sdL2BUnrpQyvTn+IFzV0yfb", + "hCnCgDHmGO7aKEm3qJcgG2GoW4I8CJszQfkVs22n9YEw3TijY1TzWkjRtQSO7tZV2MQfm9sTPO8e5kyP", + "yAAvS5Fd9c7OFmqcx2mKmzjq1uMfUIF21wHbQYHgnBxLIazAn/XtlgY20z7UH6RZ7aZMP7krUAjhVEL7", + "MjNDQiFrUzbOLlqdAs+/g81P2JeWM7meTj7tyB+jtYO4g9Zvm+2N0pliyPYI2Imc3ZDkvCwrdcHzxD1L", + "GWPNSl041qTu/hXLZ1Z18eP36auj128d+pS9BrxySVvbVkX9yj/MqvBEHMvcOg0iI+St+rOzdcSCzW/e", + "BobBFJ9o1/HlUIs55rLi1Ri4UBRdcGUZv8raGSoJk/NuJZmd7L5PjcyFqX53KvIDCYtzaLvDO/RCONeW", + "wgKFrZ2hmZL9BAd04+iUSexS8A3uog3MDhWErIsERSDRuUjjoQO50ChFsi7oxcbGAKPOIw4hQqzFSPhc", + "1iKAhd30HjdFPSSDOaLEpLDOFtotlCt6VkvxWw1MZCANNlUu4akjLCgbPod3aNLi+cIOsEsZbsB/ip1H", + "UGMWnpDYbuTDKG8kS9wf+vxCm/A0/hAE525wSRPOODBLWy5YHH84brY33etutDasUTbUQcgYtp7F7gJp", + 
"PnSwtoiOzBEteDaqsY/GtTXlge+vp1u1TOiGCtnm5vFcqwiYWl5yaesX4ThLQzdagz2346hLVdGjJQ3R", + "G2qhk2Wlfof4aXKJGxXJwXKkJJeNRs8ij0H6SrSJjLSV6Tx9QzxGWXvMmwoaWfcSbUTCicuD8DUllfog", + "E5eWrW2tpc7VbVw4wnSLuYXfCofDeZCikvPLBY+VHUCnBnE6ai9KOuEwo5gf7HdBN7nUjveCO5emr7Av", + "fUqo2kTJ4UvNWzoofyyWzyAVBc/j0dGMqN9965mJlbAFq2oNQUUkB8hW+rNc5KpK2auoljTHS3YwDWqu", + "ud3IxIXQYpED9Xhkeyy4JqvVhDybIbg8kGatqfvjPbqva5lVkJm1toTVijVOpH3c4OPPCzCXAJIdUL9H", + "z9l9irxrcQEPkIrOF5kcPnpOKRn2j4OYsXOV6bbplYwUy785xRLnY7p6sDDQSDmos+irM1tOdFyFbZEm", + "O3QfWaKeTuvtlqWCS76C+I1qsQMnO5Z2kwJ3PbrIzNbC06ZSGyZMfH4wHPXTSFoWqj+LhsuVL1CAjGJa", + "FchPbbkjO6kHZwvruRIkHi/fSNccpX/z0Du0ft4grbXlsVXTZdQbXkCXrFPG7eNMerbhHvU6hTgbqRUB", + "1UV8kmpkg73ddGPZfalkUqDsZA/ahL+A/6KlEpTheXRa43VXP3NlO+h9XS2EkowStu4Qlgc66dYkrqv4", + "OnmNU/347rUzDIWqYnUPWm3ojEQFphJwEZXYfuJa45k05sJTPuagfFWLPPupTTftlRiquEzX0fjnAgf+", + "3NZEa8huqR59grfmUkIeBWdl+Wcv8xGt9Kvad55CyD379ksH2eX2Ftci3kXTI+UnRPIKk+MEIVW7+XdN", + "4ki+UhmjedpH1i0jDN9IBWVUfqtBm9h7LmqwuU50xkZ/xVbxYCAzsvYzZt8/IS6dFyxkZUVR5/Y1BGQr", + "qFwApi5zxbMpQzinr45eMzurdm9o6d0NVRFZ2bd0nVX0zlZB9YebPC4cS43aH872nBFctTb0JFsbXpSx", + "rFfsceo7UGrtBRe5Tz8g8xNSZ8ZeWsuvvV2xk7RvSFkzndM1xBP4H2N4uiaT2jFA4yy/f/kbz5U6KAPZ", + "VNRriirYZ5FG+Qo4tgDOlCn0ey6FtqVs4QK6ibZN1rlz6XzibXd5VS2l5ZS4fdryKuI2ZPfI2Ys9H5KK", + "YtYj/A3NjFZ1lcJNqwGd0KjoG6t+aaFB/UcJ2emVbOqv+RLlKZdKipReOAXFcxuUXVncfWKmezwG6x+X", + "vYg7CY0IV7SgUZM64Kg4WuLIK0JHuGHAKGjFTbXcYf80VH8VD4IrMNppNsimvmiVO8cJqcEVyaAKyYGe", + "xON4//4werXRPte/IRtR+t+Iu/I1tpGrIlzKzrmQ9HjVkc1lB9mTFlXtNHi8E4atFGi3nu7jLP0ex8xO", + "r+QxYvxh5qt8EgwbQsZl2zuLIagjf4Phbgyw7wvsyyhc3P7cSTW0kx6VpZs0pgl0s8OxslujBI5EwRMf", + "hgyI28APoW1ht61Xj2RPkdHggi4uoCQ7PGCMkSfwr/BQaznKvqS1V/7RpxlCRtB4LSS0NWgjBiKNmgTa", + "GJLXkXE6rbixLuBeOu0UeE43JTGFpo0LHX0qqN4GE0lojX6O8W1si6+NKI6mQ+u4cblpSt8idwfOxAuq", + "ue0IOSylRl6Vc6IySurqFVeLKQ5U3L4sYdcADMVg6BPZ4abiVnJuYonGktAzofE4UizySBrLy6YxKDBI", + "+XKLDf0be4A8vgJ3sXbrghk08Mb+5fbiFTnufaLF6pa70o6/w23pyUC4RzHuf4VqJXy3M3hLbhVP86yG", + 
"rvCVL/dKh4omMb3Ls6Toooe2tnLn9kPreA3OKanGkUSed+2LUW61r40NjqXzpKPZZ9y41FLD2bYqMrZw", + "ZgyCvYe0BTvtxy+igYGxu0d79YjNg9H7+Q0DL4xgbyWov9QeIvSdz1phJRcu8N2KyJCyLr9tmHG4T+ZL", + "u8H9RbisMQISW8ktk7z2kr0hlSKCHaYG7GDP8w5J7WuQniepKrhj0gYm9IakHSY97Ls8WgdxTK1huM69", + "N6BD2xHa70P4Vi8MiTsuzmaxjzjHk+pxOOkTSxD/7GOoTT6bNujU+3Xzxnb9p7HogT0hjwSqejStRZ7t", + "2txO2LF9zkyBtZ8XXzztRO8+54Pqn+2F/FDc3NvSmxj+/iYQYSJr7UweTBUEFPeIJbphkcgh1YJK60qY", + "DeXueE9T/BzNS/4GpKt67IrINzeg7gLOfr/EhaZXTe/2kxPfKFsGukD3l1xBQ0VSXl3xoszBycWX9xZ/", + "gSd/fZodPHn0l8VfD54dpPD02fODA/78KX/0/MkjePzXZ08P4NHyi+eLx9njp48XTx8//eLZ8/TJ00eL", + "p188/8s9/70Hi2j7LYV/p6oDydHb4+QUkW1pwkvxHWzsO2NkY/+CmackiVBwkU8O/U//4iVslqoi+ESd", + "+3XiIv2TtTGlPpzPLy8vZ+GQ+YrK9iVG1el67ucZ1qR5e9wEaO2FP+2ojb0hK9CmOlY4orZ3r05O2dHb", + "41nLMJPDycHsYPaICoWUIHkpJoeTJ/QTSc+a9n3umG1y+PF6Opmvgedm7f4owFQi9U36kq9WUM3cU278", + "6eLx3Md35h/dJff1trZuloF7OhIMCN4ezj926i1mIVx6mTf/6DMwgiZbpHf+kcJHwe+uyub8Y1v29try", + "eg6xc7wvC9Z2p3JfVGFf21+Rvf0todDd0sPNXh1nuEc46kVTAjj8kOj7f9DP7n3ofYXk8cHBP9j3FJ7e", + "cMVbvdPOaS5SNeErnjF/U0RzP/p8cx9LereB6olZ9Xs9nTz7nKs/lsjyPGfUM8jtGG79j/Jcqkvpe6Kt", + "rIuCVxsvxrqjFHxhb9LIfKWpDGElLvCI/oHqXMau5kaUC3244sbKhb7G8U/l8rmUyx/jMyWPbyjgf/wV", + "/1Od/tHU6YlVd/urU+fK2WSEuS0E13p4/g3k8GFg1zcd08nu4MLuU9RTwuUDl9BgwUYemTaXxyqzEQ5f", + "KMinSblZZwOd/c4B7bxn/g42epcCP10D+6X9dPwvlMxJVwlTpir2C8/z4Df6Aqh3wmcj36FvHh7u+xH6", + "6+tpDK0lgE8tpRRSV8sUDdk5+Ceqlgad68bhDX1bVm4Jo9+itdW3Qg3mWPDRwcFBLLWnj7OLxliMKZX3", + "UiU5XEA+3OoxJHovVbd9uXH0OxzDB8bhKTrCdf5Dx82b49EPWXZfzd4Eu5dK3jPskgtXUj2oUmO/dlII", + "47/xalN+XDpgYyPi3wVNEOT2zwZ/qvH+49Umvd6i7PS6Npm6lOOKi94K8dwl21L6axM8MIp5AI2mmjH/", + "0b584786yzglH6nadD8G7YtP9EowN+WRVkLSBCTlNIvNKudBzqb7EMZQCZ44zN7Y74b09F70m5gWx7jc", + "x4T+U3lp6Ghs3StfrKTz9xxZHt1V+7GhhCg0DFAY4PncpZ30frWXw8GP3TLLkV/nzUOtaGM/7BJrnX80", + "V0FkpY13hvFD2qkmcvj+AxKcMoLdJrbhsMP5nC5k10qb+QQVTjdUFjZ+aGj80e+8p/X1h+v/DgAA//8U", + "2bjJl4UAAA==", } // GetSwagger returns the Swagger specification corresponding to the generated code diff 
--git a/daemon/algod/api/server/v2/generated/private/types.go b/daemon/algod/api/server/v2/generated/private/types.go index bb3626a7ae..3a44a1a51f 100644 --- a/daemon/algod/api/server/v2/generated/private/types.go +++ b/daemon/algod/api/server/v2/generated/private/types.go @@ -227,6 +227,16 @@ type AssetParams struct { Url *string `json:"url,omitempty"` } +// BuildVersion defines model for BuildVersion. +type BuildVersion struct { + Branch string `json:"branch"` + BuildNumber uint64 `json:"build_number"` + Channel string `json:"channel"` + CommitHash string `json:"commit_hash"` + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` +} + // DryrunRequest defines model for DryrunRequest. type DryrunRequest struct { Accounts []Account `json:"accounts"` @@ -340,22 +350,10 @@ type TealValue struct { // Version defines model for Version. type Version struct { - - // the current algod build version information. - Build VersionBuild `json:"build"` - GenesisHash []byte `json:"genesis-hash"` - GenesisId string `json:"genesis-id"` - Versions []string `json:"versions"` -} - -// VersionBuild defines model for VersionBuild. -type VersionBuild struct { - Branch string `json:"branch"` - BuildNumber uint64 `json:"build-number"` - Channel string `json:"channel"` - CommitHash []byte `json:"commit-hash"` - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` + Build BuildVersion `json:"build"` + GenesisHashB64 []byte `json:"genesis_hash_b64"` + GenesisId string `json:"genesis_id"` + Versions []string `json:"versions"` } // AccountId defines model for account-id. @@ -616,6 +614,9 @@ type TransactionParametersResponse struct { MinFee uint64 `json:"min-fee"` } +// VersionsResponse defines model for VersionsResponse. +type VersionsResponse Version + // RegisterParticipationKeysParams defines parameters for RegisterParticipationKeys. 
type RegisterParticipationKeysParams struct { diff --git a/daemon/algod/api/server/v2/generated/routes.go b/daemon/algod/api/server/v2/generated/routes.go index b2c2dab120..8adb9de4a6 100644 --- a/daemon/algod/api/server/v2/generated/routes.go +++ b/daemon/algod/api/server/v2/generated/routes.go @@ -561,163 +561,164 @@ func RegisterHandlers(router interface { // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9/XPbOJLov4LTXdUkOdFyvuY2rpq650nmw2+TTCr27O27OG8PIlsS1iTABUBLmjz/", - "76/QAEiQBCX5I8l4Vj8lFoEG0OhvNBqfRqkoSsGBazU6+jQqqaQFaJD4F01TUXGdsMz8lYFKJSs1E3x0", - "5L8RpSXj89F4xMyvJdWL0XjEaQFNG9N/PJLwj4pJyEZHWlYwHql0AQU1gPW6NK1rSKtkLhIH4tiCOHk1", - "utrwgWaZBKX6s/yF52vCeJpXGRAtKVc0NZ8UWTK9IHrBFHGdCeNEcCBiRvSi1ZjMGOSZOvCL/EcFch2s", - "0g0+vKSrZoqJFDn05/lSFFPGwc8K6knVG0K0IBnMsNGCamJGMHP1DbUgCqhMF2Qm5Jap2kmE8wVeFaOj", - "DyMFPAOJu5UCu8T/ziTAb5BoKuegRx/HscXNNMhEsyKytBOHfQmqyrUi2BbXOGeXwInpdUDeVEqTKRDK", - "yfsfX5KnT5++MAspqNaQOSIbXFUzergm2310NMqoBv+5T2s0nwtJeZbU7d//+BLHP3UL3LUVVQrizHJs", - "vpCTV0ML8B0jJMS4hjnuQ4v6TY8IUzQ/T2EmJOy4J7bxnW5KOP5X3ZWU6nRRCsZ1ZF8IfiX2c1SGBd03", - "ybB6Aq32pcGUNEA/HCYvPn56PH58ePWvH46T/3Z/Pn96tePyX9Zwt2Ag2jCtpASerpO5BIrcsqC8j4/3", - "jh7UQlR5Rhb0EjefFijqXV9i+lrReUnzytAJS6U4zudCEerIKIMZrXJN/MCk4rkRUwaao3bCFCmluGQZ", - "ZGMjfZcLli5ISpUFge3IkuW5ocFKQTZEa/HVbWCmqxAlZl43wgcu6PeLjGZdWzABK5QGSZoLBYkWW9ST", - "1ziUZyRUKI2uUtdTVuRsAQQHNx+sskXccUPTeb4mGvc1I1QRSrxqGhM2I2tRkSVuTs4usL9bjcFaQQzS", - "cHNaetQw7xD6esiIIG8qRA6UI/I83/VRxmdsXklQZLkAvXA6T4IqBVdAxPTvkGqz7f/79Je3REjyBpSi", - "c3hH0wsCPBXZ8B67QWMa/O9KmA0v1Lyk6UVcXeesYJEpv6ErVlQF4VUxBWn2y+sHLYgEXUk+NCELcQud", - "FXTVH/RMVjzFzW2GbRlqhpSYKnO6PiAnM1LQ1XeHYzcdRWiekxJ4xvic6BUfNNLM2Nunl0hR8WwHG0ab", - "DQu0piohZTMGGamhbJiJG2bbfBi/3nwayyqYjgcyOJ16lC3T4bCK0IxhXfOFlHQOAckckF+d5MKvWlwA", - "rwUcma7xUynhkolK1Z0G5ohDbzavudCQlBJmLEJjpw4dRnrYNk68Fs7ASQXXlHHIjOTFSQsNVhINzikY", - "cLMz01fRU6rg22dDCrz5uuPuz0R31zfu+E67jY0Sy5IRvWi+OoaNm02t/js4f+HYis0T+3NvI9n8zKiS", - 
"GctRzfzd7J9HQ6VQCLQQ4RWPYnNOdSXh6Jw/Mn+RhJxqyjMqM/NLYX96U+WanbK5+Sm3P70Wc5aesvkA", - "Muu5Rr0p7FbYfwy8uDjWq6jT8FqIi6oMF5S2vNLpmpy8GtpkC/O6hHlcu7KhV3G28p7GdXvoVb2RA5Mc", - "xF1JTcMLWEsws6XpDP9ZzZCe6Ez+Zv4pyzyGU0PATtFiUMAFC96738xPhuXB+gQGCkupQeoE1efRp2BC", - "/yZhNjoa/eukiZRM7Fc1cXDtiO3dewBFqdcPDRaOG/h3P4OmZ2wWwWfCuN01bDq2vuLdz8dAjc4EDdjO", - "HL7PRXpxozmUUpQgNbP7OzVw+hyE4MkCaAaSZFTTg8bZsvbXAB9gx5+xH3pPICOq7xf8D82J+Wy4k2pv", - "1hmTlilj3IkgAJUZS9DqFzuSaYAWqiCFNf6IMdquNcuXzeBWcNeS9oNDy8cutMju/GDtTYI9/CLM0htv", - "8ngq5M3opUMInDQ+MqEGam0Vm5W3dxabVmXi8BOxs22DDqAmLNkXtyGGuuB3wVXA2Q12TjX9DNhRBupd", - "YKcN6EthRxQly+EO+HtB1aK/OGMoPX1CTn8+fv74yd+ePP/WaPpSirmkBZmuNSjywOknovQ6h4f9FaOi", - "qHIdh/7tM++JteFuxRxOuIa9C97OwEgSizFi4w5mdq/kWlb8DlAIUgoZsZ2RpLRIRZ5cglRMRMIg71wL", - "4loYuWXt987vdrZkSRUxY6NbV/EM5EEM88ZfQ9NAQ6G2KRYL+mzFG9w4gFRKuu7tgF1vZHVu3F32pI18", - "7yUoUoJM9IqTDKbVPNRpZCZFQSjJsCMK0Lcig1NNdaXuQDo0wJrJmI0Ip0CnotKEEi4yw+imcVxuDMRE", - "MRiDMSQdiiK9sPpqCsbKTmk1X2hizFMR29qmY0JTuykJ6hY14ELWvr9tZYez8bZcAs3WZArAiZg6P815", - "kLhIiuEd7U9unNRqplX7Fq15lVKkoBRkiTum2jo1f+SFm6w3oAnnjfOtByFKkBmVN5yrFprmW+aJbfqz", - "VY314Xzb/qx3G37T/nUHD3eRSuOqWiIwpo5h7hw0DKFwK06qcuBYw2m7M1YYliCccqEgFTxTUWA5VTrZ", - "xgqmUUslm20NqC9G/Qh4wHl/TZW27jPjGZptloVxHOyDQwxPeFBKG8h/8QK6Dzs1soerStXSWlVlKaSG", - "LLYGDqsNY72FVT2WmAWwa5WgBakUbIM8hKUAvkOWXYlFENUuflPHl/qLw1C5ka3rKCpbk2gQsWkip75V", - "gN0wtDswEWPj1z2RcJjqUE4dTx6PlBZlaWSSTipe9xtC06ltfax/bdr2iYvqRlZmAszo2s/JzXxpMWuD", - "+gtq7CWETAp6YeQ9Wj/Wz+/P2TBjohhPIdlE+YYtT02rkAW2MOmAQeqODYPROszRod8o0Q0SwZZdGFrw", - "Na3jdzZqfdZEdO7AQHgFmrJc1UZAHRpvRsEoejfDwVhsElLgOl8bGp4xWdiDKNQdyv9mTYzMjWKPXBq2", - "5BmRsKQy8y36HkuwmITxDFZxqUtbcYsMVoTFJz2rR2aapP6YiIcADqICwB28bZiCC1jcZHDTNT6sPVay", - "WFKxA0f8YBijYKkU1J4jmsVY5anrozIJBTWzwxMtp+yHx2R8nthjy4jatN/9saYPJ4c0E4fr6WSQ42vS", - "WC4AT0qMGO8gMaQ2476BgqGFzHMxpXlijFpIMsj11nCUMZbhFbY0+lOk/e7tKZ+ff8iz8/OP5LVpi/Yz", - "kAtYT/B0l6QLyufQhNxDOrWWMawgrUJR30HjTs6Oiyu2Z992d8ajUog8qd267hFBT/x38X7B0gvIiJET", - 
"aIw6rfRNe4fMIOSBIXFVH6IsF2tv55YlcMgeHhByzAnKNhdb6FggncH5N3rT+CscNavwPJdygos8OOdx", - "992eBt+SpzyYzZxk06NuOZQFsnkgveID7ESXeJhhwEX5c2PE8BR7Biqnp2EDorKz2EWr/YQ5Q7S1yyxD", - "J6TRKqqaFgwTh4JmYyM5/Vlu34tl+oCQM5QdxotQcAmS5pgVoXwwlSlSMOOMqipNAbKjc560ZpKKwg38", - "oPmvFUvn1eHhUyCHD7t9lDbmo3OYLA90+35HDsf2E6KLfEfOR+ejHiQJhbiEzDqNIV3bXlvB/ksN95z/", - "0hPMpKBr6256XiSqms1YyizSc2Hk+lx0rEAu8AtIMz0wTpsiTI9RlSFG0Xq2+9IwYNxquYu4RgSqsZuN", - "KjXSzp/gtWlHEVjR1KySopBZk6UhlJrO+saHFmUSAoiGXzeM6ALjqiXHb8h3fXluvezN8zvr+NktdATk", - "erDdlu4hIzqDXdj/mJTC7DpzuTo+oSNnSvcm6Rx+PBWpCTKidA7I/xEVSSnyb1lpqH0tIdGBQcfWjIA6", - "1o/pLLUGQ5BDATYMgl8ePeou/NEjt+dMkRksfYKbadhFx6NHlgmE0rfmgA5prk4iBhQGn402jSQlL6ha", - "HGwNRCPcneLPAeiTV35AZCalUMVcjUfGBc7Xd8DwFhCR4Ow91QoGKftVzMJkOrd/aq00FP2Ipu36twFL", - "9L333HqaVvCccUgKwWEdzR9nHN7gx6ieRhIZ6IzMOtS369m25t+ZVnucXXbztvjF3Q5I4l2d2ncHm9+F", - "2wlmh2mEaGVCXhJK0pxhoFBwpWWV6nNOMXDRMYM6ZOHDMcOhrJe+STx2FgltOVDnnCqDwzqcET3kmEEk", - "UPkjgI9oqWo+B9Uxi8gM4Jy7VoyTijONY6FVmdgNK0HiadSBbWksgRnNMfL2G0hBppVui17MdrKWjY2s", - "m2GImJ1zqkkOVGnyhvGzFYLzHo6nGQ56KeRFjYUBDw04KKaS+IHdT/brz1Qt/PJNQy9sXGcbPDbwm5So", - "tYZWOvX/ffCfRx+Ok/+myW+HyYt/n3z89Ozq4aPej0+uvvvu/7V/enr13cP//LfYTvm5x3Jx3MxPXjmz", - "5OQV6p4mqN6b+xcLCheMJ1EiM+5CwTimdHZoizwwGtQT0MMmPO92/ZzrFTeEdElzlhkX+Cbk0BVxPV60", - "3NGhmtZGdGJ8fq0fY+7OXCQlTS/wHHw0Z3pRTQ9SUUy8OTaZi9o0m2QUCsHxWzahJZsY93Zy+XiLaryF", - "vCIRcYXZbtbnD9KUImapO3lqeUgGor2tYdP9jIfwCmaMM/P96JxnVNPJlCqWqkmlQH5Pc8pTOJgLckQc", - "yFdUU3SsO2G6oQtVGPRwsymrac5SchHqt4beh6JN5+cfDNbPzz/2To362sgNFY/g4QDJkumFqHTiQp3D", - "znkTwEDINti1adQxcbDtNrtQqoM/EFUsS5UEYab48ssyN8sPdKYi2AmTlIjSQnrJYsSNCxSY/X0r3LmZ", - "pEufQl4ZZ/h/Clp+YFx/JIlzao/LEmNYGET6H8fARuquS9g9ENVMsQEWc15w4dZKuXbiGgI9tb18ZFbF", - "MWc+IeqwjWG1JtB2UzwZUD+L3GzujdEUwIhip9KLxPBUdFXKkBbyQ3Dxj86NgPEHXcYXNcTnLqJMgaQL", - "SC8gw2g+Bt7Gre7+fNmJa8+yTNm7IzY/DROc0ceaAqnKjDqFRvm6m2mqQGufXvseLmB9Jpr86Oukll6N", - "Ry5SnhiaGWKQ0uAjkKxi1mYXH23vbL47sMBodlkSGzC2qX+eLI5quvB9hhnIivs7YJ4YUdRo2EDvJZUR", - 
"RFjiH0DBDRZq4N2K9KPhaSo1S1lp179bwPtdq48Bsk2oR8W4mHWldU+YRqW3bZxMqYoLbjBfzH4YHuqm", - "cviRbLjCnjwRvH/sCHeaQ3BUoxxnU4kWhF+2vVA5NLU4lYDkjTb102hjJFTbC3fWxy6bEz48491FwW09", - "6TFU5A/nWTumy8y4OVzSwfD6YOL/SXDiHtwnq9P6vWDrMsO4vuJhr3b79H+f8+8T/UfjayXtj0cusSq2", - "HYKjds8ghzl10WRM2XKE4qb2jQo2yMzjl9nM+PwkiR3eU6VEyuwBYyPL3RhgjL9HhNhoBdkZQoyMg2lj", - "GA4Bk7ci5E0+v84kOTCM21EPGwN4wd+wPYzV3LF3ZuVW868vOxomGjd3YOw29kMq41FUJA1Z5q1WxDaZ", - "Qs8/iJGoEU39IEM/lKEgB1THSUuyJhex0JOxKgDJ8NR3C8x18oDNjJJ/GERjJcyNQ9s4gYZbfVTjyzri", - "l0JDMmNS6QT9z+jyTKMfFRqDP5qmcfHTQhWxl3RZFpc+OOwFrJOM5VV8t924f35lhn1b+y2qml7AGpUM", - "0HRBpnip3Gih1vCmzYahbQLLxgW/tgt+Te9svbvRkmlqBpZC6M4Y94SqOvJkEzNFCDBGHP1dG0TpBvES", - "HPH3ZUuQXGATETBp4WCTt95jpmunSQxKXgspupbA0N24CptNYxNmgjvZ/QTlAR6gZcmyVcd3tlDjNI5D", - "XMdQtxZ/Dwu4uw7YFgwEfnIsX0+C9/XtlgY6096u7+UubcdMN2MqEAjhUEz52jB9RBnSxhSXbbg6A5r/", - "GdZ/MW1xOaOr8eh2Ln8M1w7iFly/q7c3imcMzFoXsBU5uybKaVlKcUnzxN0BGSJNKS4daWJzf2XkC4u6", - "uPt99sPx63du+pgSBlS6TKhNq8J25b1ZlfGIY+lQZ0FkBK1V7ztbQyzY/PriXhhM8dlrLVvOSDFHXJa9", - "agUXsqILrszi50NbQyVhxtuNOLOVMnfbyFyYP3enLN/jsDiFNju8RS6EY22oBlDYgheKCN7NGjBmHHqZ", - "SC4FXZtdtIHZvoDgVZEYFkhUztJ46IBPleEiXhV4PWKtgWDjAYPQQKzYQPicVyyAZZqpHY5fOpMMxogi", - "E8M6G3A3Fa5SWcXZPyogLAOuzSfpsohazGJ4wyfG9lVaPAnXAXZ5uDX42+h5A2pIw+MkNiv5MMobSb32", - "Tp9faB2eNj8EwblrHNKEI/bU0oYDFkcfjprt8fGiHa0NC4v1ZZAhDFuEYntVMx86WNiJDowRrVI2KLGP", - "h6U1JlfvLqcbsYzTDQWyTXijuRIRMBVfUm6LDpl+FoeutwLrt5teSyHxhpCC6LEvU8lMit8g7k3OzEZF", - "EpscKtFkw94HkZsXXSFaR0aacnIev+E8Bkl7yJoKPpL2IdoAhyOVB+FrzNT0QSbKLVnbAkmt89A4c4Q5", - "DBMLv2EON+de3kdOl1MaqwlgjBozp+PmoKQVDtOC+M5+F1SdoOxoLzhzqdsye62mBNlkH/avRd7QQLlf", - "JJ9Bygqax6OjGWK/fbEyY3Nmq0xVCoIyRg6QLc9nqciVgrJHUQ1qTmbkcBwUSnO7kbFLptg0B2zx2LaY", - "UoVaqw551l3M8oDrhcLmT3Zovqh4JiHTC2URqwSpjUh7Y8DHn6eglwCcHGK7xy/IA4y8K3YJDw0WnS0y", - "Onr8AvMc7B+HMWXnysltkisZCpb/coIlTsd49GBhGCXloB5Er3jZGqDDImwDN9muu/AStnRSbzsvFZTT", - "OcRPVIstc7J9cTcxcNfBC89sATulpVgTpuPjg6ZGPg3kOhnxZ6fhEtALw0BaECUKQ09NjSI7qAdnq+G5", - 
"+iB+Xv4jHnOU/iJBx2n9skFaq8tjq8bDqLe0gDZax4Tam5B4F8LdoHUC8WCgMAPIy/ggcmCDvd50fckD", - "LnhSGN7JHjZZdAH9ResSCE3z6LDay65u5spm0LuaWgZKMojYqoVYGsikG6O4kvF10soM9ev7104xFELG", - "igw00tApCQlaMriMcmw3G6y2TGp14TEfM1B8KYZ/VKB07OINfrD5M+i3GR1oyzAQ4BlqkANiL6qYabeu", - "GqDkZkWV27R1yOYgnVNflbmg2ZgYOGc/HL8mdlTlLjviBQksAzG3l55qFEXCSMH1/evcAhtKt9kdzuY8", - "BLNqpfFOrdK0KGPpiabFmW+AOZCXlOX+SBtFWoidA/LKahPlZZUdpLnsR+rhHP3mc4G3vKnWNF2gmG4J", - "NcskUd9v5/olPsNXBfUA69Jq9a14e39NC1/CxFYwGRNhdOmSKVvTFC6hnRFZpwc7M8FnSLaXJyvOLaXE", - "Zd6G9PWboN1Pzh4W+TBHdGYdxF9TdClRyRSuW87lFHtFL8N0a8P0CgFyyM5WvC645WtVp5QLzlK8ihJU", - "Ua2n7Oqj7hKH2+HWTtcF8yzuODTCXNGKNPVxtMPiYI0aLwgd4vpBiOCr2VRLHfZPjYU4jXMxB62cZINs", - "7KsOOd+AcQWuygGWyg3kpHHxumdS0XB5c6/6mmSEKWUDKvBH8w3VH3NpIBeM4y1DhzaXcWKtdyzfqI3L", - "wDSZC1BuPe1bNOqD6XNwtuInZsYfD3y5R4Rhw5Jm2TYO3gd17KPiLgpt2r40bQmGIJufW+lrdtDjsnSD", - "xiSBqnc4VjdpEMGRyGriQ1sBcmv4IbQN5LbxOAv1qSE0uMRgOJSoh3uEMXBX+QfjKFmKslce7TFyNIee", - "8cg0XjMOTTHSiIJIoyoBNwb5daCfSiXV6WJnmXYGNMfoe0ygKe3CEbcF1dlgRAmu0Y8xvI1N9awBwVE3", - "aDLcKV/XNVANdQfGxEssvuwQ2a+FhVaVM6IyTBTqVMeKCQ4juH29ubYC6LNB3yay3bWklnOuo4mGEpsz", - "poyJW0zzSGrEq/pjUCEOc7Cma/w3dlN0eAXusObGlQ2w47Xty81VBnKz94li8xvuStP/DrelwwPhHsWo", - "/wcjVsKLa71Lv1bw1PUR8VhY+Pqe6FTUyc5tmkVBF8NDUJJxsyM0XFxxjKJxIDnkfXO1j1rpa+NNQyki", - "6WBGE9UuXVFTsqnch618GINgz7ZsxUX7CkLU2Rw6z7LHWeZzr/dudkPPCkPYGxHqD0r7E/qzz4QgJWUu", - "mNqwSB+zLmeqn8W2SzZFs8HdRbhMJAQSW8kNE4d24r0+liKMHR43byHPixZK7Q2DjiUpJNwxagMVek3U", - "9g/Sd10ergMpplLQX+fOG9DC7QDud0F8Ixf6yB1mZz3dhZ3jidqmO8oTixB/laAvTb6YNGgVbHXjxnb9", - "L4O17uxdIqrJEgjlXCBHuagboaQQGeREuRobOcxpuna3/9Q5TyknGZOAhSpYgTXXKFFLOp+DxGujtkyq", - "j00gtMhuVSzPtpGNg/E9to3cxv2a92n7TGwney1zoru1uNDN90frYT7XndFUFIUNDbTQH705WV/HwqAL", - "Tr+pE7gpdjiVlFtPpIchhBK81BCp07WgnEMe7W3PJr4ShRT072JgzgXj8U9dErCI6aChWXN7hX5IDz9S", - "SmE8UpBWkuk15g95z4T9LZob/VPNv67KfH0K6w4B7cMnLjzecHvzVsVPwtZ9Loy7hK6DxuonP6xoUebg", - "5Oh330z/A57+6Vl2+PTxf0z/dPj8MIVnz18cHtIXz+jjF08fw5M/PX92CI9n376YPsmePHsyffbk2bfP", - 
"X6RPnz2ePvv2xX984x+KsBNtHmH4K5YTSI7fnSRnZrLNRtGS/RnW9ka0oU5f8oGmKLmhoCwfHfmf/pfn", - "E8NAwdt27teRO20YLbQu1dFkslwuD8IukznW40u0qNLFxI/TLzbz7qQO6NukA+QlG6s1jI76gukcM03w", - "2/sfTs/I8buTg0YcjI5GhweHB4+xAkgJnJZsdDR6ij8h1S9w3ycLoLk2nHE1Hk0K0JKlyv3lRPiBq3Zh", - "frp8MvERwMknd7R+ZeDMY7lUvmpWHYHu36seWzVjvNq6SlZwhUi5m0VjMrVZQ8QVauMZxohtRohRfjV6", - "TrLg7czgMYZx6+nPD/foNatYCafYBfXY+6R1bvvw+zTBE37+2b7nf7qKHG997Lw58uTw8DO8MzJuQfF4", - "ueMHS57d4dTbvvetF9AF11vGG5obeoL6bTq7oMf3dkEnHG+XGAFGrIC+Go+e3+MdOuGGoWhOsGWQ0NIX", - "kb/yCy6W3Lc0yrkqCirXqHqDa+2h7XQ1KIrbqWTufuCwfIagyFhwpbh1JDJdezobE1XXeC4lE8aEwJcc", - "M0glUFT4QuJJYlOuzF2cBFvU+s3xX/Hc4c3xX20dwOgrd8HwtiZmW7j/BDpSTu/7dfNS00ZJ/7XE5/h3", - "+zDg/dGFt1VB+6KM97Yo4w5Ce7+7+5Kb97bk5v02SVd1GjAlXPCEY4mFSyBBxGNvo/6ubdTnh0/v7WpO", - "QV6yFMgZFKWQVLJ8TX7ldS7Z7UzwWuZUPMju2yh/egXmGys6MN+Dck+TT613I7LtQZXW/fCsVWacxt/E", - "DCrhuNzUcXPplfLM5gD5U3419pc/MW5nb1nb/Rj3roYexIz04JDu+/XJq13s8taagvtwMdu8ha/rvcD7", - "WSMZN36v9HNqgN48vqcZ8cnGn1k27yZMnx0++3IzCHfhrdDkR0xP/Mwi/bPGCeJkFQgbLKk2+eSvzu0g", - "YNy11LZo6T5yGxMqhkPH7gaBq8Rcv69h5IkVhPZmcF9qmBF2lRf9m7MxSdHcFvy9yIhrvSG8lwt7uXBj", - "udAlqEYi2BcMJ58wNTsUBz2WxGeY/0AHKEEtPykKX0xGkBnodOFeiO4cVkfEik9pH5Ypmy453lq+7N8H", - "v8374Ds4JHsEf5kH2O9z4CPQliQhb9EcQgb32fp/xLDH59TIn3tBbwUHAiumsManpcX9cWNtLmA5AESK", - "fw4hrL9fmw7uldLJp+bZ4KsmQ8ReL51Yy3+TXWHfcBnd6ZnO/t2de/Duztf3Km7FIZ3VSgjfPgZ3vbrh", - "Fl8itF83s51E5ZqrRaUzsQxSrppSzIOc5F/Bv0NO2j/Fv3+Kf/8U//4p/v1T/Pun+PdP8d/vp/i/vsF1", - "XQerG8T7jF5P24QNTJnGhLN/T5aU6WQmpFVPCdZxiwRQ26P/F2XaVQ90vpUWRliA0dBYCc4KGgcnqLuj", - "wlwW98SGf+ucFZFDVzPUj0LuFK9tgqBaELMwUnHNfBY+PsXk7bnfX/Bzb6nuLdW9pbq3VPeW6t5S3Vuq", - "fyxL9eskO5Ak8YLaZ7LG8ljJ/bSm71Gq6JfM7WwM7Nq8RoPcmMOGvzcegmig+cRVlsPzYqEGs6nCKnWp", - "GY5xUuYUyzGvtL/Tg5WYv33mkyHqeku2UIWRQabB0yfk9Ofj54+f/O3J82/r58XbbR/4yrFKr3Nbfrnt", - "KZwBzV+6uVthAkp/L7J1Z1/N9CY40/aONtfoGacyUsos8sh0FwdaYDlDV5uv50xc3WmCRLyGcR+f21A5", - "UMc3Sn2btnNr+Vh3nd/B3kWKmj316CSuDNpXlagEZ+TIrJEe//Ti80biyqMxykbIhGNDYVmVAr495uhn", - 
"lZhGc+CJY/JkKrK1f6jC1UhsiTRbvG5Yov2wgrQynIEzcUT9QD10zzxiEc4whhEtHhzUVwaE5/Ks+lLK", - "lknbKKRuvnntosu3Pqrvgtv00D55ICSZS1GVD+2LBXyNzmlRUr724RdjT2HVZnx0E9OL7lYs1hUre0Jt", - "96LDoU2Pd8W6v1u0kCVVvuJwZksOx8sudQvjbsd4U/ZxW0Edu95oidqBgrT9TfS77BIb65BTCTLRKx4p", - "FNkpC/lPn9N7H+XvOykumXEVo+LMhnd1lL0PtophGQgglMOd+5peELel43u6DG9/7iohV4mz2W5t0C3A", - "vvPlDZzI5VajnKSgWUoVJiG6ytyf2djTq5OIp43TxCIFs94lLaMtt5f0R7g7mWIB6Ob5KLxFrJTNwv6q", - "hllTQ+TY5Xy2sLGXEn8UJ/d7z3yKUCLpssucQbX8HcQUXeoVj0qpSfM+XTRHKWCI+kGrOzwB6oFvHwQF", - "L0fZkwjIS0JdCUMMTmpZpfqcUwz6hS929Q+JfChz2DB66ZvE486RsLADdc4pvrFShwKjBtIMYrXjAbz9", - "par5HJTuSOIZwDl3rRhv3nMpWCpFYjP1SpAo0Q9sy4KuyYzmGLX+DaQgU2OyhxdfMVSmNMtzdyplhiFi", - "ds6xUKQR+m+YMc8MOB9NqU9a3SsN4Yvw/ZB0t8RjvzydYupnqhZ++T4igoEb+9kevHz5J4TaBSKjMz95", - "5YpSnLzCe8bNgVRv7l/sQKVgPIkSmdH47ly3S1vkgXvQCgnoYXO05Xb9nBvTWAv7Xnvzmuz1yKEb+O7x", - "ouWOzQUzW/Fxv9bPVTzz8vEW++AW8opExNVec/9xwtPdFw/rjTdGbG/vB/TyHdQA+30X/tqa6LIvs7Uv", - "s7UvxLQvs7Xf3X2ZrX0Rqn0Rqn/WIlQHGy3EySe92qUsTAiVZfahVgmpHbkW4GGzVgGZ/hkg0weEnOEr", - "rNToALgESXN8fFv56+xMkYLNF5qoKk0BsqNznrRmYmvgm4EfNP+1bu55dXj4FMjhQ9LuYsMWgeDtd0VL", - "FT/Z55O+I+ej81EXkIRCXIIrJoGtswqPZW2nrVD/xYE957/I3sYVdG1DKwtalmCUmqpmM5Yyi/BcGFdg", - "Ljr5bFzgF5BmcmDkqSJM27pdiE3MA3RZJ9S9DhUzufva/Ro11Y87xBJPJTdkd80Ku/++S3ndfxbz+hVo", - "ynJVZ7hHvCn0a7qUtaSqYdxapox9YrTyv7nDZzdKzi4gzDnFg/4llZlvEX31rqnU5l917AeW2iWsMlh5", - "g6A76Vk9MtO26JRxN3vvEPXjWq4Q1IYpuGo5Nxl84EXsq/EozYWCxGJJxd4zwg9GEmEslmIolro3rv0z", - "twaGYWZqZifxConNZB8ek/F5Yh9IiISo7Xf3gEIdi+tEviNwPZ0MprPWpGEf1kZp00ViSG0z4m6SD4R/", - "7YuBNhnixu8Gdrr3nmTKs/Pzj+S1LXKIrx9dwHpiXyZJF5TPQdU4CunUXvuwGSxBHnMHjXf3VqHRGsnA", - "K6Mn/dzmLt4vWHoBGTFywj+rPmDCkwd1xTZ8Rnq5WPtLHFYNPTwg5JgTTNz1L0q3I82dwfk3etP4q1Bx", - "tjVSJN8uBXYJ8pY85cFs5iQFhuFuOZQFsnkgveID7ESXEYd21xI+Ef+1400GRGVncRdhgb1W2mulvVba", - "a6W9Vtprpc+mlXohmPsfpOj2uXmUogvp7sIUXz1Q8QcqG7ivEPg7W1CYutkqAXyL2G39AGLMCraT8G9y", - "Yhitfo3zw8erj+abvPQRtuaJyaPJBM2IhVB6Mroaf+o8Pxl+NLKTzi0EF8EqJbvEip4fr/5/AAAA//9U", - "hyTJJOUAAA==", + 
"H4sIAAAAAAAC/+x9/XfbNrLov4Kre89p0itazld343N67nOTtPXbJM2J3b37bpzXhciRhDUJcAHQkprn", + "//0dDAASJEFJ/khad/VTYhEYDAbzhcFg8GmUiqIUHLhWo6NPo5JKWoAGiX/RNBUV1wnLzF8ZqFSyUjPB", + "R0f+G1FaMj4fjUfM/FpSvRiNR5wW0LQx/ccjCf+smIRsdKRlBeORShdQUANYr0vTuoa0SuYicSCOLYiT", + "l6OrDR9olklQqo/lTzxfE8bTvMqAaEm5oqn5pMiS6QXRC6aI60wYJ4IDETOiF63GZMYgz9SBn+Q/K5Dr", + "YJZu8OEpXTUoJlLk0MfzhSimjIPHCmqk6gUhWpAMZthoQTUxIxhcfUMtiAIq0wWZCbkFVYtEiC/wqhgd", + "fRgp4BlIXK0U2CX+dyYBfoVEUzkHPfo4jk1upkEmmhWRqZ046ktQVa4VwbY4xzm7BE5MrwPyplKaTIFQ", + "Tt5//4I8efLkuZlIQbWGzDHZ4Kya0cM52e6jo1FGNfjPfV6j+VxIyrOkbv/++xc4/qmb4K6tqFIQF5Zj", + "84WcvByagO8YYSHGNcxxHVrcb3pEhKL5eQozIWHHNbGN73RRwvF/01VJqU4XpWBcR9aF4FdiP0d1WNB9", + "kw6rEWi1Lw2lpAH64TB5/vHTo/Gjw6t//3Cc/I/789mTqx2n/6KGu4UC0YZpJSXwdJ3MJVCUlgXlfXq8", + "d/ygFqLKM7Kgl7j4tEBV7/oS09eqzkuaV4ZPWCrFcT4XilDHRhnMaJVr4gcmFc+NmjLQHLcTpkgpxSXL", + "IBsb7btcsHRBUqosCGxHlizPDQ9WCrIhXovPboMwXYUkMXjdiB44od8vMZp5baEErFAbJGkuFCRabDFP", + "3uJQnpHQoDS2Sl3PWJGzBRAc3HywxhZpxw1P5/maaFzXjFBFKPGmaUzYjKxFRZa4ODm7wP5uNoZqBTFE", + "w8Vp2VEjvEPk6xEjQrypEDlQjsTzctcnGZ+xeSVBkeUC9MLZPAmqFFwBEdN/QKrNsv/v05/eEiHJG1CK", + "zuEdTS8I8FRkw2vsBo1Z8H8oYRa8UPOSphdxc52zgkVQfkNXrKgKwqtiCtKsl7cPWhAJupJ8CCELcQuf", + "FXTVH/RMVjzFxW2GbTlqhpWYKnO6PiAnM1LQ1beHY4eOIjTPSQk8Y3xO9IoPOmlm7O3oJVJUPNvBh9Fm", + "wQKrqUpI2YxBRmooGzBxw2zDh/Hr4dN4VgE6HsggOvUoW9DhsIrwjBFd84WUdA4ByxyQn53mwq9aXACv", + "FRyZrvFTKeGSiUrVnQZwxKE3u9dcaEhKCTMW4bFTRw6jPWwbp14L5+CkgmvKOGRG8yLSQoPVRIM4BQNu", + "3sz0TfSUKvjm6ZABb77uuPoz0V31jSu+02pjo8SKZMQumq9OYONuU6v/Dpu/cGzF5on9ubeQbH5mTMmM", + "5Whm/mHWz5OhUqgEWoTwhkexOae6knB0zr82f5GEnGrKMyoz80thf3pT5Zqdsrn5Kbc/vRZzlp6y+QAx", + "a1yjuynsVth/DLy4Otar6KbhtRAXVRlOKG3tSqdrcvJyaJEtzOsy5nG9lQ13FWcrv9O4bg+9qhdyAMlB", + "2pXUNLyAtQSDLU1n+M9qhvxEZ/JX809Z5jGaGgZ2hhaDAi5Y8N79Zn4yIg92T2CgsJQaok7QfB59ChD6", + "Dwmz0dHo3ydNpGRiv6qJg2tGvBqPjhs4dz9S09POr7ORaT4Txu3qYNOx3RPePT4GahQTdFQ7OHyXi/Ti", + "RjiUUpQgNbPrODVw+pKC4MkCaAaSZFTTg2ZTZf2sAX7Hjj9iP9wlgYyYuJ/wPzQn5rORQqq9+2ZcV6aM", + 
"EyeCQFNmPD5rR+xIpgF6ooIU1skjxjm7FpYvmsGtgq416gdHlo9daJHVeWX9SoI9/CTM1Jtd4/FUyJvx", + "S4cROGn2woQaqLX3a2beXllsWpWJo0/En7YNOoCa8GNfrYYU6oKP0apFhVNNPwMVlIF6F1RoA7prKoii", + "ZDncgbwuqFr0J2EcnCePyemPx88ePf7l8bNvjIUupZhLWpDpWoMiD5xdIUqvc3jYnxkq+CrXcejfPPU7", + "qDbcrRRChGvYu0jUGRjNYClGbLzAYPdSrmXF74CEIKWQEZ8XWUeLVOTJJUjFRCR88c61IK6F0UPW7+78", + "brElS6qIGRu3YxXPQB7EKG/2WWjSNRRqm6GwoM9WvKGNA0ilpOveCtj5Rmbnxt1lTdrE9969IiXIRK84", + "yWBazUMbRWZSFISSDDuiQnwrMjjVVFfqDrRAA6xBxixEiAKdikoTSrjIjECbxnH9MBDLxCAKxn50qHL0", + "wtqfKRjvOKXVfKGJcStFbGmbjglN7aIkaCvUwNav3rPbVnY4GyfLJdBsTaYAnIip21+5nR9OkmJYRvsT", + "F6edGrTqPUELr1KKFJSCLHHHS1tR80dVuMh6A5kQb8S3HoQoQWZU3hBXLTTNt+CJbfrYqsabcHvSPta7", + "Db9p/bqDh6tIpdliWiYwrosR7hw0DJFwK02qcuA4wlm1M1YYkSCccqEgFTxTUWA5VTrZJgqmUcv0mmUN", + "uC/G/Qh4YNP9miptt72MZ+iGWRHGcbAPDjGM8KCWNpD/6hV0H3ZqdA9Xlaq1tarKUkgNWWwOHFYbxnoL", + "q3osMQtg1yZBC1Ip2AZ5iEoBfEcsOxNLIKpd3KWOC/UnhyFuo1vXUVK2kGgIsQmRU98qoG4Ykh1AxPjs", + "dU9kHKY6nFPHgccjpUVZGp2kk4rX/YbIdGpbH+ufm7Z95qK60ZWZADO69jg5zJeWsjYYv6DGX0LIpKAX", + "Rt+j92P3532cjTAmivEUkk2cb8Ty1LQKRWCLkA44nu64LxitIxwd/o0y3SATbFmFoQkPeMHvbFT5rIm4", + "3IEj8BI0ZbmqjX0dum5GwSh3NwPBeGYSUuA6XxtenTFZ2IMitBHK/2ZdicyNYo9EGvHjGZGwpDLzLfo7", + "kGAyCeMZrOLalbbiDRmsCIsjPatHZpqk/hiHhwAOooLuDsY2oOACDTcZ3HSND2uPfSyVVOxAED8YAShY", + "KgW153xmMtZI6vooS0JBDXZ44uSM+vCYjM8Te6wYMY/2uz929OHekGficD2fDEp2zRrLBeBJhlHXHSKG", + "3Ga2aaBgaCLzXExpnhjnFZIMcr01jGScYniJLY2dFGm/exvl8/MPeXZ+/pG8Nm3RTwZyAesJnr6SdEH5", + "HJqQeMin1gOGFaRVqNI7ZNxpU+Pifm3s29ua8agUIk/q7Vs3hN9T8126X7D0AjJi9AQ6nc76fNVeITMI", + "eWBYXNWHHMvF2vuzZQkcsocHhBxzAkWp1y5W0PE0OoPzr/Sm8Vc4albheSvlBCd5cM7j23R7WntLmfJg", + "NkuSTV+65VAWyOaB9IoPiBNd4mGDAReVz42RvlPsGZicniUNmMpisct++AfM6aGtVWYZbjYaq6KqacEw", + "sSdoNjaa05+19nerTB8Qcoa6w+wWFFyCpDlmLSgfBGWKFMxsOlWVpgDZ0TlPWpikonADP2j+a9XSeXV4", + "+ATI4cNuH6WNm+g2RlYGun2/JYdj+wnJRb4l56PzUQ+ShEJcQmY3hyFf215bwf5bDfec/9RTzKSga7ut", + "9LJIVDWbsZRZoufC6PW56Hh7XOAXkAY9MJszRZgeoylDiqKXbNelEcBR1Gu5i/hFBKrxj40pNdrOn7C1", + 
"eUcRWNHUzJKiklmTpWGUms/6zocWZRICiIZTN4zoAtqqpcdvKHd9fW5305vxO+vsp1vkCNj1YLvP3CNG", + "FINdxP+YlMKsOnO5ND7hImdK95B0G3s8zagZMmJ0Dsj/ERVJKcpvWWmo91RC4kYFN7BmBLSxfkznqTUU", + "ghwKsOEO/PL1192Jf/21W3OmyAyWPgHNNOyS4+uvrRAIpW8tAR3WXJ1EHCgMMhtrGkkaXlC1ONgacEa4", + "O8WZA9AnL/2AKExKoYm5Go/MVjdf34HAW0BEgvP3VCvoo+xXMQuT3dz6qbXSUPQjl7brLwOe6Hu/Q+tZ", + "WsFzxiEpBId1NL+bcXiDH6N2GllkoDMK61Df7g62hX8HrfY4u6zmbemLqx2wxLs69e4OFr8LtxO0DtP8", + "0MuEvCSUpDnDgKDgSssq1eecYoCi4wZ12MKHXYZDVi98k3iMLBLCcqDOOVWGhnXYInqYMYNIQPJ7AB+5", + "UtV8DqrjFpEZwDl3rRgnFWcax0KvMrELVoLEU6cD29J4AjOaY4TtV5CCTCvdVr2YjWQ9GxtBN8MQMTvn", + "VJMcqNLkDeNnKwTndzieZzjopZAXNRUGdmjAQTGVxA/mfrBff6Rq4advGnpl4zrbILGB36QsrTW00p3/", + "74P/OvpwnPwPTX49TJ7/5+Tjp6dXD7/u/fj46ttv/1/7pydX3z78r/+IrZTHPZYr4zA/eenckpOXaHua", + "4HkP9y8W/C0YT6JMZrYLBeOYctnhLfLAWFDPQA+bMLxb9XOuV9ww0iXNWWa2wDdhh66K68milY4O17QW", + "ohPL83P9GNvuzEVS0vQCz7VHc6YX1fQgFcXEu2OTuahds0lGoRAcv2UTWrKJ2d5OLh9tMY230Fckoq6u", + "xiOnddSdZ9A4wLEJdceso+j+by3IVz+8OiMTt1LqK5s4Z0EHGU8RD9odhrU2c2by9uKHzRw0m5mXMGOc", + "me9H5zyjmk6mVLFUTSoF8juaU57CwVyQI+JAvqSaYgygE1EcupuF8RmHTVlNc5aSi9AUN6I5FBg7P/9g", + "GOT8/GPvIKtvON1Q8WAjDpAsmV6ISicuKjscR2hiLQjZxuU2jTomDrblSBf1dfAHAqBlqZIgIhafflnm", + "ZvoBGyqCnTAPiigtpFeCRjO6mIZZ37fCHeVJuvTZ6JXZt/+9oOUHxvVHkrj993FZYrgN411/d7rG8OS6", + "hN1jZg2KDbDYPgsnbh2qa+fGIdBT28sHkVWccuYTkg7bGK3QxARvSicD6keRm8W9MZkCGFHqVHqRGJmK", + "zkoZ1kJ5CO4Q0rnRhf7szWybDfO5Oy1TIOkC0gvI8OABY4TjVnd/5O0sixdZpuw1FJsCh7nSuB2cAqnK", + "jDrbS/m6m7SqQGufqfseLmB9JppU6+tkqV6NRy6onxieGRKQ0tAjMAJi1hYXfzDQWXx3toKB97IkNrZt", + "sws9WxzVfOH7DAuQtUx3IDwxpqjJsIHfSyojhLDMP0CCG0zUwLsV60cj6VRqlrLSzn+32Py7Vh8DZJtS", + "j6pxMetq654yjWpv2ziZUhVX3GC+mPUwMtTNLvEj2ciKPSQjeJXZMe40h+BUSTnJphKdHT9tezdzCLU4", + "l4DkjTX1aLQpEprthTuWZJfNYSQeO+9i4LYeShku8vkCrB1+ZmbcHC7p4EnA4B2CkyAJILiaVt8Q8Iqt", + "Kwzj+raIvSXubxL46wP+zsBofK38//HI5XrFlkNwtO4Z5DCnLvCNWWSOURxqX6lggQweP81mOeNAklg+", + "AVVKpMyehTa63I0Bxvn7mhAbWCE7Q4ixcYA2RgwRMHkrQtnk8+sgyYFhiJF62BhrDP6G7RG35rq+cyu3", + 
"un993dEI0bi5TmOXsR/9GY+iKmnIM2+1IrbJFHpbmRiLGtXUj4f0oy4KckBznLQ0a3IRi5IZrwKQDU99", + "t8BdJw/YzBj5h0HgWMLc7L2b/aqRVh+A+bIxg0uhIZkxqXSCW+Xo9Eyj7xU6g9+bpnH10yIVsfd9WRbX", + "PjjsBayTjOVVfLXduH95aYZ9W+9bVDW9gDUaGaDpgkzxfrqxQq3hTZsNQ9ucmo0Tfm0n/Jre2Xx34yXT", + "1AwshdCdMe4JV3X0ySZhijBgjDn6qzZI0g3qJchG6OuWIA/C5kxgfsXBpt16T5iundExqHktpOhcAkd3", + "4yxs4o/N7Qmud/dzpgdkgJYly1advbOFGudxHOI6jrr1+HtUwNV1wLZQINgnx1IIJfi9vl3SwGbai/q9", + "NKvtlOkmdwUKIRyKKV9mpk8ow9qYjbONVmdA87/A+q+mLU5ndDUe3W7LH6O1g7iF1u/q5Y3SGWPIdgvY", + "ipxdk+S0LKW4pHnirqUMsaYUl441sbm/xfKFVV18+3326vj1O4c+Zq8BlS5pa9OssF15b2ZldsSxzK2z", + "IDKC3qrfO1tHLFj8+m5gGEzxiXYtX85oMcdcVrxqAxeKoguuzOJHWVtDJWFy3o0ks5Xdd9vIXJjqd6ci", + "35OwOIc2K7xFL4RjbSgsUNjaGYoI3k1wMG4c7jKRXQq6NqtoA7N9BcGrIjEikKicpfHQAZ8qI0W8KvDG", + "xloDwcYDDqGBWLGB8DmvWADLNFM7nBR1kAzGiBITwzobaDcVruhZxdk/KyAsA67NJ+kSnlrCYmTD5/D2", + "TVo8X9gBdinDNfjb2HkDasjCIxKbjXwY5Y1kiftNn59oHZ42PwTBuWsc0oQj9szShgMWxx+Om+1J96Id", + "rQ1rlPV1kGEMW89ie4E0HzpYWEQHxogWPBvU2MfD2hrzwHfX041aRnRDhWxz82iuRARMxZeU2/pFpp+l", + "oeutwO7bTa+lkHhpSUH0hJqpZCbFrxDfTc7MQkVysBwp0WXD3geRyyBdJVpHRprKdJ6+IR6DrD3kTQUf", + "SfsQbUDCkcuD8DUmlfogE+WWrW2tpdbRbVw4wnSLiYXfCIfDuZeiktPllMbKDhinxuB03ByUtMJhWhDf", + "2a+CqnOpHe8FZy51W2Zv+pQgm0TJ/k3NGzoo94vlM0hZQfN4dDRD6rfvemZszmzBqkpBUBHJAbKV/iwX", + "uapS9iiqIc3JjByOg5prbjUydskUm+aALR7ZFlOq0GrVIc+6i5kecL1Q2PzxDs0XFc8kZHqhLGGVILUT", + "aS83+PjzFPQSgJNDbPfoOXmAkXfFLuGhoaLzRUZHj55jSob94zBm7Fxluk16JUPF8t9OscT5GI8eLAxj", + "pBzUg+itM1tOdFiFbZAm23UXWcKWTuttl6WCcjqH+IlqsQUn2xdXEwN3HbrwzNbCU1qKNWE6Pj5oavTT", + "QFqWUX8WDZcrXxgB0oIoURh+asod2UE9OFtYz5Ug8Xj5j3jMUfo7D51N65cN0lpbHps1Hka9pQW0yTom", + "1F7OxGsb7lKvU4gHA7UiQF7GB5EDC+ztputLHnDBk8LITvawSfgL+C9aKkFomkeH1V53dTNXNoPe1dUy", + "UJJBwlYtwtJAJ92YxJWMz5NWZqif3792hqEQMlb3oNGGzkhI0JLBZVRiu4lrtWdSmwtP+ZiD8l3F8uyv", + "Tbppp8SQpDxdROOfU9Pxl6YmWk12S/XoFbwF5RzyKDgry794mY9opX+IXccpGN+xbbd0kJ1uZ3IN4m00", + "PVJ+QENepnMzQEjVdv5dnTiSz0VGcJzmknXDCP07UkEZlX9WoHTsPhd+sLlOuMc2/oqt4kGAZ2jtD4i9", + 
"/2Rwad1gQSvLiiq3tyEgm4N0AZiqzAXNxsTAOXt1/JrYUZW7Q4v3brCKyNzepWvNorO3Cqo/XOdy4VBq", + "1O5wNueMmFkrjVeylaZFGct6NS3OfANMrb2kLPfpB2h+QuockJfW8itvV+wgzR1SUg/ndA3yhPmP1jRd", + "oEltGaBhlt+9/I3nShWUgawr6tVFFey1SC18BRxbAGdMhPF7lkzZUrZwCe1E2zrr3Ll0PvG2PT1ZcW45", + "JW6fNtyKuAnZPXL2YM+HpKKYdQh/TTOjRCVTuG41oFPsFb1j1S0t1Kv/yCE7W/G6/povUZ5SLjhL8YZT", + "UDy3RtmVxd0lZrrDZbDudtmLuJPQiHBFCxrVqQOOioMljrwidITrB4yCr2ZRLXfYPzXWXzUbwTlo5TQb", + "ZGNftMrt4xhX4IpkYIXkQE+a7Xj3/DB6tNFc178mG2H634C78r35hq4Kcyk7F4zj5VVHNpcdZHdaWLVT", + "m+0d02QuQLn5tC9nqQ+mz8HZip8YjD8e+CqfCMOGkM207ZlFH9SxP8FwJwam7QvTlmC4uPm5lWpoBz0u", + "SzdoTBOoeoVjZbcGCRyJgic+DBkQt4YfQtvAbhuPHtGeGkaDSzy4gBLtcI8xBq7AvzKbWstR9iatPfKP", + "Xs1gPILGa8ahqUEbMRBp1CTgwqC8DvRTqaTauoA76bQzoDmelMQUmtIudHRbUJ0FRpLgHP0Yw8vYFF8b", + "UBx1g8Zxo3xdl7413B04Ey+w5rYjZL+UGnpVzonKMKmrU1wtpjiM4vZlCdsGoC8GfZ/IdteSWsm5jiUa", + "SkLPmDLbkWKaR9JYXtYfgwKDmC83XeO/sQvIwzNwB2s3LpiBHa/tX24uXpGbtU8Um99wVZr+d7gsHRkI", + "1yjG/a+MWgnv7fTuklvFU1+rwSN84cu94qaiTkxv8ywquuimrancuXnTOlyDc4yqcSCR531zY5Ra7Wtj", + "g0PpPOlg9hnVLrVUU7KpiowtnBmDYM8hbcFO+/hFNDAwdPZojx7N517v3fyGnheGsDcS1B9q9xH6i89a", + "ISVlLvDdiEifsi6/rZ9xuEvmS7PA3Um4rDEEEpvJDZO8dpK9PpUigh2mBmxhz4sWSe1tkI4nKSTcMWkD", + "E3pN0vaTHnadHs4DOaZS0J/nzgvQou0A7XchfKMX+sQdFmc93UWc40n1pjvqE0sQf+2jr02+mDZo1ft1", + "48ZW/a9D0QO7Qx4IVHVoWrE827a4rbBjc50ZA2u/TL952orefckL1b/YA/m+uLm7pdcx/N1FQMJE5toa", + "PBgqCCjuEEt03SKRQ6wFlVaS6TXm7nhPk/0SzUv+AbireuyKyNcnoO4Azr5f4kLT87p18+TED8KWgS6M", + "+4uuoMYiKa9WtChzcHLx7VfTP8GTPz/NDp88+tP0z4fPDlN4+uz54SF9/pQ+ev7kETz+87Onh/Bo9s3z", + "6ePs8dPH06ePn37z7Hn65Omj6dNvnv/pK//eg0W0eUvhb1h1IDl+d5KcGWQbmtCS/QXW9p6xYWN/g5mm", + "KIlQUJaPjvxP/8tL2EEqiuCJOvfryEX6RwutS3U0mSyXy4Owy2SOZfsSLap0MfHj9GvSvDupA7T2wB9X", + "1MbeDCvgojpWOMZv71+dnpHjdycHDcOMjkaHB4cHj7BQSAmclmx0NHqCP6H0LHDdJ47ZRkefrsajyQJo", + "rhfujwK0ZKn/pJZ0Pgd54K5ym58uH098fGfyyR1yXxmo81hWky+1VccX+zecxzZgYfYsdWmt4DKPcnd8", + "xmRq83eIq+7GM4wA2twMo9pqYp1kwYOYwcsL49Z7nh/u0RNVsbpPsavisUdH6yzz4Udngnf5/Ft8z/58", + 
"FTlo+th5SOTx4eFneDxk3ILi6XLDV0ie3iGK7R3UrRHtgutphTc0N3wD9cNyI5zQo3s7oROO9zmM2iJW", + "LV+NR8/u8QqdcCM4NCfYMkgh6avCn/kFF0vuWxqTXBUFlWs0uMFF8tC1uhpUue3kLXcjb1gPQ1CBLLjE", + "2wpsT9eez8ZE1YWeS8mEcRzwGcYMUgkUzbyQeB7U1DJzVxXBVrZ+c/w3jB6/Of6bLRIYfaIuGN4WzGwr", + "8R9AR2rtfbdunlnaqNF/KzU5/t2+6nd/bN5tTc2+YuO9rdi4g9Ler+6+Hue9rcd5v13SVZ14SwkXPOFY", + "1OASSBDW2vuov2sf9dnhk3s7m1OQlywFcgZFKSSVLF+Tn3mdEXQ7F7zWORUPcrQ26p9e9fnGiw7c96DA", + "0uRT61GJbHvwpHUjO2vVIKfxhy6D2jMuG3TcXDOlPLOZHP6sVo39dUuM1tl7zXY9xr3LmAcxJz04avlu", + "ffJyF7+8NafgBlrMN2/R63rP537WiMWNHyH9nBagh8d3NCM+ZfQz6+bdlOnTw6dfDoNwFd4KTb7HJLPP", + "rNI/a5wgzlaBssEiZpNP/rLaDgrGXQRtq5buy7UxpWIkdOxy9l2Z5vrxDaNPrCK0d3H7WsOMsKu+6N9V", + "jWmK5n7e70VHXOth4L1e2OuFG+uFLkM1GsE+Yzj5hAm2oTroiSS+rfwHOigJqudJUfjyLYLMQKcL9+xz", + "5yx76PX+jTpl07XCW+uX/aPft3n0e4cNyZ7AX+ZV9fsc+AisJUnIW3SHUMB9zvUfMezxOS3y557QW8GB", + "wIoprKppeXF/3Fi7C3gBH4ni30oIK97XroN7qnTyqXk7+KrJBLGXBCfW89/kV9gHXkZ3eqazf5TnHjzK", + "89vvKm4lIZ3ZSggfQAZ3SbaRFl+Us1+psp0s5ZqrRaUzsQxSq5rix4OS5J/Cv0NJ2r/Hv3+Pf/8e//49", + "/v17/Pv3+Pfv8d+P9/h/e8fquhupbrDuM+5u2q5q4LI0rpr9e7KkTCczIa0ZSrBCWiRQ2h79vynTri6f", + "20NpYZQCGEuMNdasQnFwgiopKsxZcY9X+AfPWRE5XDVDfS/kTnHZJtipBTETIxXXzGfV4yNH3m/7/QU5", + "9x7p3iPde6R7j3Tvke490r1Hej890t8meYEkiVfIPjM1lpdKRvfSa75HqZ9fMlezcaRrNxodb+P2Gjne", + "eKihgeYTV+8Lz3+FGsyOCmuHpWY4xkmZUyxovNL+jg7WMv7mqU9uqKvg2PIBRteYBk8ek9Mfj589evzL", + "42ff1G+Jt9s+8LVXlV7ntoBxe0dwBjR/4XC3SgOU/k5k6866GvQmiGl7RZtL9YxTGSkwFXlRuksDLbDI", + "nKuY1ts0XN1pwkO8CnCfnttIOVAJN8p9m5ZzawFWVxvUwd7lqMWsqScnccWpflONShAjx2aN9viXV583", + "UleejFExQiEcGw7LqhTw9S7HP6vENJoDT5yQJ1ORrf1TD65yXUul2ZJiwxrt1QrSykgGYuKY+oF66B5K", + "xNKIYawiWtI1qFAMCM/lTfW1lC1etVFJ3Xzx2qVwb3303gW36VV98kBIMpeiKh/amv98jZvQoqR87cMs", + "xm/CWrr4bCWmC92tWqzrCPaU2u6lYEPfHe9+dX+3ZCFLqnwd2MwWgo0Xw+mWK91O8aYY37biKXa+0cKh", + "A2VC+4voV9klKtahpRJkolc8Ur6vU6zvXz5H9z7q33dSXDKzJYyqMxvG1VHxPtiqhmWggFAPd+5fekXc", + "1o7v6TK8zbmrhlwlzme7tUO3APtSlndwIpdVjXGSgmYpVZhU6Oolf2ZnT69OIjtqRBOLDsx6l66Mtdxe", + 
"FB/h7uSKBaCbB5jwVrCy1ZV+W8esqQly7HI4W9TYa4k/yib3Oy98ilAi6bIrnEEN8x3UFF3qFY9qqUnz", + "wls05ygQiPpJqDs86emBbx/4BG8v2RMHyEtCSZozDKYLrrSsUn3OKQb3wjev+odBPmQ57Bi98E3i8eVI", + "+NeBOucUXympQ35RB2kGsYreAN7/UtV8Dkp3NPEM4Jy7Vow3L6IULJUisZl3JUjU6Ae2ZUHXZEZzjE7/", + "ClKQqXHZw4usGCpTmuW5O30ywxAxO+dUkxyM0n/DjHtmwPloSn2i6mrnh2+q90PPrszYwDM+P9ivP1K1", + "8NP3EREM3NjP9oDlyz/C43GPvc7nMD956YpMnLzEe8PNwVMP9y92cFIwnkSZzFh8d37b5S3ywD0JhQz0", + "sDnCcqt+zo1rrIV98bx5j/V67NANcPdk0UpHh2taC9GJg/u5foxdcJiLxGwAsfbwaM70oppiOT5/8WEy", + "F/UliElGoRAcv2UTWrKJKiGdXD7a4h/cQl+RiLraW+4/Tni6+2ZgvfDGie2t/YBdvoOaXr/vQl5bE1r2", + "ZbP2ZbP2hZX2ZbP2q7svm7UvKrUvKvWvWlTqYKOHOPmkV7uUeQmhssw+dSohtSPXCjxs1ioI0z8DZPqA", + "kDN8x5QaGwCXIGmOz1crfz2dKVKw+UITVaUpQHZ0zpMWJvZBTTPwg+a/dpt7Xh0ePgFy+JC0u9iwRaB4", + "+13RU8VP9lGbb8n56HzUBSShEJfgikNg66zCY1nbaSvUf3Ngz/lPsrdwBV3b0MqCliUYo6aq2YylzBI8", + "F2YrMBedvDUu8AtIgxwYfaoI07YOF1IT8/1c1gl1b/bEXO6+db9GLfTjDrPEU8YN212zYu5/7lIu91/F", + "vX4JmrJc1Znskd0U7mu6nLWkqhHcWqeMfQK08r+5w2c3Ss4uIMwtxYP+JZWZbxF9i6ypvObf2ou8r98q", + "SZXByjsEXaRn9ciseYi//zpMP67lCjttQMFVv7nJ4ANvSl+NR2kuFCSWSir2ygx+MJoIY7EUQ7HUvRLt", + "Hx81MIwwU4OdxKsiNmN9eEzG54l92CASorbf3cMHdSyuE/mOwPV8Mpi2WrOGfZoatU2XiCG3zYi7GT4Q", + "/rXvuNlkiBu/5tbp3nsoJ8/Ozz+S17ZoIb7ydAHriX1fJF1QPgdV0yjkU3u9w2awBPnKHTLe3Qtyxmok", + "A28/nvRzmLt0v2DpBWTE6An/MPmAC08e1BXY8HHf5WLtL2tYM/TwgJBjTqAo9dq/89uONHcG51/pTeOv", + "QsPZtkiRfLsU2CXIW8qUB7NZkhQYgbvlUBbI5oH0ig+IE11GNrS7luSJ7F87u8mAqSwWdxEW2FulvVXa", + "W6W9Vdpbpb1V+mxWqReCuf9Bim6fm0cpupDuLkzxmwcq/kBlAPcV/35nEwpTN1slfW8Ru60fLox5wS4q", + "2zwMGj60iVG1+onNDx+vPppv8tIH3Jp3I48mE/QqFkLpyehq/KnzpmT40ahSOrcQXECrlOwSC3Z+vPr/", + "AQAA//9ABOdBwOQAAA==", } // GetSwagger returns the Swagger specification corresponding to the generated code diff --git a/daemon/algod/api/server/v2/generated/types.go b/daemon/algod/api/server/v2/generated/types.go index c3b918387f..9b31e0f89e 100644 --- a/daemon/algod/api/server/v2/generated/types.go +++ b/daemon/algod/api/server/v2/generated/types.go @@ 
-227,6 +227,16 @@ type AssetParams struct { Url *string `json:"url,omitempty"` } +// BuildVersion defines model for BuildVersion. +type BuildVersion struct { + Branch string `json:"branch"` + BuildNumber uint64 `json:"build_number"` + Channel string `json:"channel"` + CommitHash string `json:"commit_hash"` + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` +} + // DryrunRequest defines model for DryrunRequest. type DryrunRequest struct { Accounts []Account `json:"accounts"` @@ -340,22 +350,10 @@ type TealValue struct { // Version defines model for Version. type Version struct { - - // the current algod build version information. - Build VersionBuild `json:"build"` - GenesisHash []byte `json:"genesis-hash"` - GenesisId string `json:"genesis-id"` - Versions []string `json:"versions"` -} - -// VersionBuild defines model for VersionBuild. -type VersionBuild struct { - Branch string `json:"branch"` - BuildNumber uint64 `json:"build-number"` - Channel string `json:"channel"` - CommitHash []byte `json:"commit-hash"` - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` + Build BuildVersion `json:"build"` + GenesisHashB64 []byte `json:"genesis_hash_b64"` + GenesisId string `json:"genesis_id"` + Versions []string `json:"versions"` } // AccountId defines model for account-id. @@ -616,6 +614,9 @@ type TransactionParametersResponse struct { MinFee uint64 `json:"min-fee"` } +// VersionsResponse defines model for VersionsResponse. +type VersionsResponse Version + // AccountInformationParams defines parameters for AccountInformation. 
type AccountInformationParams struct { diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 192fbbe528..4c7131481d 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -110,7 +110,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params return ctx.Blob(http.StatusOK, contentType, data) } - recordWithoutPendingRewards, err := myLedger.LookupWithoutRewards(lastRound, addr) + recordWithoutPendingRewards, _, err := myLedger.LookupWithoutRewards(lastRound, addr) if err != nil { return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } @@ -563,12 +563,21 @@ func (v2 *Handlers) startCatchup(ctx echo.Context, catchpoint string) error { return badRequest(ctx, err, errFailedToParseCatchpoint, v2.Log) } + // Select 200/201, or return an error + var code int err = v2.Node.StartCatchup(catchpoint) - if err != nil { + switch err.(type) { + case nil: + code = http.StatusCreated + case *node.CatchpointAlreadyInProgressError: + code = http.StatusOK + case *node.CatchpointUnableToStartError: + return badRequest(ctx, err, err.Error(), v2.Log) + default: return internalError(ctx, err, fmt.Sprintf(errFailedToStartCatchup, err), v2.Log) } - return ctx.JSON(http.StatusOK, private.CatchpointStartResponse{ + return ctx.JSON(code, private.CatchpointStartResponse{ CatchupMessage: catchpoint, }) } diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index eead3fd01c..0998d8449f 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -18,6 +18,7 @@ package test import ( "bytes" + "errors" "io" "net/http" "net/http/httptest" @@ -34,6 +35,7 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/logging" + 
"github.com/algorand/go-algorand/node" "github.com/algorand/go-algorand/protocol" ) @@ -42,7 +44,7 @@ func setupTestForMethodGet(t *testing.T) (v2.Handlers, echo.Context, *httptest.R numTransactions := 1 offlineAccounts := true mockLedger, rootkeys, _, stxns, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nil) dummyShutdownChan := make(chan struct{}) handler := v2.Handlers{ Node: mockNode, @@ -232,7 +234,7 @@ func postTransactionTest(t *testing.T, txnToUse, expectedCode int) { mockLedger, _, _, stxns, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) defer releasefunc() dummyShutdownChan := make(chan struct{}) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nil) handler := v2.Handlers{ Node: mockNode, Log: logging.Base(), @@ -260,14 +262,14 @@ func TestPostTransaction(t *testing.T) { postTransactionTest(t, 0, 200) } -func startCatchupTest(t *testing.T, catchpoint string, expectedCode int) { +func startCatchupTest(t *testing.T, catchpoint string, nodeError error, expectedCode int) { numAccounts := 1 numTransactions := 1 offlineAccounts := true mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) defer releasefunc() dummyShutdownChan := make(chan struct{}) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nodeError) handler := v2.Handlers{ Node: mockNode, Log: logging.Base(), @@ -286,9 +288,18 @@ func TestStartCatchup(t *testing.T) { t.Parallel() goodCatchPoint := "5894690#DVFRZUYHEFKRLK5N6DNJRR4IABEVN2D6H76F3ZSEPIE6MKXMQWQA" - startCatchupTest(t, goodCatchPoint, 200) + startCatchupTest(t, goodCatchPoint, nil, 201) + + inProgressError := node.MakeCatchpointAlreadyInProgressError("catchpoint") + startCatchupTest(t, goodCatchPoint, inProgressError, 200) + + 
unableToStartError := node.MakeCatchpointUnableToStartError("running", "requested") + startCatchupTest(t, goodCatchPoint, unableToStartError, 400) + + startCatchupTest(t, goodCatchPoint, errors.New("anothing else is internal"), 500) + badCatchPoint := "bad catchpoint" - startCatchupTest(t, badCatchPoint, 400) + startCatchupTest(t, badCatchPoint, nil, 400) } func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) { @@ -298,7 +309,7 @@ func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) { mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) defer releasefunc() dummyShutdownChan := make(chan struct{}) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nil) handler := v2.Handlers{ Node: mockNode, Log: logging.Base(), @@ -308,7 +319,7 @@ func abortCatchupTest(t *testing.T, catchpoint string, expectedCode int) { req := httptest.NewRequest(http.MethodDelete, "/", nil) rec := httptest.NewRecorder() c := e.NewContext(req, rec) - err := handler.StartCatchup(c, catchpoint) + err := handler.AbortCatchup(c, catchpoint) require.NoError(t, err) require.Equal(t, expectedCode, rec.Code) } @@ -329,7 +340,7 @@ func tealCompileTest(t *testing.T, bytesToUse []byte, expectedCode int, enableDe mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) defer releasefunc() dummyShutdownChan := make(chan struct{}) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nil) mockNode.config.EnableDeveloperAPI = enableDeveloperAPI handler := v2.Handlers{ Node: &mockNode, @@ -368,7 +379,7 @@ func tealDryrunTest( mockLedger, _, _, _, releasefunc := testingenv(t, numAccounts, numTransactions, offlineAccounts) defer releasefunc() dummyShutdownChan := make(chan struct{}) - mockNode := makeMockNode(mockLedger, t.Name()) + mockNode := makeMockNode(mockLedger, t.Name(), nil) 
mockNode.config.EnableDeveloperAPI = enableDeveloperAPI handler := v2.Handlers{ Node: &mockNode, diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 34d12883c1..dd283dd921 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -81,10 +81,15 @@ type mockNode struct { ledger *data.Ledger genesisID string config config.Local + err error } -func makeMockNode(ledger *data.Ledger, genesisID string) mockNode { - return mockNode{ledger: ledger, genesisID: genesisID, config: config.GetDefaultLocal()} +func makeMockNode(ledger *data.Ledger, genesisID string, nodeError error) mockNode { + return mockNode{ + ledger: ledger, + genesisID: genesisID, + config: config.GetDefaultLocal(), + err: nodeError} } func (m mockNode) Ledger() *data.Ledger { @@ -104,7 +109,7 @@ func (m mockNode) GenesisHash() crypto.Digest { } func (m mockNode) BroadcastSignedTxGroup(txgroup []transactions.SignedTxn) error { - return nil + return m.err } func (m mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWithStatus, found bool) { @@ -114,7 +119,7 @@ func (m mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWit } func (m mockNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) { - return nil, nil + return nil, m.err } func (m mockNode) SuggestedFee() basics.MicroAlgos { @@ -168,11 +173,11 @@ func (m mockNode) AssembleBlock(round basics.Round, deadline time.Time) (agreeme } func (m mockNode) StartCatchup(catchpoint string) error { - return nil + return m.err } func (m mockNode) AbortCatchup(catchpoint string) error { - return nil + return m.err } ////// mock ledger testing environment follows diff --git a/daemon/algod/api/server/v2/utils.go b/daemon/algod/api/server/v2/utils.go index d616fa1010..e022034318 100644 --- a/daemon/algod/api/server/v2/utils.go +++ b/daemon/algod/api/server/v2/utils.go @@ -105,8 +105,6 @@ func 
computeCreatableIndexInPayset(tx node.TxnWithStatus, txnCounter uint64, pay return &idx } - - // computeAssetIndexFromTxn returns the created asset index given a confirmed // transaction whose confirmation block is available in the ledger. Note that // 0 is an invalid asset index (they start at 1). @@ -177,7 +175,6 @@ func computeAppIndexFromTxn(tx node.TxnWithStatus, l *data.Ledger) (aidx *uint64 return computeCreatableIndexInPayset(tx, blk.BlockHeader.TxnCounter, payset) } - // getCodecHandle converts a format string into the encoder + content type func getCodecHandle(formatPtr *string) (codec.Handle, string, error) { format := "json" @@ -224,9 +221,9 @@ func stateDeltaToStateDelta(d basics.StateDelta) *generated.StateDelta { return nil } var delta generated.StateDelta - for k, v:= range d { + for k, v := range d { delta = append(delta, generated.EvalDeltaKeyValue{ - Key: base64.StdEncoding.EncodeToString([]byte(k)), + Key: base64.StdEncoding.EncodeToString([]byte(k)), Value: generated.EvalDelta{ Action: uint64(v.Action), Bytes: strOrNil(base64.StdEncoding.EncodeToString([]byte(v.Bytes))), @@ -249,10 +246,10 @@ func convertToDeltas(txn node.TxnWithStatus) (*[]generated.AccountStateDelta, *g if k == 0 { addr = txn.Txn.Txn.Sender.String() } else { - if int(k - 1) < len(accounts) { + if int(k-1) < len(accounts) { addr = txn.Txn.Txn.Accounts[k-1].String() } else { - addr = fmt.Sprintf("Invalid Address Index: %d", k - 1) + addr = fmt.Sprintf("Invalid Address Index: %d", k-1) } } d = append(d, generated.AccountStateDelta{ diff --git a/daemon/algod/server.go b/daemon/algod/server.go index c51c520c2c..a14d0dfe0f 100644 --- a/daemon/algod/server.go +++ b/daemon/algod/server.go @@ -34,6 +34,7 @@ import ( "github.com/algorand/go-algorand/config" apiServer "github.com/algorand/go-algorand/daemon/algod/api/server" + "github.com/algorand/go-algorand/daemon/algod/api/server/lib" "github.com/algorand/go-algorand/data/bookkeeping" 
"github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" @@ -59,10 +60,12 @@ type Server struct { } // Initialize creates a Node instance with applicable network services -func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string) error { +func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string, genesisText string) error { // set up node s.log = logging.Base() + lib.GenesisJSONText = genesisText + liveLog := filepath.Join(s.RootPath, "node.log") archive := filepath.Join(s.RootPath, cfg.LogArchiveName) fmt.Println("Logging to: ", liveLog) @@ -100,10 +103,12 @@ func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string) error // collected metrics decorations. fmt.Fprintln(logWriter, "++++++++++++++++++++++++++++++++++++++++") fmt.Fprintln(logWriter, "Logging Starting") - if s.log.GetTelemetryEnabled() { + if s.log.GetTelemetryUploadingEnabled() { + // May or may not be logging to node.log fmt.Fprintf(logWriter, "Telemetry Enabled: %s\n", s.log.GetTelemetryHostName()) fmt.Fprintf(logWriter, "Session: %s\n", s.log.GetTelemetrySession()) } else { + // May or may not be logging to node.log fmt.Fprintln(logWriter, "Telemetry Disabled") } fmt.Fprintln(logWriter, "++++++++++++++++++++++++++++++++++++++++") diff --git a/daemon/kmd/api/api.go b/daemon/kmd/api/api.go index d38fb85adc..9b55951217 100644 --- a/daemon/kmd/api/api.go +++ b/daemon/kmd/api/api.go @@ -60,7 +60,7 @@ // loader.Config.Import(), and that breaks the vendor directory if the source is symlinked from elsewhere) //go:generate swagger generate spec -m -o="./swagger.json" //go:generate swagger validate ./swagger.json --stop-on-error -//go:generate ../lib/kmdapi/bundle_swagger_json.sh +//go:generate sh ../lib/kmdapi/bundle_swagger_json.sh package api import ( diff --git a/data/basics/address.go b/data/basics/address.go index c0341517b1..a196071f30 100644 --- a/data/basics/address.go +++ b/data/basics/address.go @@ 
-49,6 +49,8 @@ func (addr Address) GetUserAddress() string { } // UnmarshalChecksumAddress tries to unmarshal the checksummed address string. +// Algorand strings addresses ( base32 encoded ) have a postamble which serves as the checksum of the address. +// When converted to an Address object representation, that checksum is dropped (after validation). func UnmarshalChecksumAddress(address string) (Address, error) { decoded, err := base32Encoder.DecodeString(address) diff --git a/data/basics/overflow.go b/data/basics/overflow.go index 3e28d17aa4..e344a2ada0 100644 --- a/data/basics/overflow.go +++ b/data/basics/overflow.go @@ -18,6 +18,7 @@ package basics import ( "math" + "math/big" ) // OverflowTracker is used to track when an operation causes an overflow @@ -163,3 +164,21 @@ func (t *OverflowTracker) SubR(a Round, b Round) Round { func (t *OverflowTracker) ScalarMulA(a MicroAlgos, b uint64) MicroAlgos { return MicroAlgos{Raw: t.Mul(a.Raw, b)} } + +// Muldiv computes a*b/c. The overflow flag indicates that +// the result was 2^64 or greater. 
+func Muldiv(a uint64, b uint64, c uint64) (res uint64, overflow bool) { + var aa big.Int + aa.SetUint64(a) + + var bb big.Int + bb.SetUint64(b) + + var cc big.Int + cc.SetUint64(c) + + aa.Mul(&aa, &bb) + aa.Div(&aa, &cc) + + return aa.Uint64(), !aa.IsUint64() +} diff --git a/data/basics/teal_test.go b/data/basics/teal_test.go index 0de5553f5c..090bae9a41 100644 --- a/data/basics/teal_test.go +++ b/data/basics/teal_test.go @@ -111,3 +111,115 @@ func TestSatisfiesSchema(t *testing.T) { err = tkv.SatisfiesSchema(schema) a.NoError(err) } + +func TestStateDeltaEqual(t *testing.T) { + a := require.New(t) + + var d1 StateDelta = nil + var d2 StateDelta = nil + a.True(d1.Equal(d2)) + + d2 = StateDelta{} + a.True(d1.Equal(d2)) + + d2 = StateDelta{"test": {Action: SetUintAction, Uint: 0}} + a.False(d1.Equal(d2)) + + d1 = StateDelta{} + d2 = StateDelta{} + a.True(d1.Equal(d2)) + + d2 = StateDelta{"test": {Action: SetUintAction, Uint: 0}} + a.False(d1.Equal(d2)) + + d1 = StateDelta{"test2": {Action: SetBytesAction, Uint: 0}} + a.False(d1.Equal(d2)) + + d1 = StateDelta{"test": {Action: SetUintAction, Uint: 0}} + d2 = StateDelta{"test": {Action: SetUintAction, Uint: 0}} + a.True(d1.Equal(d2)) + + d1 = StateDelta{"test": {Action: SetBytesAction, Bytes: "val"}} + d2 = StateDelta{"test": {Action: SetBytesAction, Bytes: "val"}} + a.True(d1.Equal(d2)) + + d2 = StateDelta{"test": {Action: SetBytesAction, Bytes: "val1"}} + a.False(d1.Equal(d2)) +} + +func TestEvalDeltaEqual(t *testing.T) { + a := require.New(t) + + d1 := EvalDelta{} + d2 := EvalDelta{} + a.True(d1.Equal(d2)) + + d2 = EvalDelta{ + GlobalDelta: nil, + LocalDeltas: nil, + } + a.True(d1.Equal(d2)) + + d2 = EvalDelta{ + GlobalDelta: StateDelta{}, + LocalDeltas: map[uint64]StateDelta{}, + } + a.True(d1.Equal(d2)) + + d2 = EvalDelta{ + GlobalDelta: StateDelta{"test": {Action: SetUintAction, Uint: 0}}, + } + a.False(d1.Equal(d2)) + + d1 = EvalDelta{ + GlobalDelta: StateDelta{"test": {Action: SetUintAction, Uint: 0}}, + } + 
a.True(d1.Equal(d2)) + + d2 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetUintAction, Uint: 0}}, + }, + } + a.False(d1.Equal(d2)) + + d1 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetUintAction, Uint: 1}}, + }, + } + a.False(d1.Equal(d2)) + + d2 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetUintAction, Uint: 1}}, + }, + } + a.True(d1.Equal(d2)) + + d1 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetBytesAction, Bytes: "val"}}, + }, + } + d2 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetBytesAction, Bytes: "val"}}, + }, + } + a.True(d1.Equal(d2)) + + d2 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 0: {"test": {Action: SetBytesAction, Bytes: "val1"}}, + }, + } + a.False(d1.Equal(d2)) + + d2 = EvalDelta{ + LocalDeltas: map[uint64]StateDelta{ + 1: {"test": {Action: SetBytesAction, Bytes: "val"}}, + }, + } + a.False(d1.Equal(d2)) +} diff --git a/data/basics/userBalance.go b/data/basics/userBalance.go index 1f56369d20..2ccb5eeb40 100644 --- a/data/basics/userBalance.go +++ b/data/basics/userBalance.go @@ -381,6 +381,14 @@ func (u AccountData) Money(proto config.ConsensusParams, rewardsLevel uint64) (m return e.MicroAlgos, e.RewardedMicroAlgos } +// PendingRewards computes the amount of rewards (in microalgos) that +// have yet to be added to the account balance. +func PendingRewards(ot *OverflowTracker, proto config.ConsensusParams, microAlgos MicroAlgos, rewardsBase uint64, rewardsLevel uint64) MicroAlgos { + rewardsUnits := microAlgos.RewardUnits(proto) + rewardsDelta := ot.Sub(rewardsLevel, rewardsBase) + return MicroAlgos{Raw: ot.Mul(rewardsUnits, rewardsDelta)} +} + // WithUpdatedRewards returns an updated number of algos in an AccountData // to reflect rewards up to some rewards level. 
func (u AccountData) WithUpdatedRewards(proto config.ConsensusParams, rewardsLevel uint64) AccountData { @@ -462,6 +470,49 @@ func (u AccountData) IsZero() bool { return reflect.DeepEqual(u, AccountData{}) } +// NormalizedOnlineBalance returns a ``normalized'' balance for this account. +// +// The normalization compensates for rewards that have not yet been applied, +// by computing a balance normalized to round 0. To normalize, we estimate +// the microalgo balance that an account should have had at round 0, in order +// to end up at its current balance when rewards are included. +// +// The benefit of the normalization procedure is that an account's normalized +// balance does not change over time (unlike the actual algo balance that includes +// rewards). This makes it possible to compare normalized balances between two +// accounts, to sort them, and get results that are close to what we would get +// if we computed the exact algo balance of the accounts at a given round number. +// +// The normalization can lead to some inconsistencies in comparisons between +// account balances, because the growth rate of rewards for accounts depends +// on how recently the account has been touched (our rewards do not implement +// compounding). However, online accounts have to periodically renew +// participation keys, so the scale of the inconsistency is small. +func (u AccountData) NormalizedOnlineBalance(proto config.ConsensusParams) uint64 { + if u.Status != Online { + return 0 + } + + // If this account had one RewardUnit of microAlgos in round 0, it would + // have perRewardUnit microAlgos at the account's current rewards level. + perRewardUnit := u.RewardsBase + proto.RewardUnit + + // To normalize, we compute, mathematically, + // u.MicroAlgos / perRewardUnit * proto.RewardUnit, as + // (u.MicroAlgos * proto.RewardUnit) / perRewardUnit. 
+ norm, overflowed := Muldiv(u.MicroAlgos.ToUint64(), proto.RewardUnit, perRewardUnit) + + // Mathematically should be impossible to overflow + // because perRewardUnit >= proto.RewardUnit, as long + // as u.RewardBase isn't huge enough to cause overflow.. + if overflowed { + logging.Base().Panicf("overflow computing normalized balance %d * %d / (%d + %d)", + u.MicroAlgos.ToUint64(), proto.RewardUnit, u.RewardsBase, proto.RewardUnit) + } + + return norm +} + // BalanceRecord pairs an account's address with its associated data. type BalanceRecord struct { _struct struct{} `codec:",omitempty,omitemptyarray"` diff --git a/data/bookkeeping/block.go b/data/bookkeeping/block.go index 54426066b6..8a4d79194e 100644 --- a/data/bookkeeping/block.go +++ b/data/bookkeeping/block.go @@ -121,6 +121,24 @@ type ( // transactions have ever been committed (since TxnCounter // started being supported). TxnCounter uint64 `codec:"tc"` + + // CompactCertVoters is the root of a Merkle tree containing the + // online accounts that will help sign a compact certificate. The + // Merkle root, and the compact certificate, happen on blocks that + // are a multiple of ConsensusParams.CompactCertRounds. For blocks + // that are not a multiple of ConsensusParams.CompactCertRounds, + // this value is zero. + CompactCertVoters crypto.Digest `codec:"ccv"` + + // CompactCertVotersTotal is the total number of microalgos held by + // the accounts in CompactCertVoters (or zero, if the merkle root is + // zero). This is intended for computing the threshold of votes to + // expect from CompactCertVoters. + CompactCertVotersTotal basics.MicroAlgos `codec:"ccvt"` + + // CompactCertLastRound is the last round for which we have committed + // a CompactCert transaction. 
+ CompactCertLastRound basics.Round `codec:"ccl"` } // RewardsState represents the global parameters controlling the rate diff --git a/data/bookkeeping/msgp_gen.go b/data/bookkeeping/msgp_gen.go index a93dfe395e..76ed260a44 100644 --- a/data/bookkeeping/msgp_gen.go +++ b/data/bookkeeping/msgp_gen.go @@ -69,109 +69,148 @@ import ( func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(23) - var zb0001Mask uint32 /* 26 bits */ - if (*z).BlockHeader.RewardsState.RewardsLevel == 0 { + zb0001Len := uint32(26) + var zb0001Mask uint32 /* 29 bits */ + if (*z).BlockHeader.CompactCertLastRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x8 } - if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() { + if (*z).BlockHeader.CompactCertVoters.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x10 } - if (*z).BlockHeader.RewardsState.RewardsResidue == 0 { + if (*z).BlockHeader.CompactCertVotersTotal.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).BlockHeader.GenesisID == "" { + if (*z).BlockHeader.RewardsState.RewardsLevel == 0 { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).BlockHeader.GenesisHash.MsgIsZero() { + if (*z).BlockHeader.RewardsState.FeeSink.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } - if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() { + if (*z).BlockHeader.RewardsState.RewardsResidue == 0 { zb0001Len-- zb0001Mask |= 0x100 } - if (*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() { + if (*z).BlockHeader.GenesisID == "" { zb0001Len-- zb0001Mask |= 0x200 } - if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() { + if (*z).BlockHeader.GenesisHash.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x400 } - if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 { + if (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x800 } - if (*z).BlockHeader.Branch.MsgIsZero() { + if 
(*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1000 } - if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2000 } - if (*z).BlockHeader.RewardsState.RewardsRate == 0 { + if (*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0 { zb0001Len-- zb0001Mask |= 0x4000 } - if (*z).BlockHeader.Round.MsgIsZero() { + if (*z).BlockHeader.Branch.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x8000 } - if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { + if (*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x10000 } - if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() { + if (*z).BlockHeader.RewardsState.RewardsRate == 0 { zb0001Len-- zb0001Mask |= 0x20000 } - if (*z).BlockHeader.Seed.MsgIsZero() { + if (*z).BlockHeader.Round.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x40000 } - if (*z).BlockHeader.TxnCounter == 0 { + if (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80000 } - if (*z).BlockHeader.TimeStamp == 0 { + if (*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x100000 } - if (*z).BlockHeader.TxnRoot.MsgIsZero() { + if (*z).BlockHeader.Seed.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x200000 } - if (*z).Payset.MsgIsZero() { + if (*z).BlockHeader.TxnCounter == 0 { zb0001Len-- zb0001Mask |= 0x400000 } - if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { + if (*z).BlockHeader.TimeStamp == 0 { zb0001Len-- zb0001Mask |= 0x800000 } - if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { + if (*z).BlockHeader.TxnRoot.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1000000 } - if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false { + if (*z).Payset.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2000000 } + if (*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero() { + zb0001Len-- + zb0001Mask 
|= 0x4000000 + } + if (*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x8000000 + } + if (*z).BlockHeader.UpgradeVote.UpgradeApprove == false { + zb0001Len-- + zb0001Mask |= 0x10000000 + } // variable map header, size zb0001Len o = msgp.AppendMapHeader(o, zb0001Len) if zb0001Len != 0 { if (zb0001Mask & 0x8) == 0 { // if not empty + // string "ccl" + o = append(o, 0xa3, 0x63, 0x63, 0x6c) + o, err = (*z).BlockHeader.CompactCertLastRound.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertLastRound") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "ccv" + o = append(o, 0xa3, 0x63, 0x63, 0x76) + o, err = (*z).BlockHeader.CompactCertVoters.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertVoters") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "ccvt" + o = append(o, 0xa4, 0x63, 0x63, 0x76, 0x74) + o, err = (*z).BlockHeader.CompactCertVotersTotal.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertVotersTotal") + return + } + } + if (zb0001Mask & 0x40) == 0 { // if not empty // string "earn" o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e) o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsLevel) } - if (zb0001Mask & 0x10) == 0 { // if not empty + if (zb0001Mask & 0x80) == 0 { // if not empty // string "fees" o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73) o, err = (*z).BlockHeader.RewardsState.FeeSink.MarshalMsg(o) @@ -180,17 +219,17 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x20) == 0 { // if not empty + if (zb0001Mask & 0x100) == 0 { // if not empty // string "frac" o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63) o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsResidue) } - if (zb0001Mask & 0x40) == 0 { // if not empty + if (zb0001Mask & 0x200) == 0 { // if not empty // string "gen" o = append(o, 0xa3, 0x67, 0x65, 0x6e) o = msgp.AppendString(o, 
(*z).BlockHeader.GenesisID) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x400) == 0 { // if not empty // string "gh" o = append(o, 0xa2, 0x67, 0x68) o, err = (*z).BlockHeader.GenesisHash.MarshalMsg(o) @@ -199,7 +238,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x100) == 0 { // if not empty + if (zb0001Mask & 0x800) == 0 { // if not empty // string "nextbefore" o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65) o, err = (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MarshalMsg(o) @@ -208,7 +247,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x200) == 0 { // if not empty + if (zb0001Mask & 0x1000) == 0 { // if not empty // string "nextproto" o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f) o, err = (*z).BlockHeader.UpgradeState.NextProtocol.MarshalMsg(o) @@ -217,7 +256,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x400) == 0 { // if not empty + if (zb0001Mask & 0x2000) == 0 { // if not empty // string "nextswitch" o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68) o, err = (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MarshalMsg(o) @@ -226,12 +265,12 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x800) == 0 { // if not empty + if (zb0001Mask & 0x4000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) o = msgp.AppendUint64(o, (*z).BlockHeader.UpgradeState.NextProtocolApprovals) } - if (zb0001Mask & 0x1000) == 0 { // if not empty + if (zb0001Mask & 0x8000) == 0 { // if not empty // string "prev" o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76) o, err = (*z).BlockHeader.Branch.MarshalMsg(o) @@ -240,7 +279,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x2000) 
== 0 { // if not empty + if (zb0001Mask & 0x10000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o, err = (*z).BlockHeader.UpgradeState.CurrentProtocol.MarshalMsg(o) @@ -249,12 +288,12 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x4000) == 0 { // if not empty + if (zb0001Mask & 0x20000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).BlockHeader.RewardsState.RewardsRate) } - if (zb0001Mask & 0x8000) == 0 { // if not empty + if (zb0001Mask & 0x40000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o, err = (*z).BlockHeader.Round.MarshalMsg(o) @@ -263,7 +302,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x10000) == 0 { // if not empty + if (zb0001Mask & 0x80000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o, err = (*z).BlockHeader.RewardsState.RewardsRecalculationRound.MarshalMsg(o) @@ -272,7 +311,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x20000) == 0 { // if not empty + if (zb0001Mask & 0x100000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o, err = (*z).BlockHeader.RewardsState.RewardsPool.MarshalMsg(o) @@ -281,7 +320,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x40000) == 0 { // if not empty + if (zb0001Mask & 0x200000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o, err = (*z).BlockHeader.Seed.MarshalMsg(o) @@ -290,17 +329,17 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x80000) == 0 { // if not empty + if (zb0001Mask & 0x400000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).BlockHeader.TxnCounter) 
} - if (zb0001Mask & 0x100000) == 0 { // if not empty + if (zb0001Mask & 0x800000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).BlockHeader.TimeStamp) } - if (zb0001Mask & 0x200000) == 0 { // if not empty + if (zb0001Mask & 0x1000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o, err = (*z).BlockHeader.TxnRoot.MarshalMsg(o) @@ -309,7 +348,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x400000) == 0 { // if not empty + if (zb0001Mask & 0x2000000) == 0 { // if not empty // string "txns" o = append(o, 0xa4, 0x74, 0x78, 0x6e, 0x73) o, err = (*z).Payset.MarshalMsg(o) @@ -318,7 +357,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x800000) == 0 { // if not empty + if (zb0001Mask & 0x4000000) == 0 { // if not empty // string "upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o, err = (*z).BlockHeader.UpgradeVote.UpgradeDelay.MarshalMsg(o) @@ -327,7 +366,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x1000000) == 0 { // if not empty + if (zb0001Mask & 0x8000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o, err = (*z).BlockHeader.UpgradeVote.UpgradePropose.MarshalMsg(o) @@ -336,7 +375,7 @@ func (z *Block) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x2000000) == 0 { // if not empty + if (zb0001Mask & 0x10000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).BlockHeader.UpgradeVote.UpgradeApprove) @@ -539,6 +578,30 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + if zb0001 > 0 { + zb0001-- + bts, err = 
(*z).BlockHeader.CompactCertVoters.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertVoters") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).BlockHeader.CompactCertVotersTotal.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertVotersTotal") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).BlockHeader.CompactCertLastRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertLastRound") + return + } + } if zb0001 > 0 { zb0001-- bts, err = (*z).Payset.UnmarshalMsg(bts) @@ -702,6 +765,24 @@ func (z *Block) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "TxnCounter") return } + case "ccv": + bts, err = (*z).BlockHeader.CompactCertVoters.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertVoters") + return + } + case "ccvt": + bts, err = (*z).BlockHeader.CompactCertVotersTotal.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertVotersTotal") + return + } + case "ccl": + bts, err = (*z).BlockHeader.CompactCertLastRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertLastRound") + return + } case "txns": bts, err = (*z).Payset.UnmarshalMsg(bts) if err != nil { @@ -728,13 +809,13 @@ func (_ *Block) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Block) Msgsize() (s int) { - s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnRoot.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + 
msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 5 + (*z).Payset.Msgsize() + s = 3 + 4 + (*z).BlockHeader.Round.Msgsize() + 5 + (*z).BlockHeader.Branch.Msgsize() + 5 + (*z).BlockHeader.Seed.Msgsize() + 4 + (*z).BlockHeader.TxnRoot.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).BlockHeader.GenesisID) + 3 + (*z).BlockHeader.GenesisHash.Msgsize() + 5 + (*z).BlockHeader.RewardsState.FeeSink.Msgsize() + 4 + (*z).BlockHeader.RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).BlockHeader.RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).BlockHeader.UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).BlockHeader.UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).BlockHeader.UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).BlockHeader.UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + (*z).BlockHeader.CompactCertVoters.Msgsize() + 5 + (*z).BlockHeader.CompactCertVotersTotal.Msgsize() + 4 + (*z).BlockHeader.CompactCertLastRound.Msgsize() + 5 + (*z).Payset.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *Block) MsgIsZero() bool { - return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && 
((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && ((*z).Payset.MsgIsZero()) + return ((*z).BlockHeader.Round.MsgIsZero()) && ((*z).BlockHeader.Branch.MsgIsZero()) && ((*z).BlockHeader.Seed.MsgIsZero()) && ((*z).BlockHeader.TxnRoot.MsgIsZero()) && ((*z).BlockHeader.TimeStamp == 0) && ((*z).BlockHeader.GenesisID == "") && ((*z).BlockHeader.GenesisHash.MsgIsZero()) && ((*z).BlockHeader.RewardsState.FeeSink.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsPool.MsgIsZero()) && ((*z).BlockHeader.RewardsState.RewardsLevel == 0) && ((*z).BlockHeader.RewardsState.RewardsRate == 0) && ((*z).BlockHeader.RewardsState.RewardsResidue == 0) && ((*z).BlockHeader.RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocol.MsgIsZero()) && ((*z).BlockHeader.UpgradeState.NextProtocolApprovals == 0) && ((*z).BlockHeader.UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && 
((*z).BlockHeader.UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).BlockHeader.UpgradeVote.UpgradeApprove == false) && ((*z).BlockHeader.TxnCounter == 0) && ((*z).BlockHeader.CompactCertVoters.MsgIsZero()) && ((*z).BlockHeader.CompactCertVotersTotal.MsgIsZero()) && ((*z).BlockHeader.CompactCertLastRound.MsgIsZero()) && ((*z).Payset.MsgIsZero()) } // MarshalMsg implements msgp.Marshaler @@ -769,105 +850,144 @@ func (z *BlockHash) MsgIsZero() bool { func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(22) - var zb0001Mask uint32 /* 25 bits */ - if (*z).RewardsState.RewardsLevel == 0 { + zb0001Len := uint32(25) + var zb0001Mask uint32 /* 28 bits */ + if (*z).CompactCertLastRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x8 } - if (*z).RewardsState.FeeSink.MsgIsZero() { + if (*z).CompactCertVoters.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x10 } - if (*z).RewardsState.RewardsResidue == 0 { + if (*z).CompactCertVotersTotal.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).GenesisID == "" { + if (*z).RewardsState.RewardsLevel == 0 { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).GenesisHash.MsgIsZero() { + if (*z).RewardsState.FeeSink.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } - if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() { + if (*z).RewardsState.RewardsResidue == 0 { zb0001Len-- zb0001Mask |= 0x100 } - if (*z).UpgradeState.NextProtocol.MsgIsZero() { + if (*z).GenesisID == "" { zb0001Len-- zb0001Mask |= 0x200 } - if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() { + if (*z).GenesisHash.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x400 } - if (*z).UpgradeState.NextProtocolApprovals == 0 { + if (*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x800 } - if (*z).Branch.MsgIsZero() { + if 
(*z).UpgradeState.NextProtocol.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1000 } - if (*z).UpgradeState.CurrentProtocol.MsgIsZero() { + if (*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2000 } - if (*z).RewardsState.RewardsRate == 0 { + if (*z).UpgradeState.NextProtocolApprovals == 0 { zb0001Len-- zb0001Mask |= 0x4000 } - if (*z).Round.MsgIsZero() { + if (*z).Branch.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x8000 } - if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() { + if (*z).UpgradeState.CurrentProtocol.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x10000 } - if (*z).RewardsState.RewardsPool.MsgIsZero() { + if (*z).RewardsState.RewardsRate == 0 { zb0001Len-- zb0001Mask |= 0x20000 } - if (*z).Seed.MsgIsZero() { + if (*z).Round.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x40000 } - if (*z).TxnCounter == 0 { + if (*z).RewardsState.RewardsRecalculationRound.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80000 } - if (*z).TimeStamp == 0 { + if (*z).RewardsState.RewardsPool.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x100000 } - if (*z).TxnRoot.MsgIsZero() { + if (*z).Seed.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x200000 } - if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() { + if (*z).TxnCounter == 0 { zb0001Len-- zb0001Mask |= 0x400000 } - if (*z).UpgradeVote.UpgradePropose.MsgIsZero() { + if (*z).TimeStamp == 0 { zb0001Len-- zb0001Mask |= 0x800000 } - if (*z).UpgradeVote.UpgradeApprove == false { + if (*z).TxnRoot.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x1000000 } + if (*z).UpgradeVote.UpgradeDelay.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2000000 + } + if (*z).UpgradeVote.UpgradePropose.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4000000 + } + if (*z).UpgradeVote.UpgradeApprove == false { + zb0001Len-- + zb0001Mask |= 0x8000000 + } // variable map header, size zb0001Len o = msgp.AppendMapHeader(o, zb0001Len) if zb0001Len != 0 { if (zb0001Mask & 0x8) == 0 { // if not empty + // string "ccl" + o = append(o, 0xa3, 0x63, 0x63, 0x6c) + o, err 
= (*z).CompactCertLastRound.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertLastRound") + return + } + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "ccv" + o = append(o, 0xa3, 0x63, 0x63, 0x76) + o, err = (*z).CompactCertVoters.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertVoters") + return + } + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "ccvt" + o = append(o, 0xa4, 0x63, 0x63, 0x76, 0x74) + o, err = (*z).CompactCertVotersTotal.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CompactCertVotersTotal") + return + } + } + if (zb0001Mask & 0x40) == 0 { // if not empty // string "earn" o = append(o, 0xa4, 0x65, 0x61, 0x72, 0x6e) o = msgp.AppendUint64(o, (*z).RewardsState.RewardsLevel) } - if (zb0001Mask & 0x10) == 0 { // if not empty + if (zb0001Mask & 0x80) == 0 { // if not empty // string "fees" o = append(o, 0xa4, 0x66, 0x65, 0x65, 0x73) o, err = (*z).RewardsState.FeeSink.MarshalMsg(o) @@ -876,17 +996,17 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x20) == 0 { // if not empty + if (zb0001Mask & 0x100) == 0 { // if not empty // string "frac" o = append(o, 0xa4, 0x66, 0x72, 0x61, 0x63) o = msgp.AppendUint64(o, (*z).RewardsState.RewardsResidue) } - if (zb0001Mask & 0x40) == 0 { // if not empty + if (zb0001Mask & 0x200) == 0 { // if not empty // string "gen" o = append(o, 0xa3, 0x67, 0x65, 0x6e) o = msgp.AppendString(o, (*z).GenesisID) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x400) == 0 { // if not empty // string "gh" o = append(o, 0xa2, 0x67, 0x68) o, err = (*z).GenesisHash.MarshalMsg(o) @@ -895,7 +1015,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x100) == 0 { // if not empty + if (zb0001Mask & 0x800) == 0 { // if not empty // string "nextbefore" o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65) o, err 
= (*z).UpgradeState.NextProtocolVoteBefore.MarshalMsg(o) @@ -904,7 +1024,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x200) == 0 { // if not empty + if (zb0001Mask & 0x1000) == 0 { // if not empty // string "nextproto" o = append(o, 0xa9, 0x6e, 0x65, 0x78, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f) o, err = (*z).UpgradeState.NextProtocol.MarshalMsg(o) @@ -913,7 +1033,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x400) == 0 { // if not empty + if (zb0001Mask & 0x2000) == 0 { // if not empty // string "nextswitch" o = append(o, 0xaa, 0x6e, 0x65, 0x78, 0x74, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68) o, err = (*z).UpgradeState.NextProtocolSwitchOn.MarshalMsg(o) @@ -922,12 +1042,12 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x800) == 0 { // if not empty + if (zb0001Mask & 0x4000) == 0 { // if not empty // string "nextyes" o = append(o, 0xa7, 0x6e, 0x65, 0x78, 0x74, 0x79, 0x65, 0x73) o = msgp.AppendUint64(o, (*z).UpgradeState.NextProtocolApprovals) } - if (zb0001Mask & 0x1000) == 0 { // if not empty + if (zb0001Mask & 0x8000) == 0 { // if not empty // string "prev" o = append(o, 0xa4, 0x70, 0x72, 0x65, 0x76) o, err = (*z).Branch.MarshalMsg(o) @@ -936,7 +1056,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x2000) == 0 { // if not empty + if (zb0001Mask & 0x10000) == 0 { // if not empty // string "proto" o = append(o, 0xa5, 0x70, 0x72, 0x6f, 0x74, 0x6f) o, err = (*z).UpgradeState.CurrentProtocol.MarshalMsg(o) @@ -945,12 +1065,12 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x4000) == 0 { // if not empty + if (zb0001Mask & 0x20000) == 0 { // if not empty // string "rate" o = append(o, 0xa4, 0x72, 0x61, 0x74, 0x65) o = msgp.AppendUint64(o, (*z).RewardsState.RewardsRate) } - if (zb0001Mask & 0x8000) == 0 { // if 
not empty + if (zb0001Mask & 0x40000) == 0 { // if not empty // string "rnd" o = append(o, 0xa3, 0x72, 0x6e, 0x64) o, err = (*z).Round.MarshalMsg(o) @@ -959,7 +1079,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x10000) == 0 { // if not empty + if (zb0001Mask & 0x80000) == 0 { // if not empty // string "rwcalr" o = append(o, 0xa6, 0x72, 0x77, 0x63, 0x61, 0x6c, 0x72) o, err = (*z).RewardsState.RewardsRecalculationRound.MarshalMsg(o) @@ -968,7 +1088,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x20000) == 0 { // if not empty + if (zb0001Mask & 0x100000) == 0 { // if not empty // string "rwd" o = append(o, 0xa3, 0x72, 0x77, 0x64) o, err = (*z).RewardsState.RewardsPool.MarshalMsg(o) @@ -977,7 +1097,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x40000) == 0 { // if not empty + if (zb0001Mask & 0x200000) == 0 { // if not empty // string "seed" o = append(o, 0xa4, 0x73, 0x65, 0x65, 0x64) o, err = (*z).Seed.MarshalMsg(o) @@ -986,17 +1106,17 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x80000) == 0 { // if not empty + if (zb0001Mask & 0x400000) == 0 { // if not empty // string "tc" o = append(o, 0xa2, 0x74, 0x63) o = msgp.AppendUint64(o, (*z).TxnCounter) } - if (zb0001Mask & 0x100000) == 0 { // if not empty + if (zb0001Mask & 0x800000) == 0 { // if not empty // string "ts" o = append(o, 0xa2, 0x74, 0x73) o = msgp.AppendInt64(o, (*z).TimeStamp) } - if (zb0001Mask & 0x200000) == 0 { // if not empty + if (zb0001Mask & 0x1000000) == 0 { // if not empty // string "txn" o = append(o, 0xa3, 0x74, 0x78, 0x6e) o, err = (*z).TxnRoot.MarshalMsg(o) @@ -1005,7 +1125,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x400000) == 0 { // if not empty + if (zb0001Mask & 0x2000000) == 0 { // if not empty // string 
"upgradedelay" o = append(o, 0xac, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x65, 0x6c, 0x61, 0x79) o, err = (*z).UpgradeVote.UpgradeDelay.MarshalMsg(o) @@ -1014,7 +1134,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x800000) == 0 { // if not empty + if (zb0001Mask & 0x4000000) == 0 { // if not empty // string "upgradeprop" o = append(o, 0xab, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x70, 0x72, 0x6f, 0x70) o, err = (*z).UpgradeVote.UpgradePropose.MarshalMsg(o) @@ -1023,7 +1143,7 @@ func (z *BlockHeader) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0001Mask & 0x1000000) == 0 { // if not empty + if (zb0001Mask & 0x8000000) == 0 { // if not empty // string "upgradeyes" o = append(o, 0xaa, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x79, 0x65, 0x73) o = msgp.AppendBool(o, (*z).UpgradeVote.UpgradeApprove) @@ -1226,6 +1346,30 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).CompactCertVoters.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertVoters") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).CompactCertVotersTotal.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertVotersTotal") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).CompactCertLastRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CompactCertLastRound") + return + } + } if zb0001 > 0 { err = msgp.ErrTooManyArrayFields(zb0001) if err != nil { @@ -1381,6 +1525,24 @@ func (z *BlockHeader) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "TxnCounter") return } + case "ccv": + bts, err = (*z).CompactCertVoters.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertVoters") + return + } + case "ccvt": + bts, err = 
(*z).CompactCertVotersTotal.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertVotersTotal") + return + } + case "ccl": + bts, err = (*z).CompactCertLastRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CompactCertLastRound") + return + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -1401,13 +1563,13 @@ func (_ *BlockHeader) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BlockHeader) Msgsize() (s int) { - s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnRoot.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + s = 3 + 4 + (*z).Round.Msgsize() + 5 + (*z).Branch.Msgsize() + 5 + (*z).Seed.Msgsize() + 4 + (*z).TxnRoot.Msgsize() + 3 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len((*z).GenesisID) + 3 + (*z).GenesisHash.Msgsize() + 5 + (*z).RewardsState.FeeSink.Msgsize() + 4 + (*z).RewardsState.RewardsPool.Msgsize() + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 5 + msgp.Uint64Size + 7 + (*z).RewardsState.RewardsRecalculationRound.Msgsize() + 6 + (*z).UpgradeState.CurrentProtocol.Msgsize() + 10 + (*z).UpgradeState.NextProtocol.Msgsize() + 8 + msgp.Uint64Size + 11 + (*z).UpgradeState.NextProtocolVoteBefore.Msgsize() + 
11 + (*z).UpgradeState.NextProtocolSwitchOn.Msgsize() + 12 + (*z).UpgradeVote.UpgradePropose.Msgsize() + 13 + (*z).UpgradeVote.UpgradeDelay.Msgsize() + 11 + msgp.BoolSize + 3 + msgp.Uint64Size + 4 + (*z).CompactCertVoters.Msgsize() + 5 + (*z).CompactCertVotersTotal.Msgsize() + 4 + (*z).CompactCertLastRound.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *BlockHeader) MsgIsZero() bool { - return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && ((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) + return ((*z).Round.MsgIsZero()) && ((*z).Branch.MsgIsZero()) && ((*z).Seed.MsgIsZero()) && ((*z).TxnRoot.MsgIsZero()) && ((*z).TimeStamp == 0) && ((*z).GenesisID == "") && ((*z).GenesisHash.MsgIsZero()) && ((*z).RewardsState.FeeSink.MsgIsZero()) && ((*z).RewardsState.RewardsPool.MsgIsZero()) && ((*z).RewardsState.RewardsLevel == 0) && ((*z).RewardsState.RewardsRate == 0) && ((*z).RewardsState.RewardsResidue == 0) && ((*z).RewardsState.RewardsRecalculationRound.MsgIsZero()) && ((*z).UpgradeState.CurrentProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocol.MsgIsZero()) && ((*z).UpgradeState.NextProtocolApprovals == 0) && 
((*z).UpgradeState.NextProtocolVoteBefore.MsgIsZero()) && ((*z).UpgradeState.NextProtocolSwitchOn.MsgIsZero()) && ((*z).UpgradeVote.UpgradePropose.MsgIsZero()) && ((*z).UpgradeVote.UpgradeDelay.MsgIsZero()) && ((*z).UpgradeVote.UpgradeApprove == false) && ((*z).TxnCounter == 0) && ((*z).CompactCertVoters.MsgIsZero()) && ((*z).CompactCertVotersTotal.MsgIsZero()) && ((*z).CompactCertLastRound.MsgIsZero()) } // MarshalMsg implements msgp.Marshaler diff --git a/data/ledger.go b/data/ledger.go index 9e0adf86c0..3d7ec8903a 100644 --- a/data/ledger.go +++ b/data/ledger.go @@ -18,6 +18,7 @@ package data import ( "fmt" + "sync/atomic" "time" "github.com/algorand/go-algorand/agreement" @@ -43,6 +44,35 @@ type Ledger struct { *ledger.Ledger log logging.Logger + + // a two-item moving window cache for the total number of online circulating coins + lastRoundCirculation atomic.Value + // a two-item moving window cache for the round seed + lastRoundSeed atomic.Value +} + +// roundCirculationPair used to hold a pair of matching round number and the amount of online money +type roundCirculationPair struct { + round basics.Round + onlineMoney basics.MicroAlgos +} + +// roundCirculation is the cache for the circulating coins +type roundCirculation struct { + // elements holds several round-onlineMoney pairs + elements [2]roundCirculationPair +} + +// roundSeedPair is the cache for a single seed at a given round +type roundSeedPair struct { + round basics.Round + seed committee.Seed +} + +// roundSeed is the cache for the seed +type roundSeed struct { + // elements holds several round-seed pairs + elements [2]roundSeedPair } func makeGenesisBlock(proto protocol.ConsensusVersion, genesisBal GenesisBalances, genesisID string, genesisHash crypto.Digest) (bookkeeping.Block, error) { @@ -188,11 +218,32 @@ func (l *Ledger) NextRound() basics.Round { // Circulation implements agreement.Ledger.Circulation. 
func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) { + circulation, cached := l.lastRoundCirculation.Load().(roundCirculation) + if cached && r != basics.Round(0) { + for _, element := range circulation.elements { + if element.round == r { + return element.onlineMoney, nil + } + } + } + totals, err := l.Totals(r) if err != nil { return basics.MicroAlgos{}, err } + if !cached || r > circulation.elements[1].round { + l.lastRoundCirculation.Store( + roundCirculation{ + elements: [2]roundCirculationPair{ + circulation.elements[1], + roundCirculationPair{ + round: r, + onlineMoney: totals.Online.Money}, + }, + }) + } + return totals.Online.Money, nil } @@ -201,10 +252,33 @@ func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) { // I/O error. // Implements agreement.Ledger.Seed func (l *Ledger) Seed(r basics.Round) (committee.Seed, error) { + seed, cached := l.lastRoundSeed.Load().(roundSeed) + if cached && r != basics.Round(0) { + for _, roundSeed := range seed.elements { + if roundSeed.round == r { + return roundSeed.seed, nil + } + } + } + blockhdr, err := l.BlockHdr(r) if err != nil { return committee.Seed{}, err } + + if !cached || r > seed.elements[1].round { + l.lastRoundSeed.Store( + roundSeed{ + elements: [2]roundSeedPair{ + seed.elements[1], + roundSeedPair{ + round: r, + seed: blockhdr.Seed, + }, + }, + }) + } + return blockhdr.Seed, nil } diff --git a/data/ledger_test.go b/data/ledger_test.go new file mode 100644 index 0000000000..472c12d8e9 --- /dev/null +++ b/data/ledger_test.go @@ -0,0 +1,314 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package data + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" +) + +var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +var testSinkAddr = basics.Address{0x2c, 0x2a, 0x6c, 0xe9, 0xa9, 0xa7, 0xc2, 0x8c, 0x22, 0x95, 0xfd, 0x32, 0x4f, 0x77, 0xa5, 0x4, 0x8b, 0x42, 0xc2, 0xb7, 0xa8, 0x54, 0x84, 0xb6, 0x80, 0xb1, 0xe1, 0x3d, 0x59, 0x9b, 0xeb, 0x36} + +func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState ledger.InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) { + + var poolSecret, sinkSecret *crypto.SignatureSecrets + var seed crypto.Seed + + incentivePoolName := []byte("incentive pool") + copy(seed[:], incentivePoolName) + poolSecret = crypto.GenerateSignatureSecrets(seed) + + feeSinkName := []byte("fee sink") + copy(seed[:], feeSinkName) + sinkSecret = crypto.GenerateSignatureSecrets(seed) + + params := config.Consensus[proto] + poolAddr := testPoolAddr + sinkAddr := testSinkAddr + + var zeroSeed 
crypto.Seed + var genaddrs [10]basics.Address + var gensecrets [10]*crypto.SignatureSecrets + for i := range genaddrs { + seed := zeroSeed + seed[0] = byte(i) + x := crypto.GenerateSignatureSecrets(seed) + genaddrs[i] = basics.Address(x.SignatureVerifier) + gensecrets[i] = x + } + + initKeys = make(map[basics.Address]*crypto.SignatureSecrets) + initAccounts := make(map[basics.Address]basics.AccountData) + for i := range genaddrs { + initKeys[genaddrs[i]] = gensecrets[i] + // Give each account quite a bit more balance than MinFee or MinBalance + accountStatus := basics.Online + if i%2 == 0 { + accountStatus = basics.NotParticipating + } + initAccounts[genaddrs[i]] = basics.MakeAccountData(accountStatus, basics.MicroAlgos{Raw: uint64((i + 100) * 100000)}) + } + initKeys[poolAddr] = poolSecret + initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567}) + initKeys[sinkAddr] = sinkSecret + initAccounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 7654321}) + + incentivePoolBalanceAtGenesis := initAccounts[poolAddr].MicroAlgos + initialRewardsPerRound := incentivePoolBalanceAtGenesis.Raw / uint64(params.RewardsRateRefreshInterval) + var emptyPayset transactions.Payset + + initBlock := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + GenesisID: tb.Name(), + Round: 0, + RewardsState: bookkeeping.RewardsState{ + RewardsRate: initialRewardsPerRound, + RewardsPool: poolAddr, + FeeSink: sinkAddr, + }, + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: proto, + }, + TxnRoot: emptyPayset.Commit(params.PaysetCommitFlat), + }, + } + if params.SupportGenesisHash { + initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name())) + } + + genesisInitState.Block = initBlock + genesisInitState.Accounts = initAccounts + genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name())) + + return +} + +func TestLedgerCirculation(t *testing.T) { + genesisInitState, keys := 
testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + log := logging.TestingLog(t) + log.SetLevel(logging.Warn) + realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer realLedger.Close() + + l := Ledger{Ledger: realLedger} + require.NotNil(t, &l) + + var sourceAccount basics.Address + var destAccount basics.Address + for addr, acctData := range genesisInitState.Accounts { + if addr == testPoolAddr || addr == testSinkAddr { + continue + } + if acctData.Status == basics.Online { + sourceAccount = addr + break + } + } + for addr, acctData := range genesisInitState.Accounts { + if addr == testPoolAddr || addr == testSinkAddr { + continue + } + if acctData.Status == basics.NotParticipating { + destAccount = addr + break + } + } + require.False(t, sourceAccount.IsZero()) + require.False(t, destAccount.IsZero()) + + data, err := realLedger.Lookup(basics.Round(0), destAccount) + require.NoError(t, err) + baseDestValue := data.MicroAlgos.Raw + + blk := genesisInitState.Block + totals, _ := realLedger.Totals(basics.Round(0)) + baseCirculation := totals.Online.Money.Raw + + srcAccountKey := keys[sourceAccount] + require.NotNil(t, srcAccountKey) + + for rnd := basics.Round(1); rnd < basics.Round(600); rnd++ { + blk.BlockHeader.Round++ + blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) + var tx transactions.Transaction + tx.Sender = sourceAccount + tx.Fee = basics.MicroAlgos{Raw: 10000} + tx.FirstValid = rnd - 1 + tx.LastValid = tx.FirstValid + 999 + tx.Receiver = destAccount + tx.Amount = basics.MicroAlgos{Raw: 1} + tx.Type = protocol.PaymentTx + signedTx := tx.Sign(srcAccountKey) + blk.Payset = transactions.Payset{transactions.SignedTxnInBlock{ + SignedTxnWithAD: transactions.SignedTxnWithAD{ + SignedTxn: signedTx, + }, + }} + require.NoError(t, l.AddBlock(blk, agreement.Certificate{})) 
+ l.WaitForCommit(rnd) + + // test most recent round + if rnd < basics.Round(500) { + data, err = realLedger.Lookup(rnd, destAccount) + require.NoError(t, err) + require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw) + data, err = l.Lookup(rnd, destAccount) + require.NoError(t, err) + require.Equal(t, baseDestValue+uint64(rnd), data.MicroAlgos.Raw) + + totals, err = realLedger.Totals(rnd) + require.NoError(t, err) + roundCirculation := totals.Online.Money.Raw + require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation) + + totals, err = l.Totals(rnd) + require.NoError(t, err) + roundCirculation = totals.Online.Money.Raw + require.Equal(t, baseCirculation-uint64(rnd)*(10001), roundCirculation) + } else if rnd < basics.Round(510) { + // test one round ago + data, err = realLedger.Lookup(rnd-1, destAccount) + require.NoError(t, err) + require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw) + data, err = l.Lookup(rnd-1, destAccount) + require.NoError(t, err) + require.Equal(t, baseDestValue+uint64(rnd)-1, data.MicroAlgos.Raw) + + totals, err = realLedger.Totals(rnd - 1) + require.NoError(t, err) + roundCirculation := totals.Online.Money.Raw + require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation) + + totals, err = l.Totals(rnd - 1) + require.NoError(t, err) + roundCirculation = totals.Online.Money.Raw + require.Equal(t, baseCirculation-uint64(rnd-1)*(10001), roundCirculation) + } else if rnd < basics.Round(520) { + // test one round in the future ( expected error ) + data, err = realLedger.Lookup(rnd+1, destAccount) + require.Error(t, err) + require.Equal(t, uint64(0), data.MicroAlgos.Raw) + data, err = l.Lookup(rnd+1, destAccount) + require.Error(t, err) + require.Equal(t, uint64(0), data.MicroAlgos.Raw) + + _, err = realLedger.Totals(rnd + 1) + require.Error(t, err) + + _, err = l.Totals(rnd + 1) + require.Error(t, err) + } else if rnd < basics.Round(520) { + // test expired round ( expected error ) + _, err = 
realLedger.Totals(rnd - 500) + require.Error(t, err) + + _, err = l.Totals(rnd - 500) + require.Error(t, err) + } + } + return +} + +func TestLedgerSeed(t *testing.T) { + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + log := logging.TestingLog(t) + log.SetLevel(logging.Warn) + realLedger, err := ledger.OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer realLedger.Close() + + l := Ledger{Ledger: realLedger} + require.NotNil(t, &l) + + blk := genesisInitState.Block + for rnd := basics.Round(1); rnd < basics.Round(32); rnd++ { + blk.BlockHeader.Round++ + blk.BlockHeader.Seed[0] = byte(uint64(rnd)) + blk.BlockHeader.Seed[1] = byte(uint64(rnd) / 256) + blk.BlockHeader.Seed[2] = byte(uint64(rnd) / 65536) + blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) + require.NoError(t, l.AddBlock(blk, agreement.Certificate{})) + l.WaitForCommit(rnd) + if rnd < basics.Round(16) { + // test the current round + expectedHdr, err := realLedger.BlockHdr(rnd) + require.NoError(t, err) + + // ensure the item is not in the cache + seed, cached := l.lastRoundSeed.Load().(roundSeed) + if cached { + require.NotEqual(t, seed.elements[1].seed, expectedHdr.Seed) + } + + actualSeed, err := l.Seed(rnd) + require.NoError(t, err) + + require.Equal(t, expectedHdr.Seed, actualSeed) + + seed, cached = l.lastRoundSeed.Load().(roundSeed) + require.True(t, cached) + require.Equal(t, seed.elements[1].seed, expectedHdr.Seed) + } else if rnd < basics.Round(32) { + // test against the previous round + expectedHdr, err := realLedger.BlockHdr(rnd - 1) + require.NoError(t, err) + + // ensure the cache is aligned with the previous round + seed, cached := l.lastRoundSeed.Load().(roundSeed) + require.True(t, cached) + require.Equal(t, seed.elements[1].round, rnd-1) + require.Equal(t, seed.elements[1].seed, 
expectedHdr.Seed) + + actualSeed, err := l.Seed(rnd) + require.NoError(t, err) + + expectedHdr, err = realLedger.BlockHdr(rnd) + require.NoError(t, err) + + require.Equal(t, expectedHdr.Seed, actualSeed) + + // ensure the cache is aligned with the updated round + seed, cached = l.lastRoundSeed.Load().(roundSeed) + require.True(t, cached) + require.Equal(t, seed.elements[1].round, rnd) + require.Equal(t, seed.elements[1].seed, expectedHdr.Seed) + } + } + return +} diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index 0b972d9b18..c3b6883171 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -84,14 +84,16 @@ type TransactionPool struct { // rememberedTxids. Calling rememberCommit() adds them to the // pendingTxGroups and pendingTxids. This allows us to batch the // changes in OnNewBlock() without preventing a concurrent call - // to Pending() or Verified(). + // to PendingTxGroups() or Verified(). rememberedTxGroups [][]transactions.SignedTxn rememberedVerifyParams [][]verify.Params rememberedTxids map[transactions.Txid]txPoolVerifyCacheVal + + log logging.Logger } // MakeTransactionPool makes a transaction pool. 
-func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPool { +func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Logger) *TransactionPool { if cfg.TxPoolExponentialIncreaseFactor < 1 { cfg.TxPoolExponentialIncreaseFactor = 1 } @@ -105,6 +107,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPo logAssembleStats: cfg.EnableAssembleStats, expFeeFactor: cfg.TxPoolExponentialIncreaseFactor, txPoolMaxSize: cfg.TxPoolSize, + log: log, } pool.cond.L = &pool.mu pool.assemblyCond.L = &pool.assemblyMu @@ -180,9 +183,9 @@ func (pool *TransactionPool) PendingTxIDs() []transactions.Txid { return ids } -// Pending returns a list of transaction groups that should be proposed +// PendingTxGroups returns a list of transaction groups that should be proposed // in the next block, in order. -func (pool *TransactionPool) Pending() [][]transactions.SignedTxn { +func (pool *TransactionPool) PendingTxGroups() [][]transactions.SignedTxn { pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() // note that this operation is safe for the sole reason that arrays in go are immutable. @@ -191,6 +194,15 @@ func (pool *TransactionPool) Pending() [][]transactions.SignedTxn { return pool.pendingTxGroups } +// pendingTxIDsCount returns the number of pending transaction ids that are still waiting +// in the transaction pool. This is identical to the number of transaction ids that would +// be retrieved by a call to PendingTxIDs() +func (pool *TransactionPool) pendingTxIDsCount() int { + pool.pendingMu.RLock() + defer pool.pendingMu.RUnlock() + return len(pool.pendingTxids) +} + // rememberCommit() saves the changes added by remember to // pendingTxGroups and pendingTxids. The caller is assumed to // be holding pool.mu. 
flush indicates whether previous @@ -234,13 +246,14 @@ func (pool *TransactionPool) pendingCountNoLock() int { return count } -// checkPendingQueueSize test to see if there is more room in the pending -// group transaction list. As long as we haven't surpassed the size limit, we -// should be good to go. -func (pool *TransactionPool) checkPendingQueueSize() error { - pendingSize := len(pool.Pending()) - if pendingSize >= pool.txPoolMaxSize { - return fmt.Errorf("TransactionPool.Test: transaction pool have reached capacity") +// checkPendingQueueSize tests to see if we can grow the pending group transaction list +// by adding txCount more transactions. The limits comes from the total number of transactions +// and not from the total number of transaction groups. +// As long as we haven't surpassed the size limit, we should be good to go. +func (pool *TransactionPool) checkPendingQueueSize(txCount int) error { + pendingSize := pool.pendingTxIDsCount() + if pendingSize+txCount > pool.txPoolMaxSize { + return fmt.Errorf("TransactionPool.checkPendingQueueSize: transaction pool have reached capacity") } return nil } @@ -293,6 +306,16 @@ func (pool *TransactionPool) computeFeePerByte() uint64 { // checkSufficientFee take a set of signed transactions and verifies that each transaction has // sufficient fee to get into the transaction pool func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error { + // Special case: the compact cert transaction, if issued from the + // special compact-cert-sender address, in a singleton group, pays + // no fee. 
+ if len(txgroup) == 1 { + t := txgroup[0].Txn + if t.Type == protocol.CompactCertTx && t.Sender == transactions.CompactCertSender && t.Fee.IsZero() { + return nil + } + } + // get the current fee per byte feePerByte := pool.computeFeePerByte() @@ -310,7 +333,7 @@ func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn // Test performs basic duplicate detection and well-formedness checks // on a transaction group without storing the group. func (pool *TransactionPool) Test(txgroup []transactions.SignedTxn) error { - if err := pool.checkPendingQueueSize(); err != nil { + if err := pool.checkPendingQueueSize(len(txgroup)); err != nil { return err } @@ -399,7 +422,7 @@ func (pool *TransactionPool) RememberOne(t transactions.SignedTxn, verifyParams // Remember stores the provided transaction group. // Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed()) func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error { - if err := pool.checkPendingQueueSize(); err != nil { + if err := pool.checkPendingQueueSize(len(txgroup)); err != nil { return err } @@ -527,7 +550,7 @@ func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledger.St Round uint64 } details.Round = uint64(block.Round()) - logging.Base().Metrics(telemetryspec.Transaction, stats, details) + pool.log.Metrics(telemetryspec.Transaction, stats, details) } } @@ -594,7 +617,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact latest := pool.ledger.Latest() prev, err := pool.ledger.BlockHdr(latest) if err != nil { - logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot get prev header for %d: %v", + pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: cannot get prev header for %d: %v", latest, err) return } @@ -602,7 +625,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact // Process upgrade 
to see if we support the next protocol version _, upgradeState, err := bookkeeping.ProcessUpgradeParams(prev) if err != nil { - logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: error processing upgrade params for next round: %v", err) + pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: error processing upgrade params for next round: %v", err) return } @@ -610,7 +633,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact // if we don't, and we would rather stall locally than panic) _, ok := config.Consensus[upgradeState.CurrentProtocol] if !ok { - logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: next protocol version %v is not supported", upgradeState.CurrentProtocol) + pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: next protocol version %v is not supported", upgradeState.CurrentProtocol) return } @@ -631,7 +654,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact pool.numPendingWholeBlocks = 0 pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, pendingCount) if err != nil { - logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err) + pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err) return } @@ -665,11 +688,11 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact case transactions.MinFeeError: asmStats.InvalidCount++ stats.RemovedInvalidCount++ - logging.Base().Infof("Cannot re-add pending transaction to pool: %v", err) + pool.log.Infof("Cannot re-add pending transaction to pool: %v", err) default: asmStats.InvalidCount++ stats.RemovedInvalidCount++ - logging.Base().Warnf("Cannot re-add pending transaction to pool: %v", err) + pool.log.Warnf("Cannot re-add pending transaction to pool: %v", err) } } } @@ -746,7 +769,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim Round uint64 } 
details.Round = uint64(round) - logging.Base().Metrics(telemetryspec.Transaction, stats, details) + pool.log.Metrics(telemetryspec.Transaction, stats, details) }() } @@ -754,7 +777,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // if the transaction pool is more than two rounds behind, we don't want to wait. if pool.assemblyResults.roundStartedEvaluating <= round.SubSaturate(2) { - logging.Base().Infof("AssembleBlock: requested round is more than a single round ahead of the transaction pool %d <= %d-2", pool.assemblyResults.roundStartedEvaluating, round) + pool.log.Infof("AssembleBlock: requested round is more than a single round ahead of the transaction pool %d <= %d-2", pool.assemblyResults.roundStartedEvaluating, round) stats.StopReason = telemetryspec.AssembleBlockEmpty pool.assemblyMu.Unlock() return pool.assembleEmptyBlock(round) @@ -767,7 +790,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // that the agreement is far behind us, so we're going to return here with error code to let // the agreement know about it. // since the network is already ahead of us, there is no issue here in not generating a block ( since the block would get discarded anyway ) - logging.Base().Infof("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating) + pool.log.Infof("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating) return nil, ErrStaleBlockAssemblyRequest } @@ -789,7 +812,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // this case is expected to happen only if the transaction pool was able to construct *two* rounds during the time we were trying to assemble the empty block. // while this is extreamly unlikely, we need to handle this. 
the handling it quite straight-forward : // since the network is already ahead of us, there is no issue here in not generating a block ( since the block would get discarded anyway ) - logging.Base().Infof("AssembleBlock: requested round is behind transaction pool round after timing out %d < %d", round, pool.assemblyResults.roundStartedEvaluating) + pool.log.Infof("AssembleBlock: requested round is behind transaction pool round after timing out %d < %d", round, pool.assemblyResults.roundStartedEvaluating) return nil, ErrStaleBlockAssemblyRequest } @@ -801,7 +824,7 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // check to see if the extra time helped us to get a block. if !pool.assemblyResults.ok { // it didn't. Lucky us - we already prepared an empty block, so we can return this right now. - logging.Base().Warnf("AssembleBlock: ran out of time for round %d", round) + pool.log.Warnf("AssembleBlock: ran out of time for round %d", round) stats.StopReason = telemetryspec.AssembleBlockTimeout if emptyBlockErr != nil { emptyBlockErr = fmt.Errorf("AssembleBlock: failed to construct empty block : %v", emptyBlockErr) @@ -818,10 +841,14 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim // this scenario should not happen unless the txpool is receiving the new blocks via OnNewBlocks // with "jumps" between consecutive blocks ( which is why it's a warning ) // The "normal" usecase is evaluated on the top of the function. 
- logging.Base().Warnf("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating) + pool.log.Warnf("AssembleBlock: requested round is behind transaction pool round %d < %d", round, pool.assemblyResults.roundStartedEvaluating) return nil, ErrStaleBlockAssemblyRequest - } else if pool.assemblyResults.roundStartedEvaluating != round { - return nil, fmt.Errorf("AssembleBlock: assembled block round does not match: %d != %d", + } else if pool.assemblyResults.roundStartedEvaluating == round.SubSaturate(1) { + pool.log.Warnf("AssembleBlock: assembled block round did not catch up to requested round: %d != %d", pool.assemblyResults.roundStartedEvaluating, round) + stats.StopReason = telemetryspec.AssembleBlockTimeout + return pool.assembleEmptyBlock(round) + } else if pool.assemblyResults.roundStartedEvaluating < round { + return nil, fmt.Errorf("AssembleBlock: assembled block round much behind requested round: %d != %d", pool.assemblyResults.roundStartedEvaluating, round) } diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go index 1116841eb2..a5eebdd7ac 100644 --- a/data/pools/transactionPool_test.go +++ b/data/pools/transactionPool_test.go @@ -150,7 +150,7 @@ func TestMinBalanceOK(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min tx := transactions.Transaction{ @@ -191,7 +191,7 @@ func TestSenderGoesBelowMinBalance(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min tx := transactions.Transaction{ @@ -233,7 +233,7 @@ func 
TestSenderGoesBelowMinBalanceDueToAssets(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) assetTx := transactions.Transaction{ Type: protocol.AssetConfigTx, @@ -302,7 +302,7 @@ func TestCloseAccount(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min closeTx := transactions.Transaction{ @@ -363,7 +363,7 @@ func TestCloseAccountWhileTxIsPending(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min tx := transactions.Transaction{ @@ -425,7 +425,7 @@ func TestClosingAccountBelowMinBalance(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min closeTx := transactions.Transaction{ @@ -467,7 +467,7 @@ func TestRecipientGoesBelowMinBalance(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min tx := transactions.Transaction{ @@ -506,7 +506,7 @@ func TestRememberForget(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(mockLedger, cfg) + transactionPool := 
MakeTransactionPool(mockLedger, cfg, logging.Base()) eval := newBlockEvaluator(t, mockLedger) @@ -538,7 +538,7 @@ func TestRememberForget(t *testing.T) { } } - pending := transactionPool.Pending() + pending := transactionPool.PendingTxGroups() numberOfTxns := numOfAccounts*numOfAccounts - numOfAccounts require.Len(t, pending, numberOfTxns) @@ -549,7 +549,7 @@ func TestRememberForget(t *testing.T) { require.NoError(t, err) transactionPool.OnNewBlock(blk.Block(), ledger.StateDelta{}) - pending = transactionPool.Pending() + pending = transactionPool.PendingTxGroups() require.Len(t, pending, 0) } @@ -571,7 +571,7 @@ func TestCleanUp(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(mockLedger, cfg) + transactionPool := MakeTransactionPool(mockLedger, cfg, logging.Base()) issuedTransactions := 0 for i, sender := range addresses { @@ -612,7 +612,7 @@ func TestCleanUp(t *testing.T) { transactionPool.OnNewBlock(blk.Block(), ledger.StateDelta{}) } - pending := transactionPool.Pending() + pending := transactionPool.PendingTxGroups() require.Zero(t, len(pending)) require.Zero(t, transactionPool.NumExpired(4)) require.Equal(t, issuedTransactions, transactionPool.NumExpired(5)) @@ -648,7 +648,7 @@ func TestFixOverflowOnNewBlock(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(mockLedger, cfg) + transactionPool := MakeTransactionPool(mockLedger, cfg, logging.Base()) overSpender := addresses[0] var overSpenderAmount uint64 @@ -684,7 +684,7 @@ func TestFixOverflowOnNewBlock(t *testing.T) { } } } - pending := transactionPool.Pending() + pending := transactionPool.PendingTxGroups() require.Len(t, pending, savedTransactions) secret := keypair() @@ -720,7 +720,7 @@ func TestFixOverflowOnNewBlock(t *testing.T) { transactionPool.OnNewBlock(block.Block(), ledger.StateDelta{}) - 
pending = transactionPool.Pending() + pending = transactionPool.PendingTxGroups() // only one transaction is missing require.Len(t, pending, savedTransactions-1) } @@ -743,7 +743,7 @@ func TestOverspender(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) receiver := addresses[1] tx := transactions.Transaction{ @@ -803,7 +803,7 @@ func TestRemove(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) sender := addresses[0] receiver := addresses[1] @@ -824,7 +824,7 @@ func TestRemove(t *testing.T) { } signedTx := tx.Sign(secrets[0]) require.NoError(t, transactionPool.RememberOne(signedTx, verify.Params{})) - require.Equal(t, transactionPool.Pending(), [][]transactions.SignedTxn{[]transactions.SignedTxn{signedTx}}) + require.Equal(t, transactionPool.PendingTxGroups(), [][]transactions.SignedTxn{[]transactions.SignedTxn{signedTx}}) } func TestLogicSigOK(t *testing.T) { @@ -858,7 +858,7 @@ func TestLogicSigOK(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) // sender goes below min tx := transactions.Transaction{ @@ -902,7 +902,7 @@ func TestTransactionPool_CurrentFeePerByte(t *testing.T) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = testPoolSize * 15 cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(l, cfg) + transactionPool := MakeTransactionPool(l, cfg, logging.Base()) for i, sender := range addresses { for j := 0; j < testPoolSize*15/len(addresses); j++ { @@ -953,7 +953,7 @@ func 
BenchmarkTransactionPoolRememberOne(b *testing.B) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = b.N cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) signedTransactions := make([]transactions.SignedTxn, 0, b.N) for i, sender := range addresses { for j := 0; j < b.N/len(addresses); j++ { @@ -985,7 +985,7 @@ func BenchmarkTransactionPoolRememberOne(b *testing.B) { b.StopTimer() b.ResetTimer() ledger = makeMockLedger(b, initAccFixed(addresses, 1<<32)) - transactionPool = MakeTransactionPool(ledger, cfg) + transactionPool = MakeTransactionPool(ledger, cfg, logging.Base()) b.StartTimer() for _, signedTx := range signedTransactions { @@ -1014,7 +1014,7 @@ func BenchmarkTransactionPoolPending(b *testing.B) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = benchPoolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(ledger, cfg) + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) var block bookkeeping.Block block.Payset = make(transactions.Payset, 0) @@ -1047,12 +1047,12 @@ func BenchmarkTransactionPoolPending(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - transactionPool.Pending() + transactionPool.PendingTxGroups() } } subs := []int{1000, 5000, 10000, 25000, 50000} for _, bps := range subs { - b.Run(fmt.Sprintf("Pending-%d", bps), func(b *testing.B) { + b.Run(fmt.Sprintf("PendingTxGroups-%d", bps), func(b *testing.B) { sub(b, bps) }) } @@ -1079,7 +1079,7 @@ func BenchmarkTransactionPoolSteadyState(b *testing.B) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = poolSize cfg.EnableProcessBlockStats = false - transactionPool := MakeTransactionPool(l, cfg) + transactionPool := MakeTransactionPool(l, cfg, logging.Base()) var signedTransactions []transactions.SignedTxn for i := 0; i < b.N; i++ { @@ -1153,3 +1153,89 @@ func BenchmarkTransactionPoolSteadyState(b *testing.B) { 
fmt.Printf("BenchmarkTransactionPoolSteadyState: committed block %d\n", blk.Block().Round()) } } + +func TestTxPoolSizeLimits(t *testing.T) { + numOfAccounts := 2 + // Generate accounts + secrets := make([]*crypto.SignatureSecrets, numOfAccounts) + addresses := make([]basics.Address, numOfAccounts) + + for i := 0; i < numOfAccounts; i++ { + secret := keypair() + addr := basics.Address(secret.SignatureVerifier) + secrets[i] = secret + addresses[i] = addr + } + + firstAddress := addresses[0] + cfg := config.GetDefaultLocal() + cfg.TxPoolSize = testPoolSize + cfg.EnableProcessBlockStats = false + + ledger := makeMockLedger(t, initAcc(map[basics.Address]uint64{firstAddress: proto.MinBalance + 2*proto.MinTxnFee*uint64(cfg.TxPoolSize)})) + + transactionPool := MakeTransactionPool(ledger, cfg, logging.Base()) + + receiver := addresses[1] + + uniqueTxID := 0 + // almost fill the transaction pool, leaving room for one additional transaction group of the biggest size. + for i := 0; i <= cfg.TxPoolSize-config.Consensus[protocol.ConsensusCurrentVersion].MaxTxGroupSize; i++ { + tx := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: firstAddress, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee + 1}, + FirstValid: 0, + LastValid: 10, + Note: []byte{byte(uniqueTxID), byte(uniqueTxID >> 8), byte(uniqueTxID >> 16)}, + GenesisHash: ledger.GenesisHash(), + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: receiver, + Amount: basics.MicroAlgos{Raw: 0}, + }, + } + signedTx := tx.Sign(secrets[0]) + + // consume the transaction of allowed limit + require.NoError(t, transactionPool.RememberOne(signedTx, verify.Params{})) + uniqueTxID++ + } + + for groupSize := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxGroupSize; groupSize > 0; groupSize-- { + var txgroup []transactions.SignedTxn + var verifyParams []verify.Params + // fill the transaction group with groupSize transactions. 
+ for i := 0; i < groupSize; i++ { + tx := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: firstAddress, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee + 1}, + FirstValid: 0, + LastValid: 10, + Note: []byte{byte(uniqueTxID), byte(uniqueTxID >> 8), byte(uniqueTxID >> 16)}, + GenesisHash: ledger.GenesisHash(), + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: receiver, + Amount: basics.MicroAlgos{Raw: 0}, + }, + } + signedTx := tx.Sign(secrets[0]) + txgroup = append(txgroup, signedTx) + verifyParams = append(verifyParams, verify.Params{}) + uniqueTxID++ + } + + // ensure that we would fail adding this. + require.Error(t, transactionPool.Remember(txgroup, verifyParams)) + + if groupSize > 1 { + // add a single transaction and ensure we succeed + // consume the transaction of allowed limit + require.NoError(t, transactionPool.RememberOne(txgroup[0], verifyParams[0])) + } + } +} diff --git a/data/transactions/compactcert.go b/data/transactions/compactcert.go new file mode 100644 index 0000000000..f17a20d314 --- /dev/null +++ b/data/transactions/compactcert.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package transactions + +import ( + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/compactcert" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" +) + +// CompactCertTxnFields captures the fields used for compact cert transactions. +type CompactCertTxnFields struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + CertRound basics.Round `codec:"certrnd"` + Cert compactcert.Cert `codec:"cert"` +} + +// Empty returns whether the CompactCertTxnFields are all zero, +// in the sense of being omitted in a msgpack encoding. +func (cc CompactCertTxnFields) Empty() bool { + if cc.CertRound != 0 { + return false + } + if !cc.Cert.SigCommit.IsZero() || cc.Cert.SignedWeight != 0 { + return false + } + if len(cc.Cert.SigProofs) != 0 || len(cc.Cert.PartProofs) != 0 { + return false + } + if len(cc.Cert.Reveals) != 0 { + return false + } + return true +} + +//msgp:ignore specialAddr +// specialAddr is used to form a unique address that will send out compact certs. +type specialAddr string + +// ToBeHashed implements the crypto.Hashable interface +func (a specialAddr) ToBeHashed() (protocol.HashID, []byte) { + return protocol.SpecialAddr, []byte(a) +} + +// CompactCertSender is the computed address for sending out compact certs. 
+var CompactCertSender basics.Address + +func init() { + CompactCertSender = basics.Address(crypto.HashObj(specialAddr("CompactCertSender"))) +} diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 42290cbb89..1251efc345 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -1322,6 +1322,7 @@ func disDefault(dis *disassembleState, spec *OpSpec) { _, dis.err = fmt.Fprintf(dis.out, "%s\n", spec.Name) } +var errShortIntcblock = errors.New("intcblock ran past end of program") var errTooManyIntc = errors.New("intcblock with too many items") func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err error) { @@ -1339,7 +1340,7 @@ func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err erro intc = make([]uint64, numInts) for i := uint64(0); i < numInts; i++ { if pos >= len(program) { - err = fmt.Errorf("bytecblock ran past end of program") + err = errShortIntcblock return } intc[i], bytesUsed = binary.Uvarint(program[pos:]) @@ -1368,7 +1369,7 @@ func checkIntConstBlock(cx *evalContext) int { //intc = make([]uint64, numInts) for i := uint64(0); i < numInts; i++ { if pos >= len(cx.program) { - cx.err = fmt.Errorf("bytecblock ran past end of program") + cx.err = errShortIntcblock return 0 } _, bytesUsed = binary.Uvarint(cx.program[pos:]) diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index e44cb372dc..789ccf2cf6 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -1587,3 +1587,16 @@ func TestAssembleConstants(t *testing.T) { }) } } + +func TestErrShortBytecblock(t *testing.T) { + text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8` + program, err := AssembleStringWithVersion(text, 1) + require.NoError(t, err) + _, _, err = parseIntcblock(program, 0) + require.Equal(t, err, errShortIntcblock) + + var 
cx evalContext + cx.program = program + checkIntConstBlock(&cx) + require.Equal(t, cx.err, errShortIntcblock) +} diff --git a/data/transactions/logic/debugger.go b/data/transactions/logic/debugger.go index 5e72754542..5e51c8a3a6 100644 --- a/data/transactions/logic/debugger.go +++ b/data/transactions/logic/debugger.go @@ -85,6 +85,13 @@ type AppStateChange struct { LocalStateChanges map[basics.Address]basics.StateDelta `codec:"lsch"` } +// GetProgramID returns program or execution ID that is string representation of sha256 checksum. +// It is used later to link program on the user-facing side of the debugger with TEAL evaluator. +func GetProgramID(program []byte) string { + hash := sha256.Sum256([]byte(program)) + return hex.EncodeToString(hash[:]) +} + func makeDebugState(cx *evalContext) DebugState { disasm, dsInfo, err := disassembleInstrumented(cx.program) if err != nil { @@ -92,10 +99,9 @@ func makeDebugState(cx *evalContext) DebugState { disasm = err.Error() } - hash := sha256.Sum256(cx.program) // initialize DebuggerState with immutable fields ds := DebugState{ - ExecID: hex.EncodeToString(hash[:]), + ExecID: GetProgramID(cx.program), Disassembly: disasm, PCOffset: dsInfo.pcOffset, GroupIndex: cx.GroupIndex, @@ -185,6 +191,7 @@ func stackValueToTealValue(sv *stackValue) basics.TealValue { } } +// valueDeltaToValueDelta converts delta's bytes to base64 in a new struct func valueDeltaToValueDelta(vd *basics.ValueDelta) basics.ValueDelta { return basics.ValueDelta{ Action: vd.Action, diff --git a/data/transactions/logic/debugger_test.go b/data/transactions/logic/debugger_test.go index 8a2fba90e6..eb516b1fc7 100644 --- a/data/transactions/logic/debugger_test.go +++ b/data/transactions/logic/debugger_test.go @@ -17,9 +17,11 @@ package logic import ( + "encoding/base64" "os" "testing" + "github.com/algorand/go-algorand/data/basics" "github.com/stretchr/testify/require" ) @@ -125,3 +127,51 @@ func TestDebuggerHook(t *testing.T) { require.Greater(t, 
testDbg.update, 1) require.Equal(t, 1, len(testDbg.state.Stack)) } + +func TestLineToPC(t *testing.T) { + dState := DebugState{ + Disassembly: "abc\ndef\nghi", + PCOffset: []PCOffset{{PC: 1, Offset: 4}, {PC: 2, Offset: 8}, {PC: 3, Offset: 12}}, + } + pc := dState.LineToPC(0) + require.Equal(t, 0, pc) + + pc = dState.LineToPC(1) + require.Equal(t, 1, pc) + + pc = dState.LineToPC(2) + require.Equal(t, 2, pc) + + pc = dState.LineToPC(3) + require.Equal(t, 3, pc) + + pc = dState.LineToPC(4) + require.Equal(t, 0, pc) + + pc = dState.LineToPC(-1) + require.Equal(t, 0, pc) + + pc = dState.LineToPC(0x7fffffff) + require.Equal(t, 0, pc) + + dState.PCOffset = []PCOffset{} + pc = dState.LineToPC(1) + require.Equal(t, 0, pc) + + dState.PCOffset = []PCOffset{{PC: 1, Offset: 0}} + pc = dState.LineToPC(1) + require.Equal(t, 0, pc) +} + +func TestValueDeltaToValueDelta(t *testing.T) { + vDelta := basics.ValueDelta{ + Action: basics.SetUintAction, + Bytes: "some string", + Uint: uint64(0xffffffff), + } + ans := valueDeltaToValueDelta(&vDelta) + require.Equal(t, vDelta.Action, ans.Action) + require.NotEqual(t, vDelta.Bytes, ans.Bytes) + require.Equal(t, base64.StdEncoding.EncodeToString([]byte(vDelta.Bytes)), ans.Bytes) + require.Equal(t, vDelta.Uint, ans.Uint) +} diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index fee178a6d5..159e25c6df 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -1249,9 +1249,9 @@ func (cx *evalContext) assetParamsEnumToValue(params *basics.AssetParams, field } // TxnFieldToTealValue is a thin wrapper for txnFieldToStack for external use -func TxnFieldToTealValue(txn *transactions.Transaction, groupIndex int, field TxnField) (basics.TealValue, error) { +func TxnFieldToTealValue(txn *transactions.Transaction, groupIndex int, field TxnField, arrayFieldIdx uint64) (basics.TealValue, error) { cx := evalContext{EvalParams: EvalParams{GroupIndex: groupIndex}} - sv, err := cx.txnFieldToStack(txn, 
field, 0, groupIndex) + sv, err := cx.txnFieldToStack(txn, field, arrayFieldIdx, groupIndex) return sv.toTealValue(), err } diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 4b5df059cf..80d9ee6431 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -110,6 +110,72 @@ func TestEmptyProgram(t *testing.T) { require.False(t, pass) } +// TestMinTealVersionParamEval tests eval/check reading the MinTealVersion from the param +func TestMinTealVersionParamEvalCheck(t *testing.T) { + t.Parallel() + params := defaultEvalParams(nil, nil) + version2 := uint64(rekeyingEnabledVersion) + params.MinTealVersion = &version2 + program := make([]byte, binary.MaxVarintLen64) + // set the teal program version to 1 + binary.PutUvarint(program, 1) + + _, err := Check(program, params) + require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion)) + + // If the param is read correctly, the eval should fail + pass, err := Eval(program, params) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion)) + require.False(t, pass) +} + +func TestTxnFieldToTealValue(t *testing.T) { + + txn := transactions.Transaction{} + groupIndex := 0 + field := FirstValid + values := [6]uint64{0, 1, 2, 0xffffffff, 0xffffffffffffffff} + + for _, value := range values { + txn.FirstValid = basics.Round(value) + tealValue, err := TxnFieldToTealValue(&txn, groupIndex, field, 0) + require.NoError(t, err) + require.Equal(t, basics.TealUintType, tealValue.Type) + require.Equal(t, value, tealValue.Uint) + } + + // check arrayFieldIdx is ignored for non-arrays + field = FirstValid + value := uint64(1) + txn.FirstValid = basics.Round(value) + tealValue, err := TxnFieldToTealValue(&txn, groupIndex, field, 10) + require.NoError(t, err) + require.Equal(t, basics.TealUintType, tealValue.Type) + require.Equal(t, value, tealValue.Uint) + + 
// check arrayFieldIdx is taken into account for arrays + field = Accounts + sender := basics.Address{} + addr, _ := basics.UnmarshalChecksumAddress("DFPKC2SJP3OTFVJFMCD356YB7BOT4SJZTGWLIPPFEWL3ZABUFLTOY6ILYE") + txn.Accounts = []basics.Address{addr} + tealValue, err = TxnFieldToTealValue(&txn, groupIndex, field, 0) + require.NoError(t, err) + require.Equal(t, basics.TealBytesType, tealValue.Type) + require.Equal(t, string(sender[:]), tealValue.Bytes) + + tealValue, err = TxnFieldToTealValue(&txn, groupIndex, field, 1) + require.NoError(t, err) + require.Equal(t, basics.TealBytesType, tealValue.Type) + require.Equal(t, string(addr[:]), tealValue.Bytes) + + tealValue, err = TxnFieldToTealValue(&txn, groupIndex, field, 100) + require.Error(t, err) + require.Equal(t, basics.TealUintType, tealValue.Type) + require.Equal(t, uint64(0), tealValue.Uint) + require.Equal(t, "", tealValue.Bytes) +} + func TestWrongProtoVersion(t *testing.T) { t.Parallel() for v := uint64(1); v <= AssemblerMaxVersion; v++ { diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 83e57ce4a1..6f41c0089d 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -174,8 +174,6 @@ func (a sortByOpcode) Less(i, j int) bool { return a[i].Opcode < a[j].Opcode } // OpcodesByVersion returns list of opcodes available in a specific version of TEAL // by copying v1 opcodes to v2 to create a full list. -// This function must be used for documentation only because it modifies opcode versions -// to the first introduced for the opcodes updated in later versions. 
func OpcodesByVersion(version uint64) []OpSpec { // for updated opcodes use the lowest version opcode was introduced in maxOpcode := 0 diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go index 7364cd7197..f2031142b9 100644 --- a/data/transactions/logic/opcodes_test.go +++ b/data/transactions/logic/opcodes_test.go @@ -32,8 +32,57 @@ func TestOpSpecs(t *testing.T) { } } +func (os *OpSpec) equals(oso *OpSpec) bool { + if os.Opcode != oso.Opcode { + return false + } + if os.Name != oso.Name { + return false + } + if !reflect.DeepEqual(os.Args, oso.Args) { + return false + } + if !reflect.DeepEqual(os.Returns, oso.Returns) { + return false + } + if os.Version != oso.Version { + return false + } + if os.Modes != oso.Modes { + return false + } + + return true +} + +func TestOpcodesByVersionReordered(t *testing.T) { + + // Make a copy to restore to the original + OpSpecsOrig := make([]OpSpec, len(OpSpecs)) + for idx, opspec := range OpSpecs { + cp := opspec + OpSpecsOrig[idx] = cp + } + defer func() { + OpSpecs = OpSpecsOrig + }() + + // To test the case where a newer version opcode is before an older version + // Change the order of opcode 0x01 so that version 2 comes before version 1 + tmp := OpSpecs[1] + OpSpecs[1] = OpSpecs[4] + OpSpecs[4] = tmp + + t.Run("TestOpcodesByVersion", TestOpcodesByVersion) +} + func TestOpcodesByVersion(t *testing.T) { - t.Parallel() + // Make a copy of the OpSpecs to check if OpcodesByVersion will change it + OpSpecs2 := make([]OpSpec, len(OpSpecs)) + for idx, opspec := range OpSpecs { + cp := opspec + OpSpecs2[idx] = cp + } opSpecs := make([][]OpSpec, 2) for v := uint64(1); v <= LogicVersion; v++ { @@ -59,6 +108,10 @@ func TestOpcodesByVersion(t *testing.T) { }) } require.Greater(t, len(opSpecs[1]), len(opSpecs[0])) + + for idx, opspec := range OpSpecs { + require.True(t, opspec.equals(&OpSpecs2[idx])) + } } func TestOpcodesVersioningV2(t *testing.T) { diff --git a/data/transactions/msgp_gen.go 
b/data/transactions/msgp_gen.go index beeeaa6008..77cc22765c 100644 --- a/data/transactions/msgp_gen.go +++ b/data/transactions/msgp_gen.go @@ -50,6 +50,14 @@ import ( // |-----> (*) Msgsize // |-----> (*) MsgIsZero // +// CompactCertTxnFields +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// // Header // |-----> (*) MarshalMsg // |-----> (*) CanMarshalMsg @@ -1481,6 +1489,143 @@ func (z *AssetTransferTxnFields) MsgIsZero() bool { return ((*z).XferAsset.MsgIsZero()) && ((*z).AssetAmount == 0) && ((*z).AssetSender.MsgIsZero()) && ((*z).AssetReceiver.MsgIsZero()) && ((*z).AssetCloseTo.MsgIsZero()) } +// MarshalMsg implements msgp.Marshaler +func (z *CompactCertTxnFields) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).Cert.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).CertRound.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "cert" + o = append(o, 0xa4, 0x63, 0x65, 0x72, 0x74) + o, err = (*z).Cert.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Cert") + return + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "certrnd" + o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64) + o, err = (*z).CertRound.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CertRound") + return + } + } + } + return +} + +func (_ *CompactCertTxnFields) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*CompactCertTxnFields) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *CompactCertTxnFields) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 
int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).CertRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CertRound") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Cert.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Cert") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = CompactCertTxnFields{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "certrnd": + bts, err = (*z).CertRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CertRound") + return + } + case "cert": + bts, err = (*z).Cert.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Cert") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (_ *CompactCertTxnFields) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*CompactCertTxnFields) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *CompactCertTxnFields) Msgsize() (s int) { + s = 1 + 8 + (*z).CertRound.Msgsize() + 5 + (*z).Cert.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *CompactCertTxnFields) MsgIsZero() bool { + return ((*z).CertRound.MsgIsZero()) && ((*z).Cert.MsgIsZero()) +} + // MarshalMsg implements msgp.Marshaler func (z *Header) 
MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) @@ -3661,177 +3806,185 @@ func (z *SignedTxnWithAD) MsgIsZero() bool { func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0006Len := uint32(40) - var zb0006Mask uint64 /* 48 bits */ + zb0006Len := uint32(42) + var zb0006Mask uint64 /* 51 bits */ if (*z).AssetTransferTxnFields.AssetAmount == 0 { zb0006Len-- - zb0006Mask |= 0x100 + zb0006Mask |= 0x200 } if (*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x200 + zb0006Mask |= 0x400 } if (*z).AssetFreezeTxnFields.AssetFrozen == false { zb0006Len-- - zb0006Mask |= 0x400 + zb0006Mask |= 0x800 } if (*z).PaymentTxnFields.Amount.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x800 + zb0006Mask |= 0x1000 } if len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0 { zb0006Len-- - zb0006Mask |= 0x1000 + zb0006Mask |= 0x2000 } if (*z).ApplicationCallTxnFields.OnCompletion == 0 { zb0006Len-- - zb0006Mask |= 0x2000 + zb0006Mask |= 0x4000 } if len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0 { zb0006Len-- - zb0006Mask |= 0x4000 + zb0006Mask |= 0x8000 } if (*z).AssetConfigTxnFields.AssetParams.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x8000 + zb0006Mask |= 0x10000 } if len((*z).ApplicationCallTxnFields.ForeignAssets) == 0 { zb0006Len-- - zb0006Mask |= 0x10000 + zb0006Mask |= 0x20000 } if len((*z).ApplicationCallTxnFields.Accounts) == 0 { zb0006Len-- - zb0006Mask |= 0x20000 + zb0006Mask |= 0x40000 } if len((*z).ApplicationCallTxnFields.ForeignApps) == 0 { zb0006Len-- - zb0006Mask |= 0x40000 + zb0006Mask |= 0x80000 } if (*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x80000 + zb0006Mask |= 0x100000 } if (*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x100000 + zb0006Mask |= 0x200000 } if (*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero() { 
zb0006Len-- - zb0006Mask |= 0x200000 + zb0006Mask |= 0x400000 } if len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0 { zb0006Len-- - zb0006Mask |= 0x400000 + zb0006Mask |= 0x800000 } if (*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x800000 + zb0006Mask |= 0x1000000 } if (*z).AssetTransferTxnFields.AssetSender.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x1000000 + zb0006Mask |= 0x2000000 } if (*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x2000000 + zb0006Mask |= 0x4000000 + } + if (*z).CompactCertTxnFields.Cert.MsgIsZero() { + zb0006Len-- + zb0006Mask |= 0x8000000 + } + if (*z).CompactCertTxnFields.CertRound.MsgIsZero() { + zb0006Len-- + zb0006Mask |= 0x10000000 } if (*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x4000000 + zb0006Mask |= 0x20000000 } if (*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x8000000 + zb0006Mask |= 0x40000000 } if (*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x10000000 + zb0006Mask |= 0x80000000 } if (*z).Header.Fee.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x20000000 + zb0006Mask |= 0x100000000 } if (*z).Header.FirstValid.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x40000000 + zb0006Mask |= 0x200000000 } if (*z).Header.GenesisID == "" { zb0006Len-- - zb0006Mask |= 0x80000000 + zb0006Mask |= 0x400000000 } if (*z).Header.GenesisHash.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x100000000 + zb0006Mask |= 0x800000000 } if (*z).Header.Group.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x200000000 + zb0006Mask |= 0x1000000000 } if (*z).Header.LastValid.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x400000000 + zb0006Mask |= 0x2000000000 } if (*z).Header.Lease == ([32]byte{}) { zb0006Len-- - zb0006Mask |= 0x800000000 + zb0006Mask |= 0x4000000000 } if (*z).KeyregTxnFields.Nonparticipation == false { zb0006Len-- - zb0006Mask |= 0x1000000000 + zb0006Mask |= 0x8000000000 } if 
len((*z).Header.Note) == 0 { zb0006Len-- - zb0006Mask |= 0x2000000000 + zb0006Mask |= 0x10000000000 } if (*z).PaymentTxnFields.Receiver.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x4000000000 + zb0006Mask |= 0x20000000000 } if (*z).Header.RekeyTo.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x8000000000 + zb0006Mask |= 0x40000000000 } if (*z).KeyregTxnFields.SelectionPK.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x10000000000 + zb0006Mask |= 0x80000000000 } if (*z).Header.Sender.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x20000000000 + zb0006Mask |= 0x100000000000 } if (*z).Type.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x40000000000 + zb0006Mask |= 0x200000000000 } if (*z).KeyregTxnFields.VoteFirst.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x80000000000 + zb0006Mask |= 0x400000000000 } if (*z).KeyregTxnFields.VoteKeyDilution == 0 { zb0006Len-- - zb0006Mask |= 0x100000000000 + zb0006Mask |= 0x800000000000 } if (*z).KeyregTxnFields.VotePK.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x200000000000 + zb0006Mask |= 0x1000000000000 } if (*z).KeyregTxnFields.VoteLast.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x400000000000 + zb0006Mask |= 0x2000000000000 } if (*z).AssetTransferTxnFields.XferAsset.MsgIsZero() { zb0006Len-- - zb0006Mask |= 0x800000000000 + zb0006Mask |= 0x4000000000000 } // variable map header, size zb0006Len o = msgp.AppendMapHeader(o, zb0006Len) if zb0006Len != 0 { - if (zb0006Mask & 0x100) == 0 { // if not empty + if (zb0006Mask & 0x200) == 0 { // if not empty // string "aamt" o = append(o, 0xa4, 0x61, 0x61, 0x6d, 0x74) o = msgp.AppendUint64(o, (*z).AssetTransferTxnFields.AssetAmount) } - if (zb0006Mask & 0x200) == 0 { // if not empty + if (zb0006Mask & 0x400) == 0 { // if not empty // string "aclose" o = append(o, 0xa6, 0x61, 0x63, 0x6c, 0x6f, 0x73, 0x65) o, err = (*z).AssetTransferTxnFields.AssetCloseTo.MarshalMsg(o) @@ -3840,12 +3993,12 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x400) == 0 { // if 
not empty + if (zb0006Mask & 0x800) == 0 { // if not empty // string "afrz" o = append(o, 0xa4, 0x61, 0x66, 0x72, 0x7a) o = msgp.AppendBool(o, (*z).AssetFreezeTxnFields.AssetFrozen) } - if (zb0006Mask & 0x800) == 0 { // if not empty + if (zb0006Mask & 0x1000) == 0 { // if not empty // string "amt" o = append(o, 0xa3, 0x61, 0x6d, 0x74) o, err = (*z).PaymentTxnFields.Amount.MarshalMsg(o) @@ -3854,7 +4007,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x1000) == 0 { // if not empty + if (zb0006Mask & 0x2000) == 0 { // if not empty // string "apaa" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x61) if (*z).ApplicationCallTxnFields.ApplicationArgs == nil { @@ -3866,17 +4019,17 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApplicationArgs[zb0002]) } } - if (zb0006Mask & 0x2000) == 0 { // if not empty + if (zb0006Mask & 0x4000) == 0 { // if not empty // string "apan" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x6e) o = msgp.AppendUint64(o, uint64((*z).ApplicationCallTxnFields.OnCompletion)) } - if (zb0006Mask & 0x4000) == 0 { // if not empty + if (zb0006Mask & 0x8000) == 0 { // if not empty // string "apap" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x70) o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ApprovalProgram) } - if (zb0006Mask & 0x8000) == 0 { // if not empty + if (zb0006Mask & 0x10000) == 0 { // if not empty // string "apar" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x72) o, err = (*z).AssetConfigTxnFields.AssetParams.MarshalMsg(o) @@ -3885,7 +4038,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x10000) == 0 { // if not empty + if (zb0006Mask & 0x20000) == 0 { // if not empty // string "apas" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x73) if (*z).ApplicationCallTxnFields.ForeignAssets == nil { @@ -3901,7 +4054,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { } } } - if 
(zb0006Mask & 0x20000) == 0 { // if not empty + if (zb0006Mask & 0x40000) == 0 { // if not empty // string "apat" o = append(o, 0xa4, 0x61, 0x70, 0x61, 0x74) if (*z).ApplicationCallTxnFields.Accounts == nil { @@ -3917,7 +4070,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { } } } - if (zb0006Mask & 0x40000) == 0 { // if not empty + if (zb0006Mask & 0x80000) == 0 { // if not empty // string "apfa" o = append(o, 0xa4, 0x61, 0x70, 0x66, 0x61) if (*z).ApplicationCallTxnFields.ForeignApps == nil { @@ -3933,7 +4086,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { } } } - if (zb0006Mask & 0x80000) == 0 { // if not empty + if (zb0006Mask & 0x100000) == 0 { // if not empty // string "apgs" o = append(o, 0xa4, 0x61, 0x70, 0x67, 0x73) o, err = (*z).ApplicationCallTxnFields.GlobalStateSchema.MarshalMsg(o) @@ -3942,7 +4095,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x100000) == 0 { // if not empty + if (zb0006Mask & 0x200000) == 0 { // if not empty // string "apid" o = append(o, 0xa4, 0x61, 0x70, 0x69, 0x64) o, err = (*z).ApplicationCallTxnFields.ApplicationID.MarshalMsg(o) @@ -3951,7 +4104,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x200000) == 0 { // if not empty + if (zb0006Mask & 0x400000) == 0 { // if not empty // string "apls" o = append(o, 0xa4, 0x61, 0x70, 0x6c, 0x73) o, err = (*z).ApplicationCallTxnFields.LocalStateSchema.MarshalMsg(o) @@ -3960,12 +4113,12 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x400000) == 0 { // if not empty + if (zb0006Mask & 0x800000) == 0 { // if not empty // string "apsu" o = append(o, 0xa4, 0x61, 0x70, 0x73, 0x75) o = msgp.AppendBytes(o, (*z).ApplicationCallTxnFields.ClearStateProgram) } - if (zb0006Mask & 0x800000) == 0 { // if not empty + if (zb0006Mask & 0x1000000) == 0 { // if not empty // string "arcv" o = append(o, 
0xa4, 0x61, 0x72, 0x63, 0x76) o, err = (*z).AssetTransferTxnFields.AssetReceiver.MarshalMsg(o) @@ -3974,7 +4127,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x1000000) == 0 { // if not empty + if (zb0006Mask & 0x2000000) == 0 { // if not empty // string "asnd" o = append(o, 0xa4, 0x61, 0x73, 0x6e, 0x64) o, err = (*z).AssetTransferTxnFields.AssetSender.MarshalMsg(o) @@ -3983,7 +4136,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x2000000) == 0 { // if not empty + if (zb0006Mask & 0x4000000) == 0 { // if not empty // string "caid" o = append(o, 0xa4, 0x63, 0x61, 0x69, 0x64) o, err = (*z).AssetConfigTxnFields.ConfigAsset.MarshalMsg(o) @@ -3992,7 +4145,25 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x4000000) == 0 { // if not empty + if (zb0006Mask & 0x8000000) == 0 { // if not empty + // string "cert" + o = append(o, 0xa4, 0x63, 0x65, 0x72, 0x74) + o, err = (*z).CompactCertTxnFields.Cert.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Cert") + return + } + } + if (zb0006Mask & 0x10000000) == 0 { // if not empty + // string "certrnd" + o = append(o, 0xa7, 0x63, 0x65, 0x72, 0x74, 0x72, 0x6e, 0x64) + o, err = (*z).CompactCertTxnFields.CertRound.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CertRound") + return + } + } + if (zb0006Mask & 0x20000000) == 0 { // if not empty // string "close" o = append(o, 0xa5, 0x63, 0x6c, 0x6f, 0x73, 0x65) o, err = (*z).PaymentTxnFields.CloseRemainderTo.MarshalMsg(o) @@ -4001,7 +4172,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x8000000) == 0 { // if not empty + if (zb0006Mask & 0x40000000) == 0 { // if not empty // string "fadd" o = append(o, 0xa4, 0x66, 0x61, 0x64, 0x64) o, err = (*z).AssetFreezeTxnFields.FreezeAccount.MarshalMsg(o) @@ -4010,7 +4181,7 @@ func (z *Transaction) 
MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x10000000) == 0 { // if not empty + if (zb0006Mask & 0x80000000) == 0 { // if not empty // string "faid" o = append(o, 0xa4, 0x66, 0x61, 0x69, 0x64) o, err = (*z).AssetFreezeTxnFields.FreezeAsset.MarshalMsg(o) @@ -4019,7 +4190,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x20000000) == 0 { // if not empty + if (zb0006Mask & 0x100000000) == 0 { // if not empty // string "fee" o = append(o, 0xa3, 0x66, 0x65, 0x65) o, err = (*z).Header.Fee.MarshalMsg(o) @@ -4028,7 +4199,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x40000000) == 0 { // if not empty + if (zb0006Mask & 0x200000000) == 0 { // if not empty // string "fv" o = append(o, 0xa2, 0x66, 0x76) o, err = (*z).Header.FirstValid.MarshalMsg(o) @@ -4037,12 +4208,12 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x80000000) == 0 { // if not empty + if (zb0006Mask & 0x400000000) == 0 { // if not empty // string "gen" o = append(o, 0xa3, 0x67, 0x65, 0x6e) o = msgp.AppendString(o, (*z).Header.GenesisID) } - if (zb0006Mask & 0x100000000) == 0 { // if not empty + if (zb0006Mask & 0x800000000) == 0 { // if not empty // string "gh" o = append(o, 0xa2, 0x67, 0x68) o, err = (*z).Header.GenesisHash.MarshalMsg(o) @@ -4051,7 +4222,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x200000000) == 0 { // if not empty + if (zb0006Mask & 0x1000000000) == 0 { // if not empty // string "grp" o = append(o, 0xa3, 0x67, 0x72, 0x70) o, err = (*z).Header.Group.MarshalMsg(o) @@ -4060,7 +4231,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x400000000) == 0 { // if not empty + if (zb0006Mask & 0x2000000000) == 0 { // if not empty // string "lv" o = append(o, 0xa2, 0x6c, 0x76) o, err = 
(*z).Header.LastValid.MarshalMsg(o) @@ -4069,22 +4240,22 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x800000000) == 0 { // if not empty + if (zb0006Mask & 0x4000000000) == 0 { // if not empty // string "lx" o = append(o, 0xa2, 0x6c, 0x78) o = msgp.AppendBytes(o, ((*z).Header.Lease)[:]) } - if (zb0006Mask & 0x1000000000) == 0 { // if not empty + if (zb0006Mask & 0x8000000000) == 0 { // if not empty // string "nonpart" o = append(o, 0xa7, 0x6e, 0x6f, 0x6e, 0x70, 0x61, 0x72, 0x74) o = msgp.AppendBool(o, (*z).KeyregTxnFields.Nonparticipation) } - if (zb0006Mask & 0x2000000000) == 0 { // if not empty + if (zb0006Mask & 0x10000000000) == 0 { // if not empty // string "note" o = append(o, 0xa4, 0x6e, 0x6f, 0x74, 0x65) o = msgp.AppendBytes(o, (*z).Header.Note) } - if (zb0006Mask & 0x4000000000) == 0 { // if not empty + if (zb0006Mask & 0x20000000000) == 0 { // if not empty // string "rcv" o = append(o, 0xa3, 0x72, 0x63, 0x76) o, err = (*z).PaymentTxnFields.Receiver.MarshalMsg(o) @@ -4093,7 +4264,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x8000000000) == 0 { // if not empty + if (zb0006Mask & 0x40000000000) == 0 { // if not empty // string "rekey" o = append(o, 0xa5, 0x72, 0x65, 0x6b, 0x65, 0x79) o, err = (*z).Header.RekeyTo.MarshalMsg(o) @@ -4102,7 +4273,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x10000000000) == 0 { // if not empty + if (zb0006Mask & 0x80000000000) == 0 { // if not empty // string "selkey" o = append(o, 0xa6, 0x73, 0x65, 0x6c, 0x6b, 0x65, 0x79) o, err = (*z).KeyregTxnFields.SelectionPK.MarshalMsg(o) @@ -4111,7 +4282,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x20000000000) == 0 { // if not empty + if (zb0006Mask & 0x100000000000) == 0 { // if not empty // string "snd" o = append(o, 0xa3, 0x73, 0x6e, 0x64) o, err = 
(*z).Header.Sender.MarshalMsg(o) @@ -4120,7 +4291,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x40000000000) == 0 { // if not empty + if (zb0006Mask & 0x200000000000) == 0 { // if not empty // string "type" o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) o, err = (*z).Type.MarshalMsg(o) @@ -4129,7 +4300,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x80000000000) == 0 { // if not empty + if (zb0006Mask & 0x400000000000) == 0 { // if not empty // string "votefst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x66, 0x73, 0x74) o, err = (*z).KeyregTxnFields.VoteFirst.MarshalMsg(o) @@ -4138,12 +4309,12 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x100000000000) == 0 { // if not empty + if (zb0006Mask & 0x800000000000) == 0 { // if not empty // string "votekd" o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x64) o = msgp.AppendUint64(o, (*z).KeyregTxnFields.VoteKeyDilution) } - if (zb0006Mask & 0x200000000000) == 0 { // if not empty + if (zb0006Mask & 0x1000000000000) == 0 { // if not empty // string "votekey" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79) o, err = (*z).KeyregTxnFields.VotePK.MarshalMsg(o) @@ -4152,7 +4323,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x400000000000) == 0 { // if not empty + if (zb0006Mask & 0x2000000000000) == 0 { // if not empty // string "votelst" o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x6c, 0x73, 0x74) o, err = (*z).KeyregTxnFields.VoteLast.MarshalMsg(o) @@ -4161,7 +4332,7 @@ func (z *Transaction) MarshalMsg(b []byte) (o []byte, err error) { return } } - if (zb0006Mask & 0x800000000000) == 0 { // if not empty + if (zb0006Mask & 0x4000000000000) == 0 { // if not empty // string "xaid" o = append(o, 0xa4, 0x78, 0x61, 0x69, 0x64) o, err = (*z).AssetTransferTxnFields.XferAsset.MarshalMsg(o) @@ 
-4630,6 +4801,22 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + if zb0006 > 0 { + zb0006-- + bts, err = (*z).CompactCertTxnFields.CertRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "CertRound") + return + } + } + if zb0006 > 0 { + zb0006-- + bts, err = (*z).CompactCertTxnFields.Cert.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Cert") + return + } + } if zb0006 > 0 { err = msgp.ErrTooManyArrayFields(zb0006) if err != nil { @@ -5011,6 +5198,18 @@ func (z *Transaction) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ClearStateProgram") return } + case "certrnd": + bts, err = (*z).CompactCertTxnFields.CertRound.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CertRound") + return + } + case "cert": + bts, err = (*z).CompactCertTxnFields.Cert.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Cert") + return + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -5047,13 +5246,13 @@ func (z *Transaction) Msgsize() (s int) { for zb0005 := range (*z).ApplicationCallTxnFields.ForeignAssets { s += (*z).ApplicationCallTxnFields.ForeignAssets[zb0005].Msgsize() } - s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + s += 5 + (*z).ApplicationCallTxnFields.LocalStateSchema.Msgsize() + 5 + (*z).ApplicationCallTxnFields.GlobalStateSchema.Msgsize() + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ApprovalProgram) + 5 + msgp.BytesPrefixSize + len((*z).ApplicationCallTxnFields.ClearStateProgram) + 8 + (*z).CompactCertTxnFields.CertRound.Msgsize() + 5 + (*z).CompactCertTxnFields.Cert.Msgsize() return } // MsgIsZero returns whether this is a 
zero value func (z *Transaction) MsgIsZero() bool { - return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && 
(len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) + return ((*z).Type.MsgIsZero()) && ((*z).Header.Sender.MsgIsZero()) && ((*z).Header.Fee.MsgIsZero()) && ((*z).Header.FirstValid.MsgIsZero()) && ((*z).Header.LastValid.MsgIsZero()) && (len((*z).Header.Note) == 0) && ((*z).Header.GenesisID == "") && ((*z).Header.GenesisHash.MsgIsZero()) && ((*z).Header.Group.MsgIsZero()) && ((*z).Header.Lease == ([32]byte{})) && ((*z).Header.RekeyTo.MsgIsZero()) && ((*z).KeyregTxnFields.VotePK.MsgIsZero()) && ((*z).KeyregTxnFields.SelectionPK.MsgIsZero()) && ((*z).KeyregTxnFields.VoteFirst.MsgIsZero()) && ((*z).KeyregTxnFields.VoteLast.MsgIsZero()) && ((*z).KeyregTxnFields.VoteKeyDilution == 0) && ((*z).KeyregTxnFields.Nonparticipation == false) && ((*z).PaymentTxnFields.Receiver.MsgIsZero()) && ((*z).PaymentTxnFields.Amount.MsgIsZero()) && ((*z).PaymentTxnFields.CloseRemainderTo.MsgIsZero()) && ((*z).AssetConfigTxnFields.ConfigAsset.MsgIsZero()) && ((*z).AssetConfigTxnFields.AssetParams.MsgIsZero()) && ((*z).AssetTransferTxnFields.XferAsset.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetAmount == 0) && ((*z).AssetTransferTxnFields.AssetSender.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetReceiver.MsgIsZero()) && ((*z).AssetTransferTxnFields.AssetCloseTo.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAccount.MsgIsZero()) && ((*z).AssetFreezeTxnFields.FreezeAsset.MsgIsZero()) && ((*z).AssetFreezeTxnFields.AssetFrozen == false) && ((*z).ApplicationCallTxnFields.ApplicationID.MsgIsZero()) && ((*z).ApplicationCallTxnFields.OnCompletion == 0) && (len((*z).ApplicationCallTxnFields.ApplicationArgs) == 0) && (len((*z).ApplicationCallTxnFields.Accounts) == 0) && (len((*z).ApplicationCallTxnFields.ForeignApps) == 0) && (len((*z).ApplicationCallTxnFields.ForeignAssets) == 0) && ((*z).ApplicationCallTxnFields.LocalStateSchema.MsgIsZero()) && ((*z).ApplicationCallTxnFields.GlobalStateSchema.MsgIsZero()) && (len((*z).ApplicationCallTxnFields.ApprovalProgram) == 0) && 
(len((*z).ApplicationCallTxnFields.ClearStateProgram) == 0) && ((*z).CompactCertTxnFields.CertRound.MsgIsZero()) && ((*z).CompactCertTxnFields.Cert.MsgIsZero()) } // MarshalMsg implements msgp.Marshaler diff --git a/data/transactions/msgp_gen_test.go b/data/transactions/msgp_gen_test.go index 380695b1ff..cfd2193e5c 100644 --- a/data/transactions/msgp_gen_test.go +++ b/data/transactions/msgp_gen_test.go @@ -321,6 +321,68 @@ func BenchmarkUnmarshalAssetTransferTxnFields(b *testing.B) { } } +func TestMarshalUnmarshalCompactCertTxnFields(t *testing.T) { + v := CompactCertTxnFields{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingCompactCertTxnFields(t *testing.T) { + protocol.RunEncodingTest(t, &CompactCertTxnFields{}) +} + +func BenchmarkMarshalMsgCompactCertTxnFields(b *testing.B) { + v := CompactCertTxnFields{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgCompactCertTxnFields(b *testing.B) { + v := CompactCertTxnFields{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalCompactCertTxnFields(b *testing.B) { + v := CompactCertTxnFields{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalHeader(t *testing.T) { v := Header{} bts, err := v.MarshalMsg(nil) diff 
--git a/data/transactions/transaction.go b/data/transactions/transaction.go index 2ed0ae5d19..67a4d45fc9 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -94,6 +94,7 @@ type Transaction struct { AssetTransferTxnFields AssetFreezeTxnFields ApplicationCallTxnFields + CompactCertTxnFields } // ApplyData contains information about the transaction's execution. @@ -349,6 +350,35 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa if tx.GlobalStateSchema.NumEntries() > proto.MaxGlobalSchemaEntries { return fmt.Errorf("tx.GlobalStateSchema too large, max number of keys is %d", proto.MaxGlobalSchemaEntries) } + + case protocol.CompactCertTx: + if proto.CompactCertRounds == 0 { + return fmt.Errorf("compact certs not supported") + } + + // This is a placeholder transaction used to store compact certs + // on the ledger, and ensure they are broadly available. Most of + // the fields must be empty. It must be issued from a special + // sender address. 
+ if tx.Sender != CompactCertSender { + return fmt.Errorf("sender must be the compact-cert sender") + } + if !tx.Fee.IsZero() { + return fmt.Errorf("fee must be zero") + } + if len(tx.Note) != 0 { + return fmt.Errorf("note must be empty") + } + if !tx.Group.IsZero() { + return fmt.Errorf("group must be zero") + } + if !tx.RekeyTo.IsZero() { + return fmt.Errorf("rekey must be zero") + } + if tx.Lease != [32]byte{} { + return fmt.Errorf("lease must be zero") + } + default: return fmt.Errorf("unknown tx type %v", tx.Type) } @@ -378,6 +408,10 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa nonZeroFields[protocol.ApplicationCallTx] = true } + if !tx.CompactCertTxnFields.Empty() { + nonZeroFields[protocol.CompactCertTx] = true + } + for t, nonZero := range nonZeroFields { if nonZero && t != tx.Type { return fmt.Errorf("transaction of type %v has non-zero fields for type %v", tx.Type, t) @@ -385,7 +419,11 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa } if tx.Fee.LessThan(basics.MicroAlgos{Raw: proto.MinTxnFee}) { - return makeMinFeeErrorf("transaction had fee %v, which is less than the minimum %v", tx.Fee, proto.MinTxnFee) + if tx.Type == protocol.CompactCertTx { + // Zero fee allowed for compact cert txn. + } else { + return makeMinFeeErrorf("transaction had fee %v, which is less than the minimum %v", tx.Fee, proto.MinTxnFee) + } } if tx.LastValid < tx.FirstValid { return fmt.Errorf("transaction invalid range (%v--%v)", tx.FirstValid, tx.LastValid) diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index eb433a089d..d0ea139426 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -173,6 +173,15 @@ func stxnVerifyCore(s *transactions.SignedTxn, ctx *Context) error { hasLogicSig = true } if numSigs == 0 { + // Special case: special sender address can issue special transaction + // types (compact cert txn) without any signature. 
The well-formed + // check ensures that this transaction cannot pay any fee, and + // cannot have any other interesting fields, except for the compact + // cert payload. + if s.Txn.Sender == transactions.CompactCertSender && s.Txn.Type == protocol.CompactCertTx { + return nil + } + return errors.New("signedtxn has no sig") } if numSigs > 1 { diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go index cac466aab6..1b07f1e4ff 100644 --- a/data/transactions/verify/txn_test.go +++ b/data/transactions/verify/txn_test.go @@ -121,6 +121,80 @@ func TestTxnValidationEncodeDecode(t *testing.T) { } } +func TestTxnValidationEmptySig(t *testing.T) { + _, signed, _, _ := generateTestObjects(100, 50) + + for _, txn := range signed { + if Txn(&txn, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: protocol.ConsensusCurrentVersion}}) != nil { + t.Errorf("signed transaction %#v did not verify", txn) + } + + txn.Sig = crypto.Signature{} + txn.Msig = crypto.MultisigSig{} + txn.Lsig = transactions.LogicSig{} + if Txn(&txn, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: protocol.ConsensusCurrentVersion}}) == nil { + t.Errorf("transaction %#v verified without sig", txn) + } + } +} + +const ccProto = protocol.ConsensusVersion("test-compact-cert-enabled") + +func TestTxnValidationCompactCert(t *testing.T) { + proto := config.Consensus[protocol.ConsensusCurrentVersion] + proto.CompactCertRounds = 128 + config.Consensus[ccProto] = proto + + stxn := transactions.SignedTxn{ + Txn: transactions.Transaction{ + Type: protocol.CompactCertTx, + Header: transactions.Header{ + Sender: transactions.CompactCertSender, + FirstValid: 0, + LastValid: 10, + }, + }, + } + + err := Txn(&stxn, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.NoError(t, err, "compact cert txn %#v did not verify", stxn) + + stxn2 := stxn + stxn2.Txn.Type = protocol.PaymentTx + stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee} + err = 
Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "payment txn %#v verified from CompactCertSender", stxn2) + + secret := keypair() + stxn2 = stxn + stxn2.Txn.Header.Sender = basics.Address(secret.SignatureVerifier) + stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee} + stxn2 = stxn2.Txn.Sign(secret) + err = Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "compact cert txn %#v verified from non-CompactCertSender", stxn2) + + // Compact cert txns are not allowed to have non-zero values for many fields + stxn2 = stxn + stxn2.Txn.Header.Fee = basics.MicroAlgos{Raw: proto.MinTxnFee} + err = Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "compact cert txn %#v verified", stxn2) + + stxn2 = stxn + stxn2.Txn.Header.Note = []byte{'A'} + err = Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "compact cert txn %#v verified", stxn2) + + stxn2 = stxn + stxn2.Txn.Lease[0] = 1 + err = Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "compact cert txn %#v verified", stxn2) + + stxn2 = stxn + stxn2.Txn.RekeyTo = basics.Address(secret.SignatureVerifier) + err = Txn(&stxn2, Context{Params: Params{CurrSpecAddrs: spec, CurrProto: ccProto}}) + require.Error(t, err, "compact cert txn %#v verified", stxn2) +} + func TestDecodeNil(t *testing.T) { // This is a regression test for improper decoding of a nil SignedTxn. 
// This is a subtle case because decoding a msgpack nil does not run diff --git a/data/txHandler_test.go b/data/txHandler_test.go index 68e317ce2b..c9626cd0f2 100644 --- a/data/txHandler_test.go +++ b/data/txHandler_test.go @@ -74,7 +74,7 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) { cfg.TxPoolSize = 20000 cfg.EnableProcessBlockStats = false - tp := pools.MakeTransactionPool(l.Ledger, cfg) + tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base()) signedTransactions := make([]transactions.SignedTxn, 0, b.N) for i := 0; i < b.N/numUsers; i++ { for u := 0; u < numUsers; u++ { diff --git a/docker/build/cicd.centos.Dockerfile b/docker/build/cicd.centos.Dockerfile index 591f403e31..570be6a415 100644 --- a/docker/build/cicd.centos.Dockerfile +++ b/docker/build/cicd.centos.Dockerfile @@ -17,12 +17,7 @@ ENV GOROOT=/usr/local/go \ RUN mkdir -p $GOPATH/src/github.com/algorand COPY . $GOPATH/src/github.com/algorand/go-algorand ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - BRANCH=${BRANCH} \ - CHANNEL=${CHANNEL} \ - DEFAULTNETWORK=${DEFAULTNETWORK} \ - FULLVERSION=${FULLVERSION} \ - GOPROXY=https://gocenter.io \ - PKG_ROOT=${PKG_ROOT} + GOPROXY=https://gocenter.io WORKDIR $GOPATH/src/github.com/algorand/go-algorand RUN make ci-deps && make clean RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile index cbc3506baa..a30750d320 100644 --- a/docker/build/cicd.ubuntu.Dockerfile +++ b/docker/build/cicd.ubuntu.Dockerfile @@ -3,7 +3,8 @@ ARG ARCH="amd64" FROM ${ARCH}/ubuntu:18.04 ARG GOLANG_VERSION ARG ARCH="amd64" -RUN apt-get update && apt-get install -y build-essential git libboost-all-dev wget sqlite3 autoconf jq bsdmainutils shellcheck +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y build-essential git libboost-all-dev wget sqlite3 autoconf jq bsdmainutils shellcheck awscli WORKDIR /root RUN wget 
https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz \ && tar -xvf go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz && \ @@ -13,12 +14,7 @@ ENV GOROOT=/usr/local/go \ RUN mkdir -p $GOPATH/src/github.com/algorand COPY . $GOPATH/src/github.com/algorand/go-algorand ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - BRANCH=${BRANCH} \ - CHANNEL=${CHANNEL} \ - DEFAULTNETWORK=${DEFAULTNETWORK} \ - FULLVERSION=${FULLVERSION} \ - GOPROXY=https://gocenter.io \ - PKG_ROOT=${PKG_ROOT} + GOPROXY=https://gocenter.io WORKDIR $GOPATH/src/github.com/algorand/go-algorand RUN make ci-deps && make clean RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ diff --git a/docker/build/docker.ubuntu.Dockerfile b/docker/build/docker.ubuntu.Dockerfile index ba3dd83c8b..e040d2afce 100644 --- a/docker/build/docker.ubuntu.Dockerfile +++ b/docker/build/docker.ubuntu.Dockerfile @@ -17,6 +17,10 @@ RUN apt-get update && apt-get install -y autoconf bsdmainutils git libboost-all- curl https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -xzf - && \ mv go /usr/local +ENV GOROOT=/usr/local/go \ + GOPATH=$HOME/go +ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH + WORKDIR /root CMD ["/bin/bash"] diff --git a/docker/build/mule.go.centos.Dockerfile b/docker/build/mule.go.centos.Dockerfile deleted file mode 100644 index a827ae3f5a..0000000000 --- a/docker/build/mule.go.centos.Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -ARG ARCH="amd64" - -FROM ${ARCH}/centos:7 -ARG GOLANG_VERSION -ARG ARCH="amd64" -RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ - yum update -y && \ - yum install -y autoconf wget awscli git gnupg2 nfs-utils python36 python3.7 pip3-python sqlite3 boost-devel expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck -WORKDIR /root -RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz \ - && tar -xvf 
go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz && \ - mv go /usr/local -RUN pip3 install mulecli -ENV GOROOT=/usr/local/go \ - GOPATH=$HOME/go -RUN mkdir -p $GOPATH/src/github.com/algorand -COPY . $GOPATH/src/github.com/algorand/go-algorand -ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - BRANCH=${BRANCH} \ - CHANNEL=${CHANNEL} \ - DEFAULTNETWORK=${DEFAULTNETWORK} \ - FULLVERSION=${FULLVERSION} \ - GOPROXY=https://gocenter.io \ - PKG_ROOT=${PKG_ROOT} -WORKDIR $GOPATH/src/github.com/algorand/go-algorand -RUN make ci-deps && make clean -RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ - mkdir -p $GOPATH/src/github.com/algorand/go-algorand -RUN echo "vm.max_map_count = 262144" >> /etc/sysctl.conf -CMD ["/bin/bash"] - diff --git a/docker/build/mule.go.debian.Dockerfile b/docker/build/mule.go.debian.Dockerfile deleted file mode 100644 index fb9ae896d9..0000000000 --- a/docker/build/mule.go.debian.Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM docker:19 as docker -FROM python:3.7 -ARG GOLANG_VERSION - -COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker -COPY *.yaml /root/ - -RUN apt-get update && apt-get install -y autoconf bsdmainutils build-essential curl git libboost-all-dev && \ - curl https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz | tar -xzf - && \ - mv go /usr/local && \ - pip install mulecli - -ENV GOROOT=/usr/local/go \ - GOPATH=$HOME/go -ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH - -WORKDIR /root - -CMD ["/bin/bash"] - diff --git a/docker/build/releases-page.Dockerfile b/docker/build/releases-page.Dockerfile new file mode 100644 index 0000000000..80814f7fb5 --- /dev/null +++ b/docker/build/releases-page.Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:18.04 + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install git python3 python3-pip -y && \ + pip3 install awscli boto3 + +WORKDIR /root + +CMD ["/bin/bash"] + diff --git a/go.mod b/go.mod index 64f8dd5c61..4bb9aedb2c 100644 --- a/go.mod +++ b/go.mod @@ 
-15,7 +15,8 @@ require ( github.com/fatih/color v1.7.0 github.com/fortytw2/leaktest v1.3.0 // indirect github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f - github.com/getkin/kin-openapi v0.14.0 + github.com/getkin/kin-openapi v0.22.0 + github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f // indirect github.com/gofrs/flock v0.7.0 github.com/google/go-querystring v1.0.0 @@ -28,8 +29,9 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmoiron/sqlx v1.2.0 github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2 - github.com/labstack/echo/v4 v4.1.16 + github.com/labstack/echo/v4 v4.1.17 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/matryer/moq v0.1.3 // indirect github.com/mattn/go-sqlite3 v1.10.0 github.com/miekg/dns v1.1.27 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect @@ -43,11 +45,11 @@ require ( github.com/spf13/cobra v0.0.3 github.com/spf13/pflag v1.0.3 // indirect github.com/stretchr/testify v1.6.1 - golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 - golang.org/x/net v0.0.0-20200602114024-627f9648deb9 - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect - golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 + golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a + golang.org/x/net v0.0.0-20200904194848-62affa334b73 + golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f golang.org/x/text v0.3.3 // indirect + golang.org/x/tools v0.0.0-20200904185747-39188db58858 // indirect google.golang.org/appengine v1.6.1 // indirect gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect diff --git a/go.sum b/go.sum index 90b932a0ee..6ffd738e6d 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,7 @@ github.com/aws/aws-sdk-go v1.16.5 
h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M= github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= +github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4 h1:Fphwr1XDjkTR/KFbrrkLfY6D2CEOlHqFGomQQrxcHFs= github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -35,9 +36,14 @@ github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f/go.mod h1:GprdPCZg github.com/getkin/kin-openapi v0.3.1/go.mod h1:W8dhxZgpE84ciM+VIItFqkmZ4eHtuomrdIHtASQIqi0= github.com/getkin/kin-openapi v0.14.0 h1:hqwQL7kze/adt0wB+0UJR2nJm+gfUHqM0Gu4D8nByVc= github.com/getkin/kin-openapi v0.14.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw= +github.com/getkin/kin-openapi v0.22.0 h1:J5IFyKd/5yuB6AZAgwK0CMBKnabWcmkowtsl6bRkz4s= +github.com/getkin/kin-openapi v0.22.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4= github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= github.com/go-sql-driver/mysql 
v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM= @@ -79,16 +85,23 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o= github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI= +github.com/labstack/echo/v4 v4.1.17 h1:PQIBaRplyRy3OjwILGkPg89JRtH2x5bssi59G2EL3fo= +github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54 h1:p8zN0Xu28xyEkPpqLbFXAnjdgBVvTJCpfOtoDf/+/RQ= github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/matryer/moq v0.1.3 h1:+fW3u2jmlPw59a3V6spZKOLCcvrDKzPjMsRvUhnZ/c0= +github.com/matryer/moq v0.1.3/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable 
v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -140,7 +153,11 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4= github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -148,8 +165,14 @@ golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -160,11 +183,16 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -178,6 +206,9 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -188,11 +219,17 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200423205358-59e73619c742 h1:9OGWpORUXvk8AsaBJlpzzDx7Srv/rSK6rvjcsJq4rJo= golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858 h1:xLt+iB5ksWcZVxqc+g9K41ZHy+6MKWfXCDsjSThnsPA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= diff --git a/installer/external/node_exporter-stable-windows-x86_64.tar.gz b/installer/external/node_exporter-stable-windows-x86_64.tar.gz new file mode 100644 index 0000000000..56d41a144d Binary files 
/dev/null and b/installer/external/node_exporter-stable-windows-x86_64.tar.gz differ diff --git a/installer/rpm/algorand/algorand.spec b/installer/rpm/algorand/algorand.spec index 2c59cb1b4c..b45301bd1c 100644 --- a/installer/rpm/algorand/algorand.spec +++ b/installer/rpm/algorand/algorand.spec @@ -63,7 +63,7 @@ if [ "%{RELEASE_GENESIS_PROCESS}" != "x" ]; then done cp %{buildroot}/var/lib/algorand/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json %{buildroot}/var/lib/algorand/genesis.json else - cp ${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json %{buildroot}/var/lib/algorand/genesis.json + cp ${REPO_DIR}/installer/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json %{buildroot}/var/lib/algorand/genesis.json #${GOPATH}/bin/buildtools genesis ensure -n ${DEFAULT_RELEASE_NETWORK} --source ${REPO_DIR}/gen/${DEFAULT_RELEASE_NETWORK}/genesis.json --target %{buildroot}/var/lib/algorand/genesis.json --releasedir ${REPO_DIR}/installer/genesis fi diff --git a/ledger/accountdb.go b/ledger/accountdb.go index c6f148dacb..8ea1c4b626 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -87,6 +87,18 @@ var creatablesMigration = []string{ `ALTER TABLE assetcreators ADD COLUMN ctype INTEGER DEFAULT 0`, } +func createNormalizedOnlineBalanceIndex(idxname string, tablename string) string { + return fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s + ON %s ( normalizedonlinebalance, address, data ) + WHERE normalizedonlinebalance>0`, idxname, tablename) +} + +var createOnlineAccountIndex = []string{ + `ALTER TABLE accountbase + ADD COLUMN normalizedonlinebalance INTEGER`, + createNormalizedOnlineBalanceIndex("onlineaccountbals", "accountbase"), +} + var accountsResetExprs = []string{ `DROP TABLE IF EXISTS acctrounds`, `DROP TABLE IF EXISTS accounttotals`, @@ -100,14 +112,14 @@ var accountsResetExprs = []string{ // accountDBVersion is the database version that this binary would know how to support and how to upgrade to. 
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX // and their descriptions. -var accountDBVersion = int32(3) +var accountDBVersion = int32(4) type accountDelta struct { old basics.AccountData new basics.AccountData } -// catchpointState is used to store catchpoint related varaibles into the catchpointstate table. +// catchpointState is used to store catchpoint related variables into the catchpointstate table. type catchpointState string const ( @@ -136,17 +148,26 @@ func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, addr basic return nil } -func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []encodedBalanceRecord) error { - insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, data) VALUES(?, ?)") +func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []encodedBalanceRecord, proto config.ConsensusParams) error { + insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, normalizedonlinebalance, data) VALUES(?, ?, ?)") if err != nil { return err } for _, balance := range bals { - result, err := insertStmt.ExecContext(ctx, balance.Address[:], balance.AccountData) + var data basics.AccountData + err = protocol.Decode(balance.AccountData, &data) + if err != nil { + return err + } + + normBalance := data.NormalizedOnlineBalance(proto) + + result, err := insertStmt.ExecContext(ctx, balance.Address[:], normBalance, balance.AccountData) if err != nil { return err } + aff, err := result.RowsAffected() if err != nil { return err @@ -154,42 +175,68 @@ func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []enco if aff != 1 { return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff) } - } return nil } func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup bool) (err error) { - s := "DROP TABLE IF EXISTS 
catchpointbalances;" - s += "DROP TABLE IF EXISTS catchpointassetcreators;" - s += "DROP TABLE IF EXISTS catchpointaccounthashes;" - s += "DELETE FROM accounttotals where id='catchpointStaging';" + s := []string{ + "DROP TABLE IF EXISTS catchpointbalances", + "DROP TABLE IF EXISTS catchpointassetcreators", + "DROP TABLE IF EXISTS catchpointaccounthashes", + "DELETE FROM accounttotals where id='catchpointStaging'", + } + if newCatchup { - s += "CREATE TABLE IF NOT EXISTS catchpointassetcreators(asset integer primary key, creator blob, ctype integer);" - s += "CREATE TABLE IF NOT EXISTS catchpointbalances(address blob primary key, data blob);" - s += "CREATE TABLE IF NOT EXISTS catchpointaccounthashes(id integer primary key, data blob);" + // SQLite has no way to rename an existing index. So, we need + // to cook up a fresh name for the index, which will be kept + // around after we rename the table from "catchpointbalances" + // to "accountbase". To construct a unique index name, we + // use the current time. + idxname := fmt.Sprintf("onlineaccountbals%d", time.Now().UnixNano()) + + s = append(s, + "CREATE TABLE IF NOT EXISTS catchpointassetcreators (asset integer primary key, creator blob, ctype integer)", + "CREATE TABLE IF NOT EXISTS catchpointbalances (address blob primary key, data blob, normalizedonlinebalance integer)", + "CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)", + createNormalizedOnlineBalanceIndex(idxname, "catchpointbalances"), + ) } - _, err = tx.Exec(s) - return err + + for _, stmt := range s { + _, err = tx.Exec(stmt) + if err != nil { + return err + } + } + + return nil } // applyCatchpointStagingBalances switches the staged catchpoint catchup tables onto the actual // tables and update the correct balance round. This is the final step in switching onto the new catchpoint round. 
func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRound basics.Round) (err error) { - s := "ALTER TABLE accountbase RENAME TO accountbase_old;" - s += "ALTER TABLE assetcreators RENAME TO assetcreators_old;" - s += "ALTER TABLE accounthashes RENAME TO accounthashes_old;" - s += "ALTER TABLE catchpointbalances RENAME TO accountbase;" - s += "ALTER TABLE catchpointassetcreators RENAME TO assetcreators;" - s += "ALTER TABLE catchpointaccounthashes RENAME TO accounthashes;" - s += "DROP TABLE IF EXISTS accountbase_old;" - s += "DROP TABLE IF EXISTS assetcreators_old;" - s += "DROP TABLE IF EXISTS accounthashes_old;" - - _, err = tx.Exec(s) - if err != nil { - return err + stmts := []string{ + "ALTER TABLE accountbase RENAME TO accountbase_old", + "ALTER TABLE assetcreators RENAME TO assetcreators_old", + "ALTER TABLE accounthashes RENAME TO accounthashes_old", + + "ALTER TABLE catchpointbalances RENAME TO accountbase", + "ALTER TABLE catchpointassetcreators RENAME TO assetcreators", + "ALTER TABLE catchpointaccounthashes RENAME TO accounthashes", + + "DROP TABLE IF EXISTS accountbase_old", + "DROP TABLE IF EXISTS assetcreators_old", + "DROP TABLE IF EXISTS accounthashes_old", + } + + for _, stmt := range stmts { + _, err = tx.Exec(stmt) + if err != nil { + return err + } } + _, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id, rnd) VALUES('acctbase', ?)", balancesRound) if err != nil { return err @@ -259,7 +306,7 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData } } else { serr, ok := err.(sqlite3.Error) - // serr.Code is sqlite.ErrConstraint if the database has already been initalized; + // serr.Code is sqlite.ErrConstraint if the database has already been initialized; // in that case, ignore the error and return nil. 
if !ok || serr.Code != sqlite3.ErrConstraint { return err @@ -269,6 +316,76 @@ func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData return nil } +// accountsAddNormalizedBalance adds the normalizedonlinebalance column +// to the accountbase table. +func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) error { + var exists bool + err := tx.QueryRow("SELECT 1 FROM pragma_table_info('accountbase') WHERE name='normalizedonlinebalance'").Scan(&exists) + if err == nil { + // Already exists. + return nil + } + if err != sql.ErrNoRows { + return err + } + + for _, stmt := range createOnlineAccountIndex { + _, err := tx.Exec(stmt) + if err != nil { + return err + } + } + + rows, err := tx.Query("SELECT address, data FROM accountbase") + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var addrbuf []byte + var buf []byte + err = rows.Scan(&addrbuf, &buf) + if err != nil { + return err + } + + var data basics.AccountData + err = protocol.Decode(buf, &data) + if err != nil { + return err + } + + normBalance := data.NormalizedOnlineBalance(proto) + if normBalance > 0 { + _, err = tx.Exec("UPDATE accountbase SET normalizedonlinebalance=? WHERE address=?", normBalance, addrbuf) + if err != nil { + return err + } + } + } + + return rows.Err() +} + +// accountDataToOnline returns the part of the AccountData that matters +// for online accounts (to answer top-N queries). We store a subset of +// the full AccountData because we need to store a large number of these +// in memory (say, 1M), and storing that many AccountData could easily +// cause us to run out of memory. 
+func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *onlineAccount { + return &onlineAccount{ + Address: address, + MicroAlgos: ad.MicroAlgos, + RewardsBase: ad.RewardsBase, + NormalizedOnlineBalance: ad.NormalizedOnlineBalance(proto), + VoteID: ad.VoteID, + VoteFirstValid: ad.VoteFirstValid, + VoteLastValid: ad.VoteLastValid, + VoteKeyDilution: ad.VoteKeyDilution, + } +} + func resetAccountHashes(tx *sql.Tx) (err error) { _, err = tx.Exec(`DELETE FROM accounthashes`) return @@ -305,17 +422,17 @@ func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) var err error qs := &accountsDbQueries{} - qs.listCreatablesStmt, err = r.Prepare("SELECT asset, creator FROM assetcreators WHERE asset <= ? AND ctype = ? ORDER BY asset desc LIMIT ?") + qs.listCreatablesStmt, err = r.Prepare("SELECT rnd, asset, creator FROM acctrounds LEFT JOIN assetcreators ON assetcreators.asset <= ? AND assetcreators.ctype = ? WHERE acctrounds.id='acctbase' ORDER BY assetcreators.asset desc LIMIT ?") if err != nil { return nil, err } - qs.lookupStmt, err = r.Prepare("SELECT data FROM accountbase WHERE address=?") + qs.lookupStmt, err = r.Prepare("SELECT rnd, data FROM acctrounds LEFT JOIN accountbase ON address=? WHERE id='acctbase'") if err != nil { return nil, err } - qs.lookupCreatorStmt, err = r.Prepare("SELECT creator FROM assetcreators WHERE asset = ? AND ctype = ?") + qs.lookupCreatorStmt, err = r.Prepare("SELECT rnd, creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? 
WHERE id='acctbase'") if err != nil { return nil, err } @@ -362,7 +479,8 @@ func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) return qs, nil } -func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, err error) { +// listCreatables returns an array of CreatableLocator which have CreatableIndex smaller or equal to maxIdx and are of the provided CreatableType. +func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, dbRound basics.Round, err error) { err = db.Retry(func() error { // Query for assets in range rows, err := qs.listCreatablesStmt.Query(maxIdx, ctype, maxResults) @@ -374,11 +492,17 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes // For each row, copy into a new CreatableLocator and append to results var buf []byte var cl basics.CreatableLocator + var creatableIndex sql.NullInt64 for rows.Next() { - err := rows.Scan(&cl.Index, &buf) + err = rows.Scan(&dbRound, &creatableIndex, &buf) if err != nil { return err } + if !creatableIndex.Valid { + // we received an entry without any index. This would happen only on the first entry when there are no creatables of the requested type. 
+			break
+		}
+		cl.Index = basics.CreatableIndex(creatableIndex.Int64)
 			copy(cl.Creator[:], buf)
 			cl.Type = ctype
 			results = append(results, cl)
@@ -388,14 +512,14 @@ func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxRes
 	return
 }
 
-func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (addr basics.Address, ok bool, err error) {
+func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (addr basics.Address, ok bool, dbRound basics.Round, err error) {
 	err = db.Retry(func() error {
 		var buf []byte
-		err := qs.lookupCreatorStmt.QueryRow(cidx, ctype).Scan(&buf)
+		err := qs.lookupCreatorStmt.QueryRow(cidx, ctype).Scan(&dbRound, &buf)
 
-		// Common error: creatable does not exist
+		// this shouldn't happen unless we can't figure out the round number.
 		if err == sql.ErrNoRows {
-			return nil
+			return fmt.Errorf("lookupCreator was unable to retrieve round number")
 		}
 
 		// Some other database error
@@ -403,24 +527,34 @@ func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype bas
 			return err
 		}
 
-		ok = true
-		copy(addr[:], buf)
+		if len(buf) > 0 {
+			ok = true
+			copy(addr[:], buf)
+		}
 
 		return nil
 	})
 	return
 }
 
-func (qs *accountsDbQueries) lookup(addr basics.Address) (data basics.AccountData, err error) {
+// lookup looks up the account data given its address. It returns the current database round and the matching
+// account data, if such was found. If no matching account data could be found for the given address, an empty account data would
+// be retrieved.
+func (qs *accountsDbQueries) lookup(addr basics.Address) (data basics.AccountData, dbRound basics.Round, err error) { err = db.Retry(func() error { var buf []byte - err := qs.lookupStmt.QueryRow(addr[:]).Scan(&buf) + err := qs.lookupStmt.QueryRow(addr[:]).Scan(&dbRound, &buf) if err == nil { - return protocol.Decode(buf, &data) + if len(buf) > 0 { + return protocol.Decode(buf, &data) + } + // we don't have that account, just return the database round. + return nil } + // this should never happen; it indicates that we don't have a current round in the acctrounds table. if err == sql.ErrNoRows { // Return the zero value of data - return nil + return fmt.Errorf("unable to query account data for address %v : %w", addr, err) } return err @@ -554,6 +688,50 @@ func (qs *accountsDbQueries) close() { } } +// accountsOnlineTop returns the top n online accounts starting at position offset +// (that is, the top offset'th account through the top offset+n-1'th account). +// +// The accounts are sorted by their normalized balance and address. The normalized +// balance has to do with the reward parts of online account balances. See the +// normalization procedure in AccountData.NormalizedOnlineBalance(). +// +// Note that this does not check if the accounts have a vote key valid for any +// particular round (past, present, or future). +func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*onlineAccount, error) { + rows, err := tx.Query("SELECT address, data FROM accountbase WHERE normalizedonlinebalance>0 ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? 
OFFSET ?", n, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + res := make(map[basics.Address]*onlineAccount, n) + for rows.Next() { + var addrbuf []byte + var buf []byte + err = rows.Scan(&addrbuf, &buf) + if err != nil { + return nil, err + } + + var data basics.AccountData + err = protocol.Decode(buf, &data) + if err != nil { + return nil, err + } + + var addr basics.Address + if len(addrbuf) != len(addr) { + err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr)) + return nil, err + } + + copy(addr[:], addrbuf) + res[addr] = accountDataToOnline(addr, &data, proto) + } + + return res, rows.Err() +} + func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) { rows, err := tx.Query("SELECT address, data FROM accountbase") if err != nil { @@ -619,7 +797,7 @@ func accountsPutTotals(tx *sql.Tx, totals AccountTotals, catchpointStaging bool) } // accountsNewRound updates the accountbase and assetcreators by applying the provided deltas to the accounts / creatables. 
-func accountsNewRound(tx *sql.Tx, updates map[basics.Address]accountDelta, creatables map[basics.CreatableIndex]modifiedCreatable) (err error) { +func accountsNewRound(tx *sql.Tx, updates map[basics.Address]accountDelta, creatables map[basics.CreatableIndex]modifiedCreatable, proto config.ConsensusParams) (err error) { var insertCreatableIdxStmt, deleteCreatableIdxStmt, deleteStmt, replaceStmt *sql.Stmt @@ -629,7 +807,7 @@ func accountsNewRound(tx *sql.Tx, updates map[basics.Address]accountDelta, creat } defer deleteStmt.Close() - replaceStmt, err = tx.Prepare("REPLACE INTO accountbase (address, data) VALUES (?, ?)") + replaceStmt, err = tx.Prepare("REPLACE INTO accountbase (address, normalizedonlinebalance, data) VALUES (?, ?, ?)") if err != nil { return } @@ -640,12 +818,12 @@ func accountsNewRound(tx *sql.Tx, updates map[basics.Address]accountDelta, creat // prune empty accounts _, err = deleteStmt.Exec(addr[:]) } else { - _, err = replaceStmt.Exec(addr[:], protocol.Encode(&data.new)) + normBalance := data.new.NormalizedOnlineBalance(proto) + _, err = replaceStmt.Exec(addr[:], normBalance, protocol.Encode(&data.new)) } if err != nil { return } - } if len(creatables) > 0 { @@ -787,7 +965,7 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e // note that we should be quite liberal on timing here, since it might perform much slower // on low-power devices. if scannedAccounts%1000 == 0 { - // The return value from ResetTransactionWarnDeadline can be safely ignored here since it would only default to writing the warnning + // The return value from ResetTransactionWarnDeadline can be safely ignored here since it would only default to writing the warning // message, which would let us know that it failed anyway. 
db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(time.Second)) } @@ -838,10 +1016,6 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e return } -// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page -// value was calibrated using BenchmarkCalibrateNodesPerPage -var merkleCommitterNodesPerPage = int64(116) - type merkleCommitter struct { tx *sql.Tx deleteStmt *sql.Stmt @@ -893,11 +1067,6 @@ func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) { return content, nil } -// GetNodesCountPerPage returns the page size ( number of nodes per page ) -func (mc *merkleCommitter) GetNodesCountPerPage() (pageSize int64) { - return merkleCommitterNodesPerPage -} - // encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table. type encodedAccountsBatchIter struct { rows *sql.Rows diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go index 9d9374847b..033bd1f9cd 100644 --- a/ledger/accountdb_test.go +++ b/ledger/accountdb_test.go @@ -17,16 +17,23 @@ package ledger import ( + "bytes" "context" "database/sql" + "encoding/binary" "fmt" + "os" + "sort" + "strings" "testing" + "time" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" ) @@ -52,7 +59,8 @@ func randomAccountData(rewardsLevel uint64) basics.AccountData { } data.RewardsBase = rewardsLevel - + data.VoteFirstValid = 0 + data.VoteLastValid = 1000 return data } @@ -335,10 +343,14 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics. 
require.NoError(t, err) defer aq.close() + proto := config.Consensus[protocol.ConsensusCurrentVersion] + err = accountsAddNormalizedBalance(tx, proto) + require.NoError(t, err) + var totalOnline, totalOffline, totalNotPart uint64 for addr, data := range accts { - d, err := aq.lookup(addr) + d, _, err := aq.lookup(addr) require.NoError(t, err) require.Equal(t, d, data) @@ -366,9 +378,50 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics. require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline) require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart) - d, err := aq.lookup(randomAddress()) + d, dbRound, err := aq.lookup(randomAddress()) require.NoError(t, err) + require.Equal(t, rnd, dbRound) require.Equal(t, d, basics.AccountData{}) + + onlineAccounts := make(map[basics.Address]*onlineAccount) + for addr, data := range accts { + if data.Status == basics.Online { + onlineAccounts[addr] = accountDataToOnline(addr, &data, proto) + } + } + + for i := 0; i < len(onlineAccounts); i++ { + dbtop, err := accountsOnlineTop(tx, 0, uint64(i), proto) + require.NoError(t, err) + require.Equal(t, i, len(dbtop)) + + // Compute the top-N accounts ourselves + var testtop []onlineAccount + for _, data := range onlineAccounts { + testtop = append(testtop, *data) + } + + sort.Slice(testtop, func(i, j int) bool { + ibal := testtop[i].NormalizedOnlineBalance + jbal := testtop[j].NormalizedOnlineBalance + if ibal > jbal { + return true + } + if ibal < jbal { + return false + } + return bytes.Compare(testtop[i].Address[:], testtop[j].Address[:]) > 0 + }) + + for j := 0; j < i; j++ { + _, ok := dbtop[testtop[j].Address] + require.True(t, ok) + } + } + + top, err := accountsOnlineTop(tx, 0, uint64(len(onlineAccounts)+1), proto) + require.NoError(t, err) + require.Equal(t, len(top), len(onlineAccounts)) } func TestAccountDBInit(t *testing.T) { @@ -475,7 +528,7 @@ func TestAccountDBRound(t *testing.T) { accts = newaccts 
ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs, expectedDbImage, numElementsPerSegement) - err = accountsNewRound(tx, updates, ctbsWithDeletes) + err = accountsNewRound(tx, updates, ctbsWithDeletes, proto) require.NoError(t, err) err = totalsNewRounds(tx, []map[basics.Address]accountDelta{updates}, []AccountTotals{{}}, []config.ConsensusParams{proto}) require.NoError(t, err) @@ -545,7 +598,7 @@ func randomCreatableSampling(iteration int, crtbsList []basics.CreatableIndex, ctb := creatables[crtbsList[i]] if ctb.created && // Always delete the first element, to make sure at least one - // element is always deleted. + // element is always deleted. (i == delSegmentStart || 1 == (crypto.RandUint64()%2)) { ctb.created = false newSample[crtbsList[i]] = ctb @@ -785,3 +838,94 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) { qs.close() require.Nil(t, qs.listCreatablesStmt) } + +func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) { + proto := config.Consensus[protocol.ConsensusCurrentVersion] + genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion) + const inMem = false + log := logging.TestingLog(b) + cfg := config.GetDefaultLocal() + cfg.Archival = false + log.SetLevel(logging.Warn) + dbBaseFileName := strings.Replace(b.Name(), "/", "_", -1) + l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg) + require.NoError(b, err, "could not open ledger") + defer func() { + l.Close() + os.Remove(dbBaseFileName + ".block.sqlite") + os.Remove(dbBaseFileName + ".tracker.sqlite") + }() + catchpointAccessor := MakeCatchpointCatchupAccessor(l, log) + catchpointAccessor.ResetStagingBalances(context.Background(), true) + targetAccountsCount := uint64(b.N) + accountsLoaded := uint64(0) + var last64KStart time.Time + last64KSize := uint64(0) + last64KAccountCreationTime := time.Duration(0) + accountsWritingStarted := time.Now() + accountsGenerationDuration := time.Duration(0) + 
b.ResetTimer() + for accountsLoaded < targetAccountsCount { + b.StopTimer() + balancesLoopStart := time.Now() + // generate a chunk; + chunkSize := targetAccountsCount - accountsLoaded + if chunkSize > BalancesPerCatchpointFileChunk { + chunkSize = BalancesPerCatchpointFileChunk + } + last64KSize += chunkSize + if accountsLoaded >= targetAccountsCount-64*1024 && last64KStart.IsZero() { + last64KStart = time.Now() + last64KSize = chunkSize + last64KAccountCreationTime = time.Duration(0) + } + var balances catchpointFileBalancesChunk + balances.Balances = make([]encodedBalanceRecord, chunkSize) + for i := uint64(0); i < chunkSize; i++ { + var randomAccount encodedBalanceRecord + accountData := basics.AccountData{RewardsBase: accountsLoaded + i} + accountData.MicroAlgos.Raw = crypto.RandUint63() + randomAccount.AccountData = protocol.Encode(&accountData) + crypto.RandBytes(randomAccount.Address[:]) + if ascendingOrder { + binary.LittleEndian.PutUint64(randomAccount.Address[:], accountsLoaded+i) + } + balances.Balances[i] = randomAccount + } + balanceLoopDuration := time.Now().Sub(balancesLoopStart) + last64KAccountCreationTime += balanceLoopDuration + accountsGenerationDuration += balanceLoopDuration + b.StartTimer() + err = l.trackerDBs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + err = writeCatchpointStagingBalances(ctx, tx, balances.Balances, proto) + return + }) + + require.NoError(b, err) + accountsLoaded += chunkSize + } + if !last64KStart.IsZero() { + last64KDuration := time.Now().Sub(last64KStart) - last64KAccountCreationTime + fmt.Printf("%-82s%-7d (last 64k) %-6d ns/account %d accounts/sec\n", b.Name(), last64KSize, (last64KDuration / time.Duration(last64KSize)).Nanoseconds(), int(float64(last64KSize)/float64(last64KDuration.Seconds()))) + } + stats, err := l.trackerDBs.wdb.Vacuum(context.Background()) + require.NoError(b, err) + fmt.Printf("%-82sdb fragmentation %.1f%%\n", b.Name(), 
float32(stats.PagesBefore-stats.PagesAfter)*100/float32(stats.PagesBefore)) + b.ReportMetric(float64(b.N)/float64((time.Now().Sub(accountsWritingStarted)-accountsGenerationDuration).Seconds()), "accounts/sec") +} + +func BenchmarkWriteCatchpointStagingBalances(b *testing.B) { + benchSizes := []int{1024 * 100, 1024 * 200, 1024 * 400} + for _, size := range benchSizes { + b.Run(fmt.Sprintf("RandomInsertOrder-%d", size), func(b *testing.B) { + b.N = size + benchmarkWriteCatchpointStagingBalancesSub(b, false) + }) + } + for _, size := range benchSizes { + b.Run(fmt.Sprintf("AscendingInsertOrder-%d", size), func(b *testing.B) { + b.N = size + benchmarkWriteCatchpointStagingBalancesSub(b, true) + }) + } +} diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index fc2928c036..b7a10f9b4a 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -17,6 +17,7 @@ package ledger import ( + "container/heap" "context" "database/sql" "encoding/hex" @@ -41,6 +42,7 @@ import ( "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" + "github.com/algorand/go-algorand/util/metrics" ) const ( @@ -62,6 +64,17 @@ const ( // value was calibrated using BenchmarkCalibrateCacheNodeSize var trieCachedNodesCount = 9000 +// merkleCommitterNodesPerPage controls how many nodes will be stored in a single page +// value was calibrated using BenchmarkCalibrateNodesPerPage +var merkleCommitterNodesPerPage = int64(116) + +var trieMemoryConfig = merkletrie.MemoryConfig{ + NodesCountPerPage: merkleCommitterNodesPerPage, + CachedNodesCount: trieCachedNodesCount, + PageFillFactor: 0.95, + MaxChildrenPagesThreshold: 64, +} + // A modifiedAccount represents an account that has been modified since // the persistent state stored in the account DB (i.e., in the range of // rounds covered by the accountUpdates tracker). @@ -170,11 +183,11 @@ type accountUpdates struct { // written to the database. 
balancesTrie *merkletrie.Trie - // The last catchpoint label that was writted to the database. Should always align with what's in the database. + // The last catchpoint label that was written to the database. Should always align with what's in the database. // note that this is the last catchpoint *label* and not the catchpoint file. lastCatchpointLabel string - // catchpointWriting help to syncronize the catchpoint file writing. When this channel is closed, no writting is going on. + // catchpointWriting help to syncronize the catchpoint file writing. When this channel is closed, no writing is going on. // the channel is non-closed while writing the current accounts state to disk. catchpointWriting chan struct{} @@ -186,7 +199,7 @@ type accountUpdates struct { // ctx is the context for the committing go-routine. It's also used as the "parent" of the catchpoint generation operation. ctx context.Context - // ctxCancel is the canceling function for canceling the commiting go-routine ( i.e. signaling the commiting go-routine that it's time to abort ) + // ctxCancel is the canceling function for canceling the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort ) ctxCancel context.CancelFunc // deltasAccum stores the accumulated deltas for every round starting dbRound-1. @@ -195,15 +208,21 @@ type accountUpdates struct { // committedOffset is the offset at which we'd like to persist all the previous account information to disk. committedOffset chan deferedCommit - // accountsMu is the syncronization mutex for accessing the various non-static varaibles. + // accountsMu is the synchronization mutex for accessing the various non-static variables. accountsMu deadlock.RWMutex - // accountsWriting provides syncronization around the background writing of account balances. + // accountsReadCond used to synchronize read access to the internal data structures. 
+	accountsReadCond *sync.Cond
+
+	// accountsWriting provides synchronization around the background writing of account balances.
 	accountsWriting sync.WaitGroup
 
-	// commitSyncerClosed is the blocking channel for syncronizing closing the commitSyncer goroutine. Once it's closed, the
+	// commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
 	// commitSyncer can be assumed to have aborted.
 	commitSyncerClosed chan struct{}
+
+	// voters keeps track of Merkle trees of online accounts, used for compact certificates.
+	voters *votersTracker
 }
 
 type deferedCommit struct {
@@ -212,6 +231,39 @@ type deferedCommit struct {
 	lookback basics.Round
 }
 
+// RoundOffsetError is an error for when requested round is behind earliest stored db entry
+type RoundOffsetError struct {
+	round   basics.Round
+	dbRound basics.Round
+}
+
+func (e *RoundOffsetError) Error() string {
+	return fmt.Sprintf("round %d before dbRound %d", e.round, e.dbRound)
+}
+
+// StaleDatabaseRoundError is generated when we detect that the database round is behind the accountUpdates in-memory dbRound. This
+// should never happen, since we update the database first, and only upon a successful update we update the in-memory dbRound.
+type StaleDatabaseRoundError struct {
+	memoryRound   basics.Round
+	databaseRound basics.Round
+}
+
+func (e *StaleDatabaseRoundError) Error() string {
+	return fmt.Sprintf("database round %d is behind in-memory round %d", e.databaseRound, e.memoryRound)
+}
+
+// MismatchingDatabaseRoundError is generated when we detect that the database round is different than the accountUpdates in-memory dbRound. This
+// could happen normally when the database and the in-memory dbRound aren't synchronized. However, when we work in non-sync mode, we expect the database to be
+// always synchronized with the in-memory data. When that condition is violated, this error is generated.
+type MismatchingDatabaseRoundError struct { + memoryRound basics.Round + databaseRound basics.Round +} + +func (e *MismatchingDatabaseRoundError) Error() string { + return fmt.Sprintf("database round %d mismatching in-memory round %d", e.databaseRound, e.memoryRound) +} + // initialize initializes the accountUpdates structure func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) { au.initProto = genesisProto @@ -243,6 +295,7 @@ func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, gene // initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active ) au.commitSyncerClosed = make(chan struct{}) close(au.commitSyncerClosed) + au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker()) } // loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional @@ -273,6 +326,12 @@ func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error { au.generateCatchpoint(basics.Round(writingCatchpointRound), au.lastCatchpointLabel, writingCatchpointDigest, time.Duration(0)) } + au.voters = &votersTracker{} + err = au.voters.loadFromDisk(l, au) + if err != nil { + return err + } + return nil } @@ -307,13 +366,16 @@ func (au *accountUpdates) IsWritingCatchpointFile() bool { } } -// Lookup returns the accound data for a given address at a given round. The withRewards indicates whether the -// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards, -// even while it could return the AccoutData which represent the "rewarded" account data. 
-func (au *accountUpdates) Lookup(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
-	au.accountsMu.RLock()
-	defer au.accountsMu.RUnlock()
-	return au.lookupImpl(rnd, addr, withRewards)
+// LookupWithRewards returns the account data for a given address at a given round.
+// Note that the function doesn't update the account with the rewards,
+// even while it does return the AccountData which represents the "rewarded" account data.
+func (au *accountUpdates) LookupWithRewards(rnd basics.Round, addr basics.Address) (data basics.AccountData, err error) {
+	return au.lookupWithRewardsImpl(rnd, addr)
+}
+
+// LookupWithoutRewards returns the account data for a given address at a given round.
+func (au *accountUpdates) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (data basics.AccountData, validThrough basics.Round, err error) {
+	return au.lookupWithoutRewardsImpl(rnd, addr, true /* take lock*/)
 }
 
 // ListAssets lists the assets by their asset index, limiting to the first maxResults
@@ -329,75 +391,213 @@ func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults
 // listCreatables lists the application/asset by their app/asset index, limiting to the first maxResults
 func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
 	au.accountsMu.RLock()
-	defer au.accountsMu.RUnlock()
-
-	// Sort indices for creatables that have been created/deleted. If this
-	// turns out to be too inefficient, we could keep around a heap of
-	// created/deleted asset indices in memory.
-	keys := make([]basics.CreatableIndex, 0, len(au.creatables))
-	for cidx, delta := range au.creatables {
-		if delta.ctype != ctype {
-			continue
+	for {
+		currentDbRound := au.dbRound
+		currentDeltaLen := len(au.deltas)
+		// Sort indices for creatables that have been created/deleted.
If this + // turns out to be too inefficient, we could keep around a heap of + // created/deleted asset indices in memory. + keys := make([]basics.CreatableIndex, 0, len(au.creatables)) + for cidx, delta := range au.creatables { + if delta.ctype != ctype { + continue + } + if cidx <= maxCreatableIdx { + keys = append(keys, cidx) + } } - if cidx <= maxCreatableIdx { - keys = append(keys, cidx) + sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] }) + + // Check for creatables that haven't been synced to disk yet. + unsyncedCreatables := make([]basics.CreatableLocator, 0, len(keys)) + deletedCreatables := make(map[basics.CreatableIndex]bool, len(keys)) + for _, cidx := range keys { + delta := au.creatables[cidx] + if delta.created { + // Created but only exists in memory + unsyncedCreatables = append(unsyncedCreatables, basics.CreatableLocator{ + Type: delta.ctype, + Index: cidx, + Creator: delta.creator, + }) + } else { + // Mark deleted creatables for exclusion from the results set + deletedCreatables[cidx] = true + } } - } - sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] }) - // Check for creatables that haven't been synced to disk yet. 
- var unsyncedCreatables []basics.CreatableLocator - deletedCreatables := make(map[basics.CreatableIndex]bool) - for _, cidx := range keys { - delta := au.creatables[cidx] - if delta.created { - // Created but only exists in memory - unsyncedCreatables = append(unsyncedCreatables, basics.CreatableLocator{ - Type: delta.ctype, - Index: cidx, - Creator: delta.creator, - }) - } else { - // Mark deleted creatables for exclusion from the results set - deletedCreatables[cidx] = true + au.accountsMu.RUnlock() + + // Check in-memory created creatables, which will always be newer than anything + // in the database + if uint64(len(unsyncedCreatables)) >= maxResults { + return unsyncedCreatables[:maxResults], nil } - } + res := unsyncedCreatables + + // Fetch up to maxResults - len(res) + len(deletedCreatables) from the database, + // so we have enough extras in case creatables were deleted + numToFetch := maxResults - uint64(len(res)) + uint64(len(deletedCreatables)) + dbResults, dbRound, err := au.accountsq.listCreatables(maxCreatableIdx, numToFetch, ctype) + if err != nil { + return nil, err + } + + if dbRound == currentDbRound { + // Now we merge the database results with the in-memory results + for _, loc := range dbResults { + // Check if we have enough results + if uint64(len(res)) == maxResults { + return res, nil + } + + // Creatable was deleted + if _, ok := deletedCreatables[loc.Index]; ok { + continue + } - // Check in-memory created creatables, which will always be newer than anything - // in the database - var res []basics.CreatableLocator - for _, loc := range unsyncedCreatables { - if uint64(len(res)) == maxResults { + // We're OK to include this result + res = append(res, loc) + } return res, nil } - res = append(res, loc) + if dbRound < currentDbRound { + au.log.Errorf("listCreatables: database round %d is behind in-memory round %d", dbRound, currentDbRound) + return []basics.CreatableLocator{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: 
currentDbRound} + } + au.accountsMu.RLock() + for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) { + au.accountsReadCond.Wait() + } } +} - // Fetch up to maxResults - len(res) + len(deletedCreatables) from the database, - // so we have enough extras in case creatables were deleted - numToFetch := maxResults - uint64(len(res)) + uint64(len(deletedCreatables)) - dbResults, err := au.accountsq.listCreatables(maxCreatableIdx, numToFetch, ctype) - if err != nil { - return nil, err - } +// onlineTop returns the top n online accounts, sorted by their normalized +// balance and address, whose voting keys are valid in voteRnd. See the +// normalization description in AccountData.NormalizedOnlineBalance(). +func (au *accountUpdates) onlineTop(rnd basics.Round, voteRnd basics.Round, n uint64) ([]*onlineAccount, error) { + proto := au.ledger.GenesisProto() + au.accountsMu.RLock() + for { + currentDbRound := au.dbRound + currentDeltaLen := len(au.deltas) + offset, err := au.roundOffset(rnd) + if err != nil { + au.accountsMu.RUnlock() + return nil, err + } + + // Determine how many accounts have been modified in-memory, + // so that we obtain enough top accounts from disk (accountdb). + // If the *onlineAccount is nil, that means the account is offline + // as of the most recent change to that account, or its vote key + // is not valid in voteRnd. Otherwise, the *onlineAccount is the + // representation of the most recent state of the account, and it + // is online and can vote in voteRnd. 
+ modifiedAccounts := make(map[basics.Address]*onlineAccount) + for o := uint64(0); o < offset; o++ { + for addr, d := range au.deltas[o] { + if d.new.Status != basics.Online { + modifiedAccounts[addr] = nil + continue + } - // Now we merge the database results with the in-memory results - for _, loc := range dbResults { - // Check if we have enough results - if uint64(len(res)) == maxResults { - return res, nil + if !(d.new.VoteFirstValid <= voteRnd && voteRnd <= d.new.VoteLastValid) { + modifiedAccounts[addr] = nil + continue + } + + modifiedAccounts[addr] = accountDataToOnline(addr, &d.new, proto) + } } - // Creatable was deleted - if _, ok := deletedCreatables[loc.Index]; ok { + au.accountsMu.RUnlock() + + // Build up a set of candidate accounts. Start by loading the + // top N + len(modifiedAccounts) accounts from disk (accountdb). + // This ensures that, even if the worst case if all in-memory + // changes are deleting the top accounts in accountdb, we still + // will have top N left. + // + // Keep asking for more accounts until we get the desired number, + // or there are no more accounts left. 
+ candidates := make(map[basics.Address]*onlineAccount) + batchOffset := uint64(0) + batchSize := uint64(1024) + var dbRound basics.Round + for uint64(len(candidates)) < n+uint64(len(modifiedAccounts)) { + var accts map[basics.Address]*onlineAccount + start := time.Now() + ledgerAccountsonlinetopCount.Inc(nil) + err = au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + accts, err = accountsOnlineTop(tx, batchOffset, batchSize, proto) + if err != nil { + return + } + dbRound, _, err = accountsRound(tx) + return + }) + ledgerAccountsonlinetopMicros.AddMicrosecondsSince(start, nil) + if err != nil { + return nil, err + } + + if dbRound != currentDbRound { + break + } + + for addr, data := range accts { + if !(data.VoteFirstValid <= voteRnd && voteRnd <= data.VoteLastValid) { + continue + } + candidates[addr] = data + } + + // If we got fewer than batchSize accounts, there are no + // more accounts to look at. + if uint64(len(accts)) < batchSize { + break + } + + batchOffset += batchSize + } + if dbRound != currentDbRound && dbRound != basics.Round(0) { + // database round doesn't match the last au.dbRound we sampled. + au.accountsMu.RLock() + for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) { + au.accountsReadCond.Wait() + } continue } - // We're OK to include this result - res = append(res, loc) - } + // Now update the candidates based on the in-memory deltas. + for addr, oa := range modifiedAccounts { + if oa == nil { + delete(candidates, addr) + } else { + candidates[addr] = oa + } + } + + // Get the top N accounts from the candidate set, by inserting all of + // the accounts into a heap and then pulling out N elements from the + // heap. 
+ topHeap := &onlineTopHeap{ + accts: nil, + } + + for _, data := range candidates { + heap.Push(topHeap, data) + } - return res, nil + var res []*onlineAccount + for topHeap.Len() > 0 && uint64(len(res)) < n { + acct := heap.Pop(topHeap).(*onlineAccount) + res = append(res, acct) + } + + return res, nil + } } // GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database. @@ -409,16 +609,14 @@ func (au *accountUpdates) GetLastCatchpointLabel() string { // GetCreatorForRound returns the creator for a given asset/app index at a given round func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) { - au.accountsMu.RLock() - defer au.accountsMu.RUnlock() - return au.getCreatorForRoundImpl(rnd, cidx, ctype) + return au.getCreatorForRoundImpl(rnd, cidx, ctype, true /* take the lock */) } -// committedUpTo enqueues commiting the balances for round committedRound-lookback. -// The defered committing is done so that we could calculate the historical balances lookback rounds back. -// Since we don't want to hold off the tracker's mutex for too long, we'll defer the database persistance of this -// operation to a syncer goroutine. The one caviat is that when storing a catchpoint round, we would want to -// wait until the catchpoint creation is done, so that the persistance of the catchpoint file would have an +// committedUpTo enqueues committing the balances for round committedRound-lookback. +// The deferred committing is done so that we could calculate the historical balances lookback rounds back. +// Since we don't want to hold off the tracker's mutex for too long, we'll defer the database persistence of this +// operation to a syncer goroutine. 
The one caveat is that when storing a catchpoint round, we would want to +// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an // uninterrupted view of the balances at a given point of time. func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound basics.Round) { var isCatchpointRound, hasMultipleIntermediateCatchpoint bool @@ -490,6 +688,8 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b return } + newBase = au.voters.lowestRound(newBase) + offset = uint64(newBase - au.dbRound) // check to see if this is a catchpoint round @@ -500,7 +700,7 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b // If we recently flushed, wait to aggregate some more blocks. // ( unless we're creating a catchpoint, in which case we want to flush it right away - // so that all the instances of the catchpoint would contain the exacy same data ) + // so that all the instances of the catchpoint would contain exactly the same data ) flushTime := time.Now() if !flushTime.After(au.lastFlushTime.Add(balancesFlushInterval)) && !isCatchpointRound && pendingDeltas < pendingDeltasFlushThreshold { return au.dbRound @@ -527,8 +727,9 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b // which invokes the internal implementation after taking the lock. 
func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta StateDelta) { au.accountsMu.Lock() - defer au.accountsMu.Unlock() au.newBlockImpl(blk, delta) + au.accountsMu.Unlock() + au.accountsReadCond.Broadcast() } // Totals returns the totals for a given round @@ -541,10 +742,13 @@ func (au *accountUpdates) Totals(rnd basics.Round) (totals AccountTotals, err er // GetCatchpointStream returns an io.Reader to the catchpoint file associated with the provided round func (au *accountUpdates) GetCatchpointStream(round basics.Round) (io.ReadCloser, error) { dbFileName := "" + start := time.Now() + ledgerGetcatchpointCount.Inc(nil) err := au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { dbFileName, _, _, err = getCatchpoint(tx, round) return }) + ledgerGetcatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil && err != sql.ErrNoRows { // we had some sql error. return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err) @@ -614,6 +818,11 @@ func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest { return aul.au.ledger.GenesisHash() } +// CompactCertVoters returns the top online accounts at round rnd. +func (aul *accountUpdatesLedgerEvaluator) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) { + return aul.au.voters.getVoters(rnd) +} + // BlockHdr returns the header of the given round. When the evaluator is running, it's only referring to the previous header, which is what we // are providing here. Any attempt to access a different header would get denied. func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { @@ -623,30 +832,25 @@ func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping. 
return bookkeeping.BlockHeader{}, ErrNoEntry{} } -// Lookup returns the account balance for a given address at a given round -func (aul *accountUpdatesLedgerEvaluator) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) { - return aul.au.lookupImpl(rnd, addr, true) -} - // Totals returns the totals for a given round func (aul *accountUpdatesLedgerEvaluator) Totals(rnd basics.Round) (AccountTotals, error) { return aul.au.totalsImpl(rnd) } -// isDup return whether a transaction is a duplicate one. It's not needed by the accountUpdatesLedgerEvaluator and implemeted as a stub. +// isDup return whether a transaction is a duplicate one. It's not needed by the accountUpdatesLedgerEvaluator and implemented as a stub. func (aul *accountUpdatesLedgerEvaluator) isDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, txlease) (bool, error) { // this is a non-issue since this call will never be made on non-validating evaluation - return false, fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initilization ") + return false, fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization ") } // LookupWithoutRewards returns the account balance for a given address at a given round, without the reward -func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, error) { - return aul.au.lookupImpl(rnd, addr, false) +func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) { + return aul.au.lookupWithoutRewardsImpl(rnd, addr, false /*don't sync*/) } // GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, 
ok bool, err error) { - return aul.au.getCreatorForRoundImpl(rnd, cidx, ctype) + return aul.au.getCreatorForRoundImpl(rnd, cidx, ctype, false /* don't sync */) } // totalsImpl returns the totals for a given round @@ -701,7 +905,7 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, } // initializeFromDisk performs the atomic operation of loading the accounts data information from disk -// and preparing the accountUpdates for operation, including initlizating the commitSyncer goroutine. +// and preparing the accountUpdates for operation, including initializing the commitSyncer goroutine. func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRound, lastestBlockRound basics.Round, err error) { au.dbs = l.trackerDB() au.log = l.trackerLog() @@ -713,6 +917,8 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo } lastestBlockRound = l.Latest() + start := time.Now() + ledgerAccountsinitCount.Inc(nil) err = au.dbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error au.dbRound, err0 = au.accountsInitialize(ctx, tx) @@ -740,6 +946,7 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo au.roundTotals = []AccountTotals{totals} return nil }) + ledgerAccountsinitMicros.AddMicrosecondsSince(start, nil) if err != nil { return } @@ -797,7 +1004,7 @@ func accountHashBuilder(addr basics.Address, accountData basics.AccountData, enc return hash[:] } -// accountsInitialize initializes the accounts DB if needed and return currrent account round. +// accountsInitialize initializes the accounts DB if needed and return current account round. // as part of the initialization, it tests the current database schema version, and perform upgrade // procedures to bring it up to the database schema supported by the binary. 
func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (basics.Round, error) { @@ -808,7 +1015,7 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b } // if database version is greater than supported by current binary, write a warning. This would keep the existing - // fallback behaviour where we could use an older binary iff the schema happen to be backward compatible. + // fallback behavior where we could use an older binary iff the schema happen to be backward compatible. if dbVersion > accountDBVersion { au.log.Warnf("accountsInitialize database schema version is %d, but algod supports only %d", dbVersion, accountDBVersion) } @@ -838,6 +1045,12 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err) return 0, err } + case 3: + dbVersion, err = au.upgradeDatabaseSchema3(ctx, tx) + if err != nil { + au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err) + return 0, err + } default: return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion) } @@ -869,13 +1082,14 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b if err != nil { return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err) } - trie, err := merkletrie.MakeTrie(committer, trieCachedNodesCount) + + trie, err := merkletrie.MakeTrie(committer, trieMemoryConfig) if err != nil { return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err) } // we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie. 
- // we can figure this out by examinine the hash of the root: + // we can figure this out by examining the hash of the root: rootHash, err := trie.RootHash() if err != nil { return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err) @@ -943,7 +1157,7 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err) } - // we've just updated the markle trie, update the hashRound to reflect that. + // we've just updated the merkle trie, update the hashRound to reflect that. err = updateAccountsRound(tx, rnd, rnd) if err != nil { return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err) @@ -990,15 +1204,15 @@ func (au *accountUpdates) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx // upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2 // -// The schema updated to verison 2 intended to ensure that the encoding of all the accounts data is +// The schema updated to version 2 intended to ensure that the encoding of all the accounts data is // both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack. // the upgraded messagepack was decoding the account data correctly, but would have different // encoding compared to it's predecessor. As a result, some of the account data that was previously stored // would have different encoded representation than the one on disk. -// To address this, this startup proceduce would attempt to scan all the accounts data. for each account data, we would +// To address this, this startup procedure would attempt to scan all the accounts data. for each account data, we would // see if it's encoding aligns with the current messagepack encoder. If it doesn't we would update it's encoding. 
// then, depending if we found any such account data, we would reset the merkle trie and stored catchpoints. -// once the upgrade is complete, the accountsInitialize would (if needed) rebuild the merke trie using the new +// once the upgrade is complete, the accountsInitialize would (if needed) rebuild the merkle trie using the new // encoded accounts. // // This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performing @@ -1074,6 +1288,22 @@ func (au *accountUpdates) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx return 3, nil } +// upgradeDatabaseSchema3 upgrades the database schema from version 3 to version 4, +// adding the normalizedonlinebalance column to the accountbase table. +func (au *accountUpdates) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) { + err = accountsAddNormalizedBalance(tx, au.ledger.GenesisProto()) + if err != nil { + return 0, err + } + + // update version + _, err = db.SetUserVersion(ctx, tx, 4) + if err != nil { + return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 3 to 4: %v", err) + } + return 4, nil +} + // deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk. // once all the files have been deleted, it would go ahead and remove the entries from the table. func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) { @@ -1144,7 +1374,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltasRound []map[basic } if accumulatedChanges >= trieAccumulatedChangesFlush { accumulatedChanges = 0 - err = au.balancesTrie.Commit() + _, err = au.balancesTrie.Commit() if err != nil { return } @@ -1152,7 +1382,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltasRound []map[basic } // write it all to disk. 
if accumulatedChanges > 0 { - err = au.balancesTrie.Commit() + _, err = au.balancesTrie.Commit() } return } @@ -1210,87 +1440,247 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta StateDelta) } au.roundTotals = append(au.roundTotals, newTotals) -} - -// lookupImpl returns the accound data for a given address at a given round. The withRewards indicates whether the -// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards, -// even while it could return the AccoutData which represent the "rewarded" account data. -func (au *accountUpdates) lookupImpl(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) { - offset, err := au.roundOffset(rnd) - if err != nil { - return - } - offsetForRewards := offset + au.voters.newBlock(blk.BlockHeader) +} +// lookupWithRewardsImpl returns the account data for a given address at a given round. +// The rewards are added to the AccountData before returning. Note that the function doesn't update the account with the rewards, +// even while it does return the AccoutData which represent the "rewarded" account data. +func (au *accountUpdates) lookupWithRewardsImpl(rnd basics.Round, addr basics.Address) (data basics.AccountData, err error) { + au.accountsMu.RLock() + needUnlock := true defer func() { - if withRewards { - totals := au.roundTotals[offsetForRewards] - proto := au.protos[offsetForRewards] - data = data.WithUpdatedRewards(proto, totals.RewardsLevel) + if needUnlock { + au.accountsMu.RUnlock() } }() + var offset uint64 + var rewardsProto config.ConsensusParams + var rewardsLevel uint64 + withRewards := true + for { + currentDbRound := au.dbRound + currentDeltaLen := len(au.deltas) + offset, err = au.roundOffset(rnd) + if err != nil { + return + } - // Check if this is the most recent round, in which case, we can - // use a cache of the most recent account state. 
- if offset == uint64(len(au.deltas)) { - macct, ok := au.accounts[addr] - if ok { - return macct.data, nil + rewardsProto = au.protos[offset] + rewardsLevel = au.roundTotals[offset].RewardsLevel + + // we're testing the withRewards here and setting the defer function only once, and only if withRewards is true. + // we want to make this defer only after setting the above rewardsProto/rewardsLevel. + if withRewards { + defer func() { + if err == nil { + data = data.WithUpdatedRewards(rewardsProto, rewardsLevel) + } + }() + withRewards = false } - } else { - // Check if the account has been updated recently. Traverse the deltas - // backwards to ensure that later updates take priority if present. - for offset > 0 { - offset-- - d, ok := au.deltas[offset][addr] - if ok { - return d.new, nil + + // check if we've had this address modified in the past rounds. ( i.e. if it's in the deltas ) + macct, indeltas := au.accounts[addr] + if indeltas { + // Check if this is the most recent round, in which case, we can + // use a cache of the most recent account state. + if offset == uint64(len(au.deltas)) { + return macct.data, nil + } + // the account appears in the deltas, but we don't know if it appears in the + // delta range of [0..offset], so we'll need to check : + // Traverse the deltas backwards to ensure that later updates take + // priority if present. + for offset > 0 { + offset-- + d, ok := au.deltas[offset][addr] + if ok { + return d.new, nil + } } } - } - // No updates of this account in the in-memory deltas; use on-disk DB. - // The check in roundOffset() made sure the round is exactly the one - // present in the on-disk DB. As an optimization, we avoid creating - // a separate transaction here, and directly use a prepared SQL query - // against the database. - return au.accountsq.lookup(addr) + au.accountsMu.RUnlock() + needUnlock = false + + var dbRound basics.Round + // No updates of this account in the in-memory deltas; use on-disk DB. 
+ // The check in roundOffset() made sure the round is exactly the one + // present in the on-disk DB. As an optimization, we avoid creating + // a separate transaction here, and directly use a prepared SQL query + // against the database. + data, dbRound, err = au.accountsq.lookup(addr) + if dbRound == currentDbRound { + return data, err + } + + if dbRound < currentDbRound { + au.log.Errorf("accountUpdates.lookupWithRewardsImpl: database round %d is behind in-memory round %d", dbRound, currentDbRound) + return basics.AccountData{}, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound} + } + au.accountsMu.RLock() + needUnlock = true + for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) { + au.accountsReadCond.Wait() + } + } } -// getCreatorForRoundImpl returns the asset/app creator for a given asset/app index at a given round -func (au *accountUpdates) getCreatorForRoundImpl(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) { - offset, err := au.roundOffset(rnd) - if err != nil { - return basics.Address{}, false, err +// lookupWithoutRewardsImpl returns the account data for a given address at a given round. 
+func (au *accountUpdates) lookupWithoutRewardsImpl(rnd basics.Round, addr basics.Address, syncronized bool) (data basics.AccountData, validThrough basics.Round, err error) { + needUnlock := false + if syncronized { + au.accountsMu.RLock() + needUnlock = true } + defer func() { + if needUnlock { + au.accountsMu.RUnlock() + } + }() + var offset uint64 + for { + currentDbRound := au.dbRound + currentDeltaLen := len(au.deltas) + offset, err = au.roundOffset(rnd) + if err != nil { + return + } - // If this is the most recent round, au.creatables has will have the latest - // state and we can skip scanning backwards over creatableDeltas - if offset == uint64(len(au.deltas)) { - // Check if we already have the asset/creator in cache - creatableDelta, ok := au.creatables[cidx] - if ok { - if creatableDelta.created && creatableDelta.ctype == ctype { - return creatableDelta.creator, true, nil + // check if we've had this address modified in the past rounds. ( i.e. if it's in the deltas ) + macct, indeltas := au.accounts[addr] + if indeltas { + // Check if this is the most recent round, in which case, we can + // use a cache of the most recent account state. + if offset == uint64(len(au.deltas)) { + return macct.data, rnd, nil + } + // the account appears in the deltas, but we don't know if it appears in the + // delta range of [0..offset], so we'll need to check : + // Traverse the deltas backwards to ensure that later updates take + // priority if present. + for offset > 0 { + offset-- + d, ok := au.deltas[offset][addr] + if ok { + // the returned validThrough here is not optimal, but it still correct. We could get a more accurate value by scanning + // the deltas forward, but this would be time consuming loop, which might not pay off. + return d.new, rnd, nil + } + } + } else { + // we know that the account in not in the deltas - so there is no point in scanning it. 
+ // we've going to fall back to search in the database, but before doing so, we should + // update the rnd so that it would point to the end of the known delta range. + // ( that would give us the best validity range ) + rnd = currentDbRound + basics.Round(currentDeltaLen) + } + + if syncronized { + au.accountsMu.RUnlock() + needUnlock = false + } + var dbRound basics.Round + // No updates of this account in the in-memory deltas; use on-disk DB. + // The check in roundOffset() made sure the round is exactly the one + // present in the on-disk DB. As an optimization, we avoid creating + // a separate transaction here, and directly use a prepared SQL query + // against the database. + data, dbRound, err = au.accountsq.lookup(addr) + if dbRound == currentDbRound { + return data, rnd, err + } + if syncronized { + if dbRound < currentDbRound { + au.log.Errorf("accountUpdates.lookupWithoutRewardsImpl: database round %d is behind in-memory round %d", dbRound, currentDbRound) + return basics.AccountData{}, basics.Round(0), &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound} } - return basics.Address{}, false, nil + au.accountsMu.RLock() + needUnlock = true + for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) { + au.accountsReadCond.Wait() + } + } else { + // in non-sync mode, we don't wait since we already assume that we're syncronized. 
+ au.log.Errorf("accountUpdates.lookupWithoutRewardsImpl: database round %d mismatching in-memory round %d", dbRound, currentDbRound) + return basics.AccountData{}, basics.Round(0), &MismatchingDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound} } - } else { - for offset > 0 { - offset-- - creatableDelta, ok := au.creatableDeltas[offset][cidx] + } +} + +// getCreatorForRoundImpl returns the asset/app creator for a given asset/app index at a given round +func (au *accountUpdates) getCreatorForRoundImpl(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType, syncronized bool) (creator basics.Address, ok bool, err error) { + unlock := false + if syncronized { + au.accountsMu.RLock() + unlock = true + } + defer func() { + if unlock { + au.accountsMu.RUnlock() + } + }() + var dbRound basics.Round + var offset uint64 + for { + currentDbRound := au.dbRound + currentDeltaLen := len(au.deltas) + offset, err = au.roundOffset(rnd) + if err != nil { + return basics.Address{}, false, err + } + + // If this is the most recent round, au.creatables has the latest + // state and we can skip scanning backwards over creatableDeltas + if offset == uint64(len(au.deltas)) { + // Check if we already have the asset/creator in cache + creatableDelta, ok := au.creatables[cidx] if ok { if creatableDelta.created && creatableDelta.ctype == ctype { return creatableDelta.creator, true, nil } return basics.Address{}, false, nil } + } else { + for offset > 0 { + offset-- + creatableDelta, ok := au.creatableDeltas[offset][cidx] + if ok { + if creatableDelta.created && creatableDelta.ctype == ctype { + return creatableDelta.creator, true, nil + } + return basics.Address{}, false, nil + } + } } - } - // Check the database - return au.accountsq.lookupCreator(cidx, ctype) + if syncronized { + au.accountsMu.RUnlock() + unlock = false + } + // Check the database + creator, ok, dbRound, err = au.accountsq.lookupCreator(cidx, ctype) + + if dbRound == currentDbRound { + 
return + } + if syncronized { + if dbRound < currentDbRound { + au.log.Errorf("accountUpdates.getCreatorForRoundImpl: database round %d is behind in-memory round %d", dbRound, currentDbRound) + return basics.Address{}, false, &StaleDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound} + } + au.accountsMu.RLock() + unlock = true + for currentDbRound >= au.dbRound && currentDeltaLen == len(au.deltas) { + au.accountsReadCond.Wait() + } + } else { + au.log.Errorf("accountUpdates.getCreatorForRoundImpl: database round %d mismatching in-memory round %d", dbRound, currentDbRound) + return basics.Address{}, false, &MismatchingDatabaseRoundError{databaseRound: dbRound, memoryRound: currentDbRound} + } + } } // accountsCreateCatchpointLabel creates a catchpoint label and write it. @@ -1304,7 +1694,10 @@ func (au *accountUpdates) accountsCreateCatchpointLabel(committedRound basics.Ro // roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock would be taken. func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) { if rnd < au.dbRound { - err = fmt.Errorf("round %d before dbRound %d", rnd, au.dbRound) + err = &RoundOffsetError{ + round: rnd, + dbRound: au.dbRound, + } return } @@ -1317,7 +1710,7 @@ func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err erro return off, nil } -// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeue deferedCommits and +// commitSyncer is the syncer go-routine function which perform the database updates. Internally, it dequeues deferedCommits and // send the tasks to commitRound for completing the operation. 
func (au *accountUpdates) commitSyncer(deferedCommits chan deferedCommit) { defer close(au.commitSyncerClosed) @@ -1364,7 +1757,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb return } - // adjust the offset according to what happend meanwhile.. + // adjust the offset according to what happened meanwhile.. offset -= uint64(au.dbRound - dbRound) dbRound = au.dbRound @@ -1397,7 +1790,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb au.accountsMu.RUnlock() // in committedUpTo, we expect that this function we close the catchpointWriting when - // it's on a catchpoint round and it's an archival ledger. Doing this in a defered function + // it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function // here would prevent us from "forgetting" to close that channel later on. defer func() { if isCatchpointRound && au.archivalLedger { @@ -1418,7 +1811,11 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb beforeUpdatingBalancesTime := time.Now() var trieBalancesHash crypto.Digest - err := au.dbs.wdb.AtomicCommitWriteLock(func(ctx context.Context, tx *sql.Tx) (err error) { + genesisProto := au.ledger.GenesisProto() + + start := time.Now() + ledgerCommitroundCount.Inc(nil) + err := au.dbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { treeTargetRound := basics.Round(0) if au.catchpointInterval > 0 { mc, err0 := makeMerkleCommitter(tx, false) @@ -1426,7 +1823,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb return err0 } if au.balancesTrie == nil { - trie, err := merkletrie.MakeTrie(mc, trieCachedNodesCount) + trie, err := merkletrie.MakeTrie(mc, trieMemoryConfig) if err != nil { au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err) return err @@ -1438,7 +1835,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb treeTargetRound = dbRound 
+ basics.Round(offset) } for i := uint64(0); i < offset; i++ { - err = accountsNewRound(tx, deltas[i], creatableDeltas[i]) + err = accountsNewRound(tx, deltas[i], creatableDeltas[i], genesisProto) if err != nil { return err } @@ -1465,8 +1862,8 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb } } return nil - }, &au.accountsMu) - + }) + ledgerCommitroundMicros.AddMicrosecondsSince(start, nil) if err != nil { au.balancesTrie = nil au.log.Warnf("unable to advance account snapshot: %v", err) @@ -1491,6 +1888,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb } updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime) + au.accountsMu.Lock() // Drop reference counts to modified accounts, and evict them // from in-memory cache when no references remain. for addr, cnt := range flushcount { @@ -1539,6 +1937,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb au.lastFlushTime = flushTime au.accountsMu.Unlock() + au.accountsReadCond.Broadcast() if isCatchpointRound && au.archivalLedger && catchpointLabel != "" { // generate the catchpoint file. This need to be done inline so that it will block any new accounts that from being written. 
@@ -1595,6 +1994,8 @@ func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label } var catchpointWriter *catchpointWriter + start := time.Now() + ledgerGeneratecatchpointCount.Inc(nil) err = au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { catchpointWriter = makeCatchpointWriter(au.ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label) for more { @@ -1639,6 +2040,7 @@ func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label } return }) + ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { au.log.Warnf("accountUpdates: generateCatchpoint: %v", err) @@ -1681,7 +2083,7 @@ func catchpointRoundToPath(rnd basics.Round) string { } // saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round. -// after a successfull insert operation to the database, it would delete up to 2 old entries, as needed. +// after a successful insert operation to the database, it would delete up to 2 old entries, as needed. // deleting 2 entries while inserting single entry allow us to adjust the size of the backing storage and have the // database and storage realign. 
func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) { @@ -1749,6 +2151,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) { } }() + ledgerVacuumCount.Inc(nil) vacuumStats, err := au.dbs.wdb.Vacuum(ctx) close(vacuumExitCh) vacuumLoggingAbort.Wait() @@ -1758,6 +2161,7 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) { return err } vacuumElapsedTime := time.Now().Sub(startTime) + ledgerVacuumMicros.AddUint64(uint64(vacuumElapsedTime.Microseconds()), nil) au.log.Infof("Vacuuming accounts database completed within %v, reducing number of pages from %d to %d and size from %d to %d", vacuumElapsedTime, vacuumStats.PagesBefore, vacuumStats.PagesAfter, vacuumStats.SizeBefore, vacuumStats.SizeAfter) @@ -1772,3 +2176,16 @@ func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) { au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.BalancesAccountVacuumEvent, vacuumTelemetryStats) return } + +var ledgerAccountsonlinetopCount = metrics.NewCounter("ledger_accountsonlinetop_count", "calls") +var ledgerAccountsonlinetopMicros = metrics.NewCounter("ledger_accountsonlinetop_micros", "µs spent") +var ledgerGetcatchpointCount = metrics.NewCounter("ledger_getcatchpoint_count", "calls") +var ledgerGetcatchpointMicros = metrics.NewCounter("ledger_getcatchpoint_micros", "µs spent") +var ledgerAccountsinitCount = metrics.NewCounter("ledger_accountsinit_count", "calls") +var ledgerAccountsinitMicros = metrics.NewCounter("ledger_accountsinit_micros", "µs spent") +var ledgerCommitroundCount = metrics.NewCounter("ledger_commitround_count", "calls") +var ledgerCommitroundMicros = metrics.NewCounter("ledger_commitround_micros", "µs spent") +var ledgerGeneratecatchpointCount = metrics.NewCounter("ledger_generatecatchpoint_count", "calls") +var ledgerGeneratecatchpointMicros = metrics.NewCounter("ledger_generatecatchpoint_micros", "µs 
spent") +var ledgerVacuumCount = metrics.NewCounter("ledger_vacuum_count", "calls") +var ledgerVacuumMicros = metrics.NewCounter("ledger_vacuum_micros", "µs spent") diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index ff198f30f1..efba00a1a0 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -20,8 +20,11 @@ import ( "bytes" "context" "database/sql" + "errors" "fmt" + "io/ioutil" "os" + "path/filepath" "runtime" "sync" "testing" @@ -110,6 +113,13 @@ func (ml *mockLedgerForTracker) GenesisHash() crypto.Digest { return crypto.Digest{} } +func (ml *mockLedgerForTracker) GenesisProto() config.ConsensusParams { + if len(ml.blocks) > 0 { + return config.Consensus[ml.blocks[0].block.CurrentProtocol] + } + return config.Consensus[protocol.ConsensusCurrentVersion] +} + // this function used to be in acctupdates.go, but we were never using it for production purposes. This // function has a conceptual flaw in that it attempts to load the entire balances into memory. This might // not work if we have large number of balances. 
On these unit testing, however, it's not the case, and it's @@ -147,15 +157,18 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates _, err := au.Totals(latest + 1) require.Error(t, err) - _, err = au.Lookup(latest+1, randomAddress(), false) + var validThrough basics.Round + _, validThrough, err = au.LookupWithoutRewards(latest+1, randomAddress()) require.Error(t, err) + require.Equal(t, basics.Round(0), validThrough) if base > 0 { _, err := au.Totals(base - 1) require.Error(t, err) - _, err = au.Lookup(base-1, randomAddress(), false) + _, validThrough, err = au.LookupWithoutRewards(base-1, randomAddress()) require.Error(t, err) + require.Equal(t, basics.Round(0), validThrough) } roundsRanges := []struct { @@ -180,9 +193,10 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates var totalOnline, totalOffline, totalNotPart uint64 for addr, data := range accts[rnd] { - d, err := au.Lookup(rnd, addr, false) + d, validThrough, err := au.LookupWithoutRewards(rnd, addr) require.NoError(t, err) require.Equal(t, d, data) + require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd)) rewardsDelta := rewards[rnd] - d.RewardsBase switch d.Status { @@ -211,8 +225,9 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, lates require.Equal(t, totals.Participating().Raw, totalOnline+totalOffline) require.Equal(t, totals.All().Raw, totalOnline+totalOffline+totalNotPart) - d, err := au.Lookup(rnd, randomAddress(), false) + d, validThrough, err := au.LookupWithoutRewards(rnd, randomAddress()) require.NoError(t, err) + require.GreaterOrEqualf(t, uint64(validThrough), uint64(rnd), fmt.Sprintf("validThrough :%v\nrnd :%v\n", validThrough, rnd)) require.Equal(t, d, basics.AccountData{}) } } @@ -702,15 +717,17 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { updates := make(map[basics.Address]accountDelta) moneyAccountsExpectedAmounts = 
append(moneyAccountsExpectedAmounts, make([]uint64, len(moneyAccounts))) toAccount := moneyAccounts[0] - toAccountDataOld, err := au.Lookup(i-1, toAccount, false) + toAccountDataOld, validThrough, err := au.LookupWithoutRewards(i-1, toAccount) require.NoError(t, err) + require.Equal(t, i-1, validThrough) toAccountDataNew := toAccountDataOld for j := 1; j < len(moneyAccounts); j++ { fromAccount := moneyAccounts[j] - fromAccountDataOld, err := au.Lookup(i-1, fromAccount, false) + fromAccountDataOld, validThrough, err := au.LookupWithoutRewards(i-1, fromAccount) require.NoError(t, err) + require.Equal(t, i-1, validThrough) require.Equalf(t, moneyAccountsExpectedAmounts[i-1][j], fromAccountDataOld.MicroAlgos.Raw, "Account index : %d\nRound number : %d", j, i) fromAccountDataNew := fromAccountDataOld @@ -738,20 +755,20 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { if checkRound < uint64(testback) { continue } - acct, err := au.Lookup(basics.Round(checkRound-uint64(testback)), moneyAccounts[j], false) + acct, validThrough, err := au.LookupWithoutRewards(basics.Round(checkRound-uint64(testback)), moneyAccounts[j]) // we might get an error like "round 2 before dbRound 5", which is the success case, so we'll ignore it. - if err != nil { + roundOffsetError := &RoundOffsetError{} + if errors.As(err, &roundOffsetError) { + require.Equal(t, basics.Round(0), validThrough) // verify it's the expected error and not anything else. - var r1, r2 int - n, err2 := fmt.Sscanf(err.Error(), "round %d before dbRound %d", &r1, &r2) - require.NoErrorf(t, err2, "unable to parse : %v", err) - require.Equal(t, 2, n) - require.Less(t, r1, r2) + require.Less(t, int64(roundOffsetError.round), int64(roundOffsetError.dbRound)) if testback > 1 { testback-- } continue } + require.NoError(t, err) + require.GreaterOrEqual(t, int64(validThrough), int64(basics.Round(checkRound-uint64(testback)))) // if we received no error, we want to make sure the reported amount is correct. 
require.Equalf(t, moneyAccountsExpectedAmounts[checkRound-uint64(testback)][j], acct.MicroAlgos.Raw, "Account index : %d\nRound number : %d", j, checkRound) testback++ @@ -782,8 +799,9 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { au.waitAccountsWriting() for idx, addr := range moneyAccounts { - balance, err := au.Lookup(lastRound, addr, false) + balance, validThrough, err := au.LookupWithoutRewards(lastRound, addr) require.NoErrorf(t, err, "unable to retrieve balance for account idx %d %v", idx, addr) + require.Equal(t, lastRound, validThrough) if idx != 0 { require.Equalf(t, 100*1000000-roundCount*(roundCount-1)/2, int(balance.MicroAlgos.Raw), "account idx %d %v has the wrong balance", idx, addr) } else { @@ -978,6 +996,9 @@ func TestListCreatables(t *testing.T) { err = accountsInit(tx, accts, proto) require.NoError(t, err) + err = accountsAddNormalizedBalance(tx, proto) + require.NoError(t, err) + au := &accountUpdates{} au.accountsq, err = accountsDbInit(tx, tx) require.NoError(t, err) @@ -997,7 +1018,7 @@ func TestListCreatables(t *testing.T) { // ******* No deletes ******* // sync with the database var updates map[basics.Address]accountDelta - err = accountsNewRound(tx, updates, ctbsWithDeletes) + err = accountsNewRound(tx, updates, ctbsWithDeletes, proto) require.NoError(t, err) // nothing left in cache au.creatables = make(map[basics.CreatableIndex]modifiedCreatable) @@ -1013,10 +1034,96 @@ func TestListCreatables(t *testing.T) { // ******* Results are obtained from the database and from the cache ******* // ******* Deletes are in the database and in the cache ******* // sync with the database. This has deletes synced to the database. - err = accountsNewRound(tx, updates, au.creatables) + err = accountsNewRound(tx, updates, au.creatables, proto) require.NoError(t, err) // get new creatables in the cache. There will be deletes in the cache from the previous batch. 
au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs, expectedDbImage, numElementsPerSegement) listAndCompareComb(t, au, expectedDbImage) } + +func TestIsWritingCatchpointFile(t *testing.T) { + + au := &accountUpdates{} + + au.catchpointWriting = make(chan struct{}, 1) + ans := au.IsWritingCatchpointFile() + require.True(t, ans) + + close(au.catchpointWriting) + ans = au.IsWritingCatchpointFile() + require.False(t, ans) +} + +func TestGetCatchpointStream(t *testing.T) { + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + + ml := makeMockLedgerForTracker(t, true) + defer ml.close() + ml.blocks = randomInitChain(protocol.ConsensusCurrentVersion, 10) + + accts := []map[basics.Address]basics.AccountData{randomAccounts(20, true)} + au := &accountUpdates{} + conf := config.GetDefaultLocal() + conf.CatchpointInterval = 1 + au.initialize(conf, ".", proto, accts[0]) + defer au.close() + + err := au.loadFromDisk(ml) + require.NoError(t, err) + + filesToCreate := 4 + + temporaryDirectroy, err := ioutil.TempDir(os.TempDir(), "catchpoints") + require.NoError(t, err) + defer func() { + os.RemoveAll(temporaryDirectroy) + }() + catchpointsDirectory := filepath.Join(temporaryDirectroy, "catchpoints") + err = os.Mkdir(catchpointsDirectory, 0777) + require.NoError(t, err) + + au.dbDirectory = temporaryDirectroy + + // Create the catchpoint files with dummy data + for i := 0; i < filesToCreate; i++ { + fileName := filepath.Join("catchpoints", fmt.Sprintf("%d.catchpoint", i)) + data := []byte{byte(i), byte(i + 1), byte(i + 2)} + err = ioutil.WriteFile(filepath.Join(temporaryDirectroy, fileName), data, 0666) + require.NoError(t, err) + + // Store the catchpoint into the database + err := au.accountsq.storeCatchpoint(context.Background(), basics.Round(i), fileName, "", 0) + require.NoError(t, err) + } + + dataRead := make([]byte, 3) + var n int + + // File on disk, and database has the record + reader, err := au.GetCatchpointStream(basics.Round(1)) + n, err = 
reader.Read(dataRead) + require.NoError(t, err) + require.Equal(t, 3, n) + outData := []byte{1, 2, 3} + require.Equal(t, outData, dataRead) + + // File deleted, but record in the database + err = os.Remove(filepath.Join(temporaryDirectroy, "catchpoints", "2.catchpoint")) + reader, err = au.GetCatchpointStream(basics.Round(2)) + require.Equal(t, ErrNoEntry{}, err) + require.Nil(t, reader) + + // File on disk, but database lost the record + err = au.accountsq.storeCatchpoint(context.Background(), basics.Round(3), "", "", 0) + reader, err = au.GetCatchpointStream(basics.Round(3)) + n, err = reader.Read(dataRead) + require.NoError(t, err) + require.Equal(t, 3, n) + outData = []byte{3, 4, 5} + require.Equal(t, outData, dataRead) + + err = au.deleteStoredCatchpoints(context.Background(), au.accountsq) + require.NoError(t, err) +} diff --git a/ledger/archival_test.go b/ledger/archival_test.go index ddb32b4957..beb9d51d78 100644 --- a/ledger/archival_test.go +++ b/ledger/archival_test.go @@ -89,6 +89,10 @@ func (wl *wrappedLedger) GenesisHash() crypto.Digest { return wl.l.GenesisHash() } +func (wl *wrappedLedger) GenesisProto() config.ConsensusParams { + return wl.l.GenesisProto() +} + func getInitState() (genesisInitState InitState) { blk := bookkeeping.Block{} blk.CurrentProtocol = protocol.ConsensusCurrentVersion diff --git a/ledger/blockqueue.go b/ledger/blockqueue.go index 6b889ae7c9..2fc9f3f511 100644 --- a/ledger/blockqueue.go +++ b/ledger/blockqueue.go @@ -21,6 +21,7 @@ import ( "database/sql" "fmt" "sync" + "time" "github.com/algorand/go-deadlock" @@ -29,6 +30,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/metrics" ) type blockEntry struct { @@ -54,11 +56,14 @@ func bqInit(l *Ledger) (*blockQueue, error) { bq.l = l bq.running = true bq.closed = make(chan struct{}) + ledgerBlockqInitCount.Inc(nil) + start := 
time.Now() err := bq.l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error bq.lastCommitted, err0 = blockLatest(tx) return err0 }) + ledgerBlockqInitMicros.AddMicrosecondsSince(start, nil) if err != nil { return nil, err } @@ -101,6 +106,8 @@ func (bq *blockQueue) syncer() { workQ := bq.q bq.mu.Unlock() + start := time.Now() + ledgerSyncBlockputCount.Inc(nil) err := bq.l.blockDBs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { for _, e := range workQ { err0 := blockPut(tx, e.block, e.cert) @@ -110,6 +117,7 @@ func (bq *blockQueue) syncer() { } return nil }) + ledgerSyncBlockputMicros.AddMicrosecondsSince(start, nil) bq.mu.Lock() @@ -134,9 +142,12 @@ func (bq *blockQueue) syncer() { bq.mu.Unlock() minToSave := bq.l.notifyCommit(committed) + bfstart := time.Now() + ledgerSyncBlockforgetCount.Inc(nil) err = bq.l.blockDBs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { return blockForgetBefore(tx, minToSave) }) + ledgerSyncBlockforgetMicros.AddMicrosecondsSince(bfstart, nil) if err != nil { bq.l.log.Warnf("blockQueue.syncer: blockForgetBefore(%d): %v", minToSave, err) } @@ -245,11 +256,14 @@ func (bq *blockQueue) getBlock(r basics.Round) (blk bookkeeping.Block, err error return } + start := time.Now() + ledgerGetblockCount.Inc(nil) err = bq.l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error blk, err0 = blockGet(tx, r) return err0 }) + ledgerGetblockMicros.AddMicrosecondsSince(start, nil) err = updateErrNoEntry(err, lastCommitted, latest) return } @@ -264,11 +278,14 @@ func (bq *blockQueue) getBlockHdr(r basics.Round) (hdr bookkeeping.BlockHeader, return } + start := time.Now() + ledgerGetblockhdrCount.Inc(nil) err = bq.l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error hdr, err0 = blockGetHdr(tx, r) return err0 }) + ledgerGetblockhdrMicros.AddMicrosecondsSince(start, nil) err = updateErrNoEntry(err, lastCommitted, latest) return } @@ -287,11 +304,14 @@ func 
(bq *blockQueue) getEncodedBlockCert(r basics.Round) (blk []byte, cert []by return } + start := time.Now() + ledgerGeteblockcertCount.Inc(nil) err = bq.l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error blk, cert, err0 = blockGetEncodedCert(tx, r) return err0 }) + ledgerGeteblockcertMicros.AddMicrosecondsSince(start, nil) err = updateErrNoEntry(err, lastCommitted, latest) return } @@ -306,11 +326,29 @@ func (bq *blockQueue) getBlockCert(r basics.Round) (blk bookkeeping.Block, cert return } + start := time.Now() + ledgerGetblockcertCount.Inc(nil) err = bq.l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { var err0 error blk, cert, err0 = blockGetCert(tx, r) return err0 }) + ledgerGetblockcertMicros.AddMicrosecondsSince(start, nil) err = updateErrNoEntry(err, lastCommitted, latest) return } + +var ledgerBlockqInitCount = metrics.NewCounter("ledger_blockq_init_count", "calls to init block queue") +var ledgerBlockqInitMicros = metrics.NewCounter("ledger_blockq_init_micros", "µs spent to init block queue") +var ledgerSyncBlockputCount = metrics.NewCounter("ledger_blockq_sync_put_count", "calls to sync block queue") +var ledgerSyncBlockputMicros = metrics.NewCounter("ledger_blockq_sync_put_micros", "µs spent to sync block queue") +var ledgerSyncBlockforgetCount = metrics.NewCounter("ledger_blockq_sync_forget_count", "calls") +var ledgerSyncBlockforgetMicros = metrics.NewCounter("ledger_blockq_sync_forget_micros", "µs spent") +var ledgerGetblockCount = metrics.NewCounter("ledger_blockq_getblock_count", "calls") +var ledgerGetblockMicros = metrics.NewCounter("ledger_blockq_getblock_micros", "µs spent") +var ledgerGetblockhdrCount = metrics.NewCounter("ledger_blockq_getblockhdr_count", "calls") +var ledgerGetblockhdrMicros = metrics.NewCounter("ledger_blockq_getblockhdr_micros", "µs spent") +var ledgerGeteblockcertCount = metrics.NewCounter("ledger_blockq_geteblockcert_count", "calls") +var ledgerGeteblockcertMicros = 
metrics.NewCounter("ledger_blockq_geteblockcert_micros", "µs spent") +var ledgerGetblockcertCount = metrics.NewCounter("ledger_blockq_getblockcert_count", "calls") +var ledgerGetblockcertMicros = metrics.NewCounter("ledger_blockq_getblockcert_micros", "µs spent") diff --git a/ledger/bulletin.go b/ledger/bulletin.go index 688b864729..b5e4fb581a 100644 --- a/ledger/bulletin.go +++ b/ledger/bulletin.go @@ -25,13 +25,13 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" ) -// notifier is a struct that encapsulates a single-shot channel; it will only be signalled once. +// notifier is a struct that encapsulates a single-shot channel; it will only be signaled once. type notifier struct { signal chan struct{} notified uint32 } -// makeNotifier constructs a notifier that has not been signalled. +// makeNotifier constructs a notifier that has not been signaled. func makeNotifier() notifier { return notifier{signal: make(chan struct{}), notified: 0} } diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go index 5427bc0950..5d8313fdc1 100644 --- a/ledger/catchpointwriter.go +++ b/ledger/catchpointwriter.go @@ -76,7 +76,7 @@ type encodedBalanceRecord struct { } // CatchpointFileHeader is the content we would have in the "content.msgpack" file in the catchpoint tar archive. -// we need it to be public, as it's being decoded externaly by the catchpointdump utility. +// we need it to be public, as it's being decoded externally by the catchpointdump utility. type CatchpointFileHeader struct { _struct struct{} `codec:",omitempty,omitemptyarray"` diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go index b9672da107..d39f3a1615 100644 --- a/ledger/catchpointwriter_test.go +++ b/ledger/catchpointwriter_test.go @@ -269,7 +269,9 @@ func TestFullCatchpointWriter(t *testing.T) { require.NoError(t, err) // create a ledger. 
- l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, InitState{}, conf) + var initState InitState + initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion + l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf) require.NoError(t, err) defer l.Close() accessor := MakeCatchpointCatchupAccessor(l, l.log) @@ -322,8 +324,10 @@ func TestFullCatchpointWriter(t *testing.T) { // verify that the account data aligns with what we originally stored : for addr, acct := range accts { - acctData, err := l.LookupWithoutRewards(0, addr) + acctData, validThrough, err := l.LookupWithoutRewards(0, addr) require.NoError(t, err) require.Equal(t, acct, acctData) + require.Equal(t, basics.Round(0), validThrough) + } } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index eee9da58d2..a3429b556b 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -22,6 +22,7 @@ import ( "encoding/hex" "fmt" "strings" + "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -30,6 +31,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/metrics" ) // CatchpointCatchupAccessor is an interface for the accessor wrapping the database storage for the catchpoint catchup functionality. @@ -58,7 +60,7 @@ type CatchpointCatchupAccessor interface { // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. 
VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) - // StoreBalancesRound calculates the balances round based on the first block and the associated consensus parametets, and + // StoreBalancesRound calculates the balances round based on the first block and the associated consensus parameters, and // store that to the database StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error) @@ -107,6 +109,13 @@ const ( // catchpointCatchupStateLast is the last entry in the CatchpointCatchupState enumeration. catchpointCatchupStateLast = CatchpointCatchupStateSwitch + + // minMerkleTrieEvictFrequency controls the minimal number of account changes that we will attempt to update between + // two consecutive evict calls. + minMerkleTrieEvictFrequency = uint64(1024) + // maxMerkleTrieEvictionDuration is the upper bound for the time we'll let the evict call take before lowering the number + // of accounts per update. + maxMerkleTrieEvictionDuration = 2000 * time.Millisecond ) // MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger @@ -177,6 +186,8 @@ func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context if !newCatchup { c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode) } + start := time.Now() + ledgerResetstagingbalancesCount.Inc(nil) err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { err = resetCatchpointStagingBalances(ctx, tx, newCatchup) if err != nil { @@ -209,6 +220,7 @@ func (c *CatchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context } return }) + ledgerResetstagingbalancesMicros.AddMicrosecondsSince(start, nil) return } @@ -220,9 +232,10 @@ type CatchpointCatchupAccessorProgress struct { TotalChunks uint64 SeenHeader bool - // Having the cachedTrie here would help to accelarate the catchup process since the trie maintain an internal cache of nodes.
+ // Having the cachedTrie here would help to accelerate the catchup process since the trie maintains an internal cache of nodes. // While rebuilding the trie, we don't want to force and reload (some) of these nodes into the cache for each catchpoint file chunk. - cachedTrie *merkletrie.Trie + cachedTrie *merkletrie.Trie + evictFrequency uint64 } // ProgressStagingBalances deserialize the given bytes as a temporary staging balances @@ -256,6 +269,8 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex // later on: // TotalAccounts, TotalAccounts, Catchpoint, BlockHeaderDigest, BalancesRound wdb := c.ledger.trackerDB().wdb + start := time.Now() + ledgerProcessstagingcontentCount.Inc(nil) err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { sq, err := accountsDbInit(tx, tx) if err != nil { @@ -269,6 +284,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex err = accountsPutTotals(tx, fileHeader.Totals, true) return }) + ledgerProcessstagingcontentMicros.AddMicrosecondsSince(start, nil) if err == nil { progress.SeenHeader = true progress.TotalAccounts = fileHeader.TotalAccounts @@ -294,7 +310,10 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte return fmt.Errorf("processStagingBalances received a chunk with no accounts") } + proto := c.ledger.GenesisProto() wdb := c.ledger.trackerDB().wdb + start := time.Now() + ledgerProcessstagingbalancesCount.Inc(nil) err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { // create the merkle trie for the balances var mc *merkleCommitter @@ -304,7 +323,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte } if progress.cachedTrie == nil { - progress.cachedTrie, err = merkletrie.MakeTrie(mc, trieCachedNodesCount) + progress.cachedTrie, err = merkletrie.MakeTrie(mc, trieMemoryConfig) if err != nil { return } @@ -312,7 +331,7 @@ func (c *CatchpointCatchupAccessorImpl)
processStagingBalances(ctx context.Conte progress.cachedTrie.SetCommitter(mc) } - err = writeCatchpointStagingBalances(ctx, tx, balances.Balances) + err = writeCatchpointStagingBalances(ctx, tx, balances.Balances, proto) if err != nil { return } @@ -358,28 +377,46 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte err = progress.EvictAsNeeded(uint64(len(balances.Balances))) return }) + ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil) if err == nil { progress.ProcessedAccounts += uint64(len(balances.Balances)) progress.ProcessedBytes += uint64(len(bytes)) } - // not strictly required, but clean up the pointer in case of either a failuire or when we're done. + // not strictly required, but clean up the pointer in case of either a failure or when we're done. if err != nil || progress.ProcessedAccounts == progress.TotalAccounts { progress.cachedTrie = nil - // restore "normal" syncronous mode + // restore "normal" synchronous mode c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode) } return err } -// EvictAsNeeded calls Evict on the cachedTrie priodically, or once we're done updating the trie. +// EvictAsNeeded calls Evict on the cachedTrie periodically, or once we're done updating the trie. func (progress *CatchpointCatchupAccessorProgress) EvictAsNeeded(balancesCount uint64) (err error) { if progress.cachedTrie == nil { return nil } + if progress.evictFrequency == 0 { + progress.evictFrequency = trieRebuildCommitFrequency + } // periodically, perform commit & evict to flush it to the disk and rebalance the cache memory utilization. 
- if (progress.ProcessedAccounts/trieRebuildCommitFrequency) < ((progress.ProcessedAccounts+balancesCount)/trieRebuildCommitFrequency) || + if (progress.ProcessedAccounts/progress.evictFrequency) < ((progress.ProcessedAccounts+balancesCount)/progress.evictFrequency) || (progress.ProcessedAccounts+balancesCount) == progress.TotalAccounts { + evictStart := time.Now() _, err = progress.cachedTrie.Evict(true) + if err == nil { + evictDelta := time.Now().Sub(evictStart) + if evictDelta > maxMerkleTrieEvictionDuration { + if progress.evictFrequency > minMerkleTrieEvictFrequency { + progress.evictFrequency /= 2 + } + } else { + progress.evictFrequency *= 2 + if progress.evictFrequency > trieRebuildCommitFrequency { + progress.evictFrequency = trieRebuildCommitFrequency + } + } + } } return } @@ -414,6 +451,8 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl } blockRound = basics.Round(iRound) + start := time.Now() + ledgerVerifycatchpointCount.Inc(nil) err = rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { // create the merkle trie for the balances mc, err0 := makeMerkleCommitter(tx, true) @@ -421,7 +460,7 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl return fmt.Errorf("unable to make MerkleCommitter: %v", err0) } var trie *merkletrie.Trie - trie, err = merkletrie.MakeTrie(mc, trieCachedNodesCount) + trie, err = merkletrie.MakeTrie(mc, trieMemoryConfig) if err != nil { return fmt.Errorf("unable to make trie: %v", err) } @@ -437,6 +476,7 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl } return }) + ledgerVerifycatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { return err } @@ -452,13 +492,15 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl return nil } -// StoreBalancesRound calculates the balances round based on the first block and the associated consensus parametets, and +// StoreBalancesRound 
calculates the balances round based on the first block and the associated consensus parameters, and // store that to the database func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, blk *bookkeeping.Block) (err error) { // calculate the balances round and store it. It *should* be identical to the one in the catchpoint file header, but we don't want to // trust the one in the catchpoint file header, so we'll calculate it ourselves. balancesRound := blk.Round() - basics.Round(config.Consensus[blk.CurrentProtocol].MaxBalLookback) wdb := c.ledger.trackerDB().wdb + start := time.Now() + ledgerStorebalancesroundCount.Inc(nil) err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { sq, err := accountsDbInit(tx, tx) if err != nil { @@ -471,15 +513,19 @@ func (c *CatchpointCatchupAccessorImpl) StoreBalancesRound(ctx context.Context, } return }) + ledgerStorebalancesroundMicros.AddMicrosecondsSince(start, nil) return } // StoreFirstBlock stores a single block to the blocks database. func (c *CatchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk *bookkeeping.Block) (err error) { blockDbs := c.ledger.blockDB() + start := time.Now() + ledgerStorefirstblockCount.Inc(nil) err = blockDbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { return blockStartCatchupStaging(tx, *blk) }) + ledgerStorefirstblockMicros.AddMicrosecondsSince(start, nil) if err != nil { return err } @@ -489,9 +535,12 @@ func (c *CatchpointCatchupAccessorImpl) StoreFirstBlock(ctx context.Context, blk // StoreBlock stores a single block to the blocks database. 
func (c *CatchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *bookkeeping.Block) (err error) { blockDbs := c.ledger.blockDB() + start := time.Now() + ledgerCatchpointStoreblockCount.Inc(nil) err = blockDbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { return blockPutStaging(tx, *blk) }) + ledgerCatchpointStoreblockMicros.AddMicrosecondsSince(start, nil) if err != nil { return err } @@ -501,12 +550,15 @@ func (c *CatchpointCatchupAccessorImpl) StoreBlock(ctx context.Context, blk *boo // FinishBlocks concludes the catchup of the blocks database. func (c *CatchpointCatchupAccessorImpl) FinishBlocks(ctx context.Context, applyChanges bool) (err error) { blockDbs := c.ledger.blockDB() + start := time.Now() + ledgerCatchpointFinishblocksCount.Inc(nil) err = blockDbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { if applyChanges { return blockCompleteCatchup(tx) } return blockAbortCatchup(tx) }) + ledgerCatchpointFinishblocksMicros.AddMicrosecondsSince(start, nil) if err != nil { return err } @@ -516,10 +568,13 @@ func (c *CatchpointCatchupAccessorImpl) FinishBlocks(ctx context.Context, applyC // EnsureFirstBlock ensure that we have a single block in the staging block table, and returns that block func (c *CatchpointCatchupAccessorImpl) EnsureFirstBlock(ctx context.Context) (blk bookkeeping.Block, err error) { blockDbs := c.ledger.blockDB() + start := time.Now() + ledgerCatchpointEnsureblock1Count.Inc(nil) err = blockDbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { blk, err = blockEnsureSingleBlock(tx) return }) + ledgerCatchpointEnsureblock1Micros.AddMicrosecondsSince(start, nil) if err != nil { return blk, err } @@ -544,6 +599,8 @@ func (c *CatchpointCatchupAccessorImpl) CompleteCatchup(ctx context.Context) (er // finishBalances concludes the catchup of the balances(tracker) database. 
func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err error) { wdb := c.ledger.trackerDB().wdb + start := time.Now() + ledgerCatchpointFinishBalsCount.Inc(nil) err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { var balancesRound uint64 var totals AccountTotals @@ -601,5 +658,27 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return }) + ledgerCatchpointFinishBalsMicros.AddMicrosecondsSince(start, nil) return err } + +var ledgerResetstagingbalancesCount = metrics.NewCounter("ledger_catchup_resetstagingbalances_count", "calls") +var ledgerResetstagingbalancesMicros = metrics.NewCounter("ledger_catchup_resetstagingbalances_micros", "µs spent") +var ledgerProcessstagingcontentCount = metrics.NewCounter("ledger_catchup_processstagingcontent_count", "calls") +var ledgerProcessstagingcontentMicros = metrics.NewCounter("ledger_catchup_processstagingcontent_micros", "µs spent") +var ledgerProcessstagingbalancesCount = metrics.NewCounter("ledger_catchup_processstagingbalances_count", "calls") +var ledgerProcessstagingbalancesMicros = metrics.NewCounter("ledger_catchup_processstagingbalances_micros", "µs spent") +var ledgerVerifycatchpointCount = metrics.NewCounter("ledger_catchup_verifycatchpoint_count", "calls") +var ledgerVerifycatchpointMicros = metrics.NewCounter("ledger_catchup_verifycatchpoint_micros", "µs spent") +var ledgerStorebalancesroundCount = metrics.NewCounter("ledger_catchup_storebalancesround_count", "calls") +var ledgerStorebalancesroundMicros = metrics.NewCounter("ledger_catchup_storebalancesround_micros", "µs spent") +var ledgerStorefirstblockCount = metrics.NewCounter("ledger_catchup_storefirstblock_count", "calls") +var ledgerStorefirstblockMicros = metrics.NewCounter("ledger_catchup_storefirstblock_micros", "µs spent") +var ledgerCatchpointStoreblockCount = metrics.NewCounter("ledger_catchup_catchpoint_storeblock_count", "calls") +var ledgerCatchpointStoreblockMicros = 
metrics.NewCounter("ledger_catchup_catchpoint_storeblock_micros", "µs spent") +var ledgerCatchpointFinishblocksCount = metrics.NewCounter("ledger_catchup_catchpoint_finishblocks_count", "calls") +var ledgerCatchpointFinishblocksMicros = metrics.NewCounter("ledger_catchup_catchpoint_finishblocks_micros", "µs spent") +var ledgerCatchpointEnsureblock1Count = metrics.NewCounter("ledger_catchup_catchpoint_ensureblock1_count", "calls") +var ledgerCatchpointEnsureblock1Micros = metrics.NewCounter("ledger_catchup_catchpoint_ensureblock1_micros", "µs spent") +var ledgerCatchpointFinishBalsCount = metrics.NewCounter("ledger_catchup_catchpoint_finish_bals_count", "calls") +var ledgerCatchpointFinishBalsMicros = metrics.NewCounter("ledger_catchup_catchpoint_finish_bals_micros", "µs spent") diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go new file mode 100644 index 0000000000..788b6ca121 --- /dev/null +++ b/ledger/catchupaccessor_test.go @@ -0,0 +1,132 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package ledger + +import ( + "context" + "encoding/binary" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" +) + +func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) { + genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion) + const inMem = false + log := logging.TestingLog(b) + cfg := config.GetDefaultLocal() + cfg.Archival = false + log.SetLevel(logging.Warn) + dbBaseFileName := strings.Replace(b.Name(), "/", "_", -1) + // delete database files, in case they were left there by previous iterations of this test. + os.Remove(dbBaseFileName + ".block.sqlite") + os.Remove(dbBaseFileName + ".tracker.sqlite") + l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg) + require.NoError(b, err, "could not open ledger") + defer func() { + l.Close() + os.Remove(dbBaseFileName + ".block.sqlite") + os.Remove(dbBaseFileName + ".tracker.sqlite") + }() + + catchpointAccessor := MakeCatchpointCatchupAccessor(l, log) + catchpointAccessor.ResetStagingBalances(context.Background(), true) + + accountsCount := uint64(b.N) + fileHeader := CatchpointFileHeader{ + Version: catchpointFileVersion, + BalancesRound: basics.Round(0), + BlocksRound: basics.Round(0), + Totals: AccountTotals{}, + TotalAccounts: accountsCount, + TotalChunks: (accountsCount + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk, + Catchpoint: "", + BlockHeaderDigest: crypto.Digest{}, + } + encodedFileHeader := protocol.Encode(&fileHeader) + var progress CatchpointCatchupAccessorProgress + err = catchpointAccessor.ProgressStagingBalances(context.Background(), "content.msgpack", encodedFileHeader, &progress) + require.NoError(b, err) + + // pre-create all encoded 
chunks. + accounts := uint64(0) + encodedAccountChunks := make([][]byte, 0, accountsCount/BalancesPerCatchpointFileChunk+1) + last64KIndex := -1 + for accounts < accountsCount { + // generate a chunk; + chunkSize := accountsCount - accounts + if chunkSize > BalancesPerCatchpointFileChunk { + chunkSize = BalancesPerCatchpointFileChunk + } + if accounts >= accountsCount-64*1024 && last64KIndex == -1 { + last64KIndex = len(encodedAccountChunks) + } + var balances catchpointFileBalancesChunk + balances.Balances = make([]encodedBalanceRecord, chunkSize) + for i := uint64(0); i < chunkSize; i++ { + var randomAccount encodedBalanceRecord + accountData := basics.AccountData{} + accountData.MicroAlgos.Raw = crypto.RandUint63() + randomAccount.AccountData = protocol.Encode(&accountData) + crypto.RandBytes(randomAccount.Address[:]) + binary.LittleEndian.PutUint64(randomAccount.Address[:], accounts+i) + balances.Balances[i] = randomAccount + } + encodedAccountChunks = append(encodedAccountChunks, protocol.Encode(&balances)) + accounts += chunkSize + } + + b.ResetTimer() + accounts = uint64(0) + var last64KStart time.Time + for len(encodedAccountChunks) > 0 { + encodedAccounts := encodedAccountChunks[0] + encodedAccountChunks = encodedAccountChunks[1:] + + if last64KIndex == 0 { + last64KStart = time.Now() + } + + err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress) + require.NoError(b, err) + last64KIndex-- + } + if !last64KStart.IsZero() { + last64KDuration := time.Now().Sub(last64KStart) + b.ReportMetric(float64(last64KDuration.Nanoseconds())/float64(64*1024), "ns/last_64k_account") + } +} + +func BenchmarkRestoringFromCatchpointFile(b *testing.B) { + benchSizes := []int{1024 * 100, 1024 * 200, 1024 * 400, 1024 * 800} + for _, size := range benchSizes { + b.Run(fmt.Sprintf("Restore-%d", size), func(b *testing.B) { + b.N = size + benchmarkRestoringFromCatchpointFileHelper(b) + }) + } +} diff --git 
a/ledger/compactcert.go b/ledger/compactcert.go new file mode 100644 index 0000000000..e552d50dc9 --- /dev/null +++ b/ledger/compactcert.go @@ -0,0 +1,159 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package ledger + +import ( + "fmt" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto/compactcert" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/logging" +) + +// AcceptableCompactCertWeight computes the acceptable signed weight +// of a compact cert if it were to appear in a transaction with a +// particular firstValid round. Earlier rounds require a smaller cert. +// votersHdr specifies the block that contains the Merkle commitment of +// the voters for this compact cert (and thus the compact cert is for +// votersHdr.Round() + CompactCertRounds). +func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round) uint64 { + proto := config.Consensus[votersHdr.CurrentProtocol] + certRound := votersHdr.Round + basics.Round(proto.CompactCertRounds) + total := votersHdr.CompactCertVotersTotal + + // The acceptable weight depends on the elapsed time (in rounds) + // from the block we are trying to construct a certificate for. 
+ // Start by subtracting the round number of the block being certified. + // If that round hasn't even passed yet, require 100% votes in cert. + offset := firstValid.SubSaturate(certRound) + if offset == 0 { + return total.ToUint64() + } + + // During the first proto.CompactCertRound/2 + 1 + 1 blocks, the + // signatures are still being broadcast, so, continue requiring + // 100% votes. + // + // The first +1 comes from CompactCertWorker.broadcastSigs: it only + // broadcasts signatures for round R starting with round R+1, to + // ensure nodes have the block for round R already in their ledger, + // to check the sig. + // + // The second +1 comes from the fact that, if we are checking this + // acceptable weight to decide whether to allow this transaction in + // a block, the transaction was sent out one round ago. + offset = offset.SubSaturate(basics.Round(proto.CompactCertRounds/2 + 2)) + if offset == 0 { + return total.ToUint64() + } + + // In the next proto.CompactCertRounds/2 blocks, linearly scale + // the acceptable weight from 100% to 0%. If we are outside of + // that window, accept any weight. + if offset >= basics.Round(proto.CompactCertRounds/2) { + return 0 + } + + w, overflowed := basics.Muldiv(total.ToUint64(), proto.CompactCertRounds/2-uint64(offset), proto.CompactCertRounds/2) + if overflowed { + // Shouldn't happen, but a safe fallback is to accept a larger cert. + logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow", + total, proto.CompactCertRounds, certRound, firstValid) + return 0 + } + + return w +} + +// CompactCertParams computes the parameters for building or verifying +// a compact cert for block hdr, using voters from block votersHdr. 
+func CompactCertParams(votersHdr bookkeeping.BlockHeader, hdr bookkeeping.BlockHeader) (res compactcert.Params, err error) { + proto := config.Consensus[votersHdr.CurrentProtocol] + + if proto.CompactCertRounds == 0 { + err = fmt.Errorf("compact certs not enabled") + return + } + + if votersHdr.Round%basics.Round(proto.CompactCertRounds) != 0 { + err = fmt.Errorf("votersHdr %d not a multiple of %d", + votersHdr.Round, proto.CompactCertRounds) + return + } + + if hdr.Round != votersHdr.Round+basics.Round(proto.CompactCertRounds) { + err = fmt.Errorf("certifying block %d not %d ahead of voters %d", + hdr.Round, proto.CompactCertRounds, votersHdr.Round) + return + } + + totalWeight := votersHdr.CompactCertVotersTotal.ToUint64() + provenWeight, overflowed := basics.Muldiv(totalWeight, proto.CompactCertWeightThreshold, 100) + if overflowed { + err = fmt.Errorf("overflow computing provenWeight[%d]: %d * %d / 100", + hdr.Round, totalWeight, proto.CompactCertWeightThreshold) + return + } + + res = compactcert.Params{ + Msg: hdr, + ProvenWeight: provenWeight, + SigRound: hdr.Round + 1, + SecKQ: proto.CompactCertSecKQ, + } + return +} + +// validateCompactCert checks that a compact cert is valid. 
+func validateCompactCert(certHdr bookkeeping.BlockHeader, cert compactcert.Cert, votersHdr bookkeeping.BlockHeader, lastCertRnd basics.Round, atRound basics.Round) error { + proto := config.Consensus[certHdr.CurrentProtocol] + + if proto.CompactCertRounds == 0 { + return fmt.Errorf("compact certs not enabled: rounds = %d", proto.CompactCertRounds) + } + + if certHdr.Round%basics.Round(proto.CompactCertRounds) != 0 { + return fmt.Errorf("cert at %d for non-multiple of %d", certHdr.Round, proto.CompactCertRounds) + } + + votersRound := certHdr.Round.SubSaturate(basics.Round(proto.CompactCertRounds)) + if votersRound != votersHdr.Round { + return fmt.Errorf("new cert is for %d (voters %d), but votersHdr from %d", + certHdr.Round, votersRound, votersHdr.Round) + } + + if lastCertRnd != 0 && lastCertRnd != votersRound { + return fmt.Errorf("last cert from %d, but new cert is for %d (voters %d)", + lastCertRnd, certHdr.Round, votersRound) + } + + acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound) + if cert.SignedWeight < acceptableWeight { + return fmt.Errorf("insufficient weight at %d: %d < %d", + atRound, cert.SignedWeight, acceptableWeight) + } + + ccParams, err := CompactCertParams(votersHdr, certHdr) + if err != nil { + return err + } + + verif := compactcert.MkVerifier(ccParams, votersHdr.CompactCertVoters) + return verif.Verify(&cert) +} diff --git a/ledger/cow.go b/ledger/cow.go index 6d1e7bd661..10b4c69166 100644 --- a/ledger/cow.go +++ b/ledger/cow.go @@ -37,6 +37,8 @@ type roundCowParent interface { isDup(basics.Round, basics.Round, transactions.Txid, txlease) (bool, error) txnCounter() uint64 getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) + compactCertLast() basics.Round + blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) } type roundCowState struct { @@ -62,6 +64,10 @@ type StateDelta struct { // new block header; read-only hdr *bookkeeping.BlockHeader + + // last round for which 
we have seen a compact cert. + // zero if no compact cert seen. + compactCertSeen basics.Round } func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader) *roundCowState { @@ -123,6 +129,17 @@ func (cb *roundCowState) txnCounter() uint64 { return cb.lookupParent.txnCounter() + uint64(len(cb.mods.Txids)) } +func (cb *roundCowState) compactCertLast() basics.Round { + if cb.mods.compactCertSeen != 0 { + return cb.mods.compactCertSeen + } + return cb.lookupParent.compactCertLast() +} + +func (cb *roundCowState) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + return cb.lookupParent.blockHdr(r) +} + func (cb *roundCowState) put(addr basics.Address, old basics.AccountData, new basics.AccountData, newCreatable *basics.CreatableLocator, deletedCreatable *basics.CreatableLocator) { prev, present := cb.mods.accts[addr] if present { @@ -153,6 +170,10 @@ func (cb *roundCowState) addTx(txn transactions.Transaction, txid transactions.T cb.mods.txleases[txlease{sender: txn.Sender, lease: txn.Lease}] = txn.LastValid } +func (cb *roundCowState) sawCompactCert(rnd basics.Round) { + cb.mods.compactCertSeen = rnd +} + func (cb *roundCowState) child() *roundCowState { return &roundCowState{ lookupParent: cb, @@ -190,6 +211,7 @@ func (cb *roundCowState) commitToParent() { for cidx, delta := range cb.mods.creatables { cb.commitParent.mods.creatables[cidx] = delta } + cb.commitParent.mods.compactCertSeen = cb.mods.compactCertSeen } func (cb *roundCowState) modifiedAccounts() []basics.Address { diff --git a/ledger/cow_test.go b/ledger/cow_test.go index 6b871e45a6..4bb21c545f 100644 --- a/ledger/cow_test.go +++ b/ledger/cow_test.go @@ -54,6 +54,14 @@ func (ml *mockLedger) txnCounter() uint64 { return 0 } +func (ml *mockLedger) compactCertLast() basics.Round { + return 0 +} + +func (ml *mockLedger) blockHdr(_ basics.Round) (bookkeeping.BlockHeader, error) { + return bookkeeping.BlockHeader{}, nil +} + func checkCow(t *testing.T, cow *roundCowState, accts 
map[basics.Address]basics.AccountData) { for addr, data := range accts { d, err := cow.lookup(addr) diff --git a/ledger/eval.go b/ledger/eval.go index d898b9951a..ce8f5a2364 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -23,6 +23,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/compactcert" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/committee" @@ -59,6 +60,9 @@ type roundCowBase struct { // TxnCounter from previous block header. txnCount uint64 + // CompactCertLastRound from previous block header. + compactCertSeen basics.Round + // The current protocol consensus params. proto config.ConsensusParams } @@ -67,8 +71,9 @@ func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.Creat return x.l.GetCreatorForRound(x.rnd, cidx, ctype) } -func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) { - return x.l.LookupWithoutRewards(x.rnd, addr) +func (x *roundCowBase) lookup(addr basics.Address) (acctData basics.AccountData, err error) { + acctData, _, err = x.l.LookupWithoutRewards(x.rnd, addr) + return acctData, err } func (x *roundCowBase) isDup(firstValid, lastValid basics.Round, txid transactions.Txid, txl txlease) (bool, error) { @@ -79,6 +84,14 @@ func (x *roundCowBase) txnCounter() uint64 { return x.txnCount } +func (x *roundCowBase) compactCertLast() basics.Round { + return x.compactCertSeen +} + +func (x *roundCowBase) blockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { + return x.l.BlockHdr(r) +} + // wrappers for roundCowState to satisfy the (current) apply.Balances interface func (cs *roundCowState) Get(addr basics.Address, withPendingRewards bool) (basics.BalanceRecord, error) { acctdata, err := cs.lookup(addr) @@ -161,6 +174,30 @@ func (cs *roundCowState) ConsensusParams() config.ConsensusParams { return 
cs.proto } +func (cs *roundCowState) compactCert(certRnd basics.Round, cert compactcert.Cert, atRound basics.Round) error { + lastCertRnd := cs.compactCertLast() + + certHdr, err := cs.blockHdr(certRnd) + if err != nil { + return err + } + + proto := config.Consensus[certHdr.CurrentProtocol] + votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds)) + votersHdr, err := cs.blockHdr(votersRnd) + if err != nil { + return err + } + + err = validateCompactCert(certHdr, cert, votersHdr, lastCertRnd, atRound) + if err != nil { + return err + } + + cs.sawCompactCert(certRnd) + return nil +} + // BlockEvaluator represents an in-progress evaluation of a block // against the ledger. type BlockEvaluator struct { @@ -183,11 +220,11 @@ type BlockEvaluator struct { type ledgerForEvaluator interface { GenesisHash() crypto.Digest BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) - Lookup(basics.Round, basics.Address) (basics.AccountData, error) Totals(basics.Round) (AccountTotals, error) isDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, txlease) (bool, error) - LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, error) + LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error) GetCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error) + CompactCertVoters(basics.Round) (*VotersForRound, error) } // StartEvaluator creates a BlockEvaluator, given a ledger and a block header @@ -232,6 +269,8 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, paysetHin eval.block.Payset = make([]transactions.SignedTxnInBlock, 0, paysetHint) } + prevProto := proto + if hdr.Round > 0 { var err error eval.prevHeader, err = l.BlockHdr(base.rnd) @@ -240,6 +279,11 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, paysetHin } base.txnCount = eval.prevHeader.TxnCounter + base.compactCertSeen = 
eval.prevHeader.CompactCertLastRound + prevProto, ok = config.Consensus[eval.prevHeader.CurrentProtocol] + if !ok { + return nil, protocol.Error(eval.prevHeader.CurrentProtocol) + } } prevTotals, err := l.Totals(eval.prevHeader.Round) @@ -248,11 +292,15 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, paysetHin } poolAddr := eval.prevHeader.RewardsPool - incentivePoolData, err := l.Lookup(eval.prevHeader.Round, poolAddr) + // get the reward pool account data without any rewards + incentivePoolData, _, err := l.LookupWithoutRewards(eval.prevHeader.Round, poolAddr) if err != nil { return nil, err } + // this is expected to be a no-op, but update the rewards on the rewards pool if it was configured to receive rewards ( unlike mainnet ). + incentivePoolData = incentivePoolData.WithUpdatedRewards(prevProto, eval.prevHeader.RewardsLevel) + if generate { if eval.proto.SupportGenesisHash { eval.block.BlockHeader.GenesisHash = eval.genesisHash @@ -647,11 +695,11 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, appEval *app // completely zero, which means the account will be deleted.) rewardlvl := cow.rewardsLevel() for _, addr := range cow.modifiedAccounts() { - // Skip FeeSink and RewardsPool MinBalance checks here. - // There's only two accounts, so space isn't an issue, and we don't + // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here. + // There's only a few accounts, so space isn't an issue, and we don't // expect them to have low balances, but if they do, it may cause - // surprises for every transaction. - if addr == spec.FeeSink || addr == spec.RewardsPool { + // surprises. + if addr == spec.FeeSink || addr == spec.RewardsPool || addr == transactions.CompactCertSender { continue } @@ -689,7 +737,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, appEval *app } // applyTransaction changes the balances according to this transaction. 
-func applyTransaction(tx transactions.Transaction, balances apply.Balances, steva apply.StateEvaluator, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) { +func applyTransaction(tx transactions.Transaction, balances *roundCowState, steva apply.StateEvaluator, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) { params := balances.ConsensusParams() // move fee to pool @@ -738,6 +786,9 @@ func applyTransaction(tx transactions.Transaction, balances apply.Balances, stev case protocol.ApplicationCallTx: err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, balances, &ad, ctr, steva) + case protocol.CompactCertTx: + err = balances.compactCert(tx.CertRound, tx.Cert, tx.Header.FirstValid) + default: err = fmt.Errorf("Unknown transaction type %v", tx.Type) } @@ -753,6 +804,31 @@ func applyTransaction(tx transactions.Transaction, balances apply.Balances, stev return } +// compactCertVotersAndTotal returns the expected values of CompactCertVoters +// and CompactCertVotersTotal for a block. +func (eval *BlockEvaluator) compactCertVotersAndTotal() (root crypto.Digest, total basics.MicroAlgos, err error) { + if eval.proto.CompactCertRounds == 0 { + return + } + + if eval.block.Round()%basics.Round(eval.proto.CompactCertRounds) != 0 { + return + } + + lookback := eval.block.Round().SubSaturate(basics.Round(eval.proto.CompactCertVotersLookback)) + voters, err := eval.l.CompactCertVoters(lookback) + if err != nil { + return + } + + if voters != nil { + root = voters.Tree.Root() + total = voters.TotalWeight + } + + return +} + // Call "endOfBlock" after all the block's rewards and transactions are processed. 
func (eval *BlockEvaluator) endOfBlock() error { if eval.generate { @@ -762,6 +838,14 @@ func (eval *BlockEvaluator) endOfBlock() error { } else { eval.block.TxnCounter = 0 } + + var err error + eval.block.CompactCertVoters, eval.block.CompactCertVotersTotal, err = eval.compactCertVotersAndTotal() + if err != nil { + return err + } + + eval.block.CompactCertLastRound = eval.state.compactCertLast() } return nil @@ -783,6 +867,20 @@ func (eval *BlockEvaluator) finalValidation() error { if eval.block.TxnCounter != expectedTxnCount { return fmt.Errorf("txn count wrong: %d != %d", eval.block.TxnCounter, expectedTxnCount) } + + expectedVoters, expectedVotersWeight, err := eval.compactCertVotersAndTotal() + if err != nil { + return err + } + if eval.block.CompactCertVoters != expectedVoters { + return fmt.Errorf("CompactCertVoters wrong: %v != %v", eval.block.CompactCertVoters, expectedVoters) + } + if eval.block.CompactCertVotersTotal != expectedVotersWeight { + return fmt.Errorf("CompactCertVotersTotal wrong: %v != %v", eval.block.CompactCertVotersTotal, expectedVotersWeight) + } + if eval.block.CompactCertLastRound != eval.state.compactCertLast() { + return fmt.Errorf("CompactCertLastRound wrong: %v != %v", eval.block.CompactCertLastRound, eval.state.compactCertLast()) + } } return nil diff --git a/ledger/ledger.go b/ledger/ledger.go index 7492913da8..67450cc849 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "os" + "time" "github.com/algorand/go-deadlock" @@ -33,6 +34,7 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" + "github.com/algorand/go-algorand/util/metrics" ) // Ledger is a database storing the contents of the ledger. 
@@ -125,9 +127,12 @@ func OpenLedger( l.setSynchronousMode(context.Background(), l.synchronousMode) + start := time.Now() + ledgerInitblocksdbCount.Inc(nil) err = l.blockDBs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { return initBlocksDB(tx, l, []bookkeeping.Block{genesisInitState.Block}, cfg.Archival) }) + ledgerInitblocksdbMicros.AddMicrosecondsSince(start, nil) if err != nil { err = fmt.Errorf("OpenLedger.initBlocksDB %v", err) return nil, err @@ -195,6 +200,8 @@ func (l *Ledger) reloadLedger() error { // verifyMatchingGenesisHash tests to see that the latest block header pointing to the same genesis hash provided in genesisHash. func (l *Ledger) verifyMatchingGenesisHash() (err error) { // Check that the genesis hash, if present, matches. + start := time.Now() + ledgerVerifygenhashCount.Inc(nil) err = l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error { latest, err := blockLatest(tx) if err != nil { @@ -215,6 +222,7 @@ func (l *Ledger) verifyMatchingGenesisHash() (err error) { } return nil }) + ledgerVerifygenhashMicros.AddMicrosecondsSince(start, nil) return } @@ -346,7 +354,7 @@ func (l *Ledger) Close() { // then, we shut down the trackers and their corresponding goroutines. l.trackers.close() - // last, we close the underlaying database connections. + // last, we close the underlying database connections. l.blockDBs.close() l.trackerDBs.close() } @@ -398,6 +406,15 @@ func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableTy return l.accts.GetCreatorForRound(l.blockQ.latest(), cidx, ctype) } +// CompactCertVoters returns the top online accounts at round rnd. +// The result might be nil, even with err=nil, if there are no voters +// for that round because compact certs were not enabled. 
+func (l *Ledger) CompactCertVoters(rnd basics.Round) (voters *VotersForRound, err error) { + l.trackerMu.RLock() + defer l.trackerMu.RUnlock() + return l.accts.voters.getVoters(rnd) +} + // ListAssets takes a maximum asset index and maximum result length, and // returns up to that many CreatableLocators from the database where app idx is // less than or equal to the maximum. @@ -424,7 +441,7 @@ func (l *Ledger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountDa defer l.trackerMu.RUnlock() // Intentionally apply (pending) rewards up to rnd. - data, err := l.accts.Lookup(rnd, addr, true) + data, err := l.accts.LookupWithRewards(rnd, addr) if err != nil { return basics.AccountData{}, err } @@ -434,16 +451,16 @@ func (l *Ledger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountDa // LookupWithoutRewards is like Lookup but does not apply pending rewards up // to the requested round rnd. -func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, error) { +func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, basics.Round, error) { l.trackerMu.RLock() defer l.trackerMu.RUnlock() - data, err := l.accts.Lookup(rnd, addr, false) + data, validThrough, err := l.accts.LookupWithoutRewards(rnd, addr) if err != nil { - return basics.AccountData{}, err + return basics.AccountData{}, basics.Round(0), err } - return data, nil + return data, validThrough, nil } // Totals returns the totals of all accounts at the end of round rnd. @@ -460,7 +477,7 @@ func (l *Ledger) isDup(currentProto config.ConsensusParams, current basics.Round } // GetRoundTxIds returns a map of the transactions ids that we have for the given round -// this function is currently not being used, but remains here as it migth be useful in the future. +// this function is currently not being used, but remains here as it might be useful in the future. 
func (l *Ledger) GetRoundTxIds(rnd basics.Round) (txMap map[transactions.Txid]bool) { l.trackerMu.RLock() defer l.trackerMu.RUnlock() @@ -577,6 +594,11 @@ func (l *Ledger) GenesisHash() crypto.Digest { return l.genesisHash } +// GenesisProto returns the initial protocol for this ledger. +func (l *Ledger) GenesisProto() config.ConsensusParams { + return l.genesisProto +} + // GetCatchpointCatchupState returns the current state of the catchpoint catchup. func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state CatchpointCatchupState, err error) { return MakeCatchpointCatchupAccessor(l, l.log).GetState(ctx) @@ -585,8 +607,8 @@ func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state Catchpoin // GetCatchpointStream returns an io.ReadCloser file stream from which the catchpoint file // for the provided round could be retrieved. If no such stream can be generated, a non-nil // error is returned. The io.ReadCloser and the error are mutually exclusive - -// if error is returned, the file stream is gurenteed to be nil, and vice versa, -// if the file stream is not nil, the error is gurenteed to be nil. +// if error is returned, the file stream is guaranteed to be nil, and vice versa, +// if the file stream is not nil, the error is guaranteed to be nil. func (l *Ledger) GetCatchpointStream(round basics.Round) (io.ReadCloser, error) { l.trackerMu.RLock() defer l.trackerMu.RUnlock() @@ -608,7 +630,7 @@ func (l *Ledger) trackerLog() logging.Logger { } // trackerEvalVerified is used by the accountUpdates to reconstruct the StateDelta from a given block during it's loadFromDisk execution. -// when this function is called, the trackers mutex is expected alredy to be taken. The provided accUpdatesLedger would allow the +// when this function is called, the trackers mutex is expected already to be taken. The provided accUpdatesLedger would allow the // evaluator to shortcut the "main" ledger ( i.e. 
this struct ) and avoid taking the trackers lock a second time. func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (StateDelta, error) { // passing nil as the verificationPool is ok since we've asking the evaluator to skip verification. @@ -629,3 +651,8 @@ type txlease struct { sender basics.Address lease [32]byte } + +var ledgerInitblocksdbCount = metrics.NewCounter("ledger_initblocksdb_count", "calls") +var ledgerInitblocksdbMicros = metrics.NewCounter("ledger_initblocksdb_micros", "µs spent") +var ledgerVerifygenhashCount = metrics.NewCounter("ledger_verifygenhash_count", "calls") +var ledgerVerifygenhashMicros = metrics.NewCounter("ledger_verifygenhash_micros", "µs spent") diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 1eb3070cd1..868907a5d6 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -61,7 +61,7 @@ func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Tr } } -func testGenerateInitState(t *testing.T, proto protocol.ConsensusVersion) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) { +func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) { params := config.Consensus[proto] poolAddr := testPoolAddr sinkAddr := testSinkAddr @@ -95,7 +95,7 @@ func testGenerateInitState(t *testing.T, proto protocol.ConsensusVersion) (genes initBlock := bookkeeping.Block{ BlockHeader: bookkeeping.BlockHeader{ - GenesisID: t.Name(), + GenesisID: tb.Name(), Round: 0, RewardsState: bookkeeping.RewardsState{ RewardsRate: initialRewardsPerRound, @@ -109,12 +109,12 @@ func testGenerateInitState(t *testing.T, proto protocol.ConsensusVersion) (genes }, } if params.SupportGenesisHash { - initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(t.Name())) + initBlock.BlockHeader.GenesisHash = crypto.Hash([]byte(tb.Name())) } genesisInitState.Block = 
initBlock genesisInitState.Accounts = initAccounts - genesisInitState.GenesisHash = crypto.Hash([]byte(t.Name())) + genesisInitState.GenesisHash = crypto.Hash([]byte(tb.Name())) return } @@ -1078,3 +1078,139 @@ func TestLedgerReload(t *testing.T) { } } } + +// TestGetLastCatchpointLabel tests ledger.GetLastCatchpointLabel is returning the correct value. +func TestGetLastCatchpointLabel(t *testing.T) { + + //initLedger + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + const inMem = true + log := logging.TestingLog(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer ledger.Close() + + // set some value + lastCatchpointLabel := "someCatchpointLabel" + ledger.accts.lastCatchpointLabel = lastCatchpointLabel + + // verify the value is returned + require.Equal(t, lastCatchpointLabel, ledger.GetLastCatchpointLabel()) +} + +// generate at least 3 asset and 3 app creatables, and return the ids +// of the asset/app with at least 3 elements less or equal. +func generateCreatables(numElementsPerSegement int) ( + randomCtbs map[basics.CreatableIndex]modifiedCreatable, + assetID3, + appID3 basics.CreatableIndex, + err error) { + + _, randomCtbs = randomCreatables(numElementsPerSegement) + asCounter3 := 0 + apCounter3 := 0 + + for x := 0; x < 10; x++ { + // find the assetid greater than at least 2 assetids + for cID, crtble := range randomCtbs { + switch crtble.ctype { + case basics.AssetCreatable: + if assetID3 == 0 { + assetID3 = cID + continue + } + asCounter3++ + if assetID3 < cID { + assetID3 = cID + } + case basics.AppCreatable: + if appID3 == 0 { + appID3 = cID + continue + } + apCounter3++ + if appID3 < cID { + appID3 = cID + } + } + if asCounter3 >= 3 && apCounter3 >= 3 { + // found at least 3rd smallest of both + break + } + } + + // there should be at least 3 asset and 3 app creatables generated. 
+ // In the rare event this does not happen, repeat... up to 10 times (x) + if asCounter3 >= 3 && apCounter3 >= 3 { + break + } + } + if asCounter3 < 3 && apCounter3 < 3 { + return nil, 0, 0, fmt.Errorf("could not generate 3 apps and 3 assets") + } + return +} + +// TestListAssetsAndApplications tests the ledger.ListAssets and ledger.ListApplications +// interfaces. The detailed test on the correctness of these functions is given in: +// TestListCreatables (acctupdates_test.go) +func TestListAssetsAndApplications(t *testing.T) { + + numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables + + //initLedger + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + const inMem = true + log := logging.TestingLog(t) + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledger, err := OpenLedger(log, t.Name(), inMem, genesisInitState, cfg) + require.NoError(t, err, "could not open ledger") + defer ledger.Close() + + // ******* All results are obtained from the cache. Empty database ******* + // ******* No deletes ******* + // get random data. 
Inital batch, no deletes + randomCtbs, maxAsset, maxApp, err := generateCreatables(numElementsPerSegement) + require.NoError(t, err) + + // set the cache + ledger.accts.creatables = randomCtbs + + // Test ListAssets + // Check the number of results limit + results, err := ledger.ListAssets(basics.AssetIndex(maxAsset), 2) + require.NoError(t, err) + require.Equal(t, 2, len(results)) + // Check the max asset id limit + results, err = ledger.ListAssets(basics.AssetIndex(maxAsset), 100) + assetCount := 0 + for id, ctb := range randomCtbs { + if ctb.ctype == basics.AssetCreatable && + ctb.created && + id <= maxAsset { + assetCount++ + } + } + require.Equal(t, assetCount, len(results)) + + // Test ListApplications + // Check the number of results limit + ledger.accts.creatables = randomCtbs + results, err = ledger.ListApplications(basics.AppIndex(maxApp), 2) + require.NoError(t, err) + require.Equal(t, 2, len(results)) + // Check the max application id limit + results, err = ledger.ListApplications(basics.AppIndex(maxApp), 100) + appCount := 0 + for id, ctb := range randomCtbs { + if ctb.ctype == basics.AppCreatable && + ctb.created && + id <= maxApp { + appCount++ + } + } + require.Equal(t, appCount, len(results)) +} diff --git a/ledger/notifier.go b/ledger/notifier.go index e2f4bff6f1..b22ad584b8 100644 --- a/ledger/notifier.go +++ b/ledger/notifier.go @@ -41,7 +41,7 @@ type blockNotifier struct { listeners []BlockListener pendingBlocks []blockDeltaPair running bool - // closing is the waitgroup used to syncronize closing the worker goroutine. It's being increased during loadFromDisk, and the worker is responsible to call Done on it once it's aborting it's goroutine. The close function waits on this to complete. + // closing is the waitgroup used to synchronize closing the worker goroutine. It's being increased during loadFromDisk, and the worker is responsible to call Done on it once it's aborting it's goroutine. The close function waits on this to complete. 
closing sync.WaitGroup } diff --git a/ledger/onlineacct.go b/ledger/onlineacct.go new file mode 100644 index 0000000000..489d8624cc --- /dev/null +++ b/ledger/onlineacct.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package ledger + +import ( + "bytes" + + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" +) + +// An onlineAccount corresponds to an account whose AccountData.Status +// is Online. This is used for a Merkle tree commitment of online +// accounts, which is subsequently used to validate participants for +// a compact certificate. +type onlineAccount struct { + // These are a subset of the fields from the corresponding AccountData. + Address basics.Address + MicroAlgos basics.MicroAlgos + RewardsBase uint64 + NormalizedOnlineBalance uint64 + VoteID crypto.OneTimeSignatureVerifier + VoteFirstValid basics.Round + VoteLastValid basics.Round + VoteKeyDilution uint64 +} + +// onlineTopHeap implements heap.Interface for tracking top N online accounts. 
+type onlineTopHeap struct { + accts []*onlineAccount +} + +// Len implements sort.Interface +func (h *onlineTopHeap) Len() int { + return len(h.accts) +} + +// Less implements sort.Interface +func (h *onlineTopHeap) Less(i, j int) bool { + // For the heap, "less" means the element is returned earlier by Pop(), + // so we actually implement "greater-than" here. + ibal := h.accts[i].NormalizedOnlineBalance + jbal := h.accts[j].NormalizedOnlineBalance + + if ibal > jbal { + return true + } + if ibal < jbal { + return false + } + + bcmp := bytes.Compare(h.accts[i].Address[:], h.accts[j].Address[:]) + if bcmp > 0 { + return true + } + + return false +} + +// Swap implements sort.Interface +func (h *onlineTopHeap) Swap(i, j int) { + h.accts[i], h.accts[j] = h.accts[j], h.accts[i] +} + +// Push implements heap.Interface +func (h *onlineTopHeap) Push(x interface{}) { + h.accts = append(h.accts, x.(*onlineAccount)) +} + +// Pop implements heap.Interface +func (h *onlineTopHeap) Pop() interface{} { + res := h.accts[len(h.accts)-1] + h.accts = h.accts[:len(h.accts)-1] + return res +} diff --git a/ledger/tracker.go b/ledger/tracker.go index 91cabfe260..f756139217 100644 --- a/ledger/tracker.go +++ b/ledger/tracker.go @@ -20,6 +20,7 @@ import ( "fmt" "reflect" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -92,6 +93,7 @@ type ledgerForTracker interface { Block(basics.Round) (bookkeeping.Block, error) BlockHdr(basics.Round) (bookkeeping.BlockHeader, error) GenesisHash() crypto.Digest + GenesisProto() config.ConsensusParams } type trackerRegistry struct { diff --git a/ledger/voters.go b/ledger/voters.go new file mode 100644 index 0000000000..089c782da3 --- /dev/null +++ b/ledger/voters.go @@ -0,0 +1,351 @@ +// Copyright (C) 2019-2020 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package ledger + +import ( + "fmt" + "sync" + + "github.com/algorand/go-deadlock" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/compactcert" + "github.com/algorand/go-algorand/crypto/merklearray" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" +) + +// The votersTracker maintains the Merkle tree for the most recent +// commitments to online accounts for compact certificates. +// +// We maintain multiple Merkle trees: we might commit to a new Merkle tree in +// block X, but we need the Merkle tree from block X-params.CompactCertBlocks +// to build the compact certificate for block X. +// +// votersTracker is kind-of like a tracker, but hangs off the acctupdates +// rather than a direct ledger tracker. We don't have an explicit interface +// for such an "accounts tracker" yet, however. +type votersTracker struct { + // round contains the top online accounts in a given round. + // + // To avoid increasing block latency, we include a Merkle commitment + // to the top online accounts as of block X in the block header of + // block X+CompactCertVotersLookback. 
This gives each node some time + // to construct this Merkle tree, before its root is needed in a block. + // + // This round map is indexed by the block X, using the terminology from + // the above example, to be used in X+CompactCertVotersLookback. + // + // We maintain round entries for two reasons: + // + // The first is to maintain the tree for an upcoming block -- that is, + // if X+Loookback= minRound { + minRound = hdr.CompactCertLastRound + } + for rPlusLookback >= minRound { + r := rPlusLookback.SubSaturate(basics.Round(proto.CompactCertVotersLookback)) + hdr, err = l.BlockHdr(r) + if err != nil { + switch err.(type) { + case ErrNoEntry: + // If we cannot retrieve a block to construct the tree + // then we must have already evicted that block, which + // must have been because compact certs weren't enabled + // when that block was being considered for eviction. + // OK to stop. + return nil + default: + return err + } + } + + if config.Consensus[hdr.CurrentProtocol].CompactCertRounds == 0 { + // Walked backwards past a protocol upgrade, no more compact certs. + return nil + } + + err = vt.loadTree(hdr) + if err != nil { + return err + } + + rPlusLookback = rPlusLookback.SubSaturate(basics.Round(proto.CompactCertRounds)) + } + + return nil +} + +func (vt *votersTracker) loadTree(hdr bookkeeping.BlockHeader) error { + r := hdr.Round + + _, ok := vt.round[r] + if ok { + // Already loaded. + return nil + } + + proto := config.Consensus[hdr.CurrentProtocol] + if proto.CompactCertRounds == 0 { + // No compact certs. 
+ return nil + } + + tr := &VotersForRound{ + Proto: proto, + } + tr.cond = sync.NewCond(&tr.mu) + vt.round[r] = tr + + go func() { + err := tr.loadTree(vt.l, vt.au, hdr) + if err != nil { + vt.au.log.Warnf("votersTracker.loadTree(%d): %v", hdr.Round, err) + + tr.mu.Lock() + tr.loadTreeError = err + tr.cond.Broadcast() + tr.mu.Unlock() + } + }() + return nil +} + +func (tr *VotersForRound) loadTree(l ledgerForTracker, au *accountUpdates, hdr bookkeeping.BlockHeader) error { + r := hdr.Round + + // certRound is the block that we expect to form a compact certificate for, + // using the balances from round r. + certRound := r + basics.Round(tr.Proto.CompactCertVotersLookback+tr.Proto.CompactCertRounds) + + // sigKeyRound is the ephemeral key ID that we expect to be used for signing + // the block from certRound. It is one higher because the keys for certRound + // might be deleted by the time consensus is reached on the block and we try + // to sign the compact cert for block certRound. + sigKeyRound := certRound + 1 + + top, err := au.onlineTop(r, sigKeyRound, tr.Proto.CompactCertTopVoters) + if err != nil { + return err + } + + participants := make(participantsArray, len(top)) + addrToPos := make(map[basics.Address]uint64) + var totalWeight basics.MicroAlgos + + for i, acct := range top { + var ot basics.OverflowTracker + rewards := basics.PendingRewards(&ot, tr.Proto, acct.MicroAlgos, acct.RewardsBase, hdr.RewardsLevel) + money := ot.AddA(acct.MicroAlgos, rewards) + if ot.Overflowed { + return fmt.Errorf("votersTracker.loadTree: overflow adding rewards %d + %d", acct.MicroAlgos, rewards) + } + + totalWeight = ot.AddA(totalWeight, money) + if ot.Overflowed { + return fmt.Errorf("votersTracker.loadTree: overflow computing totalWeight %d + %d", totalWeight.ToUint64(), money.ToUint64()) + } + + keyDilution := acct.VoteKeyDilution + if keyDilution == 0 { + keyDilution = tr.Proto.DefaultKeyDilution + } + + participants[i] = compactcert.Participant{ + PK: acct.VoteID, + 
Weight: money.ToUint64(), + KeyDilution: keyDilution, + } + addrToPos[acct.Address] = uint64(i) + } + + tree, err := merklearray.Build(participants) + if err != nil { + return err + } + + tr.mu.Lock() + tr.AddrToPos = addrToPos + tr.Participants = participants + tr.TotalWeight = totalWeight + tr.Tree = tree + tr.cond.Broadcast() + tr.mu.Unlock() + + return nil +} + +func (vt *votersTracker) newBlock(hdr bookkeeping.BlockHeader) { + proto := config.Consensus[hdr.CurrentProtocol] + if proto.CompactCertRounds == 0 { + // No compact certs. + return + } + + // Check if any blocks can be forgotten because the compact cert is available. + for r, tr := range vt.round { + commitRound := r + basics.Round(tr.Proto.CompactCertVotersLookback) + certRound := commitRound + basics.Round(tr.Proto.CompactCertRounds) + if certRound <= hdr.CompactCertLastRound { + delete(vt.round, r) + } + } + + // This might be a block where we snapshot the online participants, + // to eventually construct a merkle tree for commitment in a later + // block. + r := uint64(hdr.Round) + if (r+proto.CompactCertVotersLookback)%proto.CompactCertRounds == 0 { + _, ok := vt.round[basics.Round(r)] + if ok { + vt.au.log.Errorf("votersTracker.newBlock: round %d already present", r) + } else { + err := vt.loadTree(hdr) + if err != nil { + vt.au.log.Warnf("votersTracker.newBlock: loadTree: %v", err) + } + } + } +} + +// lowestRound() returns the lowest round state (blocks and accounts) needed by +// the votersTracker in case of a restart. The accountUpdates tracker will +// not delete account state before this round, so that after a restart, it's +// possible to reconstruct the votersTracker. If votersTracker does +// not need any blocks, it returns base. +func (vt *votersTracker) lowestRound(base basics.Round) basics.Round { + minRound := base + for r := range vt.round { + if r < minRound { + minRound = r + } + } + return minRound +} + +// getVoters() returns the top online participants from round r. 
+func (vt *votersTracker) getVoters(r basics.Round) (*VotersForRound, error) { + tr, ok := vt.round[r] + if !ok { + // Not tracked: compact certs not enabled. + return nil, nil + } + + // Wait for the Merkle tree to be constructed. + tr.mu.Lock() + defer tr.mu.Unlock() + for tr.Tree == nil { + if tr.loadTreeError != nil { + return nil, tr.loadTreeError + } + + tr.cond.Wait() + } + + return tr, nil +} + +//msgp:ignore participantsArray +// participantsArray implements merklearray.Array and is used to commit +// to a Merkle tree of online accounts. +type participantsArray []compactcert.Participant + +func (a participantsArray) Length() uint64 { + return uint64(len(a)) +} + +func (a participantsArray) Get(pos uint64) (crypto.Hashable, error) { + if pos >= uint64(len(a)) { + return nil, fmt.Errorf("participantsArray.Get(%d) out of bounds %d", pos, len(a)) + } + + return a[pos], nil +} diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index 1b465a134b..94e36cac64 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -926,7 +926,7 @@ func MakeDryrunStateBytes(client Client, txnOrStxn interface{}, other []transact } } -// MakeDryrunState function creates DryrunRequest data structure and serializes it into a file +// MakeDryrunState function creates v2.DryrunRequest data structure func MakeDryrunState(client Client, txnOrStxn interface{}, other []transactions.SignedTxn, proto string) (dr v2.DryrunRequest, err error) { gdr, err := MakeDryrunStateGenerated(client, txnOrStxn, other, proto) if err != nil { @@ -935,78 +935,81 @@ func MakeDryrunState(client Client, txnOrStxn interface{}, other []transactions. 
return v2.DryrunRequestFromGenerated(&gdr) } -// MakeDryrunStateGenerated function creates DryrunRequest data structure and serializes it into a file +// MakeDryrunStateGenerated function creates generatedV2.DryrunRequest data structure func MakeDryrunStateGenerated(client Client, txnOrStxn interface{}, other []transactions.SignedTxn, proto string) (dr generatedV2.DryrunRequest, err error) { var txns []transactions.SignedTxn - var tx *transactions.Transaction - if txn, ok := txnOrStxn.(transactions.Transaction); ok { - tx = &txn + if txnOrStxn == nil { + // empty input do nothing + } else if txn, ok := txnOrStxn.(transactions.Transaction); ok { txns = append(txns, transactions.SignedTxn{Txn: txn}) } else if stxn, ok := txnOrStxn.(transactions.SignedTxn); ok { - tx = &stxn.Txn txns = append(txns, stxn) } else { err = fmt.Errorf("unsupported txn type") return } + txns = append(txns, other...) for i := range txns { enc := protocol.EncodeJSON(&txns[i]) dr.Txns = append(dr.Txns, enc) } - if tx.Type == protocol.ApplicationCallTx { - apps := []basics.AppIndex{tx.ApplicationID} - apps = append(apps, tx.ForeignApps...) - for _, appIdx := range apps { - var appParams generatedV2.ApplicationParams - if appIdx == 0 { - // if it is an app create txn then use params from the txn - appParams.ApprovalProgram = tx.ApprovalProgram - appParams.ClearStateProgram = tx.ClearStateProgram - appParams.GlobalStateSchema = &generatedV2.ApplicationStateSchema{ - NumUint: tx.GlobalStateSchema.NumUint, - NumByteSlice: tx.GlobalStateSchema.NumByteSlice, - } - appParams.LocalStateSchema = &generatedV2.ApplicationStateSchema{ - NumUint: tx.LocalStateSchema.NumUint, - NumByteSlice: tx.LocalStateSchema.NumByteSlice, + for _, txn := range txns { + tx := txn.Txn + if tx.Type == protocol.ApplicationCallTx { + apps := []basics.AppIndex{tx.ApplicationID} + apps = append(apps, tx.ForeignApps...) 
+ for _, appIdx := range apps { + var appParams generatedV2.ApplicationParams + if appIdx == 0 { + // if it is an app create txn then use params from the txn + appParams.ApprovalProgram = tx.ApprovalProgram + appParams.ClearStateProgram = tx.ClearStateProgram + appParams.GlobalStateSchema = &generatedV2.ApplicationStateSchema{ + NumUint: tx.GlobalStateSchema.NumUint, + NumByteSlice: tx.GlobalStateSchema.NumByteSlice, + } + appParams.LocalStateSchema = &generatedV2.ApplicationStateSchema{ + NumUint: tx.LocalStateSchema.NumUint, + NumByteSlice: tx.LocalStateSchema.NumByteSlice, + } + appParams.Creator = tx.Sender.String() + // zero is not acceptable by ledger in dryrun/debugger + appIdx = defaultAppIdx + } else { + // otherwise need to fetch app state + var app generatedV2.Application + if app, err = client.ApplicationInformation(uint64(tx.ApplicationID)); err != nil { + return + } + appParams = app.Params } - appParams.Creator = tx.Sender.String() - // zero is not acceptable by ledger in dryrun/debugger - appIdx = defaultAppIdx - } else { - // otherwise need to fetch app state - var app generatedV2.Application - if app, err = client.ApplicationInformation(uint64(tx.ApplicationID)); err != nil { + dr.Apps = append(dr.Apps, generatedV2.Application{ + Id: uint64(appIdx), + Params: appParams, + }) + } + + accounts := append(tx.Accounts, tx.Sender) + for _, acc := range accounts { + var info generatedV2.Account + if info, err = client.AccountInformationV2(acc.String()); err != nil { return } - appParams = app.Params + dr.Accounts = append(dr.Accounts, info) } - dr.Apps = append(dr.Apps, generatedV2.Application{ - Id: uint64(appIdx), - Params: appParams, - }) - } - accounts := append(tx.Accounts, tx.Sender) - for _, acc := range accounts { - var info generatedV2.Account - if info, err = client.AccountInformationV2(acc.String()); err != nil { + dr.ProtocolVersion = proto + if dr.Round, err = client.CurrentRound(); err != nil { return } - dr.Accounts = append(dr.Accounts, 
info) - } - - dr.ProtocolVersion = proto - if dr.Round, err = client.CurrentRound(); err != nil { - return - } - var b v1.Block - if b, err = client.Block(dr.Round); err != nil { - return + var b v1.Block + if b, err = client.Block(dr.Round); err != nil { + return + } + dr.LatestTimestamp = uint64(b.Timestamp) } - dr.LatestTimestamp = uint64(b.Timestamp) } return } diff --git a/libgoal/lockedFileWindows.go b/libgoal/lockedFileWindows.go new file mode 100644 index 0000000000..3a33fba8f7 --- /dev/null +++ b/libgoal/lockedFileWindows.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// +build windows + +package libgoal + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +type windowsLocker struct { +} + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// makeLocker create a windows file locker. 
+func makeLocker() (*windowsLocker, error) { + locker := &windowsLocker{} + return locker, nil +} + +func (f *windowsLocker) tryRLock(fd *os.File) error { + if errNo := lockFileEx(syscall.Handle(fd.Fd()), winLockfileSharedLock|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errors.New("cannot lock file") + } + return nil +} + +func (f *windowsLocker) tryLock(fd *os.File) error { + if errNo := lockFileEx(syscall.Handle(fd.Fd()), winLockfileExclusiveLock|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errors.New("cannot lock file") + } + return nil +} + +func (f *windowsLocker) unlock(fd *os.File) error { + if errNo := unlockFileEx(syscall.Handle(fd.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errors.New("cannot unlock file") + } + return nil +} + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) syscall.Errno { + r1, _, errNo := syscall.Syscall6(uintptr(procLockFileEx), 6, uintptr(handle), uintptr(flags), uintptr(reserved), uintptr(numberOfBytesToLockLow), uintptr(numberOfBytesToLockHigh), uintptr(unsafe.Pointer(offset))) + if r1 != 1 { + if errNo == 0 { + return syscall.EINVAL + } + return errNo + } + return 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) syscall.Errno { + r1, _, errNo := syscall.Syscall6(uintptr(procUnlockFileEx), 5, uintptr(handle), uintptr(reserved), uintptr(numberOfBytesToLockLow), uintptr(numberOfBytesToLockHigh), uintptr(unsafe.Pointer(offset)), 0) + if r1 != 1 { + if errNo == 0 { + return syscall.EINVAL + } + return errNo + } + return 0 +} diff --git a/logging/log.go b/logging/log.go index a500eba5b9..6877c6655a 100644 --- a/logging/log.go +++ b/logging/log.go @@ -51,8 +51,7 @@ type Level uint32 // Create a general Base logger var ( - 
baseLogger Logger - telemetryConfig TelemetryConfig + baseLogger Logger ) const ( @@ -91,10 +90,6 @@ func init() { Init() } -func initializeConfig(cfg TelemetryConfig) { - telemetryConfig = cfg -} - // Fields maps logrus fields type Fields = logrus.Fields @@ -156,6 +151,7 @@ type Logger interface { EnableTelemetry(cfg TelemetryConfig) error UpdateTelemetryURI(uri string) error GetTelemetryEnabled() bool + GetTelemetryUploadingEnabled() bool Metrics(category telemetryspec.Category, metrics telemetryspec.MetricDetails, details interface{}) Event(category telemetryspec.Category, identifier telemetryspec.Event) EventWithDetails(category telemetryspec.Category, identifier telemetryspec.Event, details interface{}) @@ -375,29 +371,52 @@ func (l logger) EnableTelemetry(cfg TelemetryConfig) (err error) { func (l logger) UpdateTelemetryURI(uri string) (err error) { err = l.loggerState.telemetry.hook.UpdateHookURI(uri) if err == nil { - telemetryConfig.URI = uri + l.loggerState.telemetry.telemetryConfig.URI = uri } return } +// GetTelemetryEnabled returns true if +// logging.config Enable, or SendToLog or config.json +// TelemetryToLog is true. 
func (l logger) GetTelemetryEnabled() bool { return l.loggerState.telemetry != nil } func (l logger) GetTelemetrySession() string { - return telemetryConfig.SessionGUID + if !l.GetTelemetryEnabled() { + return "" + } + return l.loggerState.telemetry.telemetryConfig.SessionGUID } func (l logger) GetTelemetryHostName() string { - return telemetryConfig.getHostName() + if !l.GetTelemetryEnabled() { + return "" + } + return l.loggerState.telemetry.telemetryConfig.getHostName() } func (l logger) GetInstanceName() string { - return telemetryConfig.getInstanceName() + if !l.GetTelemetryEnabled() { + return "" + } + return l.loggerState.telemetry.telemetryConfig.getInstanceName() } func (l logger) GetTelemetryURI() string { - return telemetryConfig.URI + if !l.GetTelemetryEnabled() { + return "" + } + return l.loggerState.telemetry.telemetryConfig.URI +} + +// GetTelemetryUploadingEnabled returns true if telemetry logging is +// enabled for uploading messages. +// This is decided by Enable parameter in logging.config +func (l logger) GetTelemetryUploadingEnabled() bool { + return l.GetTelemetryEnabled() && + l.loggerState.telemetry.telemetryConfig.Enable } func (l logger) Metrics(category telemetryspec.Category, metrics telemetryspec.MetricDetails, details interface{}) { diff --git a/logging/telemetry.go b/logging/telemetry.go index e14ee5a0f6..1b89d1d1ab 100644 --- a/logging/telemetry.go +++ b/logging/telemetry.go @@ -85,7 +85,7 @@ func makeTelemetryState(cfg TelemetryConfig, hookFactory hookFactory) (*telemetr } else { telemetry.hook = new(dummyHook) } - telemetry.sendToLog = cfg.SendToLog + telemetry.telemetryConfig = cfg return telemetry, nil } @@ -148,7 +148,6 @@ func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryC configPath, err = config.GetConfigFilePath(TelemetryConfigFilename) if err != nil { cfg := createTelemetryConfig() - initializeConfig(cfg) return cfg, true, err } cfg, err = LoadTelemetryConfig(configPath) @@ -171,7 +170,6 @@ 
func EnsureTelemetryConfigCreated(dataDir *string, genesisID string) (TelemetryC } cfg.ChainID = fmt.Sprintf("%s-%s", ch, genesisID) - initializeConfig(cfg) return cfg, created, err } @@ -224,7 +222,7 @@ func (t *telemetryState) logTelemetry(l logger, message string, details interfac entry.Level = logrus.InfoLevel entry.Message = message - if t.sendToLog { + if t.telemetryConfig.SendToLog { entry.Info(message) } t.hook.Fire(entry) diff --git a/logging/telemetryCommon.go b/logging/telemetryCommon.go index a66484e5c9..af24d732fe 100644 --- a/logging/telemetryCommon.go +++ b/logging/telemetryCommon.go @@ -47,9 +47,9 @@ type telemetryHook interface { } type telemetryState struct { - history *logBuffer - hook telemetryHook - sendToLog bool + history *logBuffer + hook telemetryHook + telemetryConfig TelemetryConfig } // TelemetryConfig represents the configuration of Telemetry logging diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go index a127d91833..d8fd544698 100644 --- a/logging/telemetryConfig.go +++ b/logging/telemetryConfig.go @@ -37,7 +37,7 @@ const hostnameLength = 255 // TelemetryOverride Determines whether an override value is set and what it's value is. // The first return value is whether an override variable is found, if it is, the second is the override value. 
-func TelemetryOverride(env string) bool { +func TelemetryOverride(env string, telemetryConfig *TelemetryConfig) bool { env = strings.ToLower(env) if env == "1" || env == "true" { @@ -140,7 +140,5 @@ func loadTelemetryConfig(path string) (TelemetryConfig, error) { cfg.Name = SanitizeTelemetryString(cfg.Name, 1) } - initializeConfig(cfg) - return cfg, err } diff --git a/logging/telemetry_test.go b/logging/telemetry_test.go index 1a806fffb0..73d52f104a 100644 --- a/logging/telemetry_test.go +++ b/logging/telemetry_test.go @@ -58,7 +58,6 @@ func makeMockTelemetryHook(level logrus.Level) mockTelemetryHook { } type telemetryTestFixture struct { - cfg TelemetryConfig hook mockTelemetryHook telem *telemetryState l logger @@ -70,20 +69,22 @@ func makeTelemetryTestFixture(minLevel logrus.Level) *telemetryTestFixture { func makeTelemetryTestFixtureWithConfig(minLevel logrus.Level, cfg *TelemetryConfig) *telemetryTestFixture { f := &telemetryTestFixture{} + var lcfg TelemetryConfig if cfg == nil { - f.cfg = createTelemetryConfig() + lcfg = createTelemetryConfig() } else { - f.cfg = *cfg + lcfg = *cfg } - f.cfg.Enable = true - f.cfg.MinLogLevel = minLevel + lcfg.Enable = true + lcfg.MinLogLevel = minLevel f.hook = makeMockTelemetryHook(minLevel) f.l = Base().(logger) f.l.SetLevel(Debug) // Ensure logging doesn't filter anything out - f.telem, _ = makeTelemetryState(f.cfg, func(cfg TelemetryConfig) (hook logrus.Hook, err error) { + f.telem, _ = makeTelemetryState(lcfg, func(cfg TelemetryConfig) (hook logrus.Hook, err error) { return &f.hook, nil }) + f.l.loggerState.telemetry = f.telem return f } @@ -146,7 +147,7 @@ func TestTelemetryHook(t *testing.T) { a := require.New(t) f := makeTelemetryTestFixture(logrus.InfoLevel) - a.NotNil(f.telem) + a.NotNil(f.l.loggerState.telemetry) a.Zero(len(f.hookEntries())) f.telem.logMetrics(f.l, testString1, testMetrics{}, nil) diff --git a/logging/usage.go b/logging/usage.go index ec9e1fdd91..0d0515f8b6 100644 --- a/logging/usage.go +++ 
b/logging/usage.go @@ -19,46 +19,38 @@ package logging import ( "context" "sync" - "syscall" "time" -) -func timevalSubToMicroseconds(a, b syscall.Timeval) int64 { - seconds := a.Sec - b.Sec - var dusec int32 - if b.Usec > a.Usec { - seconds-- - dusec = int32(1000000) + int32(a.Usec-b.Usec) - } else { - dusec = int32(a.Usec - b.Usec) - } - return (int64(seconds) * 1000000) + int64(dusec) -} + "github.com/algorand/go-algorand/util" +) // UsageLogThread utility logging method func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *sync.WaitGroup) { if wg != nil { defer wg.Done() } - var usage syscall.Rusage + var now time.Time - var prevUsage syscall.Rusage + var prevUtime, prevStime int64 + var Utime, Stime int64 var prevTime time.Time + ticker := time.NewTicker(period) hasPrev := false + for true { select { case <-ticker.C: case <-ctx.Done(): return } + now = time.Now() - err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage) - if err != nil { - } + Utime, Stime, _ = util.GetCurrentProcessTimes() + if hasPrev { - userNanos := timevalSubToMicroseconds(usage.Utime, prevUsage.Utime) * 1000 - sysNanos := timevalSubToMicroseconds(usage.Stime, prevUsage.Stime) * 1000 + userNanos := Utime - prevUtime + sysNanos := Stime - prevStime wallNanos := now.Sub(prevTime).Nanoseconds() userf := float64(userNanos) / float64(wallNanos) sysf := float64(sysNanos) / float64(wallNanos) @@ -66,7 +58,9 @@ func UsageLogThread(ctx context.Context, log Logger, period time.Duration, wg *s } else { hasPrev = true } - prevUsage = usage + + prevUtime = Utime + prevStime = Stime prevTime = now } } diff --git a/mule.yaml b/mule.yaml deleted file mode 100644 index bfa4b56939..0000000000 --- a/mule.yaml +++ /dev/null @@ -1,220 +0,0 @@ -tasks: - - - task: shell.Shell - name: go-version - command: scripts/get_golang_version.sh - saveLogs: true - - # Stash tasks - - task: stash.Stash - name: linux-amd64 - bucketName: go-algorand-ci-cache - stashId: 
${JENKINS_JOB_CACHE_ID}/linux-amd64 - globSpecs: - - tmp/node_pkgs/** - - crypto/libs/** - - gen/devnet/genesis.json - - gen/testnet/genesis.json - - gen/mainnet/genesis.json - - task: stash.Stash - name: darwin-amd64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 - globSpecs: - - tmp/node_pkgs/** - - crypto/libs/** - - gen/devnet/genesis.json - - gen/testnet/genesis.json - - gen/mainnet/genesis.json - - task: stash.Stash - name: linux-arm64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm64 - globSpecs: - - tmp/node_pkgs/** - - crypto/libs/** - - gen/devnet/genesis.json - - gen/testnet/genesis.json - - gen/mainnet/genesis.json - - task: stash.Stash - name: linux-arm32 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm32 - globSpecs: - - tmp/node_pkgs/** - - crypto/libs/** - - gen/devnet/genesis.json - - gen/testnet/genesis.json - - gen/mainnet/genesis.json - - # Unstash tasks - - task: stash.Unstash - name: linux-arm64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm64 - - task: stash.Unstash - name: linux-amd64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64 - - task: stash.Unstash - name: darwin-amd64 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 - - task: stash.Unstash - name: linux-arm32 - bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm32 - - # Docker tasks - - task: docker.Version - configFilePath: scripts/configure_dev-deps.sh - - task: shell.docker.Ensure - name: centos - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - dockerFilePath: docker/build/cicd.centos.Dockerfile - dependencies: shell.Shell.go-version docker.Version - - task: shell.docker.Ensure - name: alpine - image: 
algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - dockerFilePath: docker/build/cicd.alpine.Dockerfile - dependencies: shell.Shell.go-version docker.Version - - task: docker.Make - name: build - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-build - dependencies: shell.Shell.go-version docker.Version - - task: docker.Make - name: fulltest - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - workDir: /go/src/github.com/algorand/go-algorand - target: fulltest -j4 - dependencies: shell.Shell.go-version docker.Version - - task: docker.Make - name: integration-test - docker: - env: - - SHORTTEST=-short - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-integration -j4 - dependencies: shell.Shell.go-version docker.Version - - task: docker.Make - name: archive - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}-{{ shell.Shell.go-version.outputs.stdout }}' - goVersion: '{{ shell.Shell.go-version.outputs.stdout }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-archive - dependencies: shell.Shell.go-version docker.Version - - # Local Tasks - - task: shell.Make - name: ci-deps build - target: ci-build - - - task: shell.Make - name: fulltest - target: fulltest -j4 - - task: shell.Make - name: shorttest - 
target: shorttest -j3 - - task: shell.Make - name: integration-test - target: ci-integration -j4 - - task: shell.Make - name: archive - target: archive - -jobs: - # Linux amd64 jobs - build-linux-amd64: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos - - docker.Make.build -# - stash.Stash.linux-amd64 - test-linux-amd64-integration: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos -# - stash.Unstash.linux-amd64 - - docker.Make.integration-test - test-linux-amd64-fulltest: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos - - docker.Make.fulltest - - # Darwin amd64 jobs - # build-darwin-amd64: - # configs: - # arch: amd64 - # tasks: - # - shell.Make.build - # - stash.Stash.darwin-amd64 - # test-darwin-amd64-integration: - # configs: - # arch: amd64 - # tasks: - # - stash.Unstash.darwin-amd64 - # - shell.Make.integration-test - # test-darwin-amd64-fulltest: - # configs: - # arch: amd64 - # tasks: - # - shell.Make.fulltest - - # Linux arm64 jobs - build-linux-arm64: - configs: - arch: arm64v8 - tasks: - - shell.docker.Ensure.centos - - docker.Make.build -# - stash.Stash.linux-arm64 - test-linux-arm64-integration: - configs: - arch: arm64v8 - tasks: - - shell.docker.Ensure.centos - - stash.Unstash.linux-arm64 - - docker.Make.integration-test - - # Linux arm32 jobs - build-linux-arm32: - configs: - arch: arm32v6 - tasks: - - shell.docker.Ensure.alpine - - docker.Make.build - - stash.Stash.linux-arm32 - - # Archive jobs - archive-linux-amd64: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos - - stash.Unstash.linux-amd64 - # - stash.Unstash.darwin-amd64 - - stash.Unstash.linux-arm64 - - stash.Unstash.linux-arm32 - - docker.Make.archive - diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 1f6e365848..725816e180 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -185,9 +185,16 @@ func (t NetworkTemplate) Validate() error { } accounts[upperAcct] = true } + 
totalPctInt, _ := totalPct.Int64() + const epsilon = 0.0000001 if totalPctInt != 100 { - return fmt.Errorf("invalid template: Genesis account allocations must total 100 (actual %v)", totalPct) + totalPctFloat, _ := totalPct.Float64() + if totalPctInt < 100 && totalPctFloat > (100.0-epsilon) { + // ignore. This is a rounding error. + } else { + return fmt.Errorf("invalid template: Genesis account allocations must total 100 (actual %v)", totalPct) + } } // No wallet can be assigned to more than one node diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go index 18d02a76d4..2b91682758 100644 --- a/netdeploy/networkTemplates_test.go +++ b/netdeploy/networkTemplates_test.go @@ -82,4 +82,9 @@ func TestValidate(t *testing.T) { template, _ = loadTemplate(filepath.Join(templateDir, "NegativeStake.json")) err = template.Validate() a.Error(err) + + templateDir, _ = filepath.Abs("../test/testdata/nettemplates") + template, _ = loadTemplate(filepath.Join(templateDir, "TwoNodesOneRelay1000Accounts.json")) + err = template.Validate() + a.NoError(err) } diff --git a/netdeploy/remote/buildConfig.go b/netdeploy/remote/buildConfig.go index c62fa0580e..ab8e44b68e 100644 --- a/netdeploy/remote/buildConfig.go +++ b/netdeploy/remote/buildConfig.go @@ -40,6 +40,7 @@ type BuildConfig struct { CrontabSchedule string EnableAlgoh bool DashboardEndpoint string + MiscStringString []string } // LoadBuildConfig loads a BuildConfig structure from a json file diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 5e10ebeb99..79d70d3001 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -42,13 +42,19 @@ var ErrDeployedNetworkRootDirExists = fmt.Errorf("unable to generate deployed ne // ErrDeployedNetworkInsufficientHosts is returned by Validate if our target network requires more hosts than the topology provides var ErrDeployedNetworkInsufficientHosts = fmt.Errorf("target network requires 
more hosts than the topology provides") -// ErrDeployedNetworkTokenError is returned by InitDeployedNetworkConfig -// if the config file contains {{ or }} after token replacement -var ErrDeployedNetworkTokenError = fmt.Errorf("config file contains unrecognized token") - // ErrDeployedNetworkNameCantIncludeWildcard is returned by Validate if network name contains '*' var ErrDeployedNetworkNameCantIncludeWildcard = fmt.Errorf("network name cannont include wild-cards") +// ErrDeployedNetworkTemplate A template file contained {{Field}} sections that were not handled by a corresponding Field value in configuration. +type ErrDeployedNetworkTemplate struct { + UnhandledTemplate string +} + +// Error satisfies error interface +func (ednt ErrDeployedNetworkTemplate) Error() string { + return fmt.Sprintf("config file contains unrecognized token: %s", ednt.UnhandledTemplate) +} + // DeployedNetworkConfig represents the complete configuration specification for a deployed network type DeployedNetworkConfig struct { Hosts []HostConfig @@ -104,12 +110,21 @@ func replaceTokens(original string, buildConfig BuildConfig) (expanded string, e tokenPairs = append(tokenPairs, "{{CrontabSchedule}}", buildConfig.CrontabSchedule) tokenPairs = append(tokenPairs, "{{EnableAlgoh}}", strconv.FormatBool(buildConfig.EnableAlgoh)) tokenPairs = append(tokenPairs, "{{DashboardEndpoint}}", buildConfig.DashboardEndpoint) + tokenPairs = append(tokenPairs, buildConfig.MiscStringString...) 
expanded = strings.NewReplacer(tokenPairs...).Replace(original) // To validate that there wasn't a typo in an intended token, look for obvious clues like "{{" or "}}" - if strings.Index(expanded, "{{") >= 0 || strings.Index(expanded, "}}") >= 0 { - return "", ErrDeployedNetworkTokenError + openIndex := strings.Index(expanded, "{{") + closeIndex := strings.Index(expanded, "}}") + if openIndex >= 0 || closeIndex >= 0 { + if openIndex < 0 { + openIndex = 0 + } + if closeIndex < 0 { + closeIndex = len(expanded) - 2 + } + return "", ErrDeployedNetworkTemplate{expanded[openIndex : closeIndex+2]} } return @@ -360,6 +375,7 @@ type cloudHostConfiguration struct { type cloudHostSpec struct { Name string + Group string Provider string Region string InstanceType string @@ -404,6 +420,9 @@ func (cfg DeployedNetwork) GenerateCloudTemplate(templates HostTemplates, target if err != nil { return } + + hostSpec.Group = strings.TrimSpace(cloudHost.Group) + topology.Hosts = append(topology.Hosts, hostSpec) } @@ -488,6 +507,7 @@ func createHostSpec(host HostConfig, template cloudHost) (hostSpec cloudHostSpec } hostSpec.Name = host.Name + hostSpec.Group = host.Group hostSpec.Provider = template.Provider hostSpec.Region = template.Region hostSpec.InstanceType = template.BaseConfiguration diff --git a/netdeploy/remote/hostConfig.go b/netdeploy/remote/hostConfig.go index c620a91d36..f3f15313aa 100644 --- a/netdeploy/remote/hostConfig.go +++ b/netdeploy/remote/hostConfig.go @@ -19,5 +19,6 @@ package remote // HostConfig represents the configuration of a single deployed Host type HostConfig struct { Name string + Group string Nodes []NodeConfig } diff --git a/netdeploy/remote/topology.go b/netdeploy/remote/topology.go index b5b9eb34e6..7abda88fc7 100644 --- a/netdeploy/remote/topology.go +++ b/netdeploy/remote/topology.go @@ -23,6 +23,7 @@ import ( type cloudHostType struct { Name string + Group string Template string } diff --git a/network/ping.go b/network/ping.go index 
0988f61d00..5450335ac0 100644 --- a/network/ping.go +++ b/network/ping.go @@ -28,7 +28,7 @@ func pingHandler(message IncomingMessage) OutgoingMessage { if len(message.Data) > 8 { return OutgoingMessage{} } - message.Net.(*WebsocketNetwork).log.Debugf("ping from %#v", message.Sender) + message.Net.(*WebsocketNetwork).log.Debugf("ping from peer %#v", message.Sender.(*wsPeer).wsPeerCore) peer := message.Sender.(*wsPeer) tbytes := []byte(protocol.PingReplyTag) mbytes := make([]byte, len(tbytes)+len(message.Data)) diff --git a/network/ping_test.go b/network/ping_test.go index 4914917c83..d7283831a8 100644 --- a/network/ping_test.go +++ b/network/ping_test.go @@ -60,7 +60,7 @@ func TestPing(t *testing.T) { if lastPingRoundTripTime > 0 { postPing := time.Now() testTime := postPing.Sub(prePing) - if (lastPingRoundTripTime < testTime) { + if lastPingRoundTripTime < testTime { // success return } diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index 5bceb3b087..4ec59f66a8 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -29,19 +29,21 @@ import ( type eventsDetailsLogger struct { logging.Logger - eventReceived chan struct{} + eventIdentifier telemetryspec.Event + eventReceived chan interface{} } func (dl eventsDetailsLogger) EventWithDetails(category telemetryspec.Category, identifier telemetryspec.Event, details interface{}) { - if category == telemetryspec.Network && identifier == telemetryspec.HTTPRequestEvent { - dl.eventReceived <- struct{}{} + if category == telemetryspec.Network && identifier == dl.eventIdentifier { + dl.eventReceived <- details + } } // for two node network, check that B can ping A and get a reply func TestRequestLogger(t *testing.T) { log := logging.TestingLog(t) - dl := eventsDetailsLogger{Logger: log, eventReceived: make(chan struct{}, 1)} + dl := eventsDetailsLogger{Logger: log, eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.HTTPRequestEvent} 
log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) netA := &WebsocketNetwork{ log: dl, diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 5a154156ee..4e332ed35a 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -44,7 +44,6 @@ import ( "github.com/algorand/websocket" "github.com/gorilla/mux" "golang.org/x/net/netutil" - "golang.org/x/sys/unix" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" @@ -105,6 +104,10 @@ const maxMessageQueueDuration = 25 * time.Second // verify that their current outgoing message is not being blocked for too long. const slowWritingPeerMonitorInterval = 5 * time.Second +// unprintableCharacterGlyph is used to replace any non-ascii character when logging incoming network string directly +// to the log file. Note that the log file itself would also json-encode these before placing them in the log file. +const unprintableCharacterGlyph = "▯" + var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections) var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections) @@ -649,49 +652,6 @@ func (wn *WebsocketNetwork) setup() { } } -func (wn *WebsocketNetwork) rlimitIncomingConnections() error { - var lim unix.Rlimit - err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim) - if err != nil { - return err - } - - // If rlim_max is not sufficient, reduce IncomingConnectionsLimit - var rlimitMaxCap uint64 - if lim.Max < wn.config.ReservedFDs { - rlimitMaxCap = 0 - } else { - rlimitMaxCap = lim.Max - wn.config.ReservedFDs - } - if rlimitMaxCap > uint64(MaxInt) { - rlimitMaxCap = uint64(MaxInt) - } - if wn.config.IncomingConnectionsLimit > int(rlimitMaxCap) { - wn.log.Warnf("Reducing IncomingConnectionsLimit from %d to %d since RLIMIT_NOFILE is %d", - wn.config.IncomingConnectionsLimit, rlimitMaxCap, lim.Max) - wn.config.IncomingConnectionsLimit = int(rlimitMaxCap) - } - - // Set rlim_cur to match IncomingConnectionsLimit - 
newLimit := uint64(wn.config.IncomingConnectionsLimit) + wn.config.ReservedFDs - if newLimit > lim.Cur { - if runtime.GOOS == "darwin" && newLimit > 10240 && lim.Max == 0x7fffffffffffffff { - // The max file limit is 10240, even though - // the max returned by Getrlimit is 1<<63-1. - // This is OPEN_MAX in sys/syslimits.h. - // see https://github.com/golang/go/issues/30401 - newLimit = 10240 - } - lim.Cur = newLimit - err = unix.Setrlimit(unix.RLIMIT_NOFILE, &lim) - if err != nil { - return err - } - } - - return nil -} - // Start makes network connections and threads func (wn *WebsocketNetwork) Start() { var err error @@ -847,7 +807,7 @@ func (wn *WebsocketNetwork) setHeaders(header http.Header) { func (wn *WebsocketNetwork) checkServerResponseVariables(otherHeader http.Header, addr string) (bool, string) { matchingVersion, otherVersion := wn.checkProtocolVersionMatch(otherHeader) if matchingVersion == "" { - wn.log.Infof("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", addr, SupportedProtocolVersions, otherVersion, otherHeader) + wn.log.Info(filterASCII(fmt.Sprintf("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", addr, SupportedProtocolVersions, otherVersion, otherHeader))) return false, "" } otherRandom := otherHeader.Get(NodeRandomHeader) @@ -855,7 +815,7 @@ func (wn *WebsocketNetwork) checkServerResponseVariables(otherHeader http.Header // This is pretty harmless and some configurations of phonebooks or DNS records make this likely. Quietly filter it out. if otherRandom == "" { // missing header. - wn.log.Warnf("new peer %s did not include random ID header in request. mine=%s headers %#v", addr, wn.RandomID, otherHeader) + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %s did not include random ID header in request. mine=%s headers %#v", addr, wn.RandomID, otherHeader))) } else { wn.log.Debugf("new peer %s has same node random id, am I talking to myself? 
%s", addr, wn.RandomID) } @@ -864,7 +824,7 @@ func (wn *WebsocketNetwork) checkServerResponseVariables(otherHeader http.Header otherGenesisID := otherHeader.Get(GenesisHeader) if wn.GenesisID != otherGenesisID { if otherGenesisID != "" { - wn.log.Warnf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.GenesisID, otherGenesisID, otherHeader) + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.GenesisID, otherGenesisID, otherHeader))) } else { wn.log.Warnf("new peer %#v did not include genesis header in response. mine=%#v headers %#v", addr, wn.GenesisID, otherHeader) } @@ -934,7 +894,8 @@ func (wn *WebsocketNetwork) checkProtocolVersionMatch(otherHeaders http.Header) return supportedProtocolVersion, otherVersion } } - return "", otherVersion + + return "", filterASCII(otherVersion) } // checkIncomingConnectionVariables checks the variables that were provided on the request, and compares them to the @@ -951,7 +912,7 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo } if wn.GenesisID != otherGenesisID { - wn.log.Warnf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", request.RemoteAddr, wn.GenesisID, otherGenesisID, request.Header) + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", request.RemoteAddr, wn.GenesisID, otherGenesisID, request.Header))) networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "mismatching genesis-id"}) response.WriteHeader(http.StatusPreconditionFailed) response.Write([]byte("mismatching genesis ID")) @@ -963,7 +924,7 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo // This is pretty harmless and some configurations of phonebooks or DNS records make this likely. Quietly filter it out. var message string // missing header. - wn.log.Warnf("new peer %s did not include random ID header in request. 
mine=%s headers %#v", request.RemoteAddr, wn.RandomID, request.Header) + wn.log.Warn(filterASCII(fmt.Sprintf("new peer %s did not include random ID header in request. mine=%s headers %#v", request.RemoteAddr, wn.RandomID, request.Header))) networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "missing random ID header"}) message = fmt.Sprintf("Request was missing a %s header", NodeRandomHeader) response.WriteHeader(http.StatusPreconditionFailed) @@ -1010,10 +971,10 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt matchingVersion, otherVersion := wn.checkProtocolVersionMatch(request.Header) if matchingVersion == "" { - wn.log.Infof("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", request.RemoteAddr, SupportedProtocolVersions, otherVersion, request.Header) + wn.log.Info(filterASCII(fmt.Sprintf("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", request.RemoteAddr, SupportedProtocolVersions, otherVersion, request.Header))) networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "mismatching protocol version"}) response.WriteHeader(http.StatusPreconditionFailed) - message := fmt.Sprintf("Requested version %s not in %v mismatches server version", otherVersion, SupportedProtocolVersions) + message := fmt.Sprintf("Requested version %s not in %v mismatches server version", filterASCII(otherVersion), SupportedProtocolVersions) n, err := response.Write([]byte(message)) if err != nil { wn.log.Warnf("ws failed to write response '%s' : n = %d err = %v", message, n, err) @@ -1797,6 +1758,23 @@ func (wn *WebsocketNetwork) GetRoundTripper() http.RoundTripper { return &wn.transport } +// filterASCII replaces any character of the given input string that falls outside the printable +// ASCII range with unprintableCharacterGlyph. +// It's used as a security qualifier before logging network-provided data.
+// The function allows only characters in the range of [32..126], which excludes all the +// control characters, new lines, deletion, etc. All the alphanumeric and punctuation characters +// are included in this range. +func filterASCII(unfilteredString string) (filteredString string) { + for i, r := range unfilteredString { + if int(r) >= 0x20 && int(r) <= 0x7e { + filteredString += string(unfilteredString[i]) + } else { + filteredString += unprintableCharacterGlyph + } + } + return +} + +// tryConnect opens websocket connection and checks initial connection parameters. +// addr should be 'host:port' or a URL, gossipAddr is the websocket endpoint URL +func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { @@ -1835,6 +1813,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { if len(errString) > 128 { errString = errString[:128] } + errString = filterASCII(errString) // we're guaranteed to have a valid response object. switch response.StatusCode { diff --git a/network/wsNetwork_common.go b/network/wsNetwork_common.go new file mode 100644 index 0000000000..3cfce65b1c --- /dev/null +++ b/network/wsNetwork_common.go @@ -0,0 +1,68 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+ +// +build !windows + +package network + +import ( + "runtime" + + "golang.org/x/sys/unix" +) + +func (wn *WebsocketNetwork) rlimitIncomingConnections() error { + var lim unix.Rlimit + err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim) + if err != nil { + return err + } + + // If rlim_max is not sufficient, reduce IncomingConnectionsLimit + var rlimitMaxCap uint64 + if lim.Max < wn.config.ReservedFDs { + rlimitMaxCap = 0 + } else { + rlimitMaxCap = lim.Max - wn.config.ReservedFDs + } + if rlimitMaxCap > uint64(MaxInt) { + rlimitMaxCap = uint64(MaxInt) + } + if wn.config.IncomingConnectionsLimit > int(rlimitMaxCap) { + wn.log.Warnf("Reducing IncomingConnectionsLimit from %d to %d since RLIMIT_NOFILE is %d", + wn.config.IncomingConnectionsLimit, rlimitMaxCap, lim.Max) + wn.config.IncomingConnectionsLimit = int(rlimitMaxCap) + } + + // Set rlim_cur to match IncomingConnectionsLimit + newLimit := uint64(wn.config.IncomingConnectionsLimit) + wn.config.ReservedFDs + if newLimit > lim.Cur { + if runtime.GOOS == "darwin" && newLimit > 10240 && lim.Max == 0x7fffffffffffffff { + // The max file limit is 10240, even though + // the max returned by Getrlimit is 1<<63-1. + // This is OPEN_MAX in sys/syslimits.h. 
+ // see https://github.com/golang/go/issues/30401 + newLimit = 10240 + } + lim.Cur = newLimit + err = unix.Setrlimit(unix.RLIMIT_NOFILE, &lim) + if err != nil { + return err + } + } + + return nil +} diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 40059ee1dd..0c83006f99 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -30,7 +30,6 @@ import ( "strings" "sync" "sync/atomic" - "syscall" "testing" "time" @@ -42,7 +41,9 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/metrics" ) @@ -1069,14 +1070,16 @@ func TestWebsocketNetworkManyIdle(t *testing.T) { waitReady(t, clients[i], readyTimeout.C) } - var r0, r1 syscall.Rusage - syscall.Getrusage(syscall.RUSAGE_SELF, &r0) + var r0utime, r1utime int64 + var r0stime, r1stime int64 + + r0utime, r0stime, _ = util.GetCurrentProcessTimes() time.Sleep(10 * time.Second) - syscall.Getrusage(syscall.RUSAGE_SELF, &r1) + r1utime, r1stime, _ = util.GetCurrentProcessTimes() t.Logf("Background CPU use: user %v, system %v\n", - time.Duration(r1.Utime.Nano()-r0.Utime.Nano()), - time.Duration(r1.Stime.Nano()-r0.Stime.Nano())) + time.Duration(r1utime-r0utime), + time.Duration(r1stime-r0stime)) } // TODO: test both sides of http-header setting and checking? 
@@ -1194,7 +1197,7 @@ func TestDelayedMessageDrop(t *testing.T) { func TestSlowPeerDisconnection(t *testing.T) { log := logging.TestingLog(t) - log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) + log.SetLevel(logging.Info) wn := &WebsocketNetwork{ log: log, config: defaultConfig, @@ -1205,6 +1208,7 @@ func TestSlowPeerDisconnection(t *testing.T) { } wn.setup() wn.eventualReadyDelay = time.Second + wn.messagesOfInterest = nil // clear this before starting the network so that we won't be sending a MOI upon connection. netA := wn netA.config.GossipFanout = 1 @@ -1231,12 +1235,17 @@ func TestSlowPeerDisconnection(t *testing.T) { require.Equalf(t, len(peers), 1, "Expected number of peers should be 1") peer := peers[0] // modify the peer on netA and - atomic.StoreInt64(&peer.intermittentOutgoingMessageEnqueueTime, time.Now().Add(-maxMessageQueueDuration).Add(-time.Second).UnixNano()) - // wait up to 2*slowWritingPeerMonitorInterval for the monitor to figure out it needs to disconnect. - expire := time.Now().Add(maxMessageQueueDuration * time.Duration(2)) + beforeLoopTime := time.Now() + atomic.StoreInt64(&peer.intermittentOutgoingMessageEnqueueTime, beforeLoopTime.Add(-maxMessageQueueDuration).Add(time.Second).UnixNano()) + // wait up to 10 seconds for the monitor to figure out it needs to disconnect. + expire := beforeLoopTime.Add(2 * slowWritingPeerMonitorInterval) for { peers = netA.peerSnapshot(peers) if len(peers) == 0 || peers[0] != peer { + // make sure it took more than 1 second, and less than 5 seconds. 
+ waitTime := time.Now().Sub(beforeLoopTime) + require.LessOrEqual(t, int64(time.Second), int64(waitTime)) + require.GreaterOrEqual(t, int64(5*time.Second), int64(waitTime)) break } if time.Now().After(expire) { @@ -1375,6 +1384,12 @@ func TestCheckProtocolVersionMatch(t *testing.T) { matchingVersion, otherVersion = wn.checkProtocolVersionMatch(header3) require.Equal(t, "", matchingVersion) require.Equal(t, "3", otherVersion) + + header4 := make(http.Header) + header4.Add(ProtocolVersionHeader, "5\n") + matchingVersion, otherVersion = wn.checkProtocolVersionMatch(header4) + require.Equal(t, "", matchingVersion) + require.Equal(t, "5"+unprintableCharacterGlyph, otherVersion) } func handleTopicRequest(msg IncomingMessage) (out OutgoingMessage) { @@ -1541,3 +1556,184 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) { } } } + +// Set up two nodes, have one of them disconnect from the other, and monitor disconnection error on the side that did not issue the disconnection. +// Plan: +// Network A will be sending messages to network B. +// Network B will respond with another message for the first 4 messages. When it receive the 5th message, it would close the connection. 
+// We want to get an event with disconnectRequestReceived +func TestWebsocketDisconnection(t *testing.T) { + netA := makeTestWebsocketNode(t) + netA.config.GossipFanout = 1 + netA.config.EnablePingHandler = false + dl := eventsDetailsLogger{Logger: logging.TestingLog(t), eventReceived: make(chan interface{}, 1), eventIdentifier: telemetryspec.DisconnectPeerEvent} + netA.log = dl + + netA.Start() + defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }() + netB := makeTestWebsocketNode(t) + netB.config.GossipFanout = 1 + netB.config.EnablePingHandler = false + addrA, postListen := netA.Address() + require.True(t, postListen) + t.Log(addrA) + netB.phonebook.ReplacePeerList([]string{addrA}, "default") + netB.Start() + defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() + + msgHandlerA := func(msg IncomingMessage) (out OutgoingMessage) { + // if we received a message, send a message back. + if msg.Data[0]%10 == 2 { + netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{msg.Data[0] + 8}, true, nil) + } + return + } + + var msgCounterNetB uint32 + msgHandlerB := func(msg IncomingMessage) (out OutgoingMessage) { + if atomic.AddUint32(&msgCounterNetB, 1) == 5 { + // disconnect + netB.DisconnectPeers() + } else { + // if we received a message, send a message back. + netB.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{msg.Data[0] + 1}, true, nil) + netB.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{msg.Data[0] + 2}, true, nil) + } + return + } + + // register all the handlers. 
+ taggedHandlersA := []TaggedMessageHandler{ + TaggedMessageHandler{ + Tag: protocol.ProposalPayloadTag, + MessageHandler: HandlerFunc(msgHandlerA), + }, + } + netA.ClearHandlers() + netA.RegisterHandlers(taggedHandlersA) + + taggedHandlersB := []TaggedMessageHandler{ + TaggedMessageHandler{ + Tag: protocol.ProposalPayloadTag, + MessageHandler: HandlerFunc(msgHandlerB), + }, + } + netB.ClearHandlers() + netB.RegisterHandlers(taggedHandlersB) + + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0}, true, nil) + // wait until the peers disconnect. + for { + peers := netA.GetPeers(PeersConnectedIn) + if len(peers) == 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + + select { + case eventDetails := <-dl.eventReceived: + switch disconnectPeerEventDetails := eventDetails.(type) { + case telemetryspec.DisconnectPeerEventDetails: + require.Equal(t, disconnectPeerEventDetails.Reason, string(disconnectRequestReceived)) + default: + require.FailNow(t, "Unexpected event was send : %v", eventDetails) + } + + default: + require.FailNow(t, "The DisconnectPeerEvent was missing") + } +} + +// TestASCIIFiltering tests the behaviour of filterASCII by feeding it with few known inputs and verifying the expected outputs. 
+func TestASCIIFiltering(t *testing.T) { + testUnicodePrintableStrings := []struct { + testString string + expectedString string + }{ + {"abc", "abc"}, + {"", ""}, + {"אבג", unprintableCharacterGlyph + unprintableCharacterGlyph + unprintableCharacterGlyph}, + {"\u001b[31mABC\u001b[0m", unprintableCharacterGlyph + "[31mABC" + unprintableCharacterGlyph + "[0m"}, + {"ab\nc", "ab" + unprintableCharacterGlyph + "c"}, + } + for _, testElement := range testUnicodePrintableStrings { + outString := filterASCII(testElement.testString) + require.Equalf(t, testElement.expectedString, outString, "test string:%s", testElement.testString) + } +} + +type callbackLogger struct { + logging.Logger + InfoCallback func(...interface{}) + InfofCallback func(string, ...interface{}) + WarnCallback func(...interface{}) + WarnfCallback func(string, ...interface{}) +} + +func (cl callbackLogger) Info(args ...interface{}) { + cl.InfoCallback(args...) +} +func (cl callbackLogger) Infof(s string, args ...interface{}) { + cl.InfofCallback(s, args...) +} + +func (cl callbackLogger) Warn(args ...interface{}) { + cl.WarnCallback(args...) +} +func (cl callbackLogger) Warnf(s string, args ...interface{}) { + cl.WarnfCallback(s, args...) +} + +// TestMaliciousCheckServerResponseVariables test the checkServerResponseVariables to ensure it doesn't print the a malicious input without being filtered to the log file. +func TestMaliciousCheckServerResponseVariables(t *testing.T) { + wn := makeTestWebsocketNode(t) + wn.GenesisID = "genesis-id1" + wn.RandomID = "random-id1" + wn.log = callbackLogger{ + Logger: wn.log, + InfoCallback: func(args ...interface{}) { + s := fmt.Sprint(args...) + require.NotContains(t, s, "א") + }, + InfofCallback: func(s string, args ...interface{}) { + s = fmt.Sprintf(s, args...) + require.NotContains(t, s, "א") + }, + WarnCallback: func(args ...interface{}) { + s := fmt.Sprint(args...) 
+ require.NotContains(t, s, "א") + }, + WarnfCallback: func(s string, args ...interface{}) { + s = fmt.Sprintf(s, args...) + require.NotContains(t, s, "א") + }, + } + + header1 := http.Header{} + header1.Set(ProtocolVersionHeader, ProtocolVersion+"א") + header1.Set(NodeRandomHeader, wn.RandomID+"tag") + header1.Set(GenesisHeader, wn.GenesisID) + responseVariableOk, matchingVersion := wn.checkServerResponseVariables(header1, "addressX") + require.Equal(t, false, responseVariableOk) + require.Equal(t, "", matchingVersion) + + header2 := http.Header{} + header2.Set(ProtocolVersionHeader, ProtocolVersion) + header2.Set("א", "א") + header2.Set(GenesisHeader, wn.GenesisID) + responseVariableOk, matchingVersion = wn.checkServerResponseVariables(header2, "addressX") + require.Equal(t, false, responseVariableOk) + require.Equal(t, "", matchingVersion) + + header3 := http.Header{} + header3.Set(ProtocolVersionHeader, ProtocolVersion) + header3.Set(NodeRandomHeader, wn.RandomID+"tag") + header3.Set(GenesisHeader, wn.GenesisID+"א") + responseVariableOk, matchingVersion = wn.checkServerResponseVariables(header3, "addressX") + require.Equal(t, false, responseVariableOk) + require.Equal(t, "", matchingVersion) +} diff --git a/network/wsNetwork_windows.go b/network/wsNetwork_windows.go new file mode 100644 index 0000000000..af58d2cd00 --- /dev/null +++ b/network/wsNetwork_windows.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// +build windows + +package network + +func (wn *WebsocketNetwork) rlimitIncomingConnections() error { + return nil +} diff --git a/network/wsPeer.go b/network/wsPeer.go index c01a61f3b8..2ddb08a2b3 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -115,6 +115,7 @@ const disconnectIdleConn disconnectReason = "IdleConnection" const disconnectSlowConn disconnectReason = "SlowConnection" const disconnectLeastPerformingPeer disconnectReason = "LeastPerformingPeer" const disconnectCliqueResolve disconnectReason = "CliqueResolving" +const disconnectRequestReceived disconnectReason = "DisconnectRequest" // Response is the structure holding the response from the server type Response struct { @@ -361,7 +362,11 @@ func dedupSafeTag(t protocol.Tag) bool { } func (wp *wsPeer) readLoop() { - defer wp.readLoopCleanup() + // the cleanupCloseError sets the default error to disconnectReadError; depending on the exit reason, the error might get changed. 
+ cleanupCloseError := disconnectReadError + defer func() { + wp.readLoopCleanup(cleanupCloseError) + }() wp.conn.SetReadLimit(maxMessageLength) slurper := LimitedReaderSlurper{Limit: maxMessageLength} for { @@ -372,6 +377,7 @@ func (wp *wsPeer) readLoop() { switch ce.Code { case websocket.CloseNormalClosure, websocket.CloseGoingAway: // deliberate close, no error + cleanupCloseError = disconnectRequestReceived return default: // fall through to reportReadErr @@ -515,8 +521,8 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) { return } -func (wp *wsPeer) readLoopCleanup() { - wp.internalClose(disconnectReadError) +func (wp *wsPeer) readLoopCleanup(reason disconnectReason) { + wp.internalClose(reason) wp.wg.Done() } @@ -537,7 +543,7 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) { func (wp *wsPeer) writeLoopSend(msg sendMessage) (exit bool) { if len(msg.data) > maxMessageLength { - wp.net.log.Errorf("trying to send a message longer than we would recieve: %d > %d tag=%#v", len(msg.data), maxMessageLength, string(msg.data[0:2])) + wp.net.log.Errorf("trying to send a message longer than we would recieve: %d > %d tag=%s", len(msg.data), maxMessageLength, string(msg.data[0:2])) // just drop it, don't break the connection return false } diff --git a/node/assemble_test.go b/node/assemble_test.go index 53fc09a20d..175068cb81 100644 --- a/node/assemble_test.go +++ b/node/assemble_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data" @@ -98,7 +99,7 @@ func BenchmarkAssembleBlock(b *testing.B) { cfg := config.GetDefaultLocal() cfg.TxPoolSize = txPoolSize cfg.EnableAssembleStats = false - tp := pools.MakeTransactionPool(l.Ledger, cfg) + tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base()) errcount := 0 okcount := 0 var 
worstTxID transactions.Txid @@ -161,3 +162,77 @@ func BenchmarkAssembleBlock(b *testing.B) { require.True(b, found) } } + +type callbackLogger struct { + logging.Logger + WarnfCallback func(string, ...interface{}) +} + +func (cl callbackLogger) Warnf(s string, args ...interface{}) { + cl.WarnfCallback(s, args...) +} + +func TestAssembleBlockTransactionPoolBehind(t *testing.T) { + const numUsers = 100 + expectingLog := false + baseLog := logging.TestingLog(t) + baseLog.SetLevel(logging.Info) + log := &callbackLogger{ + Logger: baseLog, + WarnfCallback: func(s string, args ...interface{}) { + require.True(t, expectingLog) + require.Equal(t, s, "AssembleBlock: assembled block round did not catch up to requested round: %d != %d") + expectingLog = false + }, + } + secrets := make([]*crypto.SignatureSecrets, numUsers) + addresses := make([]basics.Address, numUsers) + + genesis := make(map[basics.Address]basics.AccountData) + for i := 0; i < numUsers; i++ { + secret := keypair() + addr := basics.Address(secret.SignatureVerifier) + secrets[i] = secret + addresses[i] = addr + genesis[addr] = basics.AccountData{ + Status: basics.Online, + MicroAlgos: basics.MicroAlgos{Raw: 10000000000000}, + } + } + + genesis[poolAddr] = basics.AccountData{ + Status: basics.NotParticipating, + MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinBalance}, + } + + require.Equal(t, len(genesis), numUsers+1) + genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr) + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + ledger, err := data.LoadLedger(log, "ledgerName", inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, nil, cfg) + require.NoError(t, err) + + l := ledger + const txPoolSize = 6000 + cfg = config.GetDefaultLocal() + cfg.TxPoolSize = txPoolSize + cfg.EnableAssembleStats = false + tp := pools.MakeTransactionPool(l.Ledger, cfg, log) + + next := l.NextRound() + deadline := 
time.Now().Add(time.Second) + block, err := tp.AssembleBlock(next, deadline) + require.NoError(t, err) + require.NoError(t, ledger.AddBlock(block.Block(), agreement.Certificate{Round: next})) + + expectingLog = true + + next = l.NextRound() + deadline = time.Now().Add(time.Second) + block, err = tp.AssembleBlock(next, deadline) + require.NoError(t, err) + require.NoError(t, ledger.AddBlock(block.Block(), agreement.Certificate{Round: next})) + + require.False(t, expectingLog) +} diff --git a/node/error.go b/node/error.go new file mode 100644 index 0000000000..4afc08bda3 --- /dev/null +++ b/node/error.go @@ -0,0 +1,64 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package node + +import ( + "fmt" +) + +// Catchpoint already in progress error + +// CatchpointAlreadyInProgressError indicates that the requested catchpoint is already running +type CatchpointAlreadyInProgressError struct { + catchpoint string +} + +// MakeCatchpointAlreadyInProgressError creates the error +func MakeCatchpointAlreadyInProgressError(catchpoint string) *CatchpointAlreadyInProgressError { + return &CatchpointAlreadyInProgressError{ + catchpoint: catchpoint, + } +} + +// Error satisfies builtin interface `error` +func (e *CatchpointAlreadyInProgressError) Error() string { + return fmt.Sprintf("the requested catchpoint '%s' is already in progress, suppressing error", e.catchpoint) +} + +// Catchpoint unable to start error + +// CatchpointUnableToStartError indicates that the requested catchpoint cannot be started +type CatchpointUnableToStartError struct { + catchpointRunning string + catchpointRequested string +} + +// MakeCatchpointUnableToStartError creates the error +func MakeCatchpointUnableToStartError(catchpointRunning, catchpointRequested string) *CatchpointUnableToStartError { + return &CatchpointUnableToStartError{ + catchpointRunning: catchpointRunning, + catchpointRequested: catchpointRequested, + } +} + +// Error satisfies builtin interface `error` +func (e *CatchpointUnableToStartError) Error() string { + return fmt.Sprintf( + "unable to start catchpoint catchup for '%s' - already catching up '%s'", + e.catchpointRequested, + e.catchpointRunning) +} diff --git a/node/impls.go b/node/impls.go index 2fc65e6619..ad00f979be 100644 --- a/node/impls.go +++ b/node/impls.go @@ -18,12 +18,15 @@ package node import ( "context" + "errors" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/catchup" "github.com/algorand/go-algorand/data" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/pools" + 
"github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/util/execpool" @@ -111,3 +114,15 @@ func (l agreementLedger) EnsureDigest(cert agreement.Certificate, verifier *agre // 4. no other senders to this channel exists l.UnmatchedPendingCertificates <- catchup.PendingUnmatchedCertificate{Cert: cert, VoteVerifier: verifier} } + +// Wrapping error with a LedgerDroppedRoundError when an old round is requested but the ledger has already dropped the entry +func (l agreementLedger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) { + record, err := l.Ledger.Lookup(rnd, addr) + var e *ledger.RoundOffsetError + if errors.As(err, &e) { + err = &agreement.LedgerDroppedRoundError{ + Err: err, + } + } + return record, err +} diff --git a/node/node.go b/node/node.go index 3fcb008ae5..db198df34a 100644 --- a/node/node.go +++ b/node/node.go @@ -115,6 +115,9 @@ type AlgorandFullNode struct { log logging.Logger + // syncStatusMu used for locking lastRoundTimestamp and hasSyncedSinceStartup + // syncStatusMu added so OnNewBlock wouldn't be blocked by oldKeyDeletionThread during catchup + syncStatusMu deadlock.Mutex lastRoundTimestamp time.Time hasSyncedSinceStartup bool @@ -191,7 +194,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd return nil, err } - node.transactionPool = pools.MakeTransactionPool(node.ledger.Ledger, cfg) + node.transactionPool = pools.MakeTransactionPool(node.ledger.Ledger, cfg, node.log) blockListeners := []ledger.BlockListener{ node.transactionPool, @@ -577,12 +580,13 @@ func (node *AlgorandFullNode) GetPendingTransaction(txID transactions.Txid) (res // Status returns a StatusReport structure reporting our status as Active and with our ledger's LastRound func (node *AlgorandFullNode) Status() (s StatusReport, err error) { - node.mu.Lock() - defer node.mu.Unlock() - + 
node.syncStatusMu.Lock() s.LastRoundTimestamp = node.lastRoundTimestamp s.HasSyncedSinceStartup = node.hasSyncedSinceStartup + node.syncStatusMu.Unlock() + node.mu.Lock() + defer node.mu.Unlock() if node.catchpointCatchupService != nil { // we're in catchpoint catchup mode. lastBlockHeader := node.catchpointCatchupService.GetLatestBlockHeader() @@ -664,7 +668,7 @@ func (node *AlgorandFullNode) SuggestedFee() basics.MicroAlgos { // GetPendingTxnsFromPool returns a snapshot of every pending transactions from the node's transaction pool in a slice. // Transactions are sorted in decreasing order. If no transactions, returns an empty slice. func (node *AlgorandFullNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) { - return bookkeeping.SignedTxnGroupsFlatten(node.transactionPool.Pending()), nil + return bookkeeping.SignedTxnGroupsFlatten(node.transactionPool.PendingTxGroups()), nil } // Reload participation keys from disk periodically @@ -755,10 +759,13 @@ func (node *AlgorandFullNode) IsArchival() bool { // OnNewBlock implements the BlockListener interface so we're notified after each block is written to the ledger func (node *AlgorandFullNode) OnNewBlock(block bookkeeping.Block, delta ledger.StateDelta) { - node.mu.Lock() + if node.ledger.Latest() > block.Round() { + return + } + node.syncStatusMu.Lock() node.lastRoundTimestamp = time.Now() node.hasSyncedSinceStartup = true - node.mu.Unlock() + node.syncStatusMu.Unlock() // Wake up oldKeyDeletionThread(), non-blocking. 
select { @@ -839,7 +846,11 @@ func (node *AlgorandFullNode) StartCatchup(catchpoint string) error { } if node.catchpointCatchupService != nil { stats := node.catchpointCatchupService.GetStatistics() - return fmt.Errorf("unable to start catchpoint catchup for '%s' - already catching up '%s'", catchpoint, stats.CatchpointLabel) + // No need to return an error + if catchpoint == stats.CatchpointLabel { + return MakeCatchpointAlreadyInProgressError(catchpoint) + } + return MakeCatchpointUnableToStartError(stats.CatchpointLabel, catchpoint) } var err error node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, node.ledger.Ledger, node.config) diff --git a/node/node_test.go b/node/node_test.go index e3aa4f0afa..56f2564cb4 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -42,7 +42,7 @@ import ( "github.com/algorand/go-algorand/util/execpool" ) -var expectedAgreementTime = 2*config.Protocol.BigLambda + 3*config.Protocol.SmallLambda + 2*time.Second +var expectedAgreementTime = 2*config.Protocol.BigLambda + config.Protocol.SmallLambda + config.Consensus[protocol.ConsensusCurrentVersion].AgreementFilterTimeout + 2*time.Second var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} diff --git a/nodecontrol/NodeController.go b/nodecontrol/NodeController.go index 8467bf5ff6..e45501ed5b 100644 --- a/nodecontrol/NodeController.go +++ b/nodecontrol/NodeController.go @@ -21,6 +21,8 @@ import ( "path/filepath" "syscall" "time" + + "github.com/algorand/go-algorand/util" ) // NodeController provides an object for controlling a 
specific algod node instance @@ -94,15 +96,15 @@ func (nc *NodeController) FullStart(args NodeStartArgs) (algodAlreadyRunning, km // FullStop stops both algod and kmd, if they're running func (nc NodeController) FullStop() error { - _, _, err := nc.stopProcesses() + _, err := nc.stopProcesses() return err } // stopProcesses attempts to read PID files for algod and kmd and kill the // corresponding processes. If it can't read a PID file, it doesn't return an // error, but if it reads a PID file and the process doesn't die, it does -func (nc NodeController) stopProcesses() (algodAlreadyStopped, kmdAlreadyStopped bool, err error) { - algodAlreadyStopped, err = nc.StopAlgod() +func (nc NodeController) stopProcesses() (kmdAlreadyStopped bool, err error) { + err = nc.StopAlgod() if err != nil { return } @@ -116,7 +118,7 @@ func killPID(pid int) error { return err } - err = syscall.Kill(pid, syscall.SIGTERM) + err = util.KillProcess(pid, syscall.SIGTERM) if err != nil { return err } @@ -129,7 +131,7 @@ func killPID(pid int) error { } select { case <-waitLong: - return syscall.Kill(pid, syscall.SIGKILL) + return util.KillProcess(pid, syscall.SIGKILL) case <-time.After(time.Millisecond * 100): } } diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go index d42a9ff995..59edb38166 100644 --- a/nodecontrol/algodControl.go +++ b/nodecontrol/algodControl.go @@ -40,6 +40,24 @@ const StdErrFilename = "algod-err.log" // StdOutFilename is the name of the file in where stdout will be captured if not redirected to host const StdOutFilename = "algod-out.log" +// NodeNotRunningError thrown when StopAlgod is called but there is no running algod in requested directory +type NodeNotRunningError struct { + algodDataDir string +} + +func (e *NodeNotRunningError) Error() string { + return fmt.Sprintf("no running node in directory '%s'", e.algodDataDir) +} + +// MissingDataDirError thrown when StopAlgod is called but requested directory does not exist +type MissingDataDirError 
struct { + algodDataDir string +} + +func (e *MissingDataDirError) Error() string { + return fmt.Sprintf("the provided directory '%s' does not exist", e.algodDataDir) +} + // AlgodClient attempts to build a client.RestClient for communication with // the algod REST API, but fails if we can't find the net file func (nc NodeController) AlgodClient() (algodClient client.RestClient, err error) { @@ -133,7 +151,11 @@ func (nc NodeController) algodRunning() (isRunning bool) { } // StopAlgod reads the net file and kills the algod process -func (nc *NodeController) StopAlgod() (alreadyStopped bool, err error) { +func (nc *NodeController) StopAlgod() (err error) { + // Check for valid data directory + if !util.IsDir(nc.algodDataDir) { + return &MissingDataDirError{algodDataDir: nc.algodDataDir} + } // Find algod PID algodPID, err := nc.GetAlgodPID() if err == nil { @@ -143,8 +165,7 @@ func (nc *NodeController) StopAlgod() (alreadyStopped bool, err error) { return } } else { - err = nil - alreadyStopped = true + return &NodeNotRunningError{algodDataDir: nc.algodDataDir} } return } @@ -185,9 +206,8 @@ func (nc *NodeController) StartAlgod(args AlgodStartArgs) (alreadyRunning bool, errLogger.SetLinePrefix(linePrefix) outLogger.SetLinePrefix(linePrefix) } - // Wait on the algod process and check if exits - algodExitChan := make(chan struct{}) + algodExitChan := make(chan error, 1) startAlgodCompletedChan := make(chan struct{}) defer close(startAlgodCompletedChan) go func() { @@ -202,14 +222,14 @@ func (nc *NodeController) StartAlgod(args AlgodStartArgs) (alreadyRunning bool, } default: } - algodExitChan <- struct{}{} + algodExitChan <- err }() - success := false for !success { select { - case <-algodExitChan: - return false, errAlgodExitedEarly + case err := <-algodExitChan: + err = &errAlgodExitedEarly{err} + return false, err case <-time.After(time.Millisecond * 100): // If we can't talk to the API yet, spin algodClient, err := nc.AlgodClient() diff --git 
a/nodecontrol/algodControl_test.go b/nodecontrol/algodControl_test.go new file mode 100644 index 0000000000..4c5f6ccb6b --- /dev/null +++ b/nodecontrol/algodControl_test.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package nodecontrol + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStopAlgodErrorNotRunning(t *testing.T) { + nodeController := MakeNodeController("", ".") + err := nodeController.StopAlgod() + var e *NodeNotRunningError + require.True(t, errors.As(err, &e)) +} + +func TestStopAlgodErrorInvalidDirectory(t *testing.T) { + nodeController := MakeNodeController("", "[][]") + err := nodeController.StopAlgod() + var e *MissingDataDirError + require.True(t, errors.As(err, &e)) +} diff --git a/nodecontrol/kmdControl.go b/nodecontrol/kmdControl.go index 00123add48..27b05dde79 100644 --- a/nodecontrol/kmdControl.go +++ b/nodecontrol/kmdControl.go @@ -172,7 +172,7 @@ func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err e // Got a PID. Is there actually a process running there? 
// "If sig is 0, then no signal is sent, but existence and permission // checks are still performed" - err = syscall.Kill(int(pid), syscall.Signal(0)) + err = util.KillProcess(int(pid), syscall.Signal(0)) if err == nil { // Yup, return alreadyRunning = true return true, nil diff --git a/nodecontrol/nodeControlErrors.go b/nodecontrol/nodeControlErrors.go index 8d0a53762b..1107ee9b2c 100644 --- a/nodecontrol/nodeControlErrors.go +++ b/nodecontrol/nodeControlErrors.go @@ -20,6 +20,20 @@ import ( "fmt" ) -var errAlgodExitedEarly = fmt.Errorf("node exited before we could contact it") var errKMDDataDirNotAbs = fmt.Errorf("kmd data dir must be absolute path") var errKMDExitedEarly = fmt.Errorf("kmd exited before we could contact it") + +type errAlgodExitedEarly struct { + innerError error +} + +func (e *errAlgodExitedEarly) Error() string { + if e.innerError == nil { + return "node exited before we could contact it" + } + return fmt.Sprintf("node exited with an error code, check node.log for more details : %v", e.innerError) +} + +func (e *errAlgodExitedEarly) Unwrap(err error) error { + return e.innerError +} diff --git a/package-deploy.yaml b/package-deploy.yaml index 2206ed4a78..853c809e72 100644 --- a/package-deploy.yaml +++ b/package-deploy.yaml @@ -1,93 +1,73 @@ agents: - - name: deb + - name: docker dockerFilePath: docker/build/docker.ubuntu.Dockerfile - image: algorand/mule-linux-ubuntu + image: algorand/docker-ubuntu version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` env: - - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID, + - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + - NETWORK=$NETWORK + - VERSION=$VERSION volumes: - /var/run/docker.sock:/var/run/docker.sock workDir: $HOME/projects/go-algorand + - name: releases-page + dockerFilePath: docker/build/releases-page.Dockerfile + image: algorand/releases-page + version: scripts/configure_dev-deps.sh + env: + - 
AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + workDir: $HOME/projects/go-algorand + - name: rpm - dockerFilePath: docker/build/mule.go.centos.Dockerfile - image: algorand/mule-linux-centos + dockerFilePath: docker/build/cicd.centos.Dockerfile + image: algorand/cicd-centos version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` env: - - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID, + - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + - CHANNEL=$CHANNEL + - PACKAGES_DIR=$PACKAGES_DIR + - NO_DEPLOY=$NO_DEPLOY + - S3_SOURCE=$S3_SOURCE + - VERSION=$VERSION volumes: - - $HOME/packages:/root/packages, - - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent, + - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent - $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx workDir: $HOME/projects/go-algorand tasks: - task: docker.Make - name: deb - agent: deb - target: mule-deploy-deb + name: docker + agent: docker + target: mule-docker + + - task: docker.Make + name: releases-page + agent: releases-page + target: mule-releases-page - task: docker.Make name: rpm agent: rpm target: mule-deploy-rpm - - task: s3.BucketCopy - name: deb - src: s3://algorand-staging/releases/$CHANNEL/$VERSION/algorand_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.deb - dest: /projects/go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE - - - task: s3.BucketCopy - name: rpm - src: s3://algorand-staging/releases/$CHANNEL/$VERSION/algorand-${VERSION}-1.${ARCH_BIT}.rpm - dest: /projects/go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE - - - task: s3.BucketCopy - name: gnupg - src: s3://algorand-devops-misc/tools/gnupg2.2.9_centos7_amd64.tar.bz2 - dest: /root - - - task: s3.BucketCopy - name: deploy-dev-deb-repo - src: s3://algorand-staging/releases/$CHANNEL/$VERSION - dest: s3://algorand-dev-deb-repo/releases/$CHANNEL/$VERSION - - # TODO: For now, we're hardcoding the channel until the 
beta - # releases are sorted out. This will then be updated. - - task: s3.BucketCopy - name: deploy-rpm-repo - src: /root/rpmrepo - dest: s3://algorand-releases/rpm/stable - jobs: - package-deploy: + package-deploy-rpm: tasks: - - docker.Make.deb - docker.Make.rpm - package-deploy-setup-copy: - tasks: - - s3.BucketCopy.deploy-dev-deb-repo - - package-deploy-setup-deb: - tasks: - - s3.BucketCopy.deb - - package-deploy-setup-rpm: - tasks: - - s3.BucketCopy.rpm - - package-deploy-setup-gnupg: + docker-hub: tasks: - - s3.BucketCopy.gnupg + - docker.Make.docker - package-deploy-rpm-repo: + releases-page: tasks: - - s3.BucketCopy.deploy-rpm-repo + - docker.Make.releases-page diff --git a/package-sign.yaml b/package-sign.yaml index bb32340018..fc3674f992 100644 --- a/package-sign.yaml +++ b/package-sign.yaml @@ -1,75 +1,30 @@ agents: - name: deb - dockerFilePath: docker/build/mule.go.debian.Dockerfile - image: algorand/go-algorand-ci-mule-debian + dockerFilePath: docker/build/cicd.ubuntu.Dockerfile + image: algorand/go-algorand-ci-linux-ubuntu version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` env: - - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID, + - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + - BRANCH=$BRANCH + - CHANNEL=$CHANNEL + - S3_SOURCE=$S3_SOURCE + - VERSION=$VERSION volumes: - - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent, + - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent - $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx workDir: $HOME/projects/go-algorand tasks: - task: docker.Make - name: package-sign-deb + name: package-sign agent: deb - target: mule-sign-deb - - - task: docker.Make - name: package-sign-rpm - agent: deb - target: mule-sign-rpm - - - task: docker.Make - name: package-sign-tarball - agent: deb - target: mule-sign-tar.gz - - - task: docker.Make - name: package-sign-source - agent: deb - target: mule-sign-source - - - task: 
s3.DownloadFile - name: deb - bucketName: algorand-staging - objectName: releases/$CHANNEL/$VERSION/algorand_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.deb - outputDir: /projects/go-algorand/tmp/node_pkgs/${OS_TYPE}/${ARCH_TYPE} - - - task: s3.DownloadFile - name: rpm - bucketName: algorand-staging - objectName: releases/$CHANNEL/$VERSION/algorand-${VERSION}-1.${ARCH_BIT}.rpm - outputDir: /projects/go-algorand/tmp/node_pkgs/${OS_TYPE}/${ARCH_TYPE} - - - task: s3.DownloadFiles - name: tarball - bucketName: algorand-staging - prefix: releases/$CHANNEL/$VERSION - suffix: tar.gz - outputDir: /projects/go-algorand/tmp/node_pkgs/${OS_TYPE}/${ARCH_TYPE} + target: mule-sign jobs: package-sign: tasks: - - docker.Make.package-sign-deb - - docker.Make.package-sign-rpm - - docker.Make.package-sign-tarball - - docker.Make.package-sign-source - - package-sign-setup-deb: - tasks: - - s3.DownloadFile.deb - - package-sign-setup-rpm: - tasks: - - s3.DownloadFile.rpm - - package-sign-setup-tarball: - tasks: - - s3.DownloadFiles.tarball + - docker.Make.package-sign diff --git a/package-test.yaml b/package-test.yaml index 511974c777..080838cfc4 100644 --- a/package-test.yaml +++ b/package-test.yaml @@ -1,13 +1,38 @@ agents: - name: deb - dockerFilePath: docker/build/mule.go.debian.Dockerfile + dockerFilePath: docker/build/cicd.ubuntu.Dockerfile image: algorand/mule-linux-debian version: scripts/configure_dev-deps.sh buildArgs: - GOLANG_VERSION=`./scripts/get_golang_version.sh` env: - - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID, + - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + - BRANCH=$BRANCH + - CHANNEL=$CHANNEL + - NETWORK=$NETWORK + - S3_SOURCE=$S3_SOURCE + - SHA=$SHA + - VERSION=$VERSION + volumes: + - /var/run/docker.sock:/var/run/docker.sock + workDir: $HOME/projects/go-algorand + + - name: rpm + dockerFilePath: docker/build/cicd.centos.Dockerfile + image: algorand/mule-linux-centos + version: scripts/configure_dev-deps.sh + buildArgs: + - 
GOLANG_VERSION=`./scripts/get_golang_version.sh` + env: + - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + - BRANCH=$BRANCH + - CHANNEL=$CHANNEL + - NETWORK=$NETWORK + - S3_SOURCE=$S3_SOURCE + - SHA=$SHA + - VERSION=$VERSION volumes: - /var/run/docker.sock:/var/run/docker.sock workDir: $HOME/projects/go-algorand @@ -20,40 +45,20 @@ tasks: - task: docker.Make name: package-test-rpm - agent: deb + agent: rpm target: mule-test-rpm - - task: s3.DownloadFile - name: deb - bucketName: algorand-staging - objectName: releases/$CHANNEL/$VERSION/algorand_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.deb - outputDir: /projects/go-algorand/tmp/node_pkgs/${OS_TYPE}/${ARCH_TYPE} - - - task: s3.DownloadFile - name: rpm - bucketName: algorand-staging - objectName: releases/$CHANNEL/$VERSION/algorand-${VERSION}-1.${ARCH_BIT}.rpm - outputDir: /projects/go-algorand/tmp/node_pkgs/${OS_TYPE}/${ARCH_TYPE} - jobs: - package-test-deb: - tasks: - - docker.Make.package-test-deb - - package-test-rpm: - tasks: - - docker.Make.package-test-rpm - package-test: tasks: - docker.Make.package-test-deb - docker.Make.package-test-rpm - package-test-setup-deb: + package-test-deb: tasks: - - s3.DownloadFile.deb + - docker.Make.package-test-deb - package-test-setup-rpm: + package-test-rpm: tasks: - - s3.DownloadFile.rpm + - docker.Make.package-test-rpm diff --git a/package-upload.yaml b/package-upload.yaml new file mode 100644 index 0000000000..9d13c458ef --- /dev/null +++ b/package-upload.yaml @@ -0,0 +1,23 @@ +tasks: + - task: s3.BucketCopy + name: amd64 + src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/amd64 + dest: s3://$STAGING/$CHANNEL/$VERSION/ + + - task: s3.BucketCopy + name: arm + src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/arm + dest: s3://$STAGING/$CHANNEL/$VERSION/ + + - task: s3.BucketCopy + name: arm64 + src: $HOME/projects/go-algorand/tmp/node_pkgs/linux/arm64 + dest: s3://$STAGING/$CHANNEL/$VERSION/ + +jobs: + package-upload: + tasks: 
+ - s3.BucketCopy.amd64 + - s3.BucketCopy.arm + - s3.BucketCopy.arm64 + diff --git a/package.yaml b/package.yaml index 37cc922e30..5094ff95ff 100644 --- a/package.yaml +++ b/package.yaml @@ -15,7 +15,7 @@ agents: - GOLANG_VERSION=`./scripts/get_golang_version.sh` workDir: $HOME/projects/go-algorand - - name: docker-ubuntu + - name: docker dockerFilePath: docker/build/docker.ubuntu.Dockerfile image: algorand/go-algorand-docker-linux-ubuntu version: scripts/configure_dev-deps.sh @@ -26,6 +26,11 @@ agents: workDir: $HOME/projects/go-algorand tasks: + - task: docker.Make + name: build + agent: deb + target: ci-build + - task: docker.Make name: rpm agent: rpm @@ -37,17 +42,29 @@ tasks: target: mule-package-deb - task: docker.Make - name: docker-image - agent: docker-ubuntu + name: docker + agent: docker target: mule-package-docker jobs: package: tasks: + - docker.Make.build - docker.Make.deb - docker.Make.rpm + - docker.Make.docker + + package-deb: + tasks: + - docker.Make.build + - docker.Make.deb + + package-rpm: + tasks: + - docker.Make.build + - docker.Make.rpm package-docker: tasks: - - docker.Make.docker-image + - docker.Make.docker diff --git a/protocol/hash.go b/protocol/hash.go index d3f821690e..747d696114 100644 --- a/protocol/hash.go +++ b/protocol/hash.go @@ -28,6 +28,10 @@ const ( AuctionParams HashID = "aP" AuctionSettlement HashID = "aS" + CompactCertCoin HashID = "ccc" + CompactCertPart HashID = "ccp" + CompactCertSig HashID = "ccs" + AgreementSelector HashID = "AS" BlockHeader HashID = "BH" BalanceRecord HashID = "BR" @@ -44,6 +48,7 @@ const ( ProgramData HashID = "ProgData" ProposerSeed HashID = "PS" Seed HashID = "SD" + SpecialAddr HashID = "SpecialAddr" TestHashable HashID = "TE" TxGroup HashID = "TG" Transaction HashID = "TX" diff --git a/protocol/txntype.go b/protocol/txntype.go index d8e50ecc00..dbd60603cd 100644 --- a/protocol/txntype.go +++ b/protocol/txntype.go @@ -41,6 +41,9 @@ const ( // ApplicationCallTx allows creating, deleting, and 
interacting with an application ApplicationCallTx TxType = "appl" + // CompactCertTx records a compact certificate + CompactCertTx TxType = "cert" + // UnknownTx signals an error UnknownTx TxType = "unknown" ) diff --git a/rpcs/httpTxSync.go b/rpcs/httpTxSync.go index ded2d0dd7c..8a1b642ce6 100644 --- a/rpcs/httpTxSync.go +++ b/rpcs/httpTxSync.go @@ -140,7 +140,7 @@ func (hts *HTTPTxSync) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups default: hts.log.Warn("txSync response status code : ", response.StatusCode) response.Body.Close() - return nil, fmt.Errorf("txSync POST error response status code %d", response.StatusCode) + return nil, fmt.Errorf("txSync POST error response status code %d for '%s'. Request bloom filter length was %d bytes", response.StatusCode, syncURL, len(bloomParam)) } // at this point, we've already receieved the response headers. ensure that the diff --git a/rpcs/txService.go b/rpcs/txService.go index 76eae00295..81e8c48a64 100644 --- a/rpcs/txService.go +++ b/rpcs/txService.go @@ -20,11 +20,13 @@ import ( "encoding/base64" "net/http" "strconv" + "strings" "time" - "github.com/algorand/go-deadlock" "github.com/gorilla/mux" + "github.com/algorand/go-deadlock" + "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -60,8 +62,8 @@ func base64PaddedSize(n int64) int64 { func makeTxService(pool PendingTxAggregate, genesisID string, txPoolSize int, responseSizeLimit int) *TxService { // figure out how many bytes do we expect the bloom filter to be in the worst case scenario. - bloomRequestSizeBits, _ := bloom.Optimal(txPoolSize, bloomFilterFalsePositiveRate) - filterBytes := int64((bloomRequestSizeBits + 7) / 8) // convert bits -> bytes. + filterBytes := bloom.BinaryMarshalLength(txPoolSize, bloomFilterFalsePositiveRate) + // since the bloom filter is going to be base64 encoded, account for that as well. 
filterPackedBytes := base64PaddedSize(filterBytes) // The http transport add some additional content to the form ( form keys, separators, etc.) // we need to account for these if we're trying to match the size in the worst case scenario. @@ -107,7 +109,11 @@ func (txs *TxService) ServeHTTP(response http.ResponseWriter, request *http.Requ request.Body = http.MaxBytesReader(response, request.Body, txs.maxRequestBodyLength) err := request.ParseForm() if err != nil { - txs.log.Infof("http.ParseForm fail: %s", err) + if strings.Contains(err.Error(), "http: request body too large") { + txs.log.Infof("http.ParseForm fail due to body length exceed max limit size of %d", txs.maxRequestBodyLength) + } else { + txs.log.Infof("http.ParseForm fail: %s", err) + } response.WriteHeader(http.StatusBadRequest) return } @@ -185,10 +191,10 @@ func (txs *TxService) updateTxCache() (pendingTxGroups [][]transactions.SignedTx // we need to check again, since we released and took the lock. if txs.lastUpdate == 0 || txs.lastUpdate+updateInterval < currentUnixTime { - // The txs.pool.Pending() function allocates a new array on every call. That means that the old + // The txs.pool.PendingTxGroups() function allocates a new array on every call. That means that the old // array ( if being used ) is still valid. There is no risk of data race here since // the txs.pendingTxGroups is a slice (hence a pointer to the array) and not the array itself. 
- txs.pendingTxGroups = txs.pool.Pending() + txs.pendingTxGroups = txs.pool.PendingTxGroups() txs.lastUpdate = currentUnixTime } return txs.pendingTxGroups diff --git a/rpcs/txSyncer.go b/rpcs/txSyncer.go index 81560fcf99..8e66feb381 100644 --- a/rpcs/txSyncer.go +++ b/rpcs/txSyncer.go @@ -32,7 +32,7 @@ import ( // PendingTxAggregate is a container of pending transactions type PendingTxAggregate interface { PendingTxIDs() []transactions.Txid - Pending() [][]transactions.SignedTxn + PendingTxGroups() [][]transactions.SignedTxn } // TxSyncClient abstracts sync-ing pending transactions from a peer. diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go index 049e44ddf6..63e4feb9aa 100644 --- a/rpcs/txSyncer_test.go +++ b/rpcs/txSyncer_test.go @@ -75,7 +75,7 @@ func (mock mockPendingTxAggregate) PendingTxIDs() []transactions.Txid { } return ids } -func (mock mockPendingTxAggregate) Pending() [][]transactions.SignedTxn { +func (mock mockPendingTxAggregate) PendingTxGroups() [][]transactions.SignedTxn { return bookkeeping.SignedTxnsToGroups(mock.txns) } @@ -169,7 +169,7 @@ func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool func TestSyncFromClient(t *testing.T) { clientPool := makeMockPendingTxAggregate(2) serverPool := makeMockPendingTxAggregate(1) - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: serverPool.Pending()[len(serverPool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, txgroups: serverPool.PendingTxGroups()[len(serverPool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -182,7 +182,7 @@ func TestSyncFromClient(t *testing.T) { func TestSyncFromUnsupportedClient(t *testing.T) { pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: true, failWithError: false, 
txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: true, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -195,7 +195,7 @@ func TestSyncFromUnsupportedClient(t *testing.T) { func TestSyncFromClientAndQuit(t *testing.T) { pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -209,7 +209,7 @@ func TestSyncFromClientAndQuit(t *testing.T) { func TestSyncFromClientAndError(t *testing.T) { pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: false, failWithError: true, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: true, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -221,7 +221,7 @@ func TestSyncFromClientAndError(t *testing.T) { func TestSyncFromClientAndTimeout(t *testing.T) { pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, 
txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -240,7 +240,7 @@ func TestSync(t *testing.T) { nodeA.start() nodeAURL := nodeA.rootURL() - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -266,7 +266,7 @@ func TestNoClientsSync(t *testing.T) { func TestStartAndStop(t *testing.T) { t.Skip("TODO: replace this test in new client paradigm") pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} @@ -294,7 +294,7 @@ func TestStartAndStop(t *testing.T) { func TestStartAndQuit(t *testing.T) { pool := makeMockPendingTxAggregate(3) - runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)} + runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.PendingTxGroups()[len(pool.PendingTxGroups())-1:], done: make(chan *rpc.Call)} client := mockRPCClient{client: &runner, log: logging.TestingLog(t)} clientAgg := 
mockClientAggregator{peers: []network.Peer{&client}} handler := mockHandler{} diff --git a/scripts/build_package.sh b/scripts/build_package.sh index e43e419b77..e3625917c9 100755 --- a/scripts/build_package.sh +++ b/scripts/build_package.sh @@ -32,7 +32,12 @@ if [ ! -d "${PKG_ROOT}" ]; then exit 1 fi -export GOPATH=$(go env GOPATH) +UNAME=$(uname) +if [[ "${UNAME}" == *"MINGW"* ]]; then + GOPATH1=$HOME/go +else + export GOPATH=$(go env GOPATH) +fi export GOPATHBIN=${GOPATH%%:*}/bin REPO_DIR=$(pwd) @@ -83,29 +88,15 @@ done mkdir ${PKG_ROOT}/genesis -if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then - genesis_dirs=("devnet" "testnet" "mainnet" "betanet") - for dir in "${genesis_dirs[@]}"; do - mkdir -p ${PKG_ROOT}/genesis/${dir} - cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/ - #${GOPATHBIN}/buildtools genesis ensure -n ${dir} --source ${REPO_DIR}/gen/${dir}/genesis.json --target ${PKG_ROOT}/genesis/${dir}/genesis.json --releasedir ${REPO_DIR}/installer/genesis - if [ $? -ne 0 ]; then exit 1; fi - done - # Copy the appropriate network genesis.json for our default (in root ./genesis folder) - cp ${PKG_ROOT}/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json ${PKG_ROOT}/genesis - if [ $? -ne 0 ]; then exit 1; fi -elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta" ]]; then - cp ${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/genesis/ - #${GOPATHBIN}/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/genesis/genesis.json --releasedir ${REPO_DIR}/installer/genesis - if [ $? 
-ne 0 ]; then exit 1; fi -else - cp installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/genesis +genesis_dirs=("devnet" "testnet" "mainnet" "betanet") +for dir in "${genesis_dirs[@]}"; do + mkdir -p ${PKG_ROOT}/genesis/${dir} + cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/ if [ $? -ne 0 ]; then exit 1; fi - #if [ -z "${TIMESTAMP}" ]; then - # TIMESTAMP=$(date +%s) - #fi - #${GOPATHBIN}/buildtools genesis timestamp -f ${PKG_ROOT}/genesis/genesis.json -t ${TIMESTAMP} -fi +done +# Copy the appropriate network genesis.json for our default (in root ./genesis folder) +cp ${PKG_ROOT}/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json ${PKG_ROOT}/genesis +if [ $? -ne 0 ]; then exit 1; fi TOOLS_ROOT=${PKG_ROOT}/tools diff --git a/scripts/buildhost/README.md b/scripts/buildhost/README.md index fcf763f979..b01786d016 100644 --- a/scripts/buildhost/README.md +++ b/scripts/buildhost/README.md @@ -1,5 +1,3 @@ -[![Build Status](https://travis-ci.com/algorand/go-algorand.svg?token=25XP72ADqbCQJ3TJVC9S&branch=master)](https://travis-ci.com/algorand/go-algorand) - buildhost ==================== @@ -32,6 +30,7 @@ sudo cp 50-cloud-init.yaml 50-cloud-init.yaml.bak ``` Merge the following into 50-cloud-init.yaml, while retaining the original mac address: + ``` # This file is generated from information provided by # the datasource. Changes to it will not persist across an instance. 
# To disable cloud-init's network configuration capabilities, write a file @@ -50,6 +49,7 @@ network: interfaces: [ens3] macaddress: "02:2c:f9:9d:ec:04" dhcp4: true +``` run the following script: ```bash diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh index 92fb4f49d7..bf2bb0383d 100755 --- a/scripts/check_deps.sh +++ b/scripts/check_deps.sh @@ -19,7 +19,12 @@ TEAL_FG=$(tput setaf 6 2>/dev/null) YELLOW_FG=$(tput setaf 3 2>/dev/null) END_FG_COLOR=$(tput sgr0 2>/dev/null) -GOPATH=$(go env GOPATH) +UNAME=$(uname) +if [[ "${UNAME}" == *"MINGW"* ]]; then + GOPATH=$HOME/go +else + GOPATH=$(go env GOPATH) +fi export GOPATH GO_BIN="$(echo "$GOPATH" | cut -d: -f1)/bin" MISSING=0 diff --git a/scripts/compute_branch_network.sh b/scripts/compute_branch_network.sh index 0ee463769b..9967463b16 100755 --- a/scripts/compute_branch_network.sh +++ b/scripts/compute_branch_network.sh @@ -12,11 +12,15 @@ fi if [ "${BRANCH}" = "rel/stable" ]; then echo "testnet" exit 0 +elif [ "${BRANCH}" = "rel/beta" ]; then + echo "betanet" + exit 0 fi #get parent of current branch #credit to https://stackoverflow.com/questions/3161204/find-the-parent-branch-of-a-git-branch -BRANCHPARENT="$(git show-branch | grep '\*' | grep -v '${BRANCH}' | head -n1 | sed 's/.*\[\(.*\)\].*/\1/' | sed 's/[\^~].*//')" +BRANCHPARENT="$(git show-branch | grep '\*' | grep -v '${BRANCH}' | head -n1 | sed 's/.*\[\(.*\)\].*/\1/' | sed 's/[\^~].*//' || ${BRANCH})" +BRANCHPARENT=${BRANCHPARENT:-$BRANCH} if [ "${BRANCHPARENT}" = "rel/stable" ]; then echo "testnet" diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh index cfb8757717..6ec1deb72a 100755 --- a/scripts/configure_dev.sh +++ b/scripts/configure_dev.sh @@ -41,6 +41,34 @@ function install_or_upgrade { fi } +function install_windows_shellcheck() { + version="v0.7.1" + wget https://github.com/koalaman/shellcheck/releases/download/$version/shellcheck-$version.zip -O /tmp/shellcheck-$version.zip + if [ $? 
-ne 0 ]; then + rm /tmp/shellcheck-$version.zip &> /dev/null + echo "Error downloading shellcheck $version" + return 1 + fi + + unzip -o /tmp/shellcheck-$version.zip shellcheck-$version.exe -d /tmp + if [ $? -ne 0 ]; then + rm /tmp/shellcheck-$version.zip &> /dev/null + echo "Unable to decompress shellcheck $version" + return 1 + fi + + mv -f /tmp/shellcheck-$version.exe /usr/bin/shellcheck.exe + if [ $? -ne 0 ]; then + rm /tmp/shellcheck-$version.zip &> /dev/null + echo "Unable to move shellcheck to /usr/bin" + return 1 + fi + + rm /tmp/shellcheck-$version.zip &> /dev/null + + return 0 +} + if [ "${OS}" = "linux" ]; then if ! which sudo > /dev/null then @@ -61,6 +89,19 @@ elif [ "${OS}" = "darwin" ]; then install_or_upgrade automake install_or_upgrade shellcheck install_or_upgrade python3 +elif [ "${OS}" = "windows" ]; then + $msys2 pacman -S --disable-download-timeout --noconfirm git automake autoconf m4 libtool make mingw-w64-x86_64-gcc mingw-w64-x86_64-boost mingw-w64-x86_64-python mingw-w64-x86_64-jq unzip procps + if [ $? -ne 0 ] + then + echo "Error installing pacman dependencies" + exit 1 + fi + + install_windows_shellcheck + if [ $? 
-ne 0 ] + then + exit 1 + fi fi if ${SKIP_GO_DEPS} ; then @@ -68,4 +109,3 @@ if ${SKIP_GO_DEPS} ; then fi "$SCRIPTPATH"/configure_dev-deps.sh - diff --git a/scripts/create_and_deploy_recipe.sh b/scripts/create_and_deploy_recipe.sh index 91fab946b4..9a0e01c982 100755 --- a/scripts/create_and_deploy_recipe.sh +++ b/scripts/create_and_deploy_recipe.sh @@ -114,6 +114,12 @@ ${GOPATH}/bin/netgoal build -r "${ROOTDIR}" -n "${NETWORK}" --recipe "${RECIPEFI export S3_RELEASE_BUCKET="${S3_RELEASE_BUCKET}" ${SRCPATH}/scripts/upload_config.sh "${ROOTDIR}" "${CHANNEL}" +NETWORK_PERF_RULES_PATH="$(dirname $RECIPEFILE)/network_performance_rules" + +if [ -f "${NETWORK_PERF_RULES_PATH}" ]; then + cp "${NETWORK_PERF_RULES_PATH}" "${ROOTDIR}/network_performance_rules" +fi + # Deploy binaries if [ "${NO_DEPLOY}" = "" ]; then # Now generate a private build using our custom genesis.json and deploy it to S3 also diff --git a/scripts/dump_genesis.sh b/scripts/dump_genesis.sh index c61d2fcbc7..b3c1f752b0 100755 --- a/scripts/dump_genesis.sh +++ b/scripts/dump_genesis.sh @@ -1,4 +1,4 @@ -#!/bin/sh -e +#!/usr/bin/env bash if [ "$1" = "" ]; then echo "Usage: $0 genesis.json" @@ -9,13 +9,21 @@ D=$(mktemp -d) trap "rm -r $D" 0 GENJSON="$1" -GOPATH1=$(go env GOPATH | cut -d: -f1) +UNAME=$(uname) +if [[ "${UNAME}" == *"MINGW"* ]]; then + GOPATH1=$HOME/go +else + GOPATH1=$(go env GOPATH | cut -d: -f1) +fi $GOPATH1/bin/algod -d $D -g "$GENJSON" -x >/dev/null LEDGERS=$D/*/ledger.*sqlite for LEDGER in $LEDGERS; do for T in $(echo .tables | sqlite3 $LEDGER); do - case "$T" in + #remove trailing newlines echoed by Windows' sqlite3 app + T=${T//[$'\t\r\n ']} + + case $T in blocks) SORT=rnd ;; diff --git a/scripts/ostype.sh b/scripts/ostype.sh index 7d187f995e..f3f16a45b7 100755 --- a/scripts/ostype.sh +++ b/scripts/ostype.sh @@ -6,6 +6,8 @@ if [ "${UNAME}" = "Darwin" ]; then echo "darwin" elif [ "${UNAME}" = "Linux" ]; then echo "linux" +elif [[ "${UNAME}" == *"MINGW"* ]] || [[ ${UNAME} == *"MSYS_NT"* 
]]; then + echo "windows" else echo "unsupported" exit 1 diff --git a/scripts/release/build/rpm/package.sh b/scripts/release/build/rpm/package.sh index 45c756fa5d..7ffbe31368 100755 --- a/scripts/release/build/rpm/package.sh +++ b/scripts/release/build/rpm/package.sh @@ -11,7 +11,7 @@ export REPO_DIR DEFAULT_RELEASE_NETWORK=$("$REPO_DIR/scripts/compute_branch_release_network.sh" "$DEFAULTNETWORK") export DEFAULT_RELEASE_NETWORK DEFAULTNETWORK=devnet -export DEFAULT_NETWORK +export DEFAULTNETWORK ALGO_BIN="$HOME/subhome/go/bin" export ALGO_BIN diff --git a/scripts/release/common/cpu_name.sh b/scripts/release/common/cpu_name.sh new file mode 100755 index 0000000000..77d946baa2 --- /dev/null +++ b/scripts/release/common/cpu_name.sh @@ -0,0 +1,16 @@ +# Take common name (amd64, arm64, arm) and map to the unames (x86_64, aarch64) + +COMMON_NAME="${1}" + +if [ "amd64" == "${COMMON_NAME}" ]; then + echo "x86_64" +elif [ "arm64" == "${COMMON_NAME}" ]; then + echo "aarch64" +elif [ "arm32" == "${COMMON_NAME}" ]; then + echo "armv7l" +elif [ "arm" == "${COMMON_NAME}" ]; then + echo "armv7l" +else + echo "Unsupported cpu arch ${COMMON_NAME}" + exit 1 +fi diff --git a/scripts/release/forward_gpg_agent.sh b/scripts/release/forward_gpg_agent.sh index 38161ec034..ef9665fd45 100755 --- a/scripts/release/forward_gpg_agent.sh +++ b/scripts/release/forward_gpg_agent.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# shellcheck disable=2196 + # JENKINS=username@ip # JENKINS_KEY=location/to/jenkins/private_key.pem @@ -29,7 +31,7 @@ INSTANCE=$(ssh -i "$JENKINS_KEY" "$JENKINS" sudo cat /opt/jenkins/workspace/"$BR gpgp=$(find /usr/lib/gnupg{2,,1} -type f -name gpg-preset-passphrase 2> /dev/null) # Here we need to grab the signing subkey, hence `tail -1`. 
-KEYGRIP=$(gpg -K --with-keygrip --textmode dev@algorand.com | grep -AE 1 '^ssb[^#]' | grep Keygrip | awk '{ print $3 }') +KEYGRIP=$(gpg -K --with-keygrip --textmode dev@algorand.com | egrep -A 1 '^ssb[^#]' | grep Keygrip | awk '{ print $3 }') echo "enter dev@ password" $gpgp --verbose --preset "$KEYGRIP" diff --git a/scripts/release/mule/Makefile.mule b/scripts/release/mule/Makefile.mule index eb0465423f..69f35d7dfd 100644 --- a/scripts/release/mule/Makefile.mule +++ b/scripts/release/mule/Makefile.mule @@ -42,21 +42,25 @@ ci-build: buildsrc gen ci-setup # https://scene-si.org/2019/12/04/make-dynamic-makefile-targets/ mule = $(shell ls -d scripts/release/mule/*/ | awk 'BEGIN { FS="/" ; OFS="-" } { print $$3, $$4 }') -mule-deploy-%: PKG_TYPE=$* -mule-deploy-%: - scripts/release/mule/deploy/$(PKG_TYPE)/deploy.sh +mule-deploy-rpm: + scripts/release/mule/deploy/rpm/deploy.sh + +mule-docker: + scripts/release/mule/deploy/docker/docker.sh -mule-package-deb: ci-build mule-package-%: PKG_TYPE=$* mule-package-%: echo Building algorand package... - scripts/release/mule/package/$(PKG_TYPE)/package.sh + scripts/release/mule/package/$(PKG_TYPE)/package.sh algorand echo Building algorand-devtools package... scripts/release/mule/package/$(PKG_TYPE)/package.sh algorand-devtools -mule-sign-%: PKG_TYPE=$* -mule-sign-%: - scripts/release/mule/sign/sign.sh $(PKG_TYPE) +mule-releases-page: + echo Generating the releases page... 
+ cd scripts/release/mule/deploy/releases_page && ./generate_releases_page.sh + +mule-sign: + scripts/release/mule/sign/sign.sh mule-test-%: PKG_TYPE=$* mule-test-%: diff --git a/scripts/release/mule/README.md b/scripts/release/mule/README.md new file mode 100644 index 0000000000..0c2caf026f --- /dev/null +++ b/scripts/release/mule/README.md @@ -0,0 +1,270 @@ +# Package Build Pipeline + +- [Environment Variables](#environment-variables) +- [Build Stages](#build-stages) +- [Custom Builds](#custom-builds) +- [Examples](#examples) +- [Manual Deploy](#manual-deploy) + +# Environment Variables + +Each stage listed in the next section will have several environment variables automatically that are available to the stage. Depending on the stage, the environment variables may be exported to subprocesses. + +These env vars generally don't change between stages. Here is a list of variables that are computed if not passed on the CLI (more on that later): + +- `ARCH_TYPE`, i.e., `amd64` +- `BRANCH` +- `CHANNEL` +- `OS_TYPE` +- `VERSION` + +In addition, make sure that the following AWS credentials are set in environment variables: + +- `AWS_ACCESS_KEY_ID` +- `AWS_SECRET_ACCESS_KEY` + +# Build Stages + +- [package](#package) +- [upload](#upload) +- [test](#test) +- [sign](#sign) +- [deploy](#deploy) + +## package + +- see `./go-algorand/package.yaml` + +#### `mule` jobs + + - package + + calls `ci-build` make target + + packages `deb`, `rpm` and `docker` + + - package-deb + + packages only `deb` + + - package-rpm: + + packages only `rpm` + + - package-docker + + packages docker image + +## upload + +- see `./go-algorand/package-upload.yaml` + +- customizable environment variables: + + + `CHANNEL` + + `STAGING` + + `VERSION` + +#### `mule` jobs + + - package-upload + +## test + +- see `./go-algorand/package-test.yaml` + +- customizable environment variables: + + + `BRANCH` + + `CHANNEL` + + `ARCH_BIT`, i.e., the value from `uname -m` + + `NETWORK` + + `S3_SOURCE`, i.e., the S3 
bucket from which to download + + `SHA`, i.e., the value from `git rev-parse HEAD` if not passed on CLI + + `VERSION` + +#### `mule` jobs + + - package-test + + tests both `deb` and `rpm` + + - package-test-deb + + tests only `deb` + + - package-test-rpm + + tests only `rpm` + +## sign + +- see `./go-algorand/package-sign.yaml` + +- customizable environment variables: + + + `BRANCH` + + `CHANNEL` + + `ARCH_BIT`, i.e., the value from `uname -m` + + `S3_SOURCE`, i.e., the S3 bucket from which to download + + `VERSION` + +#### `mule` jobs + + - package-sign + + signs all build artifacts + +## deploy + +- see `./go-algorand/package-deploy.yaml` + +- customizable environment variables: + + + `CHANNEL` + + `NETWORK` + + `NO_DEPLOY` + + `PACKAGES_DIR` + + `S3_SOURCE` + + `VERSION` + +#### `mule` jobs + + - package-deploy-rpm + + deploys `rpm` + + - docker-hub + + pushes new image to docker hub + + - releases-page + + creates and pushes new releases page to S3 + +# Custom Builds + +It is sometimes necessary to create packages after doing a local build. + +For example, the packaging build process will look like this: + +``` +mule -f package.yaml package +``` + +This can produce packages like the following: + +``` +algorand_dev_linux-amd64_2.1.86615.deb +algorand-devtools_dev_linux-amd64_2.1.86615.deb +``` + +Note that this is in the format `{ALGORAND_PACKAGE_NAME}_{CHANNEL}_{OS_TYPE}-{ARCH_TYPE}_{VERSION}.deb`. `rpm` packages will follow their own format which is easy to intuit. + +It is common that a custom build is performed on a feature branch other than `rel/stable` or `rel/beta` and that the build environment will need to be modified. In these instances, it is important to be able to pass values to the build process to customize a build. + +The most common way to do this is to modify the environment that the subprocess inherits by specifying the values on the command line *before* the command.
This won't need to be done for the package stage, but often needs to be done with subsequent stages. + +In order to be able to correctly run some of the stages, such as testing and signing, several values needed by the subsequent stages must be explicitly passed to those stages. + +> Verifying which env vars can be overridden is as simple as opening the `mule` yaml file for the respective stage and examining the list of env vars in the `agents`' `env` list. +> +> For example: +> +> agents: +> - name: deb +> dockerFilePath: docker/build/cicd.ubuntu.Dockerfile +> image: algorand/mule-linux-debian +> version: scripts/configure_dev-deps.sh +> buildArgs: +> - GOLANG_VERSION=`./scripts/get_golang_version.sh` +> env: +> - BRANCH=$BRANCH +> - CHANNEL=$CHANNEL +> - NETWORK=$NETWORK +> - SHA=$SHA +> - VERSION=$VERSION + +Let's look at some examples. + +# Examples + +### Packaging + + mule -f package.yaml package + +### Uploading + + STAGING=the-staging-area CHANNEL=beta VERSION=latest mule -f package-upload.yaml package-upload + +### Testing + +1. As part of the test suite, the `verify_package_string.sh` test needs the `BRANCH` as well as the `SHA`: + + BRANCH=update_signing CHANNEL=dev SHA=aecd5318 VERSION=2.1.86615 mule -f package-test.yaml package-test + +1. To test local packages on the filesystem, do not set the `S3_SOURCE` environment variable. Note that the tests still expect the packages to be in the usual place, i.e., `./go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/`. + + BRANCH=update_signing CHANNEL=dev VERSION=2.1.86615 mule -f package-test.yaml package-test + +1. By setting the `S3_SOURCE` variable, the script will know to download packages from staging (instead of getting them from the local filesystem) and test. This will download the packages to the usual place, i.e., `./go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/`. + + Note that this can be used to test a pending official release. 
+ + CHANNEL=beta S3_SOURCE=the-staging-area VERSION=2.1.6 mule -f package-test.yaml package-test + +1. When testing locally, very often it is necessary to specify the `BRANCH`, `NETWORK` and `SHA` of the last commit to be able to have passing tests. This is because the local environment will most likely not match the environment in which the packages were packaged. + + BRANCH=rel/stable CHANNEL=stable NETWORK=mainnet S3_SOURCE=the-staging-area SHA=df65da2b VERSION=2.1.6 mule -f package-test.yaml package-test + +### Signing + +1. Sign local packages located on the filesystem because `S3_SOURCE` is not set. Note that the packages should be in the usual place, i.e., `./go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/`. + + CHANNEL=dev VERSION=2.1.86615 mule -f package-sign.yaml package-sign + +1. Download packages from staging and sign. Again, the script will know to download from S3 because the `S3_SOURCE` has been set. This will download the packages to the usual place, i.e., `./go-algorand/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/`. + + CHANNEL=beta S3_SOURCE=the-staging-area VERSION=2.1.6 mule -f package-sign.yaml package-sign + +### Deploying + +1. The new rpm packages will be downloaded from staging if the `S3_SOURCE` variable is set. Each package will then be pushed to `s3:algorand-releases:`. + + S3_SOURCE=the-staging-area VERSION=2.1.6 mule -f package-deploy.yaml package-deploy-rpm + +1. Packages are not downloaded from staging but rather are copied from the location on the local filesystem specified by `PACKAGES_DIR` in the `mule` yaml file. Each package will then be pushed to `s3:algorand-releases:`. + + PACKAGES_DIR=/packages_location/foo VERSION=2.1.86615 mule -f package-deploy.yaml package-deploy-rpm + +1. `NO_DEPLOY` is set to `true`. Instead of automatically pushing to `s3:algorand-releases:`, this will copy the `rpmrepo` directory that was created in the container to the `WORKDIR` in the host environment (the `WORKDIR` is set in the `mule` yaml file).
+ + This is handy when testing a deployment and not yet ready to deploy. + + NO_DEPLOY=true S3_SOURCE=the-staging-area VERSION=2.1.6 mule -f package-deploy.yaml package-deploy-rpm + +# Manual Deploy + +> Before any processes are run, make sure that the signing keys have been added to the `gpg-agent`. The `gpg_preset_passphrase.sh` helper script is provided just for this purpose. + +Currently, it is still necessary to run two stages manually: sign and deploy. This is for several reasons, though principally because GPG signing of the build assets occurs in both stages. + +The processes that make up both stages have been `mule-ified` as much as possible, and all but one can be run as a `mule` task (deploying deb packages, which are done in its own separate docker container). + +### Signing + +Usually, the packages are pulled down from S3 where the eks pipeline or the `mule` `package-upload` task had placed them. Issue the following command to download and sign them: + +``` +CHANNEL=stable S3_SOURCE=the-internal-area VERSION=2.1.6 mule -f package-sign.yaml package-sign +``` + +> These are downloaded to the usual location at `tmp/node_pkgs/OS_TYPE/ARCH/` on the local filesystem. + +### Misc + +The following is an example of several commands issued for all the stages when building locally: + +``` +mule -f package.yaml package +CHANNEL=dev VERSION=2.1.87522 SHA=730b3fd0 mule -f package-test.yaml package-test +CHANNEL=dev VERSION=2.1.87522 mule -f package-sign.yaml package-sign +CHANNEL=dev VERSION=2.1.87522 mule -f package-upload.yaml package-upload +CHANNEL=dev VERSION=2.1.87522 NO_DEPLOY=true mule -f package-deploy.yaml package-deploy +mule -f package-deploy.yaml releases-page + +docker build --build-arg AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" --build-arg AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" -t aptly-test . 
+ +docker run --name aptly-algorand --rm -i -v "$XDG_RUNTIME_DIR/gnupg/S.gpg-agent":/root/.gnupg/S.gpg-agent -v "$HOME/.gnupg/pubring.kbx":/root/.gnupg/pubring.kbx -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" -e CHANNEL=dev -e REPO=algorand -e VERSION=2.1.87522 aptly-test bash create_and_push + +docker run --name aptly-algorand --rm -it aptly-test +``` + diff --git a/scripts/release/mule/deploy/README.md b/scripts/release/mule/deploy/README.md new file mode 100644 index 0000000000..06eaa57eec --- /dev/null +++ b/scripts/release/mule/deploy/README.md @@ -0,0 +1,25 @@ +## Manual deploy steps for deb packages + +It's **very** important that the docker container is run **before** the `release-page` **and** the `docker-hub` `mule` tasks in `package-deploy`. + +The docker container will do the following (see the `create_and_push` shell script): + +1. Copy the new `algorand` and `algorand-devtools` packages from the `algorand-staging` to `algorand-internal` buckets so the `packages/` directory in the container will be properly synced with the `algorand-internal` bucket. + +1. Sync `algorand-internal/packages` -> `packages/` in the container. + +1. Add the deb packages to the appropriate `aptly` repo. + +1. Create the snapshot (naming convention is `CHANNEL-VERSION`). + +1. Switch out the old snapshot in the `algorand-releases/deb` location for this new one. + +1. Sync `algorand-staging` -> `algorand-dev-deb-repo` + +When that is finished, it is safe to run the following commands (order doesn't matter): + +- `mule -f package-deploy releases-page` +- `mule -f package-deploy docker-hub` + +> Note that the releases page is built from the latest release in the `algorand-dev-deb-repo` bucket, hence the need to have first run the docker container which performs that sync operation. 
+ diff --git a/scripts/release/mule/deploy/deb/.aptly.conf b/scripts/release/mule/deploy/deb/.aptly.conf new file mode 100644 index 0000000000..6300abc3b7 --- /dev/null +++ b/scripts/release/mule/deploy/deb/.aptly.conf @@ -0,0 +1,30 @@ +{ + "rootDir": "/root/aptly", + "downloadConcurrency": 4, + "downloadSpeedLimit": 0, + "architectures": [], + "dependencyFollowSuggests": false, + "dependencyFollowRecommends": false, + "dependencyFollowAllVariants": false, + "dependencyFollowSource": false, + "dependencyVerboseResolve": false, + "gpgDisableSign": false, + "gpgDisableVerify": false, + "gpgProvider": "gpg", + "downloadSourcePackages": false, + "skipLegacyPool": true, + "ppaDistributorID": "ubuntu", + "ppaCodename": "", + "skipContentsPublishing": false, + "FileSystemPublishEndpoints": {}, + "S3PublishEndpoints": { + "algorand-releases": { + "region":"us-east-1", + "bucket":"algorand-releases", + "acl":"public-read", + "prefix":"deb" + } + }, + "SwiftPublishEndpoints": {} +} + diff --git a/scripts/release/mule/deploy/deb/Dockerfile b/scripts/release/mule/deploy/deb/Dockerfile new file mode 100644 index 0000000000..5492ba5a93 --- /dev/null +++ b/scripts/release/mule/deploy/deb/Dockerfile @@ -0,0 +1,27 @@ +FROM ubuntu:18.04 + +ARG AWS_ACCESS_KEY_ID +ARG AWS_SECRET_ACCESS_KEY + +ENV DEBIAN_FRONTEND noninteractive \ + AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ + AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY + +RUN apt-get update && apt-get install aptly awscli binutils curl gnupg2 silversearcher-ag tree -y + +WORKDIR /root + +COPY . . 
+ +RUN aws s3 sync s3://algorand-internal/packages packages && \ + aptly repo create -distribution=stable -architectures=amd64 -component=main -comment=mainnet algorand && \ + aptly repo create -distribution=beta -architectures=amd64 -component=main -comment=betanet algorand-beta && \ + aptly repo create -distribution=indexer -architectures=amd64 -component=main -comment=indexer algorand-indexer && \ + aptly repo create -distribution=indexer-beta -architectures=amd64 -component=main -comment=indexer-beta algorand-indexer-beta && \ + aptly repo add algorand packages/deb/stable/*.deb && \ + aptly repo add algorand-beta packages/deb/beta/*.deb && \ + aptly repo add algorand-indexer packages/deb/indexer/stable/*.deb && \ + aptly repo add algorand-indexer-beta packages/deb/indexer/beta/*.deb + +CMD ["/bin/bash"] + diff --git a/scripts/release/mule/deploy/deb/create_and_push b/scripts/release/mule/deploy/deb/create_and_push new file mode 100755 index 0000000000..ef7df6f74c --- /dev/null +++ b/scripts/release/mule/deploy/deb/create_and_push @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# This script is doing the following: +# 1. Copying the new packages from staging to the packages cache. +# 2. Syncing from the packages cache to the local container. +# 3. Aptly operations: +# i. Add all the packages to the correct $REPO. This includes the newly-synced packages. +# ii. Create the new snapshot that contains the new packages and the previous ones for +# the channel. +# iii. Switch out the old snapshot for the new one. Note that in the "publish" command that +# the $CHANNEL is the distro. +# 4. Copy the new packages from staging to `algorand-dev-deb-repo` so that the releases page can +# be generated. 
+ +CHANNEL=${CHANNEL:-stable} +REPO=${REPO:-algorand} + +usage() { + echo "Usage: -e CHANNEL=\$CHANNEL -e VERSION=\$VERSION -e REPO=\$REPO -e SNAPSHOT=\$SNAPSHOT $0 " + exit "$1" +} + +if [ "$HELP" = true ] +then + echo ------------------------------ + echo "default values:" + echo + CHANNEL=stable + echo + REPO=algorand + echo + SNAPSHOT="\$CHANNEL-\$VERSION" + echo + echo "aptly repos (values for REPO):" + echo "+ algorand" + echo "+ algorand-beta" + echo "+ algorand-indexer" + echo "+ algorand-indexer-beta" + echo ------------------------------ + exit 0 +fi + +if [ -z "$VERSION" ] +then + echo "Version is a required parameter." + usage 1 +fi + +PKG_DIR="/root/packages/deb/$CHANNEL/" + +if [ -z "$SNAPSHOT" ] +then + SNAPSHOT="$CHANNEL-$VERSION" +fi + +echo -e "REPO $REPO\nSNAPSHOT $SNAPSHOT\nPKG_DIR $PKG_DIR" + +# It's necessary to copy packages from staging to packages/. +SOURCE_PREFIX="algorand-staging/releases/$CHANNEL/$VERSION" +DEST="s3://algorand-internal/packages/deb/$CHANNEL/" +aws s3 cp "s3://$SOURCE_PREFIX/algorand_${CHANNEL}_linux-amd64_${VERSION}.deb" "$DEST" +aws s3 cp "s3://$SOURCE_PREFIX/algorand-devtools_${CHANNEL}_linux-amd64_${VERSION}.deb" "$DEST" + +aws s3 sync s3://algorand-internal/packages packages + +aptly repo add "$REPO" "$PKG_DIR"/*.deb +aptly snapshot create "$SNAPSHOT" from repo "$REPO" +aptly publish switch "$CHANNEL" s3:algorand-releases: "$SNAPSHOT" + +aws s3 sync "s3://algorand-staging/releases/$CHANNEL/$VERSION" "s3://algorand-dev-deb-repo/releases/$CHANNEL/$("/root/reverse_hex_timestamp")_$VERSION" + diff --git a/scripts/release/mule/deploy/deb/deploy.sh b/scripts/release/mule/deploy/deb/deploy.sh deleted file mode 100755 index 83b044ae54..0000000000 --- a/scripts/release/mule/deploy/deb/deploy.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -echo -date "+build_release begin SNAPSHOT stage %Y%m%d_%H%M%S" -echo - -ARCH_BIT=$(uname -m) -ARCH_TYPE=$(./scripts/archtype.sh) -OS_TYPE=$(./scripts/ostype.sh) 
-VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)} -CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -PKG_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" -SIGNING_KEY_ADDR=dev@algorand.com - -chmod 400 "$HOME/.gnupg" - -if ! $USE_CACHE -then - export ARCH_BIT - export ARCH_TYPE - export CHANNEL - export OS_TYPE - export VERSION - - mule -f package-deploy.yaml package-deploy-setup-deb -fi - -apt-get install aptly -y - -cat <"${HOME}/.aptly.conf" -{ - "rootDir": "${HOME}/aptly", - "downloadConcurrency": 4, - "downloadSpeedLimit": 0, - "architectures": [], - "dependencyFollowSuggests": false, - "dependencyFollowRecommends": false, - "dependencyFollowAllVariants": false, - "dependencyFollowSource": false, - "dependencyVerboseResolve": false, - "gpgDisableSign": false, - "gpgDisableVerify": false, - "gpgProvider": "gpg", - "downloadSourcePackages": false, - "skipLegacyPool": true, - "ppaDistributorID": "ubuntu", - "ppaCodename": "", - "skipContentsPublishing": false, - "FileSystemPublishEndpoints": {}, - "S3PublishEndpoints": { - "algorand-releases": { - "region":"us-east-1", - "bucket":"algorand-releases", - "acl":"public-read", - "prefix":"deb" - } - }, - "SwiftPublishEndpoints": {} -} -EOF - -DEBS_DIR="$HOME/packages/deb/$CHANNEL" -DEB="algorand_${CHANNEL}_linux-amd64_${VERSION}.deb" - -cp "$PKG_DIR/$DEB" "$DEBS_DIR" - -SNAPSHOT="${CHANNEL}-${VERSION}" -aptly repo create -distribution="$CHANNEL" -component=main algorand -aptly repo add algorand "$DEBS_DIR"/*.deb -aptly snapshot create "$SNAPSHOT" from repo algorand -aptly publish snapshot -gpg-key="$SIGNING_KEY_ADDR" -origin=Algorand -label=Algorand "$SNAPSHOT" "s3:algorand-releases:" - -echo -date "+build_release end SNAPSHOT stage %Y%m%d_%H%M%S" -echo - diff --git a/scripts/release/mule/deploy/deb/reverse_hex_timestamp b/scripts/release/mule/deploy/deb/reverse_hex_timestamp new file mode 100755 index 0000000000..2c49f8d520 --- /dev/null 
+++ b/scripts/release/mule/deploy/deb/reverse_hex_timestamp @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +# a reverse hex timestamp is useful for putting newest things first in S3 bucket object sort order +import sys +import time + +sys.stdout.write('{:08x}'.format(0xfffffffff - int(time.time()))) + diff --git a/scripts/release/mule/deploy/docker/docker.sh b/scripts/release/mule/deploy/docker/docker.sh new file mode 100755 index 0000000000..6668b70df6 --- /dev/null +++ b/scripts/release/mule/deploy/docker/docker.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# +# mainnet and testnet are pushed: +# ./docker.sh mainnet 2.0.6 +# +# For betanet: +# ./docker.sh betanet +# +set -ex + +NETWORK=${NETWORK:-mainnet} +VERSION=${VERSION:-latest} + +if [[ ! "$NETWORK" =~ ^mainnet$|^testnet$|^betanet$ ]] +then + echo "[$0] Network values must be either \`mainnet\`, \`testnet\` or \`betanet\`." + exit 1 +fi + +pushd docker/releases + +if [ "$NETWORK" = mainnet ] +then + # Build and push mainnet. + ./build_releases.sh + + # Build and push testnet. + ./build_releases.sh --network testnet + + if [ -z "$VERSION" ] + then + echo "[$0] No version specified." + exit 1 + fi + + ./build_releases.sh --tagname "$VERSION" +elif [ "$NETWORK" = betanet ] +then + ./build_releases.sh --network betanet +fi + +popd + diff --git a/scripts/release/mule/deploy/releases_page/generate_releases_page.py b/scripts/release/mule/deploy/releases_page/generate_releases_page.py new file mode 100755 index 0000000000..262a8a8fe5 --- /dev/null +++ b/scripts/release/mule/deploy/releases_page/generate_releases_page.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 +# +# This script builds https://releases.algorand.com/index.html.
+# +# To run: +# ./generate_releases_page.py > index.html + +import sys +import boto3 + +staging_bucket = "algorand-dev-deb-repo" +staging_prefix = "http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/" +key_url = "https://releases.algorand.com/key.pub" +releases_bucket = "algorand-releases" +releases_prefix = "https://releases.algorand.com/" +html_tpl = "html.tpl" +styles_url = "releases_page.css" +tokens = ["stable", "beta", "indexer"] + +def get_stage_release_set(response): + prefix = None + all = {} + they = [] + for x in response["Contents"]: + path = x["Key"] + pre, fname = path.rsplit("/", 1) + if fname.startswith("tools_") or fname.startswith("install_") or fname.startswith("pending_"): + continue + if prefix is None: + prefix = pre + they.append(x) + elif prefix == pre: + they.append(x) + else: + all[prefix] = they + prefix = None + they = [x] + return all + +def release_set_files(rset): + files = {} + for x in rset: + path = x["Key"] + pre, fname = path.rsplit("/", 1) + if fname.startswith("hashes_"): + continue + didsuf = False + for suffix in (".asc", ".sig"): + if fname.endswith(suffix): + froot = fname[:-len(suffix)] + fd = files.get(froot) + if fd is None: + fd = {} + files[froot] = fd + fd[suffix] = x + didsuf = True + break + if didsuf: + continue + fd = files.get(fname) + if fd is None: + fd = {} + files[fname] = fd + fd["file"] = path + fd["Size"] = x["Size"] + return files + +def get_hashes_data(s3, rset): + text = "" + for x in rset: + path = x["Key"] + pre, fname = path.rsplit("/", 1) + if fname.endswith(".asc"): + continue + if fname.endswith(".sig"): + continue + if fname.startswith("hashes"): + ob = s3.get_object(Bucket=staging_bucket, Key=path) + text += ob["Body"].read().decode() + return text + +def read_hashes(fin): + by_fname = {} + for line in fin: + if not line: + continue + line = line.strip() + if not line: + continue + if line[0] == "#": + continue + hashstr, fname = line.split() + ob = by_fname.get(fname) + if not ob: 
+ ob = {} + by_fname[fname] = ob + if len(hashstr) == 32: + ob["md5"] = hashstr + elif len(hashstr) == 64: + ob["sha256"] = hashstr + elif len(hashstr) == 128: + ob["sha512"] = hashstr + return by_fname + +def objects_by_fname(they): + out = {} + for x in they: + path = x["Key"] + if path.endswith("/"): + continue + parts = path.rsplit("/", 1) + fname = parts[-1] + out[fname] = x + return out + +def getContent(url): + with open(url, "r") as reader: + content = reader.read() + + return content + +def build_page(channels): + html = getContent(html_tpl).replace("{styles}", getContent(styles_url)) + + for n in tokens: + html = html.replace("".join(["{", n, "}"]), "".join(channels[n])) + + sys.stdout.write(html) + +def get_furl(release_files, fname, skey): + rfpath = release_files.get(fname) + if rfpath is not None: + return releases_prefix + rfpath["Key"] + else: + return staging_prefix + skey + +def main(): + s3 = boto3.client("s3") + channels = {} + + for channel in ["stable", "beta", "indexer"]: + staging_response = s3.list_objects_v2(Bucket=staging_bucket, Prefix="releases/" + channel + "/", MaxKeys=100) + release_sets = get_stage_release_set(staging_response) + releases_response = s3.list_objects_v2(Bucket=releases_bucket) + release_files = objects_by_fname(releases_response["Contents"]) + + table = [] + + for key, rset in release_sets.items(): + hashftext = get_hashes_data(s3, rset) + fhashes = read_hashes(hashftext.splitlines()) + files = release_set_files(rset) + + for fname, info in files.items(): + if "file" not in info: + continue + furl = get_furl(release_files, fname, info['file']) + ftext = ''.format(furl, fname) + sig = info.get(".sig") + stext = "" + if sig is not None: + sfname = sig["Key"].rsplit("/", 1)[-1] + surl = get_furl(release_files, sfname, sig["Key"]) + stext = '.sig'.format(surl) + size = info.get("Size", "") + hashes = fhashes.get(fname) + if hashes: + for hn in ("md5", "sha256", "sha512"): + hv = hashes.get(hn) + if hv: + ftext += '
{}
'.format(hn, hv) + if not hashes and not stext: + continue + tbody = ["{}{}{}".format(ftext, size, stext)] + table.append("".join(tbody)) + + # Only add the spacer *after* every set. + # It's not readily apparent to me why `indexer` would have a dict with a single + # item. This needs additional investigation. + # + # For instance, when creating the "indexer" table, the first line was empty b/c + # it added a spacer. This was b/c there were two dicts and the first only + # contained one item, which was useless. + # + # For now, just ignore those dicts. + if len(files.items()) > 1: + table.append('') + + channels[channel] = table + + build_page(channels) + +if __name__ == "__main__": + main() + diff --git a/scripts/release/mule/deploy/releases_page/generate_releases_page.sh b/scripts/release/mule/deploy/releases_page/generate_releases_page.sh new file mode 100755 index 0000000000..bccf5994c6 --- /dev/null +++ b/scripts/release/mule/deploy/releases_page/generate_releases_page.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Note: For this script to correctly pick up the new release, the new repo containing the new +# packages MUST have been already pushed to S3! (See the deb deployment mule step.) +# +# 1. Generate the new releases page from the contents of `algorand-dev-deb-repo/releases/CHANNEL`. +# 2. Backup up the current releases page (index.html). +# 3. Copy the new index.html to staging. +# 4. Copy the new index.html to `algorand-releases`. + +set -ex + +./generate_releases_page.py > index.html +aws s3 cp s3://algorand-releases/index.html s3://algorand-staging/releases-page/index.html-previous +aws s3 cp index.html s3://algorand-staging/releases-page/ +aws s3 cp index.html s3://algorand-releases/ + diff --git a/scripts/release/mule/deploy/releases_page/html.tpl b/scripts/release/mule/deploy/releases_page/html.tpl new file mode 100644 index 0000000000..c52f342d45 --- /dev/null +++ b/scripts/release/mule/deploy/releases_page/html.tpl @@ -0,0 +1,42 @@ + + + + + + + +

Algorand Releases

+

See Algorand Developer Resources for instructions on installation and getting started

+

The Algorand public key to verify these files (except RPM**) is at https://releases.algorand.com/key.pub

+

The public key for verifying RPMs is https://releases.algorand.com/rpm/rpm_algorand.pub

+

** The RPM package for the 2.0.3 release was signed with the https://releases.algorand.com/key.pub. All other releases will have been signed with the RPM key as noted.

+ +
+ +
+

algod

+

stable

+ +{stable} +
FileBytesGPG Signature
+ +

beta

+ +{beta} +
FileBytesGPG Signature
+
+ +
+ +
+

Indexer releases

+ +{indexer} +
FileBytesGPG Signature
+
+ + + + diff --git a/scripts/release/mule/deploy/releases_page/releases_page.css b/scripts/release/mule/deploy/releases_page/releases_page.css new file mode 100644 index 0000000000..0475a70b1f --- /dev/null +++ b/scripts/release/mule/deploy/releases_page/releases_page.css @@ -0,0 +1,47 @@ +div.hash { + font-family: monospace; +} + +div.fname { + font-size: 120%; +} + +section { + margin: 0 0 0 4em; +} + +section h1 { + color: #000; + font-size: 175%; + margin-left: -3em; + padding: 20px; + width: 10em; +} + +table { + border-collapse: collapse; +} + +tr.spacer td { + border: 0; + padding: 20px 0; +} + +th { + background-color: #DDD; + border: 2px inset gray; + padding: 10px; +} + +td { + border: 2px inset gray; + padding: 1px; + text-align: center; + vertical-align: middle; + width: 200px; +} + +td:first-child { + text-align: left; +} + diff --git a/scripts/release/mule/deploy/rpm/deploy.sh b/scripts/release/mule/deploy/rpm/deploy.sh index d8903f2df2..7c8ede1aa7 100755 --- a/scripts/release/mule/deploy/rpm/deploy.sh +++ b/scripts/release/mule/deploy/rpm/deploy.sh @@ -1,16 +1,35 @@ #!/usr/bin/env bash -# shellcheck disable=2045 +# shellcheck disable=2035,2045 set -ex +echo +date "+build_release begin DEPLOY rpm stage %Y%m%d_%H%M%S" +echo + +ARCH_TYPE=$(./scripts/archtype.sh) +OS_TYPE=$(./scripts/ostype.sh) +CHANNEL=${CHANNEL:-stable} +NO_DEPLOY=${NO_DEPLOY:-false} +PACKAGES_DIR=${PACKAGES_DIR:-"./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE"} VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -mule -f package-deploy.yaml package-deploy-setup-gnupg +if [ -n "$S3_SOURCE" ] +then + PREFIX="$S3_SOURCE/$CHANNEL/$VERSION" + + aws s3 cp "s3://$PREFIX/algorand-$VERSION-1.x86_64.rpm" /root + aws s3 cp "s3://$PREFIX/algorand-devtools-$VERSION-1.x86_64.rpm" /root +else + cp "$PACKAGES_DIR"/*"$VERSION"*.rpm /root +fi pushd /root + +aws s3 cp s3://algorand-devops-misc/tools/gnupg2.2.9_centos7_amd64.tar.bz2 . 
tar jxf gnupg*.tar.bz2 -export PATH=/root/gnupg2/bin:"${PATH}" +export PATH="/root/gnupg2/bin:$PATH" export LD_LIBRARY_PATH=/root/gnupg2/lib mkdir -p .gnupg @@ -39,7 +58,7 @@ rpm.addSign(sys.argv[1], '') EOF mkdir rpmrepo -for rpm in $(ls packages/rpm/stable/*"$VERSION"*.rpm) +for rpm in $(ls *"$VERSION"*.rpm) do python2 rpmsign.py "$rpm" cp -p "$rpm" rpmrepo @@ -49,7 +68,16 @@ createrepo --database rpmrepo rm -f rpmrepo/repodata/repomd.xml.asc gpg -u rpm@algorand.com --detach-sign --armor rpmrepo/repodata/repomd.xml -popd +if $NO_DEPLOY +then + popd + cp -r /root/rpmrepo . +else + aws s3 sync rpmrepo "s3://algorand-releases/rpm/$CHANNEL/" + aws s3 cp *"$VERSION"*.rpm "s3://algorand-internal/packages/rpm/$CHANNEL/" +fi -mule -f package-deploy.yaml package-deploy-rpm-repo +echo +date "+build_release end DEPLOY rpm stage %Y%m%d_%H%M%S" +echo diff --git a/scripts/release/mule/gpg_preset_passphrase.sh b/scripts/release/mule/gpg_preset_passphrase.sh new file mode 100755 index 0000000000..3a974c7022 --- /dev/null +++ b/scripts/release/mule/gpg_preset_passphrase.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +gpgp=$(find /usr/lib/gnupg{2,,1} -type f -name gpg-preset-passphrase 2> /dev/null) + +# Here we need to grab the signing subkey, hence `tail -1`. 
+KEYGRIP=$(gpg -K --with-keygrip --textmode dev@algorand.com | grep Keygrip | tail -1 | awk '{ print $3 }') +echo "enter dev@ password" +$gpgp --verbose --preset "$KEYGRIP" + +KEYGRIP=$(gpg -K --with-keygrip --textmode rpm@algorand.com | grep Keygrip | head -1 | awk '{ print $3 }') +echo "enter rpm@ password" +$gpgp --verbose --preset "$KEYGRIP" + diff --git a/scripts/release/mule/package/deb/package.sh b/scripts/release/mule/package/deb/package.sh index 5662269b39..c8eb0e789a 100755 --- a/scripts/release/mule/package/deb/package.sh +++ b/scripts/release/mule/package/deb/package.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# shellcheck disable=2038,2045,2064 +# shellcheck disable=2038,2045,2064,2129,2162 set -ex @@ -7,108 +7,114 @@ echo date "+build_release begin PACKAGE DEB stage %Y%m%d_%H%M%S" echo -ARCH=$(./scripts/archtype.sh) -OS_TYPE=$(./scripts/ostype.sh) -BRANCH=${BRANCH:-$(./scripts/compute_branch.sh "$BRANCH")} +BRANCH=${BRANCH:-$(./scripts/compute_branch.sh)} CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -OUTDIR="./tmp/node_pkgs/$OS_TYPE/$ARCH" -mkdir -p "$OUTDIR/bin" -ALGO_BIN="./tmp/node_pkgs/$OS_TYPE/$ARCH/$CHANNEL/$OS_TYPE-$ARCH/bin" -VER=${VERSION:-$(./scripts/compute_build_number.sh -f)} +VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} # A make target in Makefile.mule may pass the name as an argument. 
-ALGORAND_PACKAGE_NAME=${1:-$(./scripts/compute_package_name.sh "$CHANNEL")} +PACKAGE_NAME="$1" -echo "Building debian package for '${OS} - ${ARCH}'" - -DEFAULTNETWORK=$("./scripts/compute_branch_network.sh") -DEFAULT_RELEASE_NETWORK=$("./scripts/compute_branch_release_network.sh" "${DEFAULTNETWORK}") +DEFAULTNETWORK=${DEFAULTNETWORK:-$(./scripts/compute_branch_network.sh "$BRANCH")} +DEFAULT_RELEASE_NETWORK=$("./scripts/compute_branch_release_network.sh" "$DEFAULTNETWORK") export DEFAULT_RELEASE_NETWORK -PKG_ROOT=$(mktemp -d) -trap "rm -rf $PKG_ROOT" 0 - -mkdir -p "${PKG_ROOT}/usr/bin" - -# NOTE: keep in sync with `./installer/rpm/algorand.spec`. -if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then - BIN_FILES=("carpenter" "catchupsrv" "msgpacktool" "tealcut" "tealdbg") - UNATTENDED_UPGRADES_FILE="53algorand-devtools-upgrades" - OUTPUT_DEB="$OUTDIR/algorand-devtools_${CHANNEL}_${OS_TYPE}-${ARCH}_${VER}.deb" - REQUIRED_ALGORAND_PKG=$("./scripts/compute_package_name.sh" "$CHANNEL") -else - BIN_FILES=("algocfg" "algod" "algoh" "algokey" "ddconfig.sh" "diagcfg" "goal" "kmd" "node_exporter") - UNATTENDED_UPGRADES_FILE="51algorand-upgrades" - OUTPUT_DEB="$OUTDIR/algorand_${CHANNEL}_${OS_TYPE}-${ARCH}_${VER}.deb" -fi - -for binary in "${BIN_FILES[@]}"; do - cp "${ALGO_BIN}/${binary}" "${PKG_ROOT}"/usr/bin - chmod 755 "${PKG_ROOT}/usr/bin/${binary}" -done - -if [[ ! 
"$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then - mkdir -p "${PKG_ROOT}/usr/lib/algorand" - lib_files=("updater" "find-nodes.sh") - for lib in "${lib_files[@]}"; do - cp "${ALGO_BIN}/${lib}" "${PKG_ROOT}/usr/lib/algorand" - chmod g-w "${PKG_ROOT}/usr/lib/algorand/${lib}" - done - - data_files=("config.json.example" "system.json") - mkdir -p "${PKG_ROOT}/var/lib/algorand" - for data in "${data_files[@]}"; do - cp "installer/${data}" "${PKG_ROOT}/var/lib/algorand" +find tmp/node_pkgs -name "*${CHANNEL}*linux*${VERSION}*.tar.gz" | cut -d '/' -f3-4 | sort --unique | while read OS_ARCH; do + PKG_ROOT=$(mktemp -d) + trap "rm -rf $PKG_ROOT" 0 + + ALGORAND_PACKAGE_NAME=$(./scripts/compute_package_name.sh "$CHANNEL" "$PACKAGE_NAME") + mkdir -p "${PKG_ROOT}/usr/bin" + OS_TYPE=$(echo "${OS_ARCH}" | cut -d '/' -f1) + ARCH=$(echo "${OS_ARCH}" | cut -d '/' -f2) + PKG_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH" + mkdir -p "$PKG_DIR/bin" + ALGO_BIN="${PKG_DIR}/$CHANNEL/$OS_TYPE-$ARCH/bin" + + # NOTE: keep in sync with `./installer/rpm/algorand.spec`. 
+ if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then + BIN_FILES=("carpenter" "catchupsrv" "msgpacktool" "tealcut" "tealdbg") + UNATTENDED_UPGRADES_FILE="53algorand-devtools-upgrades" + OUTPUT_DEB="$PKG_DIR/algorand-devtools_${CHANNEL}_${OS_TYPE}-${ARCH}_${VERSION}.deb" + REQUIRED_ALGORAND_PKG=$("./scripts/compute_package_name.sh" "$CHANNEL") + else + BIN_FILES=("algocfg" "algod" "algoh" "algokey" "ddconfig.sh" "diagcfg" "goal" "kmd" "node_exporter") + UNATTENDED_UPGRADES_FILE="51algorand-upgrades" + OUTPUT_DEB="$PKG_DIR/algorand_${CHANNEL}_${OS_TYPE}-${ARCH}_${VERSION}.deb" + fi + + for binary in "${BIN_FILES[@]}"; do + cp "${ALGO_BIN}/$binary" "$PKG_ROOT/usr/bin" + chmod 755 "$PKG_ROOT/usr/bin/$binary" done - cp "./installer/genesis/${DEFAULTNETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis.json" - - # files should not be group writable but directories should be - chmod -R g-w "${PKG_ROOT}/var/lib/algorand" - find "${PKG_ROOT}/var/lib/algorand" -type d | xargs chmod g+w - - SYSTEMD_FILES=("algorand.service" "algorand@.service") - mkdir -p "${PKG_ROOT}/lib/systemd/system" - for svc in "${SYSTEMD_FILES[@]}"; do - cp "installer/${svc}" "${PKG_ROOT}/lib/systemd/system" - chmod 644 "${PKG_ROOT}/lib/systemd/system/${svc}" - done -fi - -mkdir -p "${PKG_ROOT}/etc/apt/apt.conf.d" -cat < "${PKG_ROOT}/etc/apt/apt.conf.d/${UNATTENDED_UPGRADES_FILE}" + if [[ ! 
"$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then + mkdir -p "$PKG_ROOT/usr/lib/algorand" + lib_files=("updater" "find-nodes.sh") + for lib in "${lib_files[@]}"; do + cp "$ALGO_BIN/$lib" "$PKG_ROOT/usr/lib/algorand" + chmod g-w "$PKG_ROOT/usr/lib/algorand/$lib" + done + + data_files=("config.json.example" "system.json") + mkdir -p "$PKG_ROOT/var/lib/algorand" + for data in "${data_files[@]}"; do + cp "installer/$data" "$PKG_ROOT/var/lib/algorand" + done + + genesis_dirs=("devnet" "testnet" "mainnet" "betanet") + for dir in "${genesis_dirs[@]}"; do + mkdir -p "$PKG_ROOT/var/lib/algorand/genesis/$dir" + cp "./installer/genesis/$dir/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis/$dir/genesis.json" + done + cp "./installer/genesis/$DEFAULT_RELEASE_NETWORK/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis.json" + + # files should not be group writable but directories should be + chmod -R g-w "$PKG_ROOT/var/lib/algorand" + find "$PKG_ROOT/var/lib/algorand" -type d | xargs chmod g+w + + SYSTEMD_FILES=("algorand.service" "algorand@.service") + mkdir -p "$PKG_ROOT/lib/systemd/system" + for svc in "${SYSTEMD_FILES[@]}"; do + cp "installer/$svc" "$PKG_ROOT/lib/systemd/system" + chmod 644 "$PKG_ROOT/lib/systemd/system/$svc" + done + fi + + mkdir -p "$PKG_ROOT/etc/apt/apt.conf.d" + cat > "$PKG_ROOT/etc/apt/apt.conf.d/$UNATTENDED_UPGRADES_FILE" << EOF ## This file is provided by the Algorand package to configure ## unattended upgrades for the Algorand node software. Unattended-Upgrade::Allowed-Origins { - "Algorand:${CHANNEL}"; + "Algorand:$CHANNEL"; }; Dpkg::Options { - "--force-confdef"; - "--force-confold"; + "--force-confdef"; + "--force-confold"; }; EOF -mkdir -p "${PKG_ROOT}/DEBIAN" -if [[ "$PKG_NAME" =~ devtools ]]; then - INSTALLER_DIR="algorand-devtools" -else - INSTALLER_DIR=algorand -fi -# Can contain `control`, `preinst`, `postinst`, `prerm`, `postrm`, `conffiles`. 
-CTL_FILES_DIR="installer/debian/${INSTALLER_DIR}" -for ctl_file in $(ls "${CTL_FILES_DIR}"); do - # Copy first, to preserve permissions, then overwrite to fill in template. - cp -a "${CTL_FILES_DIR}/${ctl_file}" "${PKG_ROOT}/DEBIAN/${ctl_file}" - < "${CTL_FILES_DIR}/${ctl_file}" \ - sed -e "s,@ARCH@,${ARCH}," \ - -e "s,@VER@,${VER}," \ - -e "s,@REQUIRED_ALGORAND_PKG@,$REQUIRED_ALGORAND_PKG," \ - > "${PKG_ROOT}/DEBIAN/${ctl_file}" -done + mkdir -p "$PKG_ROOT/DEBIAN" + if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then + INSTALLER_DIR="algorand-devtools" + else + INSTALLER_DIR=algorand + fi + # Can contain `control`, `preinst`, `postinst`, `prerm`, `postrm`, `conffiles`. + CTL_FILES_DIR="installer/debian/$INSTALLER_DIR" + for ctl_file in $(ls "${CTL_FILES_DIR}"); do + # Copy first, to preserve permissions, then overwrite to fill in template. + cp -a "$CTL_FILES_DIR/$ctl_file" "$PKG_ROOT/DEBIAN/$ctl_file" + < "$CTL_FILES_DIR/$ctl_file" \ + sed -e "s,@ARCH@,$ARCH," \ + -e "s,@VER@,$VERSION," \ + -e "s,@PKG_NAME@,$ALGORAND_PACKAGE_NAME," \ + -e "s,@REQUIRED_ALGORAND_PKG@,$REQUIRED_ALGORAND_PKG," \ + > "$PKG_ROOT/DEBIAN/$ctl_file" + done -# TODO: make `Files:` segments for vendor/... and crypto/libsodium-fork, but reasonably this should be understood to cover all _our_ files and copied in packages continue to be licenced under their own terms -cat < "${PKG_ROOT}/DEBIAN/copyright" + # TODO: make `Files:` segments for vendor/... 
and crypto/libsodium-fork, but reasonably this should be understood to cover all _our_ files and copied in packages continue to be licenced under their own terms + cat > "$PKG_ROOT/DEBIAN/copyright" << EOF Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: Algorand Upstream-Contact: Algorand developers @@ -119,13 +125,45 @@ Copyright: Algorand developers License: AGPL-3+ EOF -sed 's/^$/./g' < COPYING | sed 's/^/ /g' >> "${PKG_ROOT}/DEBIAN/copyright" -mkdir -p "${PKG_ROOT}/usr/share/doc/${ALGORAND_PACKAGE_NAME}" -cp -p "${PKG_ROOT}/DEBIAN/copyright" "${PKG_ROOT}/usr/share/doc/${ALGORAND_PACKAGE_NAME}/copyright" + sed 's/^$/./g' < COPYING | sed 's/^/ /g' >> "$PKG_ROOT/DEBIAN/copyright" + mkdir -p "$PKG_ROOT/usr/share/doc/$ALGORAND_PACKAGE_NAME" + cp -p "$PKG_ROOT/DEBIAN/copyright" "$PKG_ROOT/usr/share/doc/$ALGORAND_PACKAGE_NAME/copyright" -dpkg-deb --build "${PKG_ROOT}" "${OUTPUT_DEB}" + dpkg-deb --build "$PKG_ROOT" "$OUTPUT_DEB" -echo -date "+build_release end PACKAGE DEB stage %Y%m%d_%H%M%S" -echo + ############################################################ + + pushd "$PKG_DIR" + + STATUSFILE=build_status_${CHANNEL}_${OS_TYPE}-${ARCH}_${VERSION} + cat >> "$STATUSFILE" << EOF +go version: +EOF + + /usr/local/go/bin/go version >> "$STATUSFILE" + + ############################################################ + + cat >> "$STATUSFILE" << EOF +go env: +EOF + + /usr/local/go/bin/go env >> "$STATUSFILE" + + ############################################################ + + cat >> "$STATUSFILE" << EOF +dpkg-l: +EOF + + dpkg -l >> "$STATUSFILE" + + popd + + ############################################################ + + echo + date "+build_release end PACKAGE DEB stage %Y%m%d_%H%M%S" + echo +done diff --git a/scripts/release/mule/package/docker/package.sh b/scripts/release/mule/package/docker/package.sh index 92efb4310a..d04faac906 100755 --- a/scripts/release/mule/package/docker/package.sh +++ 
b/scripts/release/mule/package/docker/package.sh @@ -6,13 +6,13 @@ echo date "+build_release begin PACKAGE DOCKER stage %Y%m%d_%H%M%S" echo -ARCH=$(./scripts/archtype.sh) +ARCH_TYPE=$(./scripts/archtype.sh) OS_TYPE=$(./scripts/ostype.sh) BRANCH=${BRANCH:-$(./scripts/compute_branch.sh "$BRANCH")} CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -PKG_ROOT_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH" -FULLVERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -ALGOD_INSTALL_TAR_FILE="$PKG_ROOT_DIR/node_${CHANNEL}_${OS_TYPE}-${ARCH}_${FULLVERSION}.tar.gz" +PKG_ROOT_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" +VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} +ALGOD_INSTALL_TAR_FILE="$PKG_ROOT_DIR/node_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.tar.gz" if [ -f "$ALGOD_INSTALL_TAR_FILE" ]; then echo "using install file $ALGOD_INSTALL_TAR_FILE" @@ -22,7 +22,7 @@ else fi INPUT_ALGOD_TAR_FILE="temp_install.tar.gz" -CHANNEL_VERSION="${CHANNEL}_${FULLVERSION}" +CHANNEL_VERSION="${CHANNEL}_${VERSION}" NEW_PKG_DIR="algod_pkg_$CHANNEL_VERSION" DOCKER_EXPORT_FILE="algod_docker_export_$CHANNEL_VERSION.tar.gz" DOCKER_PKG_FILE="algod_docker_package_$CHANNEL_VERSION.tar.gz" diff --git a/scripts/release/mule/package/rpm/package.sh b/scripts/release/mule/package/rpm/package.sh index 3d60cb9d29..98cc95394b 100755 --- a/scripts/release/mule/package/rpm/package.sh +++ b/scripts/release/mule/package/rpm/package.sh @@ -1,51 +1,56 @@ #!/bin/bash +# shellcheck disable=2086,2162 set -ex echo "Building RPM package" REPO_DIR=$(pwd) -ARCH=$(./scripts/archtype.sh) -OS_TYPE=$(./scripts/ostype.sh) FULLVERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)} +BRANCH=${BRANCH:-$(./scripts/compute_branch.sh)} CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -ALGO_BIN="$REPO_DIR/tmp/node_pkgs/$OS_TYPE/$ARCH/$CHANNEL/$OS_TYPE-$ARCH/bin" -# TODO: Should there be a default network? 
-DEFAULTNETWORK=devnet +DEFAULTNETWORK=${DEFAULTNETWORK:-$(./scripts/compute_branch_network.sh "$BRANCH")} DEFAULT_RELEASE_NETWORK=$(./scripts/compute_branch_release_network.sh "$DEFAULTNETWORK") - -# A make target in Makefile.mule may pass the name as an argument. -ALGORAND_PACKAGE_NAME=${1:-$(./scripts/compute_package_name.sh "$CHANNEL")} - -if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then - REQUIRED_ALGORAND_PACKAGE=$(./scripts/compute_package_name.sh "$CHANNEL") -fi - -# The following need to be exported for use in ./go-algorand/installer/rpm/$ALGORAND_PACKAGE_NAME/$ALGORAND_PACKAGE_NAME.spec. -export DEFAULT_NETWORK -export DEFAULT_RELEASE_NETWORK -export REPO_DIR -export ALGO_BIN - -RPMTMP=$(mktemp -d 2>/dev/null || mktemp -d -t "rpmtmp") -trap 'rm -rf $RPMTMP' 0 - -TEMPDIR=$(mktemp -d) -if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then - INSTALLER_DIR="algorand-devtools" -else - INSTALLER_DIR=algorand -fi -trap 'rm -rf $TEMPDIR' 0 -< "./installer/rpm/$INSTALLER_DIR/$INSTALLER_DIR.spec" \ - sed -e "s,@ALGORAND_PACKAGE_NAME@,$REQUIRED_ALGORAND_PACKAGE," \ - -e "s,@VER@,$FULLVERSION," \ - -e "s,@ARCH@,$ARCH," \ - -e "s,@REQUIRED_ALGORAND_PKG@,$ALGORAND_PACKAGE_NAME," \ - > "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" - -rpmbuild --buildroot "$HOME/foo" --define "_rpmdir $RPMTMP" --define "RELEASE_GENESIS_PROCESS x$RELEASE_GENESIS_PROCESS" --define "LICENSE_FILE ./COPYING" -bb "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" - -cp -p "$RPMTMP"/*/*.rpm "./tmp/node_pkgs/$OS_TYPE/$ARCH" - +PACKAGE_NAME="$1" + +find tmp/node_pkgs -name "*${CHANNEL}*linux*${FULLVERSION}*.tar.gz" | cut -d '/' -f3-4 | sort --unique | while read OS_ARCH; do + OS_TYPE=$(echo "${OS_ARCH}" | cut -d '/' -f1) + ARCH_TYPE=$(echo "${OS_ARCH}" | cut -d '/' -f2) + ARCH_UNAME=$(./scripts/release/common/cpu_name.sh ${ARCH_TYPE}) + ALGO_BIN="$REPO_DIR/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/$CHANNEL/$OS_TYPE-$ARCH_TYPE/bin" + # A make target in Makefile.mule may pass the name as an argument. 
+ ALGORAND_PACKAGE_NAME=$(./scripts/compute_package_name.sh "$CHANNEL" "$PACKAGE_NAME") + + if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then + REQUIRED_ALGORAND_PACKAGE=$(./scripts/compute_package_name.sh "$CHANNEL") + fi + + # The following need to be exported for use in ./go-algorand/installer/rpm/$ALGORAND_PACKAGE_NAME/$ALGORAND_PACKAGE_NAME.spec. + export DEFAULTNETWORK + export DEFAULT_RELEASE_NETWORK + export REPO_DIR + export ALGO_BIN + + RPMTMP=$(mktemp -d 2>/dev/null || mktemp -d -t "rpmtmp") + trap 'rm -rf $RPMTMP' 0 + + TEMPDIR=$(mktemp -d) + if [[ "$ALGORAND_PACKAGE_NAME" =~ devtools ]]; then + INSTALLER_DIR="algorand-devtools" + else + INSTALLER_DIR=algorand + fi + trap 'rm -rf $TEMPDIR' 0 + < "./installer/rpm/$INSTALLER_DIR/$INSTALLER_DIR.spec" \ + sed -e "s,@PKG_NAME@,$ALGORAND_PACKAGE_NAME," \ + -e "s,@VER@,$FULLVERSION," \ + -e "s,@ARCH@,$ARCH_UNAME," \ + -e "s,@REQUIRED_ALGORAND_PKG@,$REQUIRED_ALGORAND_PACKAGE," \ + > "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" + + rpmbuild --buildroot "$HOME/foo" --define "_rpmdir $RPMTMP" --define "RELEASE_GENESIS_PROCESS xtrue" --define "LICENSE_FILE ./COPYING" -bb "$TEMPDIR/$ALGORAND_PACKAGE_NAME.spec" --target $ARCH_UNAME + + cp -p "$RPMTMP"/*/*.rpm "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" + echo "${RPMTMP}" + echo "${TEMPDIR}" +done diff --git a/scripts/release/mule/sign/sign.sh b/scripts/release/mule/sign/sign.sh index 272f132da1..7fec7f677d 100755 --- a/scripts/release/mule/sign/sign.sh +++ b/scripts/release/mule/sign/sign.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash -# shellcheck disable=2035 +# shellcheck disable=2035,2129,2162 + +# TODO: This needs to be reworked a bit to support Darwin. 
set -exo pipefail @@ -7,84 +9,72 @@ echo date "+build_release begin SIGN stage %Y%m%d_%H%M%S" echo -PKG_TYPE="$1" -ARCH_BIT=$(uname -m) -ARCH_TYPE=$(./scripts/archtype.sh) -OS_TYPE=$(./scripts/ostype.sh) VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)} +BRANCH=${BRANCH:-$(./scripts/compute_branch.sh)} CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -PKG_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" +PKG_DIR="./tmp/node_pkgs" SIGNING_KEY_ADDR=dev@algorand.com -if ! $USE_CACHE -then - export ARCH_BIT - export ARCH_TYPE - export CHANNEL - export OS_TYPE - export VERSION - - if [ "$PKG_TYPE" == "tar.gz" ] - then - mule -f package-sign.yaml package-sign-setup-tarball - else - mule -f package-sign.yaml "package-sign-setup-$PKG_TYPE" - fi -fi - -make_hashes () { - # We need to futz a bit with "source" to make the hashes correct. - local HASH_TYPE=${1:-$PKG_TYPE} - local PACKAGE_TYPE=${2:-$PKG_TYPE} - - HASHFILE="hashes_${CHANNEL}_${OS_TYPE}_${ARCH_TYPE}_${VERSION}_${HASH_TYPE}" - # Remove any previously-generated hashes. - rm -f "$HASHFILE"* - - { - md5sum *"$VERSION"*."$PACKAGE_TYPE" ; - shasum -a 256 *"$VERSION"*."$PACKAGE_TYPE" ; - shasum -a 512 *"$VERSION"*."$PACKAGE_TYPE" ; - } >> "$HASHFILE" +# It seems that copying/mounting the gpg dir from another machine can result in insecure +# access privileges, so set the correct permissions to avoid the following warning: +# +# gpg: WARNING: unsafe permissions on homedir '/root/.gnupg' +# +find /root/.gnupg -type d -exec chmod 700 {} \; +find /root/.gnupg -type f -exec chmod 600 {} \; - gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$HASHFILE" - gpg -u "$SIGNING_KEY_ADDR" --clearsign "$HASHFILE" -} +mkdir -p "$PKG_DIR" +cd "$PKG_DIR" -make_sigs () { - local PACKAGE_TYPE=${1:-$PKG_TYPE} - - # Remove any previously-generated signatures. 
- rm -f ./*"$VERSION"*."$PACKAGE_TYPE".sig - - for item in *"$VERSION"*."$1" - do - gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$item" - done -} - -pushd "$PKG_DIR" +if [ -n "$S3_SOURCE" ] +then + aws s3 cp --recursive --exclude "*" --include "*$CHANNEL*$VERSION*" "s3://$S3_SOURCE/$CHANNEL/$VERSION" . +fi -GPG_HOME_DIR=$(gpgconf --list-dirs | grep homedir | awk -F: '{ print $2 }') -chmod 400 "$GPG_HOME_DIR" +# TODO: "$PKG_TYPE" == "source" -if [ "$PKG_TYPE" == "source" ] -then - git archive --prefix="algorand-$FULLVERSION/" "$BRANCH" | gzip >| "$PKG_DIR/algorand_${CHANNEL}_source_${VERSION}.tar.gz" - make_sigs tar.gz - make_hashes source tar.gz -else - if [ "$PKG_TYPE" == "rpm" ] +# https://unix.stackexchange.com/a/46259 +# Grab the directories directly underneath (max-depth 1) ./tmp/node_pkgs/ into a space-delimited string. +# This will help us target `linux`, `darwin` and (possibly) `windows` build assets. +# Note the surrounding parens turns the string created by `find` into an array. +OS_TYPES=($(find . -mindepth 1 -maxdepth 1 -type d -printf '%f\n')) +for os in "${OS_TYPES[@]}"; do + if [ "$os" = linux ] then - SIGNING_KEY_ADDR=rpm@algorand.com + ARCHS=(amd64 arm arm64) + for arch in "${ARCHS[@]}"; do + ( + mkdir -p "$os/$arch" + cd "$os/$arch" + + # Clean package directory of any previous operations. 
+ rm -rf hashes* *.sig *.asc *.asc.gz + + for file in *.tar.gz *.deb + do + gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$file" + done + + for file in *.rpm + do + gpg -u rpm@algorand.com --detach-sign "$file" + done + + HASHFILE="hashes_${CHANNEL}_${os}_${arch}_${VERSION}" + md5sum *.tar.gz *.deb *.rpm >> "$HASHFILE" + shasum -a 256 *.tar.gz *.deb *.rpm >> "$HASHFILE" + shasum -a 512 *.tar.gz *.deb *.rpm >> "$HASHFILE" + + gpg -u "$SIGNING_KEY_ADDR" --detach-sign "$HASHFILE" + gpg -u "$SIGNING_KEY_ADDR" --clearsign "$HASHFILE" + + STATUSFILE="build_status_${CHANNEL}_${os}-${arch}_${VERSION}" + gpg -u "$SIGNING_KEY_ADDR" --clearsign "$STATUSFILE" + gzip -c "$STATUSFILE.asc" > "$STATUSFILE.asc.gz" + ) + done fi - - make_sigs "$PKG_TYPE" - make_hashes -fi - -popd +done echo date "+build_release end SIGN stage %Y%m%d_%H%M%S" diff --git a/scripts/release/mule/test/test.sh b/scripts/release/mule/test/test.sh index 831220ecc3..33b80c76d5 100755 --- a/scripts/release/mule/test/test.sh +++ b/scripts/release/mule/test/test.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# shellcheck disable=2045 set -ex @@ -10,35 +11,87 @@ export ARCH_TYPE OS_TYPE=$(./scripts/ostype.sh) export OS_TYPE -if [ -z "$VERSION" ]; then - VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} -fi -export VERSION - -if [ -z "$BRANCH" ]; then - BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)} -fi -export BRANCH +export BRANCH=${BRANCH:-$(./scripts/compute_branch.sh)} +export CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} +export NETWORK=${NETWORK:-$(./scripts/compute_branch_network.sh "$BRANCH")} +export SHA=${SHA:-$(git rev-parse HEAD)} +export VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} +ALGORAND_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-beta || echo algorand) +DEVTOOLS_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-devtools-beta || echo algorand-devtools) +export ALGORAND_PACKAGE_NAME +export DEVTOOLS_PACKAGE_NAME -if [ -z "$CHANNEL" ]; then - 
CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")} -fi -export CHANNEL +PKG_DIR="./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" -if [ -z "$SHA" ]; then - SHA=${SHA:-$(git rev-parse HEAD)} -fi -export SHA +mkdir -p "$PKG_DIR" +pushd "$PKG_DIR" -if ! $USE_CACHE +if [ -n "$S3_SOURCE" ] then - mule -f package-test.yaml "package-test-setup-$PKG_TYPE" + PREFIX="$S3_SOURCE/$CHANNEL/$VERSION" + + # deb + aws s3 cp "s3://$PREFIX/algorand_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.deb" . + aws s3 cp "s3://$PREFIX/algorand-devtools_${CHANNEL}_${OS_TYPE}-${ARCH_TYPE}_${VERSION}.deb" . + + # rpm + aws s3 cp "s3://$PREFIX/algorand-$VERSION-1.$ARCH_BIT.rpm" . + aws s3 cp "s3://$PREFIX/algorand-devtools-$VERSION-1.$ARCH_BIT.rpm" . fi -if [[ "$ARCH_TYPE" =~ "arm" ]] +popd + +for test in $(ls ./scripts/release/mule/test/tests/pre/*.sh) +do + echo ">>>>>>>>>> PRE TESTING $(basename "$test")" + bash "$test" +done + +pushd "$PKG_DIR" + +if [ "$PKG_TYPE" = deb ] then - ./scripts/release/mule/test/tests/run_tests -b "$BRANCH" -c "$CHANNEL" -h "$SHA" -r "$VERSION" + dpkg -i algorand_*"$VERSION"*.deb + dpkg -i algorand-devtools*"$VERSION"*.deb else - ./scripts/release/mule/test/util/test_package.sh + # We need to install this since it's not being installed by a package manager. + # Normally, this is installed for us b/c it's a dependency. + # See `./installer/rpm/algorand/algorand.spec`. + yum install yum-cron -y + # + # Note that the RPM package DOES NOT have the CHANNEL in its filename (unlike DEB), + # instead it contains the package name. + # + # deb: + # algorand_CHANNEL*VERSION.deb + # algorand-devtools_CHANNEL*VERSION.deb + # + # (this pattern is for all channels) + # + # rpm: + # (this pattern is for stable) + # algorand-VERSION*.rpm + # algorand-devtools-VERSION.rpm + # + # (this pattern is for beta) + # algorand-beta-VERSION*.rpm + # algorand-devtools-beta-VERSION.rpm + # + # SO..... 
+ # ALGORAND_PACKAGE_NAME-VERSION*.rpm + # DEVTOOLS_PACKAGE_NAME-beta-VERSION.rpm + # + # Hope that makes sense :) + # + rpm -i "$ALGORAND_PACKAGE_NAME"-"$VERSION"-1."$ARCH_BIT".rpm + rpm -i "$DEVTOOLS_PACKAGE_NAME"-*"$VERSION"-1."$ARCH_BIT".rpm fi +popd + +for test in $(ls ./scripts/release/mule/test/tests/post/*.sh) +do + echo ">>>>>>>>>> POST TESTING $(basename "$test")" + bash "$test" +done + diff --git a/scripts/release/mule/test/tests/goal.sh b/scripts/release/mule/test/tests/goal.sh deleted file mode 100755 index b7812b4a93..0000000000 --- a/scripts/release/mule/test/tests/goal.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# Check that the installed version is now the current version. -algod -v | grep -q "${VERSION}.${CHANNEL}" - -mkdir -p /root/testnode -cp -p /var/lib/algorand/genesis.json /root/testnode - -goal node start -d /root/testnode -goal node wait -d /root/testnode -w 120 -goal node stop -d /root/testnode - diff --git a/scripts/release/mule/test/tests/post/verify_genesis_file.sh b/scripts/release/mule/test/tests/post/verify_genesis_file.sh new file mode 100755 index 0000000000..5dd515b67a --- /dev/null +++ b/scripts/release/mule/test/tests/post/verify_genesis_file.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -ex + +echo "[$0] Testing network string in genesis.json" + +# We're looking for a line that looks like the following: +# +# "network": "mainnet", +# + +GEN_FILE=/var/lib/algorand/genesis.json +cd "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" + +if [ ! -f "$GEN_FILE" ] +then + echo "[$0] The genesis file is not present." + exit 1 +fi + +EXPECTED_NETWORK=$(jq -r '.network' $GEN_FILE) + +if [ "$NETWORK" != "$EXPECTED_NETWORK" ] +then + echo "[$0] The network value \`$NETWORK\` in \`$GEN_FILE\` is incorrect, it does not match \`$EXPECTED_NETWORK\`." + exit 1 +fi + +echo "[$0] The network value \`$NETWORK\` in \`$GEN_FILE\` is correct." 
+ diff --git a/scripts/release/mule/test/tests/post/verify_package_binaries.sh b/scripts/release/mule/test/tests/post/verify_package_binaries.sh new file mode 100755 index 0000000000..1a61dba1e7 --- /dev/null +++ b/scripts/release/mule/test/tests/post/verify_package_binaries.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# shellcheck disable=2116 + +set -ex + +echo "[$0] Verifying installed binaries..." + +RET=0 +RPMTMP=$(mktemp -d) + +cd "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE" + +if [ "$PKG_TYPE" = deb ] +then + dpkg -L "$ALGORAND_PACKAGE_NAME" > "$RPMTMP/algorand.install" + dpkg -L "$DEVTOOLS_PACKAGE_NAME" > "$RPMTMP/algorand-devtools.install" +else + rpm -ql "$ALGORAND_PACKAGE_NAME" > "$RPMTMP/algorand.install" + rpm -ql "$DEVTOOLS_PACKAGE_NAME" > "$RPMTMP/algorand-devtools.install" +fi + +ALGORAND_BINS=( + /usr/bin/algocfg + /usr/bin/algod + /usr/bin/algoh + /usr/bin/algokey + /usr/bin/ddconfig.sh + /usr/bin/diagcfg + /usr/bin/goal + /usr/bin/kmd + /usr/bin/node_exporter +) + +for bin in "${ALGORAND_BINS[@]}"; do + if ! grep "$bin" "$RPMTMP/algorand.install" > /dev/null + then + MISSING_ALGORAND_BINS+=("$bin") + fi +done + +DEVTOOLS_BINS=( + /usr/bin/carpenter + /usr/bin/catchupsrv + /usr/bin/msgpacktool + /usr/bin/tealcut + /usr/bin/tealdbg +) + +for bin in "${DEVTOOLS_BINS[@]}"; do + if ! grep "$bin" "$RPMTMP/algorand-devtools.install" > /dev/null + then + MISSING_DEVTOOLS_BINS+=("$bin") + fi +done + +LEN=$(echo ${#MISSING_ALGORAND_BINS[*]}) +if [ "$LEN" -gt 0 ] +then + echo "The following binaries are not contained in the \`algorand\` package:" + for (( i=0; i= 2.1.6) + # + + cp "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE"/algorand-devtools*"$CHANNEL"*"$VERSION"*.deb "$RPMTMP" + cd "$RPMTMP" + ar xv *"$VERSION"*.deb + tar xf control.tar.xz + + if ! grep -F "Pre-Depends: $ALGORAND_PACKAGE_NAME (>= $VERSION)" control + then + echo "[$0] The dependency for $ALGORAND_PACKAGE_NAME version $VERSION is incorrect." 
+ exit 1 + fi + + echo "[$0] The dependency for $ALGORAND_PACKAGE_NAME version $VERSION is correct." +else + # Note that the .spec file isn't packaged in the RPM. There are tools such `rpmrebuild` that + # attempt to generate the .spec file, but it doesn't give us the info we need. + # + # Instead, we'll just install using `dpkg` and grep the error stream. + # + # Also, note that the RPM package DOES NOT have the CHANNEL in its filename (unlike DEB)!! + if ! rpm -i "./tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE/algorand-devtools-"*"$VERSION"*"$ARCH_BIT".rpm 2> "$RPMTMP/rpm.install" + then + # + # We're looking for lines that looks like the following: + # + # error: Failed dependencies: + # algorand >= 2.1.86017 is needed by algorand-devtools-2.1.86017-1.x86_64 + # + if [[ $(cat "$RPMTMP/rpm.install") =~ "$ALGORAND_PACKAGE_NAME >= $VERSION is needed by $DEVTOOLS_PACKAGE_NAME-$VERSION" ]] + then + echo "[$0] The package \`algorand-devtools\` correctly has a dependency on package $ALGORAND_PACKAGE_NAME and failed to install." + exit 0 + fi + + echo "[$0] The package \`algorand-devtools\` failed to install because of a missing dependency other than the $ALGORAND_PACKAGE_NAME package." + exit 1 + else + echo "[$0] The package \`algorand-devtools\` was installed without any dependencies, while it should have a dependency on the $ALGORAND_PACKAGE_NAME package." + exit 1 + fi +fi + diff --git a/scripts/release/mule/test/tests/run_tests b/scripts/release/mule/test/tests/run_tests deleted file mode 100755 index c7ba6e23a9..0000000000 --- a/scripts/release/mule/test/tests/run_tests +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=2045 - -set -ex - -# This is currently used by `test_package.sh`. -# It is copied into a docker image at build time and then invoked at run time. 
- -BRANCH= -CHANNEL= -COMMIT_HASH= -FULLVERSION= - -while [ "$1" != "" ]; do - case "$1" in - -b) - shift - BRANCH="$1" - ;; - -c) - shift - CHANNEL="$1" - ;; - -h) - shift - COMMIT_HASH="$1" - ;; - -r) - shift - FULLVERSION="$1" - ;; - *) - echo "Unknown option" "$1" - exit 1 - ;; - esac - shift -done - -if [ -z "$BRANCH" ] || [ -z "$CHANNEL" ] || [ -z "$COMMIT_HASH" ] || [ -z "$FULLVERSION" ] -then - echo "[ERROR] $0 -b $BRANCH -c $CHANNEL -h $COMMIT_HASH -r $FULLVERSION" - exit 1 -fi - -if [ "$PKG_TYPE" == "deb" ] -then - for deb in $(ls "$WORKDIR/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE"/*"$FULLVERSION"*.deb); do - if [[ ! "$deb" =~ devtools ]]; then - dpkg -i "$deb" - fi - done -else - for rpm in $(ls "$WORKDIR/tmp/node_pkgs/$OS_TYPE/$ARCH_TYPE"/*"$FULLVERSION"*.rpm); do - if [[ ! "$rpm" =~ devtools ]]; then - yum install "$rpm" -y - fi - done -fi - -export BRANCH -export COMMIT_HASH -export CHANNEL -export FULLVERSION - -for test in $(ls ./scripts/release/mule/test/tests/*.sh) -do - bash "$test" -done - diff --git a/scripts/release/mule/test/util/test_package.sh b/scripts/release/mule/test/util/test_package.sh deleted file mode 100755 index e18c882c44..0000000000 --- a/scripts/release/mule/test/util/test_package.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -trap cleanup 0 - -OS_LIST= - -if [ "$PKG_TYPE" == "deb" ] -then - OS_LIST=( - ubuntu:16.04 - ubuntu:18.04 - ) -else - # TODO: The following error happens on centos:8 - # - # Error: - # Problem: conflicting requests - # - nothing provides yum-cron needed by algorand-2.0.4-1.x86_64 - # (try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages) - # algod: command not found - OS_LIST=( - centos:7 - # centos:8 - fedora:28 - ) -fi - -FAILED=() - -build_images () { - # We'll use this simple tokenized Dockerfile. - # https://serverfault.com/a/72511 - TOKENIZED=$(echo -e "\ -FROM {{OS}}\n\n\ -WORKDIR /root\n\ -COPY . 
.\n\ -CMD [\"/bin/bash\"]") - - for item in ${OS_LIST[*]} - do - # Use pattern substitution here (like sed). - # ${parameter/pattern/substitution} - echo -e "${TOKENIZED/\{\{OS\}\}/$item}" > Dockerfile - if ! docker build -t "${item}-run-tests" . - then - FAILED+=("$item") - fi - done -} - -run_images () { - for item in ${OS_LIST[*]} - do - echo "[$0] Running ${item}-test..." - - if ! docker run --rm --name algorand -e OS_TYPE="$OS_TYPE" -e ARCH_TYPE="$ARCH_TYPE" -e PKG_TYPE="$PKG_TYPE" -e WORKDIR="$WORKDIR" --volumes-from "$HOSTNAME" -t "${item}-run-tests" bash ./scripts/release/mule/test/tests/run_tests -b "$BRANCH" -c "$CHANNEL" -h "$SHA" -r "$VERSION" - then - FAILED+=("$item") - fi - done -} - -cleanup() { - rm -f Dockerfile -} - -check_failures() { - if [ "${#FAILED[@]}" -gt 0 ] - then - echo -e "\n[$0] The following images could not be $1:" - - for failed in ${FAILED[*]} - do - echo " - $failed" - done - - echo - exit 1 - fi -} - -build_images -check_failures built -echo "[$0] All builds completed with no failures." - -run_images -check_failures verified -echo "[$0] All runs completed with no failures." - diff --git a/scripts/travis/before_build.sh b/scripts/travis/before_build.sh index 49ad1a0108..b6f733518e 100755 --- a/scripts/travis/before_build.sh +++ b/scripts/travis/before_build.sh @@ -55,6 +55,11 @@ if [ "${OS}-${ARCH}" = "linux-arm" ]; then exit 0 fi +if [ "${OS}-${ARCH}" = "windows-amd64" ]; then + echo "Skipping running 'go vet'/gofmt/golint for windows builds" + exit 0 +fi + echo "Running go vet..." go vet $(GO111MODULE=off go list ./... 
| grep -v /test/e2e-go/) diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh index 25bcb88705..243899be7e 100755 --- a/scripts/travis/build.sh +++ b/scripts/travis/build.sh @@ -69,8 +69,9 @@ scripts/travis/before_build.sh # Force re-evaluation of genesis files to see if source files changed w/o running make touch gen/generate.go -if [ "${OS}-${ARCH}" = "linux-arm" ]; then +if [ "${OS}-${ARCH}" = "linux-arm" ] || [ "${OS}-${ARCH}" = "windows-amd64" ]; then # for arm, build just the basic distro + # for windows, we still have some issues with the enlistment checking, so we'll make it simple for now. MAKE_DEBUG_OPTION="" fi diff --git a/scripts/travis/configure_dev.sh b/scripts/travis/configure_dev.sh index f5d4e7c057..ed8e8b203e 100755 --- a/scripts/travis/configure_dev.sh +++ b/scripts/travis/configure_dev.sh @@ -24,6 +24,11 @@ elif [[ "${OS}" == "darwin" ]]; then brew update brew tap homebrew/cask brew pin boost || true +elif [[ "${OS}" == "windows" ]]; then + git config --global core.autocrlf true + # Golang probably is not installed under MSYS2 so add the environment variable temporarily + export GOPATH=$HOME/go + mkdir -p $GOPATH/bin fi "${SCRIPTPATH}/../configure_dev.sh" diff --git a/scripts/travis/test.sh b/scripts/travis/test.sh index 179ae9e265..ebbe815194 100755 --- a/scripts/travis/test.sh +++ b/scripts/travis/test.sh @@ -18,10 +18,11 @@ curl -sL -o ~/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gim chmod +x ~/gimme eval $(~/gimme "${GOLANG_VERSION}") -if [ "${OS}-${ARCH}" = "linux-arm" ]; then - # for arm, no tests need to be invoked. - exit 0 -fi +if [ "${OS}-${ARCH}" = "linux-arm" ] || [ "${OS}-${ARCH}" = "windows-amd64" ]; then + # for arm, no tests need to be invoked. + # for now, disable tests on windows. 
+ exit 0 + fi GOPATHBIN=$(go env GOPATH)/bin export PATH=$PATH:$GOPATHBIN diff --git a/scripts/windows/instructions.md b/scripts/windows/instructions.md new file mode 100644 index 0000000000..b24370bcf7 --- /dev/null +++ b/scripts/windows/instructions.md @@ -0,0 +1,25 @@ +1. Download and install `MSYS2` package from [here](https://www.msys2.org/) + +2. Run `MSYS2 MingW 64-bit` application to open the MSYS2 terminal. + +3. Update MSYS2 package and dependency manager by running the following commands: + + ``` + pacman -Syu --disable-download-timeout + ``` + + NOTE: It is very likely MSYS2 will ask to close the window and repeat the command for furter updates. Check `MSYS2` web page for additional support. + +4. Install GIT on MSYS2 by executing the following command: + + ``` + pacman -S --disable-download-timeout --noconfirm git + ``` + +5. Clone repository with `git clone https://github.com/algorand/go-algorand`. + +6. Switch to source code directory with `cd go-algorand`. + +7. Run `./scripts/configure_dev.sh` to install required dependencies. + +8. Run `make`. diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go index efedb51254..ce1083e62b 100644 --- a/shared/pingpong/accounts.go +++ b/shared/pingpong/accounts.go @@ -89,9 +89,10 @@ func ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]ui fmt.Printf("Located Source Account: %s -> %v\n", cfg.SrcAccount, accounts[cfg.SrcAccount]) } - // Only reuse existing accounts for non asset testing. + // Only reuse existing accounts for non asset testing and non app testing. // For asset testing, new participant accounts will be created since accounts are limited to 1000 assets. - if cfg.NumAsset == 0 { + // For app testing, new participant accounts will be created since accounts are limited to 10 aps. 
+ if cfg.NumAsset == 0 && cfg.NumApp == 0 { // If we have more accounts than requested, pick the top N (not including src) if len(accounts) > int(cfg.NumPartAccounts+1) { fmt.Printf("Finding the richest %d accounts to use for transacting\n", cfg.NumPartAccounts) @@ -158,7 +159,9 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf return } assetName := fmt.Sprintf("pong%d", i) - fmt.Printf("Creating asset %s\n", assetName) + if !cfg.Quiet { + fmt.Printf("Creating asset %s\n", assetName) + } tx, createErr := client.MakeUnsignedAssetCreateTx(totalSupply, false, addr, addr, addr, addr, "ping", assetName, "", meta, 0) if createErr != nil { fmt.Printf("Cannot make asset create txn with meta %v\n", meta) @@ -196,7 +199,9 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf // 2) For each participant account, opt-in to assets of all other participant accounts for addr := range accounts { - fmt.Printf("Opting in assets from account %v\n", addr) + if !cfg.Quiet { + fmt.Printf("Opting in assets from account %v\n", addr) + } addrAccount, addrErr := client.AccountInformation(addr) if addrErr != nil { fmt.Printf("Cannot lookup source account\n") @@ -205,15 +210,21 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf } assetParams := addrAccount.AssetParams - fmt.Printf("Optining in %d assets %+v\n", len(assetParams), assetParams) + if !cfg.Quiet { + fmt.Printf("Optining in %d assets %+v\n", len(assetParams), assetParams) + } // Opt-in Accounts for each asset for k := range assetParams { - fmt.Printf("optin asset %+v\n", k) + if !cfg.Quiet { + fmt.Printf("optin asset %+v\n", k) + } for addr2 := range accounts { if addr != addr2 { - fmt.Printf("Opting in assets to account %v \n", addr2) + if !cfg.Quiet { + fmt.Printf("Opting in assets to account %v \n", addr2) + } _, addrErr2 := client.AccountInformation(addr2) if addrErr2 != nil { fmt.Printf("Cannot lookup optin account\n") @@ -251,7 
+262,9 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf // Step 3) Evenly distribute the assets across all participant accounts for addr := range accounts { - fmt.Printf("Distributing assets from account %v\n", addr) + if !cfg.Quiet { + fmt.Printf("Distributing assets from account %v\n", addr) + } addrAccount, addrErr := client.AccountInformation(addr) if addrErr != nil { fmt.Printf("Cannot lookup source account\n") @@ -260,18 +273,22 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf } assetParams := addrAccount.AssetParams - fmt.Printf("Distributing %d assets\n", len(assetParams)) + if !cfg.Quiet { + fmt.Printf("Distributing %d assets\n", len(assetParams)) + } // Distribute assets to each account for k := range assetParams { - - fmt.Printf("Distributing asset %v \n", k) + if !cfg.Quiet { + fmt.Printf("Distributing asset %v \n", k) + } assetAmt := assetParams[k].Total / uint64(len(accounts)) for addr2 := range accounts { if addr != addr2 { - - fmt.Printf("Distributing assets from %v to %v \n", addr, addr2) + if !cfg.Quiet { + fmt.Printf("Distributing assets from %v to %v \n", addr, addr2) + } tx, sendErr := constructTxn(addr, addr2, cfg.MaxFee, assetAmt, k, client, cfg) if sendErr != nil { @@ -318,31 +335,15 @@ func signAndBroadcastTransaction(accounts map[string]uint64, sender string, tx t return } -func genBigNoOp(numOps uint32) []byte { - var progParts []string - progParts = append(progParts, `#pragma version 2`) - for i := uint32(0); i < numOps/2; i++ { - progParts = append(progParts, `int 1`) - progParts = append(progParts, `pop`) - } - progParts = append(progParts, `int 1`) - progParts = append(progParts, `return`) - progAsm := strings.Join(progParts, "\n") - progBytes, err := logic.AssembleString(progAsm) - if err != nil { - panic(err) - } - return progBytes -} - -func genBigHashes(numHashes int, numPad int, hash string) []byte { +func genBigNoOpAndBigHashes(numOps uint32, numHashes uint32, 
hashSize string) []byte { var progParts []string progParts = append(progParts, `#pragma version 2`) progParts = append(progParts, `byte base64 AA==`) - for i := 0; i < numHashes; i++ { - progParts = append(progParts, hash) + + for i := uint32(0); i < numHashes; i++ { + progParts = append(progParts, hashSize) } - for i := 0; i < numPad/2; i++ { + for i := uint32(0); i < numOps/2; i++ { progParts = append(progParts, `int 1`) progParts = append(progParts, `pop`) } @@ -425,12 +426,21 @@ func genMaxClone(numKeys int) []byte { } func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) (appParams map[uint64]v1.AppParams, err error) { - // get existing apps - account, accountErr := client.AccountInformation(cfg.SrcAccount) - if accountErr != nil { - fmt.Printf("Cannot lookup source account") - err = accountErr - return + + var appAccount v1.Account + for tempAccount := range accounts { + if tempAccount != cfg.SrcAccount { + appAccount, err = client.AccountInformation(tempAccount) + if err != nil { + fmt.Printf("Warning, cannot lookup tempAccount account %s", tempAccount) + return + } + break + } + } + + if !cfg.Quiet { + fmt.Printf("Selected temp account: %s\n", appAccount.Address) } // Get wallet handle token @@ -440,27 +450,30 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig return } - toCreate := int(cfg.NumApp) - len(account.AppParams) + toCreate := int(cfg.NumApp) // create apps in srcAccount for i := 0; i < toCreate; i++ { var tx transactions.Transaction // generate app program with roughly some number of operations - prog := genBigNoOp(cfg.AppProgOps) + prog := genBigNoOpAndBigHashes(cfg.AppProgOps, cfg.AppProgHashs, cfg.AppProgHashSize) + if !cfg.Quiet { + fmt.Printf("generated program: \n%s\n", prog) + } globSchema := basics.StateSchema{NumByteSlice: 64} locSchema := basics.StateSchema{} tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil) if 
err != nil { fmt.Printf("Cannot create app txn\n") - return + panic(err) } - tx, err = client.FillUnsignedTxTemplate(cfg.SrcAccount, 0, 0, cfg.MaxFee, tx) + tx, err = client.FillUnsignedTxTemplate(appAccount.Address, 0, 0, cfg.MaxFee, tx) if err != nil { fmt.Printf("Cannot fill app creation txn\n") - return + panic(err) } // Ensure different txids @@ -486,15 +499,15 @@ func prepareApps(accounts map[string]uint64, client libgoal.Client, cfg PpConfig fmt.Printf("Create a new app: txid=%s\n", txid) } - accounts[cfg.SrcAccount] -= tx.Fee.Raw + accounts[appAccount.Address] -= tx.Fee.Raw } + var account v1.Account // get these apps for { - account, accountErr = client.AccountInformation(cfg.SrcAccount) - if accountErr != nil { - fmt.Printf("Cannot lookup source account") - err = accountErr + account, err = client.AccountInformation(appAccount.Address) + if err != nil { + fmt.Printf("Warning, cannot lookup source account") return } if len(account.AppParams) >= int(cfg.NumApp) { diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go index 7941f7d278..40b61a0776 100644 --- a/shared/pingpong/config.go +++ b/shared/pingpong/config.go @@ -54,7 +54,10 @@ type PpConfig struct { MinAccountAsset uint64 NumApp uint32 AppProgOps uint32 + AppProgHashs uint32 + AppProgHashSize string Rekey bool + MaxRuntime time.Duration } // DefaultConfig object for Ping Pong @@ -78,7 +81,10 @@ var DefaultConfig = PpConfig{ MinAccountAsset: 10000000, NumApp: 0, AppProgOps: 0, + AppProgHashs: 0, + AppProgHashSize: "sha256", Rekey: false, + MaxRuntime: 0, } // LoadConfigFromFile reads and loads Ping Pong configuration diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index b266e6a5f0..266d87a4e6 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -25,7 +25,7 @@ import ( "time" "github.com/algorand/go-algorand/crypto" - v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1" + 
"github.com/algorand/go-algorand/daemon/algod/api/spec/v1" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/libgoal" @@ -40,61 +40,77 @@ func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]u return } + wallet, walletErr := ac.GetUnencryptedWalletHandle() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "unable to access wallet %v\n", walletErr) + err = walletErr + return + } if cfg.NumAsset > 0 { // zero out max amount for asset transactions cfg.MaxAmt = 0 - wallet, walletErr := ac.GetUnencryptedWalletHandle() + var assetAccounts map[string]uint64 + assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts) if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "unable to access wallet %v\n", walletErr) - err = walletErr + _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err) return } - fmt.Printf("Generating %v new accounts for asset transfer test\n", cfg.NumPartAccounts) - // remove existing accounts except for src account - for k := range accounts { - if k != cfg.SrcAccount { - delete(accounts, k) - } - } - // create new accounts for asset testing - assetAccounts := make(map[string]uint64) - assetAccounts, err = generateAccounts(ac, assetAccounts, cfg.NumPartAccounts-1, wallet) - for addr := range assetAccounts { - fmt.Printf("generated account %v\n", addr) + assetParams, err = prepareAssets(assetAccounts, ac, cfg) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err) + return } - for k := range assetAccounts { - accounts[k] = assetAccounts[k] + if !cfg.Quiet { + for addr := range accounts { + fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr]) + } } - err = fundAccounts(accounts, ac, cfg) + } else if cfg.NumApp > 0 { + + var appAccounts map[string]uint64 + appAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts) if err != nil { - _, _ = 
fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err) return } - - assetParams, err = prepareAssets(assetAccounts, ac, cfg) + appParams, err = prepareApps(appAccounts, ac, cfg) if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err) return } - - for k := range assetAccounts { - accounts[k] = assetAccounts[k] + if !cfg.Quiet { + for addr := range accounts { + fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr]) + } } - } else if cfg.NumApp > 0 { - appParams, err = prepareApps(accounts, ac, cfg) + } else { + err = fundAccounts(accounts, ac, cfg) if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err) return } } - for addr := range accounts { - fmt.Printf("**** participant account %v\n", addr) + return +} + +func prepareNewAccounts(client libgoal.Client, cfg PpConfig, wallet []byte, accounts map[string]uint64) (newAccounts map[string]uint64, err error) { + // remove existing accounts except for src account + for k := range accounts { + if k != cfg.SrcAccount { + delete(accounts, k) + } } + // create new accounts for testing + newAccounts = make(map[string]uint64) + newAccounts, err = generateAccounts(client, newAccounts, cfg.NumPartAccounts-1, wallet) - err = fundAccounts(accounts, ac, cfg) + for k := range newAccounts { + accounts[k] = newAccounts[k] + } + err = fundAccounts(accounts, client, cfg) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err) return @@ -107,6 +123,11 @@ func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]u func computeAccountMinBalance(cfg PpConfig) (requiredBalance uint64) { const minActiveAccountBalance uint64 = 100000 // min balance for any active account + if cfg.NumApp > 0 { + requiredBalance = (cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*10) * 2 + fmt.Printf("required min balance for app accounts: %d\n", 
requiredBalance) + return + } var fee uint64 = 1000 if cfg.MinFee > fee { fee = cfg.MinFee @@ -148,18 +169,26 @@ func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfi fmt.Printf("adjusting account balance to %d\n", minFund) for addr, balance := range accounts { - fmt.Printf("adjusting balance of account %v\n", addr) + if !cfg.Quiet { + fmt.Printf("adjusting balance of account %v\n", addr) + } if balance < minFund { toSend := minFund - balance if srcFunds <= toSend { return fmt.Errorf("source account %s has insufficient funds %d - needs %d", cfg.SrcAccount, srcFunds, toSend) } srcFunds -= toSend - _, err := client.SendPaymentFromUnencryptedWallet(cfg.SrcAccount, addr, fee, toSend, nil) + if !cfg.Quiet { + fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend) + } + _, err := sendPaymentFromUnencryptedWallet(client, cfg.SrcAccount, addr, fee, toSend, nil) if err != nil { return err } accounts[addr] = minFund + if !cfg.Quiet { + fmt.Printf("account balance for key %s is %d\n", addr, accounts[addr]) + } totalSent++ throttleTransactionRate(startTime, cfg, totalSent) @@ -168,6 +197,18 @@ func fundAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfi return nil } +func sendPaymentFromUnencryptedWallet(client libgoal.Client, from, to string, fee, amount uint64, note []byte) (transactions.Transaction, error) { + wh, err := client.GetUnencryptedWalletHandle() + if err != nil { + return transactions.Transaction{}, err + } + // generate a random lease to avoid duplicate transaction failures + var lease [32]byte + crypto.RandBytes(lease[:]) + + return client.SendPaymentFromWalletWithLease(wh, nil, from, to, fee, amount, note, "", lease, 0, 0) +} + func refreshAccounts(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) error { for addr := range accounts { amount, err := client.GetBalance(addr) @@ -221,6 +262,10 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin } else { 
runTime = 10000 * time.Hour // Effectively 'forever' } + var endTime time.Time + if cfg.MaxRuntime > 0 { + endTime = time.Now().Add(cfg.MaxRuntime) + } restTime := cfg.RestTime refreshTime := time.Now().Add(cfg.RefreshTime) @@ -234,6 +279,11 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin var totalSent, totalSucceeded uint64 for !time.Now().After(stopTime) { + if cfg.MaxRuntime > 0 && time.Now().After(endTime) { + fmt.Printf("Terminating after max run time of %.f seconds\n", cfg.MaxRuntime.Seconds()) + return + } + minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2 fromList := listSufficientAccounts(accounts, minimumAmount, cfg.SrcAccount) // in group tests txns are sent back and forth, so both parties need funds @@ -358,6 +408,9 @@ func sendFromTo( sentCount++ _, sendErr = client.BroadcastTransaction(stxn) + if sendErr != nil { + fmt.Printf("Warning, cannot broadcast txn, %s\n", sendErr) + } } else { // Generate txn group diff --git a/test/e2e-go/cli/algod/expect/algodTelemetryLocationTest.exp b/test/e2e-go/cli/algod/expect/algodTelemetryLocationTest.exp index d25628338a..72d84bfec2 100644 --- a/test/e2e-go/cli/algod/expect/algodTelemetryLocationTest.exp +++ b/test/e2e-go/cli/algod/expect/algodTelemetryLocationTest.exp @@ -2,8 +2,6 @@ set err 0 log_user 1 - - if { [catch { source algodExpectCommon.exp @@ -18,14 +16,11 @@ if { [catch { #allows script to be run outside of go context exec mkdir -p $TEST_PRIMARY_NODE_DIR - - exec goal node stop -d $TEST_PRIMARY_NODE_DIR exec rm -f $TEST_PRIMARY_NODE_DIR/logging.config ::Algod::ReadTelemetry $TEST_PRIMARY_NODE_DIR - exec rm -d -r -f $TEST_ALGO_DIR puts "Basic Algod Test Successful" exit 0 diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp index f09ec8d0a9..476546da90 100755 --- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp +++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp @@ 
-71,8 +71,6 @@ if { [catch { set ::GLOBAL_TEST_ROOT_DIR $TEST_ROOT_DIR set ::GLOBAL_NETWORK_NAME $NETWORK_NAME - set ::env(ALGOSMALLLAMBDAMSEC) 500 - # Start the Primary Node ::AlgorandGoal::StartNode $TEST_ROOT_DIR/Primary diff --git a/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp b/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp new file mode 100644 index 0000000000..3bba5b8776 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/goalAccountInfoTest.exp @@ -0,0 +1,159 @@ +#!/usr/bin/expect -f +set err 0 +log_user 1 + +if { [catch { + source goalExpectCommon.exp + set TEST_ALGO_DIR [lindex $argv 0] + set TEST_DATA_DIR [lindex $argv 1] + + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + puts "TEST_DATA_DIR: $TEST_DATA_DIR" + + set timeout 60 + set TIME_STAMP [clock seconds] + + set TEST_ROOT_DIR $TEST_ALGO_DIR/root + set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ + set NETWORK_NAME test_net_expect_$TIME_STAMP + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50Each.json" + set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs" + + # Create network + ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + # Start network + ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ] + puts "Primary Node Address: $PRIMARY_NODE_ADDRESS" + + set PRIMARY_WALLET_NAME unencrypted-default-wallet + + # Determine primary account + set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] + + set EMPTY_EXPECTED "Created Assets: +\t +Held Assets: +\t +Created Apps: +\t +Opted In Apps: +\t" + + # Check info with no assets + puts "goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR" + set EMPTY_ACTUAL [exec goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d 
$TEST_PRIMARY_NODE_DIR] + puts $EMPTY_ACTUAL + + if { $EMPTY_ACTUAL ne $EMPTY_EXPECTED } { + ::AlgorandGoal::Abort "Invalid response for account info. Expected:\n$EMPTY_EXPECTED" + } + + # Create A-Coin + set ACOIN_UNIT_NAME "AC" + ::AlgorandGoal::AssetCreate $PRIMARY_ACCOUNT_ADDRESS $PRIMARY_WALLET_NAME "" 1000 0 "A-Coin" $ACOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR + + # Create B-Coin + set BCOIN_UNIT_NAME "BC" + ::AlgorandGoal::AssetCreate $PRIMARY_ACCOUNT_ADDRESS $PRIMARY_WALLET_NAME "" 1000 0 "" $BCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR + + # Create C-Coin + set CCOIN_UNIT_NAME "" + ::AlgorandGoal::AssetCreate $PRIMARY_ACCOUNT_ADDRESS $PRIMARY_WALLET_NAME "" 1000 0 "C-Coin" $CCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR + + # Create D-Coin + set DCOIN_UNIT_NAME "DC" + ::AlgorandGoal::AssetCreate $PRIMARY_ACCOUNT_ADDRESS $PRIMARY_WALLET_NAME "" 1000 2 "D-Coin" $DCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR + + # wait about 2 rounds + set ASSET_WAIT 10 + puts "Wait $ASSET_WAIT for asset creation" + exec sleep $ASSET_WAIT + + set ACOIN_ASSET_ID [::AlgorandGoal::AssetLookup $PRIMARY_ACCOUNT_ADDRESS $ACOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR] + set BCOIN_ASSET_ID [::AlgorandGoal::AssetLookup $PRIMARY_ACCOUNT_ADDRESS $BCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR] + set CCOIN_ASSET_ID [::AlgorandGoal::AssetLookup $PRIMARY_ACCOUNT_ADDRESS $CCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR] + set DCOIN_ASSET_ID [::AlgorandGoal::AssetLookup $PRIMARY_ACCOUNT_ADDRESS $DCOIN_UNIT_NAME $TEST_PRIMARY_NODE_DIR] + + # Freeze D-Coin + ::AlgorandGoal::AssetFreeze $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $PRIMARY_ACCOUNT_ADDRESS $DCOIN_ASSET_ID true $TEST_PRIMARY_NODE_DIR + + # wait about 2 rounds + puts "Wait $ASSET_WAIT for asset freeze" + exec sleep $ASSET_WAIT + + set ASSET_EXPECTED "Created Assets: +\tID $ACOIN_ASSET_ID, A-Coin, supply 1000 $ACOIN_UNIT_NAME +\tID $BCOIN_ASSET_ID, , supply 1000 $BCOIN_UNIT_NAME +\tID $CCOIN_ASSET_ID, C-Coin, supply 1000 units +\tID $DCOIN_ASSET_ID, D-Coin, supply 10.00 
$DCOIN_UNIT_NAME +Held Assets: +\tID $ACOIN_ASSET_ID, A-Coin, balance 1000 $ACOIN_UNIT_NAME +\tID $BCOIN_ASSET_ID, , balance 1000 $BCOIN_UNIT_NAME +\tID $CCOIN_ASSET_ID, C-Coin, balance 1000 units +\tID $DCOIN_ASSET_ID, D-Coin, balance 10.00 $DCOIN_UNIT_NAME (frozen) +Created Apps: +\t +Opted In Apps: +\t" + + # Check info with assets + puts "goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR" + set ASSET_ACTUAL [exec goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR] + puts $ASSET_ACTUAL + + if { $ASSET_ACTUAL ne $ASSET_EXPECTED } { + ::AlgorandGoal::Abort "Invalid response for account info. Expected:\n$ASSET_EXPECTED" + } + + puts "Creating global state app" + set GSTATE_GLOBAL_BYTE_SLICES 10 + set GSTATE_LOCAL_BYTE_SLICES 0 + set GSTATE_APP_ID [::AlgorandGoal::AppCreate $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS ${TEAL_PROGS_DIR}/globwrite.teal "str:value_to_write" $GSTATE_GLOBAL_BYTE_SLICES $GSTATE_LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR] + + puts "Creating local state app" + set LSTATE_GLOBAL_BYTE_SLICES 0 + set LSTATE_LOCAL_BYTE_SLICES 1 + set LSTATE_APP_ID [::AlgorandGoal::AppCreateOnCompletion $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS ${TEAL_PROGS_DIR}/loccheck.teal "str:write" $LSTATE_GLOBAL_BYTE_SLICES $LSTATE_LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR "optin"] + + # wait about 2 rounds + puts "Wait $ASSET_WAIT for app creation" + exec sleep $ASSET_WAIT + + set APP_AND_ASSET_EXPECTED "Created Assets: +\tID $ACOIN_ASSET_ID, A-Coin, supply 1000 $ACOIN_UNIT_NAME +\tID $BCOIN_ASSET_ID, , supply 1000 $BCOIN_UNIT_NAME +\tID $CCOIN_ASSET_ID, C-Coin, supply 1000 units +\tID $DCOIN_ASSET_ID, D-Coin, supply 10.00 $DCOIN_UNIT_NAME +Held Assets: +\tID $ACOIN_ASSET_ID, A-Coin, balance 1000 $ACOIN_UNIT_NAME +\tID $BCOIN_ASSET_ID, , balance 1000 $BCOIN_UNIT_NAME +\tID $CCOIN_ASSET_ID, 
C-Coin, balance 1000 units +\tID $DCOIN_ASSET_ID, D-Coin, balance 10.00 $DCOIN_UNIT_NAME (frozen) +Created Apps: +\tID $GSTATE_APP_ID, global state used 0/0 uints, 1/$GSTATE_GLOBAL_BYTE_SLICES byte slices +\tID $LSTATE_APP_ID, global state used 0/0 uints, 0/$LSTATE_GLOBAL_BYTE_SLICES byte slices +Opted In Apps: +\tID $LSTATE_APP_ID, local state used 0/1 uints, 1/$LSTATE_LOCAL_BYTE_SLICES byte slices" + + # Check info with assets and apps + puts "goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR" + set APP_AND_ASSET_ACTUAL [exec goal account info -w $PRIMARY_WALLET_NAME -a $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR] + puts $APP_AND_ASSET_ACTUAL + + if { $APP_AND_ASSET_ACTUAL ne $APP_AND_ASSET_EXPECTED } { + ::AlgorandGoal::Abort "Invalid response for account info. Expected:\n$APP_AND_ASSET_EXPECTED" + } + + # Shutdown the network + ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR + + puts "Goal Account Info Test Successful" + + exit 0 + +} EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in goalAccountInfoTest: $EXCEPTION" +} diff --git a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp index 25378849f0..cdef4b936d 100644 --- a/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp +++ b/test/e2e-go/cli/goal/expect/goalAppAccountAddressTest.exp @@ -107,8 +107,9 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} { expect { timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" } "*committed in round*" {puts "app call successful"; close} - eof {close; ::AlgorandGoal::Abort "app call failed" } + eof {::AlgorandGoal::Abort "app call failed" } } + ::AlgorandGoal::CheckProcessReturnedCode 1 puts "Checking the results" set EXPECTED_OUTPUT "Account0*$PRIMARY_ACCOUNT_ADDRESS" @@ -124,20 +125,21 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} { expect { timeout { puts timeout; 
::AlgorandGoal::Abort "\n Failed to see expected output" } "*$EXPECTED_OUTPUT*" {puts "Local state read correctly"; close} - eof {close; ::AlgorandGoal::Abort "app read failed" } + eof {::AlgorandGoal::Abort "App read failed. Expected output includes: $EXPECTED_OUTPUT" } } + ::AlgorandGoal::CheckProcessReturnedCode 1 # check the local state of account 2 spawn goal app read --app-id $APP_ID --local --guess-format \ --from $ACCOUNT_2_ADDRESS -w $WALLET_1_NAME -d $TEST_PRIMARY_NODE_DIR - expect { timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" } "Please enter the password for wallet '$WALLET_1_NAME':" {send "$WALLET_1_PASSWORD\r" ; exp_continue} "*$EXPECTED_OUTPUT*" {puts "Local state read correctly"; close} - eof {close; ::AlgorandGoal::Abort "app read failed" } + eof {::AlgorandGoal::Abort "App read failed. Expected output includes: $EXPECTED_OUTPUT" } } - + ::AlgorandGoal::CheckProcessReturnedCode 1 + # call the app with a missing app-account. It should fail puts "Calling goal app call to get the local state params" spawn goal app call --app-id $APP_ID --from $PRIMARY_ACCOUNT_ADDRESS -w $PRIMARY_WALLET_NAME -d $TEST_PRIMARY_NODE_DIR \ @@ -145,10 +147,20 @@ proc goalAppAccountAddress { TEST_ALGO_DIR TEST_DATA_DIR} { --app-account $ACCOUNT_2_ADDRESS \ --app-account $ACCOUNT_4_ADDRESS expect { - timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" } - "*Couldn't broadcast tx with algod: HTTP 400 Bad Request: TransactionPool.Remember: transaction*invalid Accounts index 4*" \ - {puts "Error received successfully "; close} - eof {close; ::AlgorandGoal::Abort "failed to get the expected error" } + timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" } + "*Couldn't broadcast tx with algod: HTTP 400 Bad Request: TransactionPool.Remember: transaction*invalid Accounts index 4*" { + puts "\nError received successfully " + # wait until the eof signal is received + expect { + timeout { 
close; ::AlgorandGoal::Abort "failed to see goal terminating after outputing error message" } + eof { puts "eof received as expected after error message output" } + } + } + eof {::AlgorandGoal::Abort "failed to get the expected error" } + } + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response != 1 || $OS_CODE != 0 || $ERR_CODE != 1} { + ::AlgorandGoal::Abort "failed to get the expected error. Expected ERR_CODE = 1 got ERR_CODE = $ERR_CODE" } # Shutdown the network @@ -171,8 +183,6 @@ if { [catch { goalAppAccountAddress $TEST_ALGO_DIR $TEST_DATA_DIR - exit 0 - } EXCEPTION ] } { ::AlgorandGoal::Abort "ERROR in goalAppAccountAddressTest: $EXCEPTION" } diff --git a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp index baaf242635..d9cff62725 100644 --- a/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp +++ b/test/e2e-go/cli/goal/expect/goalDryrunRestTest.exp @@ -101,6 +101,27 @@ if { [catch { timeout { ::AlgorandGoal::Abort "goal app create timeout" } } + # atomic transfer + set DRREQ_FILE_4 "$TEST_ROOT_DIR/atomic-tran-drreq.msgp" + set AT_TX1_FILE "$TEST_ROOT_DIR/atomic-tran-tx1.mspg" + set AT_TX2_FILE "$TEST_ROOT_DIR/atomic-tran-tx2.mspg" + set AT_COMBINED_FILE "$TEST_ROOT_DIR/atomic-tran-comb.mspg" + set AT_GROUPPED_FILE "$TEST_ROOT_DIR/atomic-tran-group.mspg" + spawn goal clerk send --from $PRIMARY_ACCOUNT_ADDRESS --to $PRIMARY_ACCOUNT_ADDRESS -a 1 --fee 1000 -d $TEST_PRIMARY_NODE_DIR -o $AT_TX1_FILE + expect { + timeout { ::AlgorandGoal::Abort "goal clerk send timeout" } + } + spawn goal app create --creator $PRIMARY_ACCOUNT_ADDRESS --approval-prog $TEAL_PROG_FILE --clear-prog $TEAL_PROG_FILE --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 -d $TEST_PRIMARY_NODE_DIR -o $AT_TX2_FILE + expect { + timeout { ::AlgorandGoal::Abort "goal app create timeout" } + } + exec cat $AT_TX1_FILE $AT_TX2_FILE > $AT_COMBINED_FILE + 
exec goal clerk group -i $AT_COMBINED_FILE -o $AT_GROUPPED_FILE + spawn goal clerk dryrun -t $AT_GROUPPED_FILE -d $TEST_PRIMARY_NODE_DIR -o $DRREQ_FILE_4 --dryrun-dump --dryrun-dump-format=msgp + expect { + timeout { ::AlgorandGoal::Abort "goal clerk dryrun timeout" } + } + # invalid app set INVALID_FILE_1 "$TEST_ROOT_DIR/invalid-app.json" set INVALID_FILE_1_ID [open $INVALID_FILE_1 "w"] @@ -111,6 +132,8 @@ if { [catch { TestGoalDryrun $DRREQ_FILE_1 $TEST_PRIMARY_NODE_DIR TestGoalDryrun $DRREQ_FILE_2 $TEST_PRIMARY_NODE_DIR TestGoalDryrun $DRREQ_FILE_3 $TEST_PRIMARY_NODE_DIR + TestGoalDryrun $DRREQ_FILE_4 $TEST_PRIMARY_NODE_DIR + TestGoalDryrunExitCode $DRREQ_FILE_3 $TEST_PRIMARY_NODE_DIR 0 "PASS" TestGoalDryrunExitCode "" $TEST_PRIMARY_NODE_DIR 1 "Cannot read file : open : no such file or directory" TestGoalDryrunExitCode $INVALID_FILE_1 $TEST_PRIMARY_NODE_DIR 1 "dryrun-remote: HTTP 400 Bad Request:" diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index 91272ffba9..96c434e9c8 100755 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -56,6 +56,43 @@ proc ::AlgorandGoal::Abort { ERROR } { exit 1 } +# Utility method to test the process returned value +# Returns 0 when no error code is detected +# When an error code is detected: +# If ABORT = 1 Calls AlgorandGoal::Abort +# if ABORT = 0 Returns 1 OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP +# If SIGHUP is detected, it ignores it. +proc ::AlgorandGoal::CheckProcessReturnedCode {ABORT} { + upvar spawn_id spawn_id + lassign [wait -i $spawn_id] PID SPAWNID OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + + if {$KILLED == "CHILDKILLED"} { + if {$KILL_SIGNAL == "SIGHUP" && $EXP == "hangup"} { + # this is caused by expect close. Will ignore. 
+ return 0 + } + if {$ABORT} { + ::AlgorandGoal::Abort "process killed: $KILL_SIGNAL $EXP" + } + return [list 1 $OS_CODE $ERR_CODE $KILLED $KILL_SIGNAL $EXP] + } + + if {$OS_CODE == -1} { + if {$ABORT} { + ::AlgorandGoal::Abort "OS error code: $ERR_CODE" + } + return [list 1 $OS_CODE $ERR_CODE $KILLED $KILL_SIGNAL $EXP] + } else { + if {$ERR_CODE != 0} { + if {$ABORT} { + ::AlgorandGoal::Abort "porcess returned non-zero value: $ERR_CODE" + } + return [list 1 $OS_CODE $ERR_CODE $KILLED $KILL_SIGNAL $EXP] + } + } + return 0 +} + # Start the node proc ::AlgorandGoal::StartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED "False"} {PEER_ADDRESS ""} } { set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR @@ -163,6 +200,7 @@ proc ::AlgorandGoal::CreateNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR # Start the network proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR TEST_ROOT_DIR } { + set timeout 120 set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR set ::GLOBAL_TEST_ROOT_DIR $TEST_ROOT_DIR set ::GLOBAL_NETWORK_NAME $NETWORK_NAME @@ -414,8 +452,8 @@ proc ::AlgorandGoal::WaitForAccountBalance { WALLET_NAME ACCOUNT_ADDRESS EXPECTE if { $ACCOUNT_BALANCE == $EXPECTED_BALANCE } { puts "Account balance OK: $ACCOUNT_BALANCE"; break } else { - puts "Account balance: '$ACCOUNT_BALANCE' does not match expected balance: '$EXPECTED_BALANCE'" - if { $i >= 10 } then { ::AlgorandGoal::Abort "Account balance $ACCOUNT_BALANCE does not match expected amount: $EXPECTED_BALANCE"; break;} + puts "Account balance: '$ACCOUNT_BALANCE' does not match expected balance: '$EXPECTED_BALANCE', still waiting..." 
+ if { $i >= 20 } then { ::AlgorandGoal::Abort "Account balance '$ACCOUNT_BALANCE' does not match expected amount: '$EXPECTED_BALANCE', waited too long, FAIL"; break;} } } } EXCEPTION ] } { @@ -425,10 +463,10 @@ proc ::AlgorandGoal::WaitForAccountBalance { WALLET_NAME ACCOUNT_ADDRESS EXPECTE } # Create an asset -proc ::AlgorandGoal::AssetCreate { CREATOR WALLET_NAME WALLET_PASSWORD TOTAL_SUPPLY UNIT_NAME TEST_PRIMARY_NODE_DIR } { +proc ::AlgorandGoal::AssetCreate { CREATOR WALLET_NAME WALLET_PASSWORD TOTAL_SUPPLY DECIMALS ASSET_NAME UNIT_NAME TEST_PRIMARY_NODE_DIR } { set timeout 40 if { [ catch { - spawn goal asset create -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME --creator $CREATOR --total $TOTAL_SUPPLY --unitname $UNIT_NAME + spawn goal asset create -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME --creator $CREATOR --total $TOTAL_SUPPLY --unitname $UNIT_NAME --name $ASSET_NAME --decimals $DECIMALS expect { timeout { ::AlgorandGoal::Abort "Timed out create asset" } "Please enter the password for wallet '$WALLET_NAME':" { send "$WALLET_PASSWORD\r"; exp_continue } @@ -466,12 +504,26 @@ proc ::AlgorandGoal::CreateAssetTransfer { FROM_ADDR TO_ADDR ASSET_ID ASSET_AMOU } } +# Freeze asset +proc ::AlgorandGoal::AssetFreeze { WALLET_NAME WALLET_PASSWORD FREEZE_ADDR ACCOUNT_ADDR ASSET_ID FREEZE_VALUE TEST_PRIMARY_NODE_DIR} { + if { [ catch { + spawn goal asset freeze -d $TEST_PRIMARY_NODE_DIR -w $WALLET_NAME --freezer $FREEZE_ADDR --account $ACCOUNT_ADDR --assetid $ASSET_ID --freeze=$FREEZE_VALUE + expect { + timeout { ::AlgorandGoal::Abort "Timed out asset transfer" } + "Please enter the password for wallet '$WALLET_NAME':" { send "$WALLET_PASSWORD\r"; exp_continue } + eof + } + } EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in AssetTransfer: $EXCEPTION" + } +} + # Get asset id proc ::AlgorandGoal::AssetLookup { CREATOR UNIT_NAME TEST_PRIMARY_NODE_DIR } { set timeout 10 if { [ catch { set ASSET_ID "NOT SET" - spawn goal asset info -d $TEST_PRIMARY_NODE_DIR --creator $CREATOR 
--asset $UNIT_NAME + spawn goal asset info -d $TEST_PRIMARY_NODE_DIR --creator $CREATOR --unitname $UNIT_NAME expect { timeout { ::AlgorandGoal::Abort "Timed out asset lookup" } -re {Asset ID:\s+([0-9]+)} {set ASSET_ID $expect_out(1,string); close } @@ -1107,3 +1159,30 @@ proc ::AlgorandGoal::InspectTransactionFile { TRX_FILE } { eof } } + +# Run pingpong test +proc ::AlgorandGoal::RunPingpong {DURATION PINGPONG_OPTIONS TEST_PRIMARY_NODE_DIR} { + set timeout [expr $DURATION + 60] + if { [ catch { + set pingpong_base "pingpong run --duration $DURATION -d $TEST_PRIMARY_NODE_DIR --quiet " + set pingpong_command [concat $pingpong_base $PINGPONG_OPTIONS] + puts "starting pingpong test with command: $pingpong_command" + eval spawn $pingpong_command + expect { + timeout { puts "pingpong test interrupted by timeout, terminating after $timeout seconds" } + -re {Sent (\d+) transactions \((\d+) attempted\).} { + set actual $expect_out(1,string) ; + set attempted $expect_out(2,string) ; + puts "actual: $actual, attempted: $attempted"; + if { $actual != $attempted } then { ::AlgorandGoal::Abort "Pingpong attempted to send $attempted transactions, but actual was $actual"; break;} + exp_continue + } + "Terminating after max run time of" {puts "end of ping pong test"} + eof {::AlgorandGoal::Abort "pingpong terminated unexpectedly: $expect_out(buffer)"} + "Error" {::AlgorandGoal::Abort "error running pingpong: $expect_out(buffer)"} + } + } EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in RunPingpong: $EXCEPTION" + } +} + diff --git a/test/e2e-go/cli/goal/expect/goalFormattingTest.exp b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp new file mode 100644 index 0000000000..281c53c3db --- /dev/null +++ b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp @@ -0,0 +1,55 @@ +#!/usr/bin/expect -f +set err 0 +log_user 1 + +if { [catch { + + source goalExpectCommon.exp + set TEST_ALGO_DIR [lindex $argv 0] + set TEST_DATA_DIR [lindex $argv 1] + + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + + 
set TIME_STAMP [clock seconds] + + # generate invalid transaction file. ( we want to see that goal is ommitting the control characters when printing out the error message ) + exec echo {{ "\u001b[0G\u001b[0K\u001b[33munexpected_key\u001b[0m": 2 }} > $TEST_ALGO_DIR/tx.json + exec cat $TEST_ALGO_DIR/tx.json | msgpacktool -e -b32 > $TEST_ALGO_DIR/tx + exec rm $TEST_ALGO_DIR/tx.json + + set NON_PRINTABLE_CHARS_WARNING 0 + set CANNOT_DECODE_MESSAGE 0 + spawn goal clerk inspect $TEST_ALGO_DIR/tx + expect { + timeout { close; ::AlgorandGoal::Abort "failed to inspect transaction file within timeout" } + {One or more non-printable characters were ommited from the following error message:} { + set NON_PRINTABLE_CHARS_WARNING 1 + exp_continue + } + {Cannot decode transactions from *: msgpack decode error \[pos 33\]: no matching struct field found when decoding stream map with key \[0G\[0K\[33munexpected_key\[0m} { + set CANNOT_DECODE_MESSAGE 1 + exp_continue + } + eof { + if {$CANNOT_DECODE_MESSAGE == 0 || $NON_PRINTABLE_CHARS_WARNING == 0} { + puts "eof received before the expected output " + exit 1 + } + } + } + + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERROR_CODE KILLED KILL_SIGNAL EXP + if {$ERROR_CODE != 1} { + puts "goal was expected to fail with 1 due to invalid transaction file, but returned error code $ERROR_CODE instead" + exit 1 + } + + exec rm $TEST_ALGO_DIR/tx + + puts "Goal Formatting Test Successful" + + exit 0 +} EXCEPTION] } { + puts "ERROR in goalFormattingTest: $EXCEPTION" + exit 1 +} diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp index 2a71aa56d9..ca284ba238 100644 --- a/test/e2e-go/cli/goal/expect/goalNodeTest.exp +++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp @@ -36,6 +36,48 @@ if { [catch { # Stop node ::AlgorandGoal::StopNode $TEST_PRIMARY_NODE_DIR + # Try stopping the node again, should fail + spawn goal node stop -d $TEST_PRIMARY_NODE_DIR + expect { + timeout 
{ close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" } + "^Cannot kill node: no running node in directory '*'" {puts "Node failed successfully"; close} + eof { close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" } + } + + #Try stopping node in invalid directory, should fail + spawn goal node stop -d '' + expect { + timeout { close; ::AlgorandGoal::Abort "Goal Node Fail did not fail as expected" } + "^Cannot kill node: the provided directory '*' does not exist" {puts "Node failed successfully"; close} + eof { close; ::AlgorandGoal::Abort "Goal Node Stop did not fail as expected" } + } + + # "break" the node by replacing it's ledger data files with "broken" ones. + lassign [exec find $TEST_PRIMARY_NODE_DIR -name "ledger.tracker.sqlite"] PRIMARY_TRACKER_DATABASE_FILE + exec find $TEST_PRIMARY_NODE_DIR -name "ledger.tracker.sqlite*" -delete + exec -- echo "1234" > $PRIMARY_TRACKER_DATABASE_FILE + + # try to start the primary node, and observe the expected failure + set timeout 15 + spawn goal node start -d $TEST_PRIMARY_NODE_DIR + expect { + timeout { close; ::AlgorandGoal::Abort "starting node exceeded timeout" } + -re {^Algorand node failed to start: node exited with an error code, check node\.log for more details : exit status 1} { + puts "\nExpected failuire : node failed to start" + # wait until the eof signal is received + expect { + timeout { close; ::AlgorandGoal::Abort "failed to see node terminating after outputing error message" } + eof { puts "eof received as expected after error message output" } + } + } + eof { ::AlgorandGoal::Abort "eof received before the expected output " } + } + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERROR_CODE KILLED KILL_SIGNAL EXP + if {$ERROR_CODE != 1} { + puts "Node was expected to fail with 1 due to invalid ledger file, but returned error code $ERROR_CODE instead" + exit 1 + } + # Shutdown the network ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR 
$TEST_ROOT_DIR diff --git a/test/e2e-go/cli/goal/expect/limitOrderTest.exp b/test/e2e-go/cli/goal/expect/limitOrderTest.exp index 3bacb002b1..a2e4993db7 100644 --- a/test/e2e-go/cli/goal/expect/limitOrderTest.exp +++ b/test/e2e-go/cli/goal/expect/limitOrderTest.exp @@ -89,7 +89,7 @@ if { [catch { # create duckcoin set TOTAL_SUPPLY 1000000000 set UNIT_NAME "duckcoin" - ::AlgorandGoal::AssetCreate $ACCOUNT_1_ADDRESS $WALLET_1_NAME $WALLET_1_PASSWORD $TOTAL_SUPPLY $UNIT_NAME $TEST_PRIMARY_NODE_DIR + ::AlgorandGoal::AssetCreate $ACCOUNT_1_ADDRESS $WALLET_1_NAME $WALLET_1_PASSWORD $TOTAL_SUPPLY 0 "" $UNIT_NAME $TEST_PRIMARY_NODE_DIR # wait about 4 rounds set ASSET_CREATE_WAIT 20 diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp new file mode 100644 index 0000000000..72b212f418 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/pingpongTest.exp @@ -0,0 +1,106 @@ +#!/usr/bin/expect -f +#exp_internal 1 +set err 0 +log_user 1 + +source goalExpectCommon.exp + +set TEST_ALGO_DIR [lindex $argv 0] +set TEST_DATA_DIR [lindex $argv 1] + +proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} { + + set timeout 60 + set TIME_STAMP [clock seconds] + + set TEST_ROOT_DIR $TEST_ALGO_DIR/root_$TIME_STAMP + set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ + set NETWORK_NAME test_net_expect_$TIME_STAMP + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json" + + exec cp $TEST_DATA_DIR/../../gen/devnet/genesis.json $TEST_ALGO_DIR + + # Create network + ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + # Start network + ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ] + puts "Primary Node Address: $PRIMARY_NODE_ADDRESS" + + set PRIMARY_WALLET_NAME unencrypted-default-wallet + + # Determine primary account + set PRIMARY_ACCOUNT_ADDRESS 
[::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] + + # Check the balance of the primary account + set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE" + + ::AlgorandGoal::WaitForRound 1 $TEST_PRIMARY_NODE_DIR + + set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs" + + # Network Setup complete + #---------------------- + + # Run pingpong tests + #---------------------- + + + set pingpong_duration 5 + + set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 5 --minaccount 100000000" + set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(8_bigops_mediumhash) "--appprogops 300 
--appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + + set pingpongArray(10_payment_transaction) "--tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000" + set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000" + set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --rest 0 --refresh 10 --numaccounts 50" + + + foreach index [array names pingpongArray] { + puts "pingpongArray($index): 
$pingpongArray($index)" + ::AlgorandGoal::RunPingpong $pingpong_duration $pingpongArray($index) $TEST_PRIMARY_NODE_DIR + } + + # Shutdown the network + #---------------------- + ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR + + puts "Pinpong Test Successful" + +} + + +if { [catch { + source goalExpectCommon.exp + + puts "starting pinpongTest" + + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + puts "TEST_DATA_DIR: $TEST_DATA_DIR" + + pingpongTest $TEST_ALGO_DIR $TEST_DATA_DIR + + exit 0 + +} EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in pinpongTest: $EXCEPTION" +} diff --git a/test/e2e-go/cli/goal/expect/testInfraTest.exp b/test/e2e-go/cli/goal/expect/testInfraTest.exp new file mode 100644 index 0000000000..308ac59166 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/testInfraTest.exp @@ -0,0 +1,106 @@ +#!/usr/bin/expect -f +#exp_internal 1 +set err 0 +log_user 1 + +source goalExpectCommon.exp + +# This test tests the testing procedure CheckProcessReturnedCode +# When a process crashes, CheckProcessReturnedCode should return 1 +proc checkProcessReturnedCodeTest {} { + # Test the process killed branch + spawn /bin/bash -c "kill -11 $$" + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response == 0} { + puts "expected failure code 1 not 0" + exit 1 + } + if {$KILLED != "CHILDKILLED" || $KILL_SIGNAL != "SIGSEGV" || $EXP != "segmentation violation"} { + puts "expected CHILDKILLED SIGSEGV segmentation violation" + puts "got: $KILLED $KILL_SIGNAL $EXP" + exit 1 + } + + # Test the sighup branch + spawn /bin/bash -c "kill -1 $$" + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response != 0} { + puts "expected 0" + puts "got: $KILLED $KILL_SIGNAL $EXP" + exit 1 + } + + # TODO: test OS_CODE == -1 branch + + # test ERR_CODE != 0 branch + spawn /bin/bash -c "exit 33" + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response 
OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response == 0} { + puts "expected failure code 1 not 0" + exit 1 + } + if {$ERR_CODE != 33} { + puts "expected ERR_CODE 33 got: $ERR_CODE" + exit 1 + } + + # test ERR_CODE == 0 branch + spawn /bin/bash -c "exit 0" + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response != 0} { + puts "expected failure code 0 not $response" + exit 1 + } + + # test close sending sighup + spawn /bin/bash -c "echo 44; sleep 2s; kill -11 $$" + expect { + 44 { + close + } + } + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$response != 0} { + puts "expected 0" + puts "got: $KILLED $KILL_SIGNAL $EXP" + exit 1 + } + + # same, without close. should get to segv + spawn /bin/bash -c "echo 44; sleep 2s; kill -11 $$" + expect { + 44 { + puts "not closing" + } + } + lassign [::AlgorandGoal::CheckProcessReturnedCode 0] response OS_CODE ERR_CODE KILLED KILL_SIGNAL EXP + if {$KILLED != "CHILDKILLED" || $KILL_SIGNAL != "SIGSEGV" || $EXP != "segmentation violation"} { + puts "expected CHILDKILLED SIGSEGV segmentation violation" + puts "got: $KILLED $KILL_SIGNAL $EXP" + exit 1 + } +} + +# When eof is expected, the spawn_id is no longer open +# This test confirms this behavior +proc closeOnEofTest {} { + spawn /bin/bash -c "echo this is some command" + expect { + eof { + if {[catch { + close + } EXCEPTION] } { + if {![string match {spawn_id: spawn id * not open} $EXCEPTION]} { + puts "expected: spawn_id: spawn id expID not open" + puts "got: $EXCEPTION" + exit 1 + } + } + } + } + +} + + +checkProcessReturnedCodeTest +closeOnEofTest diff --git a/test/e2e-go/cli/tealdbg/cdtmock/main.go b/test/e2e-go/cli/tealdbg/cdtmock/main.go new file mode 100644 index 0000000000..90c9fad5bf --- /dev/null +++ b/test/e2e-go/cli/tealdbg/cdtmock/main.go @@ -0,0 +1,131 @@ +// Copyright (C) 2019-2020 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package main + +import ( + "fmt" + + "net/http" + "os" + "strings" + "time" + + "github.com/algorand/websocket" + + "github.com/algorand/go-algorand/cmd/tealdbg/cdt" +) + +type wsClient struct { + conn *websocket.Conn + received bool +} + +func (c *wsClient) Connect(url string) error { + var websocketDialer = websocket.Dialer{ + HandshakeTimeout: 45 * time.Second, + EnableCompression: false, + } + + requestHeader := make(http.Header) + conn, _, err := websocketDialer.Dial(url, requestHeader) + if err != nil { + return err + } + c.conn = conn + return nil +} + +func (c *wsClient) SendJSON(data interface{}) error { + return c.conn.WriteJSON(data) +} + +func (c *wsClient) Receive(buf []byte) (int, error) { + if !c.received { + c.conn.SetReadLimit(2 * 1024 * 1024) + c.received = true + } + _, msg, err := c.conn.ReadMessage() + if err != nil && !strings.HasSuffix(err.Error(), "close 1000 (normal)") { + return 0, err + } + copy(buf, msg) + return len(msg), nil +} + +func (c *wsClient) Close() { + c.conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Now().Add(5*time.Second)) + c.conn.CloseWithoutFlush() +} + +func main() { + if len(os.Args) < 2 { + fmt.Printf("Usage: %s \n", os.Args[0]) + os.Exit(1) + } + url := os.Args[1] + + var 
client wsClient + var err error + data := make([]byte, 1024) + + if err = client.Connect(url); err != nil { + fmt.Printf("Connect error: %v\n", err) + os.Exit(1) + } + + var counter int64 = 1 + req := cdt.ChromeRequest{ID: counter, Method: "Debugger.Enable"} + counter++ + + if err = client.SendJSON(req); err != nil { + fmt.Printf("Send error: %v", err) + os.Exit(1) + } + if _, err = client.Receive(data); err != nil { + fmt.Printf("Recv error: %v", err) + os.Exit(1) + } + fmt.Printf("%s\n", string(data)) + + req = cdt.ChromeRequest{ID: counter, Method: "Runtime.runIfWaitingForDebugger"} + counter++ + + if err = client.SendJSON(req); err != nil { + fmt.Printf("Send error: %v", err) + os.Exit(1) + } + if _, err = client.Receive(data); err != nil { + fmt.Printf("Recv error: %v", err) + os.Exit(1) + } + fmt.Printf("%s\n", string(data)) + + req = cdt.ChromeRequest{ID: counter, Method: "Debugger.resume"} + counter++ + + if err = client.SendJSON(req); err != nil { + fmt.Printf("Send error: %v", err) + os.Exit(1) + } + if _, err = client.Receive(data); err != nil { + fmt.Printf("Recv error: %v", err) + os.Exit(1) + } + fmt.Printf("%s\n", string(data)) + + client.Close() +} diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp new file mode 100644 index 0000000000..d18315cdc8 --- /dev/null +++ b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp @@ -0,0 +1,61 @@ +#!/usr/bin/expect -f +set err 0 +log_user 1 + +if { [catch { + + set TEST_ALGO_DIR [lindex $argv 0] + set timeout 30 + + set TEST_DIR $TEST_ALGO_DIR + exec mkdir -p $TEST_DIR + + # this is simple escrow logic sig txn in form of dryrun request + # it is hardcoded to do not start a network so that speedup the test a bit + set DR_ENCODED_FILE "$TEST_DIR/drreq.base64" + set DR_FILE "$TEST_DIR/drreq.msgp" + exec echo 
"h6hhY2NvdW50c8CkYXBwc8CwbGF0ZXN0LXRpbWVzdGFtcACwcHJvdG9jb2wtdmVyc2lvbqClcm91bmQAp3NvdXJjZXPApHR4bnORgqRsc2lngaFsxAUCIAEAIqN0eG6Ko2FtdM0D6KNmZWXNA+iiZnbOAIouvaNnZW6sdGVzdG5ldC12MS4womdoxCBIY7UYpLPITsgQ8i1PEIHLD3HwWaesIN7GL39w5Qk6IqJsds4AijKlpG5vdGXECNUWZX6OlD7Yo3JjdsQgpyYUTlC0jaNxDE4C5LyEF3fcPjIrI6STeWXGbGv02ISjc25kxCCnJhROULSNo3EMTgLkvIQXd9w+MisjpJN5ZcZsa/TYhKR0eXBlo3BheQ==" > $DR_ENCODED_FILE + exec cat $DR_ENCODED_FILE | base64 --decode > $DR_FILE + + set URL_SPINOFF "" + set URL_WS "" + set PASSED 0 + spawn tealdbg debug -q + expect_background { + timeout { puts "tealdbg debug timed out"; exit 1 } + -re {listening for upcoming dryrun requests at (http://[.a-z0-9:/]+)} { set URL_SPINOFF $expect_out(1,string); } + } + + # wait until URL is set or timeout + set it 0 + while { $it < 10 && $URL_SPINOFF == "" } { + set it [expr {$it + 1}] + sleep 1 + } + if { $URL_SPINOFF == "" } { + puts "ERROR: SPINOFF URL is not set after timeout" + exit 1 + } + + spawn curl -X POST $URL_SPINOFF -H'Content-Type: octet/stream' --data-binary @$DR_FILE + expect { + timeout { puts "curl timed out"; exit 1 } + -re {(ws://[.a-z0-9:/]+)} { set URL_WS $expect_out(1,string); } + } + + spawn cdtmock $URL_WS + expect { + timeout { puts "cdt-mock debug timed out"; exit 1 } + -re {Debugger.paused} { set PASSED 1; } + eof { catch wait result; if { [lindex $result 3] == 0 } { puts "Expected non-zero exit code"; exit [lindex $result 3] } } + } + + if { $PASSED == 0 } { + puts "ERROR: have not found 'Debugger.paused' in cdtmock output" + exit 1 + } + +} EXCEPTION ] } { + puts "ERROR in teadbgTest: $EXCEPTION" + exit 1 +} diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp new file mode 100644 index 0000000000..57b0fb4052 --- /dev/null +++ b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp @@ -0,0 +1,50 @@ +#!/usr/bin/expect -f +set err 0 +log_user 1 + +if { [catch { + + set TEST_ALGO_DIR [lindex $argv 0] + set timeout 30 + + set TEST_DIR 
$TEST_ALGO_DIR + exec mkdir -p $TEST_DIR + + set TEAL_PROG_FILE "$TEST_DIR/trivial.teal" + exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE + + set URL "" + set PASSED 0 + spawn tealdbg debug -v $TEAL_PROG_FILE + expect_background { + timeout { puts "tealdbg debug timed out"; exit 1 } + -re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); } + } + + # wait until URL is set or timeout + set it 0 + while { $it < 10 && $URL == "" } { + set it [expr {$it + 1}] + sleep 1 + } + if { $URL == "" } { + puts "ERROR: URL is not set after timeout" + exit 1 + } + + spawn cdtmock $URL + expect { + timeout { puts "cdt-mock debug timed out"; exit 1 } + -re {Debugger.paused} { set PASSED 1; } + eof { catch wait result; if { [lindex $result 3] == 0 } { puts "Expected non-zero exit code"; exit [lindex $result 3] } } + } + + if { $PASSED == 0 } { + puts "ERROR: have not found 'Debugger.paused' in cdtmock output" + exit 1 + } + +} EXCEPTION ] } { + puts "ERROR in teadbgTest: $EXCEPTION" + exit 1 +} diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbg_expect_test.go b/test/e2e-go/cli/tealdbg/expect/tealdbg_expect_test.go new file mode 100644 index 0000000000..7dd648dcdc --- /dev/null +++ b/test/e2e-go/cli/tealdbg/expect/tealdbg_expect_test.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . +package expect + +import ( + "testing" + + "github.com/algorand/go-algorand/test/framework/fixtures" +) + +// TestTealdbgWithExpect processes all expect script files with suffix Test.exp within the test/e2e-go/cli/tealdbg/expect directory +func TestTealdbgWithExpect(t *testing.T) { + et := fixtures.MakeExpectTest(t) + et.Run() +} diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 3ade3d372e..16de802892 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -19,7 +19,6 @@ package catchup import ( "fmt" "net/http" - "os" "os/exec" "path/filepath" "runtime" @@ -86,11 +85,6 @@ func TestBasicCatchpointCatchup(t *testing.T) { a := require.New(t) log := logging.TestingLog(t) - if runtime.GOARCH == "amd64" { - // amd64 platforms are generally quite capable, so exceletate the round times to make the test run faster. - os.Setenv("ALGOSMALLLAMBDAMSEC", "500") - } - // Overview of this test: // Start a two-node network (primary has 100%, secondary has 0%) // Nodes are having a consensus allowing balances history of 32 rounds and transaction history of 33 rounds. @@ -110,6 +104,12 @@ func TestBasicCatchpointCatchup(t *testing.T) { catchpointCatchupProtocol.MaxBalLookback = 2 * catchpointCatchupProtocol.SeedLookback * catchpointCatchupProtocol.SeedRefreshInterval // 32 catchpointCatchupProtocol.MaxTxnLife = 33 + if runtime.GOARCH == "amd64" { + // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster. 
+ catchpointCatchupProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second + catchpointCatchupProtocol.AgreementFilterTimeout = 1 * time.Second + } + consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol var fixture fixtures.RestClientFixture diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go index 7827a1f141..56fdd44244 100644 --- a/test/e2e-go/features/transactions/accountv2_test.go +++ b/test/e2e-go/features/transactions/accountv2_test.go @@ -17,9 +17,9 @@ package transactions import ( - "os" "path/filepath" "testing" + "time" "github.com/stretchr/testify/require" @@ -38,11 +38,9 @@ func TestAccountInformationV2(t *testing.T) { var fixture fixtures.RestClientFixture proto, ok := config.Consensus[protocol.ConsensusFuture] a.True(ok) - os.Setenv("ALGOSMALLLAMBDAMSEC", "200") + proto.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond + proto.AgreementFilterTimeout = 400 * time.Millisecond fixture.SetConsensus(config.ConsensusProtocols{protocol.ConsensusFuture: proto}) - defer func() { - os.Unsetenv("ALGOSMALLLAMBDAMSEC") - }() fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) defer fixture.Shutdown() @@ -66,7 +64,7 @@ func TestAccountInformationV2(t *testing.T) { // Fund the manager, so it can issue transactions later on _, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil) a.NoError(err) - client.WaitForRound(round + 2) + client.WaitForRound(round + 4) // There should be no apps to start with ad, err := client.AccountData(creator) diff --git a/test/e2e-go/features/transactions/transactionPool_test.go b/test/e2e-go/features/transactions/transactionPool_test.go index fbcb272983..2103bb25dd 100644 --- a/test/e2e-go/features/transactions/transactionPool_test.go +++ b/test/e2e-go/features/transactions/transactionPool_test.go @@ -39,9 +39,8 @@ func TestTransactionPoolOrderingAndClearing(t *testing.T) { // stop 
the other node in this network so that no new blocks are produced otherNode, err := fixture.GetNodeController("Node") r.NoError(err, "should be able to get other node's controller") - alreadyStopped, err := otherNode.StopAlgod() + err = otherNode.StopAlgod() r.NoError(err, "should be able to stop other node") - r.False(alreadyStopped, "other node should have been running when it was stopped") // get the round that the network was stopped on, it will be used when the network restarts curStatus, _ := c.Status() stoppedRound := curStatus.LastRound @@ -126,9 +125,8 @@ func TestTransactionPoolExponentialFees(t *testing.T) { // stop the other node in this network so that no new blocks are produced otherNode, err := fixture.GetNodeController("Node") r.NoError(err, "should be able to get other node's controller") - alreadyStopped, err := otherNode.StopAlgod() + err = otherNode.StopAlgod() r.NoError(err, "should be able to stop other node") - r.False(alreadyStopped, "other node should have been running when it was stopped") // put transactions in the pool - they cannot be removed from the pool while the node is stopped transactionPoolSize := 50000 sourceAccount, err := fixture.GetRichestAccount() diff --git a/test/e2e-go/perf/basic_test.go b/test/e2e-go/perf/basic_test.go index e275740118..78370c8614 100644 --- a/test/e2e-go/perf/basic_test.go +++ b/test/e2e-go/perf/basic_test.go @@ -51,7 +51,7 @@ func queuePayments(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, q <-chan } fmt.Printf("Error broadcasting transaction: %v\n", err) - time.Sleep(2 * config.Protocol.SmallLambda) + time.Sleep(config.Consensus[protocol.ConsensusCurrentVersion].AgreementFilterTimeout) } } diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go index fa1ca6943d..2717fae961 100644 --- a/test/e2e-go/upgrades/application_support_test.go +++ b/test/e2e-go/upgrades/application_support_test.go @@ -17,8 +17,6 @@ package upgrades import ( - "fmt" 
- "os" "path/filepath" "testing" "time" @@ -67,18 +65,7 @@ func makeApplicationUpgradeConsensus(t *testing.T) (appConsensus config.Consensu // to a version that supports applications. It verify that prior to supporting applications, the node would not accept // any application transaction and after the upgrade is complete, it would support that. func TestApplicationsUpgradeOverREST(t *testing.T) { - // set the small lambda to 500 for the duration of this test. - roundTimeMs := 500 - lambda := os.Getenv("ALGOSMALLLAMBDAMSEC") - os.Setenv("ALGOSMALLLAMBDAMSEC", fmt.Sprintf("%d", roundTimeMs)) - defer func() { - if lambda == "" { - os.Unsetenv("ALGOSMALLLAMBDAMSEC") - } else { - os.Setenv("ALGOSMALLLAMBDAMSEC", lambda) - } - }() - + smallLambdaMs := 500 consensus := makeApplicationUpgradeConsensus(t) var fixture fixtures.RestClientFixture @@ -177,7 +164,7 @@ int 1 require.NoError(t, err) require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) - time.Sleep(time.Duration(roundTimeMs) * time.Millisecond) + time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } @@ -291,18 +278,7 @@ int 1 // to a version that supports applications. It verify that prior to supporting applications, the node would not accept // any application transaction and after the upgrade is complete, it would support that. func TestApplicationsUpgradeOverGossip(t *testing.T) { - // set the small lambda to 500 for the duration of this test. 
- roundTimeMs := 500 - lambda := os.Getenv("ALGOSMALLLAMBDAMSEC") - os.Setenv("ALGOSMALLLAMBDAMSEC", fmt.Sprintf("%d", roundTimeMs)) - defer func() { - if lambda == "" { - os.Unsetenv("ALGOSMALLLAMBDAMSEC") - } else { - os.Setenv("ALGOSMALLLAMBDAMSEC", lambda) - } - }() - + smallLambdaMs := 500 consensus := makeApplicationUpgradeConsensus(t) var fixture fixtures.RestClientFixture @@ -436,7 +412,7 @@ int 1 require.NoError(t, err) require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) - time.Sleep(time.Duration(roundTimeMs) * time.Millisecond) + time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go index 88d6d7c0b7..d3263e0717 100644 --- a/test/e2e-go/upgrades/rekey_support_test.go +++ b/test/e2e-go/upgrades/rekey_support_test.go @@ -17,8 +17,6 @@ package upgrades import ( - "fmt" - "os" "path/filepath" "testing" "time" @@ -33,18 +31,7 @@ import ( func TestRekeyUpgrade(t *testing.T) { a := require.New(t) - // set the small lambda to 500 for the duration of this test. 
- roundTimeMs := 500 - lambda := os.Getenv("ALGOSMALLLAMBDAMSEC") - os.Setenv("ALGOSMALLLAMBDAMSEC", fmt.Sprintf("%d", roundTimeMs)) - defer func() { - if lambda == "" { - os.Unsetenv("ALGOSMALLLAMBDAMSEC") - } else { - os.Setenv("ALGOSMALLLAMBDAMSEC", lambda) - } - }() - + smallLambdaMs := 500 consensus := makeApplicationUpgradeConsensus(t) var fixture fixtures.RestClientFixture @@ -108,7 +95,7 @@ func TestRekeyUpgrade(t *testing.T) { require.NoError(t, err) require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) - time.Sleep(time.Duration(roundTimeMs) * time.Millisecond) + time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go index 817324ac46..6cd8f115ba 100644 --- a/test/e2e-go/upgrades/send_receive_upgrade_test.go +++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go @@ -18,7 +18,6 @@ package upgrades import ( "math/rand" - "os" "path/filepath" "testing" "time" @@ -117,6 +116,10 @@ func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtoc } fastUpgradeProtocols[consensusTestFastUpgrade(proto)] = fastParams + + // set the small lambda to 500 for the duration of dependent tests. 
+ fastParams.AgreementFilterTimeout = time.Second + fastParams.AgreementFilterTimeoutPeriod0 = time.Second } return } @@ -124,7 +127,6 @@ func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtoc func testAccountsCanSendMoneyAcrossUpgrade(t *testing.T, templatePath string) { t.Parallel() a := require.New(t) - os.Setenv("ALGOSMALLLAMBDAMSEC", "500") consensus := generateFastUpgradeConsensus() diff --git a/test/framework/fixtures/auctionFixture.go b/test/framework/fixtures/auctionFixture.go index b4e8251996..1d4b9b85c6 100644 --- a/test/framework/fixtures/auctionFixture.go +++ b/test/framework/fixtures/auctionFixture.go @@ -47,6 +47,7 @@ import ( "github.com/algorand/go-algorand/libgoal" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util" ) const ( @@ -221,7 +222,7 @@ func (f *AuctionFixture) Stop(pidFile string) error { return err } - err = syscall.Kill(int(pid), syscall.SIGTERM) + err = util.KillProcess(int(pid), syscall.SIGTERM) if err != nil { f.t.Errorf("Unable to kill PID: %d", pid) return err @@ -235,7 +236,7 @@ func (f *AuctionFixture) Stop(pidFile string) error { } select { case <-waitLong: - return syscall.Kill(int(pid), syscall.SIGKILL) + return util.KillProcess(int(pid), syscall.SIGKILL) case <-time.After(time.Millisecond * 100): } } diff --git a/test/muleCI/Jenkinsfile b/test/muleCI/Jenkinsfile index 361781be91..ff8a41ec42 100644 --- a/test/muleCI/Jenkinsfile +++ b/test/muleCI/Jenkinsfile @@ -1,3 +1,3 @@ @Library('go-algorand-ci') _ -muleCI('test/muleCI/mule.yaml', '0.0.9') \ No newline at end of file +muleCI('test/muleCI/mule.yaml', '0.0.12') diff --git a/test/muleCI/mule.yaml b/test/muleCI/mule.yaml index 4c419f1f33..35fb059adb 100644 --- a/test/muleCI/mule.yaml +++ b/test/muleCI/mule.yaml @@ -1,4 +1,83 @@ +agents: + - name: cicd.ubuntu.amd64 + dockerFilePath: docker/build/cicd.ubuntu.Dockerfile + image: algorand/go-algorand-ci-linux-ubuntu + 
version: scripts/configure_dev-deps.sh + arch: amd64 + env: + - TRAVIS_BRANCH=${GIT_BRANCH} + buildArgs: + - GOLANG_VERSION=`./scripts/get_golang_version.sh` + - ARCH=amd64 + - name: cicd.centos.amd64 + dockerFilePath: docker/build/cicd.centos.Dockerfile + image: algorand/go-algorand-ci-linux-centos + version: scripts/configure_dev-deps.sh + arch: amd64 + env: + - TRAVIS_BRANCH=${GIT_BRANCH} + buildArgs: + - GOLANG_VERSION=`./scripts/get_golang_version.sh` + - ARCH=amd64 + - name: cicd.ubuntu.arm64 + dockerFilePath: docker/build/cicd.ubuntu.Dockerfile + image: algorand/go-algorand-ci-linux-ubuntu + version: scripts/configure_dev-deps.sh + arch: arm64v8 + env: + - TRAVIS_BRANCH=${GIT_BRANCH} + buildArgs: + - GOLANG_VERSION=`./scripts/get_golang_version.sh` + - ARCH=arm64v8 + - name: cicd.alpine.arm + dockerFilePath: docker/build/cicd.alpine.Dockerfile + image: algorand/go-algorand-ci-linux + version: scripts/configure_dev-deps.sh + arch: arm32v6 + env: + - TRAVIS_BRANCH=${GIT_BRANCH} + buildArgs: + - GOLANG_VERSION=`./scripts/get_golang_version.sh` + - ARCH=arm32v6 + - name: docker-ubuntu + dockerFilePath: docker/build/docker.ubuntu.Dockerfile + image: algorand/go-algorand-docker-linux-ubuntu + version: scripts/configure_dev-deps.sh + env: + - TRAVIS_BRANCH=${GIT_BRANCH} + buildArgs: + - GOLANG_VERSION=`./scripts/get_golang_version.sh` + volumes: + - /var/run/docker.sock:/var/run/docker.sock + tasks: + - task: docker.Make + name: build.amd64 + agent: cicd.ubuntu.amd64 + target: ci-build + - task: docker.Make + name: build.arm64 + agent: cicd.ubuntu.arm64 + target: ci-build + - task: docker.Make + name: build.arm + agent: cicd.alpine.arm + target: ci-build + + - task: docker.Make + name: archive.amd64 + agent: cicd.centos.amd64 + target: archive + + - task: docker.Make + name: rpm.amd64 + agent: cicd.centos.amd64 + target: mule-package-rpm + - task: docker.Make + name: deb.amd64 + agent: cicd.ubuntu.amd64 + target: mule-package-deb + # Stash tasks - task: stash.Stash 
name: linux-amd64 @@ -6,7 +85,6 @@ tasks: stashId: ${JENKINS_JOB_CACHE_ID}/linux-amd64 globSpecs: - tmp/node_pkgs/** - - crypto/libs/** - gen/devnet/genesis.json - gen/testnet/genesis.json - gen/mainnet/genesis.json @@ -16,7 +94,6 @@ tasks: stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 globSpecs: - tmp/node_pkgs/** - - crypto/libs/** - gen/devnet/genesis.json - gen/testnet/genesis.json - gen/mainnet/genesis.json @@ -26,22 +103,26 @@ tasks: stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm64 globSpecs: - tmp/node_pkgs/** - - crypto/libs/** - gen/devnet/genesis.json - gen/testnet/genesis.json - gen/mainnet/genesis.json - task: stash.Stash - name: linux-arm32 + name: linux-arm bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm32 + stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm globSpecs: - tmp/node_pkgs/** - - crypto/libs/** - gen/devnet/genesis.json - gen/testnet/genesis.json - gen/mainnet/genesis.json + - task: stash.Stash + name: packages + bucketName: go-algorand-ci-cache + stashId: ${JENKINS_JOB_CACHE_ID}/packages + globSpecs: + - tmp/node_pkgs/** - # Unstash tasks + # Unstash tasks - task: stash.Unstash name: linux-arm64 bucketName: go-algorand-ci-cache @@ -55,145 +136,46 @@ tasks: bucketName: go-algorand-ci-cache stashId: ${JENKINS_JOB_CACHE_ID}/darwin-amd64 - task: stash.Unstash - name: linux-arm32 + name: linux-arm bucketName: go-algorand-ci-cache - stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm32 + stashId: ${JENKINS_JOB_CACHE_ID}/linux-arm + - task: stash.Unstash + name: packages + bucketName: go-algorand-ci-cache + stashId: ${JENKINS_JOB_CACHE_ID}/packages - # Docker tasks - - task: docker.Version - configFilePath: scripts/configure_dev-deps.sh - - task: shell.docker.Ensure - name: centos - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}' - dockerFilePath: docker/build/cicd.centos.Dockerfile - dependencies: docker.Version - - task: shell.docker.Ensure - name: alpine - image: algorand/go-algorand-ci-linux - 
version: '{{ docker.Version.outputs.version }}' - dockerFilePath: docker/build/cicd.alpine.Dockerfile - dependencies: docker.Version - - task: docker.Make - name: build - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-build - - task: docker.Make - name: fulltest - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}' - workDir: /go/src/github.com/algorand/go-algorand - target: fulltest -j4 - task: docker.Make - name: integration-test - docker: - env: - - SHORTTEST=-short - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-integration -j4 - - task: docker.Make - name: archive - docker: - image: algorand/go-algorand-ci-linux - version: '{{ docker.Version.outputs.version }}' - workDir: /go/src/github.com/algorand/go-algorand - target: ci-archive - - # Local Tasks - - task: shell.Make - name: ci-deps build - target: ci-build - - task: shell.Make - name: fulltest - target: fulltest -j4 - - task: shell.Make - name: integration-test - target: ci-integration -j4 - - task: shell.Make - name: archive - target: archive + name: docker-image + agent: docker-ubuntu + target: mule-package-docker jobs: - # Linux amd64 jobs build-linux-amd64: - configs: - arch: amd64 tasks: - - shell.docker.Ensure.centos - - docker.Make.build + - docker.Make.build.amd64 - stash.Stash.linux-amd64 - test-linux-amd64-integration: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos - - stash.Unstash.linux-amd64 - - docker.Make.integration-test - test-linux-amd64-fulltest: - configs: - arch: amd64 - tasks: - - shell.docker.Ensure.centos - - docker.Make.fulltest - - # Darwin amd64 jobs -# build-darwin-amd64: -# configs: -# arch: amd64 -# tasks: -# - shell.Make.build -# - stash.Stash.darwin-amd64 -# 
test-darwin-amd64-integration: -# configs: -# arch: amd64 -# tasks: -# - stash.Unstash.darwin-amd64 -# - shell.Make.integration-test -# test-darwin-amd64-fulltest: -# configs: -# arch: amd64 -# tasks: -# - shell.Make.fulltest - - # Linux arm64 jobs build-linux-arm64: - configs: - arch: arm64v8 tasks: - - shell.docker.Ensure.centos - - docker.Make.build + - docker.Make.build.arm64 - stash.Stash.linux-arm64 - test-linux-arm64-integration: - configs: - arch: arm64v8 - tasks: - - shell.docker.Ensure.centos - - stash.Unstash.linux-arm64 - - docker.Make.integration-test - - # Linux arm32 jobs build-linux-arm32: - configs: - arch: arm32v6 tasks: - - shell.docker.Ensure.alpine - - docker.Make.build - - stash.Stash.linux-arm32 - - # Archive jobs - archive-linux-amd64: - configs: - arch: amd64 + - docker.Make.build.arm + - stash.Stash.linux-arm + + package-linux-amd64: tasks: - - shell.docker.Ensure.centos - stash.Unstash.linux-amd64 - # - stash.Unstash.darwin-amd64 - stash.Unstash.linux-arm64 - - stash.Unstash.linux-arm32 - - docker.Make.archive + - stash.Unstash.linux-arm + - docker.Make.deb.amd64 + - docker.Make.rpm.amd64 + - stash.Stash.packages + archive-linux-amd64: + tasks: + - stash.Unstash.packages + - docker.Make.archive.amd64 + + package-docker: + tasks: + - docker.Make.docker-image diff --git a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh index 6793d6bb53..19314fa8d4 100755 --- a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh +++ b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh @@ -18,7 +18,7 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') # Create an ASA in account ${gcmd} asset create --creator ${ACCOUNT} --name bogocoin --unitname bogo --total 1337 -ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --asset bogo|grep 'Asset ID'|awk '{ print $3 }') +ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname bogo|grep 'Asset ID'|awk '{ print $3 }') # Create app that reads asset balance and 
checks asset details and checks round ROUND=$(goal node status | grep 'Last committed' | awk '{ print $4 }') diff --git a/test/scripts/e2e_subs/e2e-teal.sh b/test/scripts/e2e_subs/e2e-teal.sh index 59517b336c..367e4186d1 100755 --- a/test/scripts/e2e_subs/e2e-teal.sh +++ b/test/scripts/e2e_subs/e2e-teal.sh @@ -18,9 +18,9 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }') -TIMEOUT_ROUND=$((${ROUND} + 7)) +TIMEOUT_ROUND=$((${ROUND} + 14)) -# timeout after 7 rounds +# timeout after 14 rounds python ${GOPATH}/src/github.com/algorand/go-algorand/data/transactions/logic/tlhc.py --from ${ACCOUNT} --to ${ACCOUNTB} --timeout-round ${TIMEOUT_ROUND} > ${TEMPDIR}/tlhc.teal 2> ${TEMPDIR}/tlhc.teal.secret cat ${TEMPDIR}/tlhc.teal diff --git a/test/scripts/e2e_subs/keyreg-teal-test.sh b/test/scripts/e2e_subs/keyreg-teal-test.sh index 0dde49eaab..f5f0ee2719 100755 --- a/test/scripts/e2e_subs/keyreg-teal-test.sh +++ b/test/scripts/e2e_subs/keyreg-teal-test.sh @@ -16,8 +16,8 @@ ACCOUNTA=$(${gcmd} account new|awk '{ print $6 }') ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE= -DUR=4 -PERIOD=4 +DUR=8 +PERIOD=8 EXPIRE=10000 FEE=100000 diff --git a/test/scripts/e2e_subs/limit-swap-test.sh b/test/scripts/e2e_subs/limit-swap-test.sh index 52907b47ff..dd3d6fef90 100755 --- a/test/scripts/e2e_subs/limit-swap-test.sh +++ b/test/scripts/e2e_subs/limit-swap-test.sh @@ -16,7 +16,7 @@ ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ ${gcmd} asset create --creator ${ACCOUNT} --name bogocoin --unitname bogo --total 1000000000000 -ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --asset bogo|grep 'Asset ID'|awk '{ print $3 }') +ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname bogo|grep 'Asset ID'|awk '{ print $3 }') # Asset ID: 5 diff --git 
a/test/scripts/e2e_subs/periodic-teal-test.sh b/test/scripts/e2e_subs/periodic-teal-test.sh index 35ffe62325..8880af1437 100755 --- a/test/scripts/e2e_subs/periodic-teal-test.sh +++ b/test/scripts/e2e_subs/periodic-teal-test.sh @@ -16,13 +16,13 @@ ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') ZERO_ADDRESS=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ LEASE=YmxhaCBibGFoIGxlYXNlIHdoYXRldmVyIGJsYWghISE= -sed s/TMPL_RCV/${ACCOUNTB}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/1000000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/periodic.teal +sed s/TMPL_RCV/${ACCOUNTB}/g < ${GOPATH}/src/github.com/algorand/go-algorand/tools/teal/templates/periodic-payment-escrow.teal.tmpl | sed s/TMPL_PERIOD/5/g | sed s/TMPL_DUR/2/g | sed s/TMPL_AMT/100000/g | sed s/TMPL_LEASE/${LEASE}/g | sed s/TMPL_TIMEOUT/16/g | sed s/TMPL_FEE/10000/g > ${TEMPDIR}/periodic.teal ACCOUNT_PERIODIC=$(${gcmd} clerk compile ${TEMPDIR}/periodic.teal -o ${TEMPDIR}/periodic.tealc|awk '{ print $2 }') ROUND=5 DUR_ROUND=$((${ROUND} + 2)) -${gcmd} clerk send -a 1000000 -t ${ACCOUNTB} --from-program ${TEMPDIR}/periodic.teal --firstvalid ${ROUND} --lastvalid ${DUR_ROUND} -x ${LEASE} -o ${TEMPDIR}/a.tx +${gcmd} clerk send -a 100000 -t ${ACCOUNTB} --from-program ${TEMPDIR}/periodic.teal --firstvalid ${ROUND} --lastvalid ${DUR_ROUND} -x ${LEASE} -o ${TEMPDIR}/a.tx ${gcmd} clerk dryrun -t ${TEMPDIR}/a.tx ${gcmd} clerk send -a 1000000000 -f ${ACCOUNT} -t ${ACCOUNT_PERIODIC} @@ -38,7 +38,7 @@ while [ $sendcount -lt 3 ]; do fi ROUND=$(goal node status | grep 'Last committed block:'|awk '{ print $4 }') DUR_ROUND=$((${ROUND} + 2)) - if ${gcmd} clerk send -a 1000000 -t ${ACCOUNTB} --from-program ${TEMPDIR}/periodic.teal --firstvalid ${ROUND} --lastvalid ${DUR_ROUND} -x ${LEASE}; then + if ${gcmd} clerk send -a 100000 -t 
${ACCOUNTB} --from-program ${TEMPDIR}/periodic.teal --firstvalid ${ROUND} --lastvalid ${DUR_ROUND} -x ${LEASE}; then sendcount=$(($sendcount + 1)) date '+periodic-teal-test sent one at ${ROUND} %Y%m%d_%H%M%S' fi @@ -47,7 +47,7 @@ done BALANCEB=$(${gcmd} account balance -a ${ACCOUNTB}|awk '{ print $1 }') -if [ $BALANCEB -ne 3000000 ]; then +if [ $BALANCEB -ne 300000 ]; then date '+periodic-teal-test FAIL wanted balance=3000000 but got ${BALANCEB} %Y%m%d_%H%M%S' false fi diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh index 037579205f..3f915406b0 100755 --- a/test/scripts/e2e_subs/rest.sh +++ b/test/scripts/e2e_subs/rest.sh @@ -16,6 +16,9 @@ PUB_TOKEN=$(cat "$ALGORAND_DATA"/algod.token) ADMIN_TOKEN=$(cat "$ALGORAND_DATA"/algod.admin.token) NET=$(cat "$ALGORAND_DATA"/algod.net) +PRIMARY_NET=$(cat "$ALGORAND_DATA2"/algod.net) +PRIMARY_ADMIN_TOKEN=$(cat "$ALGORAND_DATA2"/algod.admin.token) + function base_call { curl -o "$3" -w "%{http_code}" -q -s -H "Authorization: Bearer $1" "$NET$2" @@ -112,7 +115,44 @@ function test_assets_endpoint { call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail' } +function pprof_test { + # URL Auth - valid + CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/$PRIMARY_ADMIN_TOKEN/debug/pprof/block") + if [[ "$CODE" != "200" ]]; then + fail_and_exit "Call pprof with valid token" "/urlAuth/:token/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" + fi + + # URL Auth - invalid + CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/invalid_token/debug/pprof/block") + if [[ "$CODE" != "401" ]]; then + fail_and_exit "Call pprof with invalid token" "/urlAuth/invalid_token/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" + fi + + # Header Auth - valid + CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H 
"Authorization: Bearer $PRIMARY_ADMIN_TOKEN") + if [[ "$CODE" != "200" ]]; then + fail_and_exit "Call pprof with valid token" "/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" + fi + + # Header Auth - invalid + CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer invalid_token") + if [[ "$CODE" != "401" ]]; then + fail_and_exit "Call pprof with invalid token" "/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" + fi +} + +function test_genesis_endpoint { + call_and_verify "There should be a genesis endpoint." "/genesis" 200 ' + "id": "v1", + "network": "tbd", + "proto": "future", + "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU" +}' +} + # Run the tests. test_applications_endpoint test_assets_endpoint +pprof_test +test_genesis_endpoint diff --git a/test/testdata/consensus/catchpointtestingprotocol.json b/test/testdata/consensus/catchpointtestingprotocol.json index f6c587cfc2..05c0f2dd9d 100644 --- a/test/testdata/consensus/catchpointtestingprotocol.json +++ b/test/testdata/consensus/catchpointtestingprotocol.json @@ -33,6 +33,8 @@ "RedoCommitteeThreshold": 1768, "DownCommitteeSize": 6000, "DownCommitteeThreshold": 4560, + "AgreementFilterTimeout": 1000000000, + "AgreementFilterTimeoutPeriod0": 1000000000, "FastRecoveryLambda": 300000000000, "FastPartitionRecovery": true, "PaysetCommitFlat": true, diff --git a/test/testdata/deployednettemplates/generate-recipe/generate_network.py b/test/testdata/deployednettemplates/generate-recipe/generate_network.py new file mode 100755 index 0000000000..0804f7d96c --- /dev/null +++ b/test/testdata/deployednettemplates/generate-recipe/generate_network.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +import json +import argparse +import math +import subprocess +import shutil +import os + +def build_network(template): + with open(template) as f: + template_dict = json.load(f) + + template_path = 
os.path.abspath(os.path.dirname(args.template)) + script_path = os.path.dirname(__file__) + topology = build_topology(template_dict) + + gen_dir = f"{template_path}/generated" + if not os.path.isdir(gen_dir): + os.mkdir(gen_dir) + + shutil.copy(f"{script_path}/recipe.json", f"{template_path}/recipe.json") + + with open(f"{template_path}/generated/topology.json", 'w') as topology_file: + json.dump(topology, topology_file, indent=4) + + netgoal_params = build_netgoal_params(template_dict) + build_net(template_path, netgoal_params) + build_genesis(template_path, netgoal_params) + +def build_netgoal_params(template_dict): + instances = template_dict['instances'] + + relay_count = 0 + participating_node_count = 0 + non_participating_node_count = 0 + + for group in template_dict['groups']: + relay_count += getInstanceCount(instances['relays'], group['percent']['relays']) + participating_node_count += getInstanceCount(instances['participatingNodes'], group['percent']['participatingNodes']) + non_participating_node_count += getInstanceCount(instances['nonParticipatingNodes'], group['percent']['nonParticipatingNodes']) + + + relay_config = instances['relays']['config'] + participating_node_config = instances['participatingNodes']['config'] + non_participating_node_config = instances['nonParticipatingNodes']['config'] + + wallets_count = template_dict['network']['wallets'] + nodes_count = template_dict['network']['nodes'] + + return [ + '-w', str(wallets_count), + '-R', str(relay_count), + '-N', str(participating_node_count), + '-H', str(non_participating_node_count), + '-n', str(nodes_count), + '--relay-template', relay_config, + '--node-template', participating_node_config, + '--non-participating-node-template', non_participating_node_config + ] + +def build_net(template_path, netgoal_params): + args = [ + '-t', 'net', + '-o', f"{template_path}/generated/net.json" + ] + args.extend(netgoal_params) + netgoal(args, template_path) + +def build_genesis(template_path, 
netgoal_params): + args = [ + '-t', 'genesis', + '-o', f"{template_path}/generated/genesis.json" + ] + args.extend(netgoal_params) + netgoal(args, template_path) + +def netgoal(args, template_path='.'): + cmd = [ + 'netgoal', 'generate', + '-r', '/dev/null' + ] + cmd.extend(args) + subprocess.run(cmd, cwd=template_path) + +def build_topology(template_dict): + + instances = template_dict['instances'] + groups = template_dict['groups'] + + hosts = build_hosts(instances, groups) + return { + 'Hosts': hosts + } + +def build_hosts(instances, groups): + relays = [] + participating_nodes = [] + non_participating_nodes = [] + + relay_cfg = instances['relays'] + participating_node_cfg = instances['participatingNodes'] + non_participating_node_cfg = instances['nonParticipatingNodes'] + + for group in groups: + for i in range(getInstanceCount(relay_cfg, group['percent']['relays'])): + relays.append({ + "Name": f"R{len(relays) + 1}", + "Group": group['name'], + "Template": f"AWS-{group['region'].upper()}-{relay_cfg['type']}" + }) + for i in range(getInstanceCount(participating_node_cfg, group['percent']['participatingNodes'])): + participating_nodes.append({ + "Name": f"N{len(participating_nodes) + 1}", + "Group": group['name'], + "Template": f"AWS-{group['region'].upper()}-{participating_node_cfg['type']}" + }) + for i in range(getInstanceCount(non_participating_node_cfg, group['percent']['nonParticipatingNodes'])): + non_participating_nodes.append({ + "Name": f"NPN{len(non_participating_nodes) + 1}", + "Group": group['name'], + "Template": f"AWS-{group['region'].upper()}-{non_participating_node_cfg['type']}" + }) + + hosts = [] + hosts.extend(relays) + hosts.extend(participating_nodes) + hosts.extend(non_participating_nodes) + return hosts + +def getInstanceCount(instance, percent): + if (percent == 0): + return 0 + total_instance_count = instance['count'] + instance_count = math.floor(total_instance_count * percent / 100) + return max(instance_count, 1) + + +def 
validate_template(template_dict): + groups = template_dict['groups'] + total_percent = 0 + for group in groups: + total_percent += groups['percent'] + if total_percent != 100: + raise Exception(f"Total percentages of groups expected 100, got {total_percent}") + +parser = argparse.ArgumentParser( + description="", +) + +parser.add_argument( + '-f', + '--template', + help = 'Path to network template', + required=True +) + +args = parser.parse_args() + +if os.path.isfile(args.template): + build_network(args.template) +else: + print(f"Expected --template option to be set with a path to a network template, was {args.template}") + exit(2) diff --git a/test/testdata/deployednettemplates/generate-recipe/recipe.json b/test/testdata/deployednettemplates/generate-recipe/recipe.json new file mode 100644 index 0000000000..24f7b394e0 --- /dev/null +++ b/test/testdata/deployednettemplates/generate-recipe/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"generated/genesis.json", + "NetworkFile":"generated/net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "generated/topology.json" +} diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json index 627ed4772a..66484f38bd 100644 --- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json +++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json @@ -1,5 +1,11 @@ { "Hosts": [ + { + "Name": "AWS-US-WEST-1-c5.xlarge", + "Provider": "AWS", + "Region": "us-west-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-US-WEST-1-Small", "Provider": "AWS", @@ -12,6 +18,12 @@ "Region": "us-west-1", "BaseConfiguration": "c5.4xlarge" }, + { + "Name": "AWS-US-WEST-1-m5d.large", + "Provider": "AWS", + "Region": "us-west-1", + "BaseConfiguration": "m5d.large" + }, { "Name": "AWS-US-WEST-1-m5d.2xl", "Provider": "AWS", @@ -30,6 +42,12 @@ 
"Region": "us-west-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-US-WEST-2-c5.xlarge", + "Provider": "AWS", + "Region": "us-west-2", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-US-WEST-2-Small", "Provider": "AWS", @@ -60,6 +78,12 @@ "Region": "us-west-2", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-US-EAST-1-c5.xlarge", + "Provider": "AWS", + "Region": "us-east-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-US-EAST-1-T2-Large", "Provider": "AWS", @@ -96,6 +120,12 @@ "Region": "us-east-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-US-EAST-2-c5.xlarge", + "Provider": "AWS", + "Region": "us-east-2", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-US-EAST-2-Small", "Provider": "AWS", @@ -126,6 +156,12 @@ "Region": "us-east-2", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-AP-SOUTH-1-c5.xlarge", + "Provider": "AWS", + "Region": "ap-south-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-AP-SOUTH-1-Small", "Provider": "AWS", @@ -156,6 +192,12 @@ "Region": "ap-south-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-AP-SOUTHEAST-1-c5.xlarge", + "Provider": "AWS", + "Region": "ap-southeast-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-AP-SOUTHEAST-1-Small", "Provider": "AWS", @@ -186,6 +228,12 @@ "Region": "ap-southeast-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-AP-SOUTHEAST-2-c5.xlarge", + "Provider": "AWS", + "Region": "ap-southeast-2", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-AP-SOUTHEAST-2-Small", "Provider": "AWS", @@ -216,6 +264,12 @@ "Region": "ap-southeast-2", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-AP-NORTHEAST-1-c5.xlarge", + "Provider": "AWS", + "Region": "ap-northeast-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-AP-NORTHEAST-1-Small", "Provider": "AWS", @@ -228,6 +282,12 @@ "Region": "ap-northeast-1", "BaseConfiguration": "c5.4xlarge" }, + { + "Name": "AWS-AP-NORTHEAST-2-c5.xlarge", + 
"Provider": "AWS", + "Region": "ap-northeast-2", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-AP-NORTHEAST-2-Small", "Provider": "AWS", @@ -240,6 +300,12 @@ "Region": "ap-northeast-2", "BaseConfiguration": "c5.4xlarge" }, + { + "Name": "AWS-EU-CENTRAL-1-c5.xlarge", + "Provider": "AWS", + "Region": "eu-central-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-EU-CENTRAL-1-Small", "Provider": "AWS", @@ -270,6 +336,12 @@ "Region": "eu-central-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-EU-WEST-1-c5.xlarge", + "Provider": "AWS", + "Region": "eu-west-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-EU-WEST-1-Small", "Provider": "AWS", @@ -300,6 +372,12 @@ "Region": "eu-west-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-EU-WEST-2-c5.xlarge", + "Provider": "AWS", + "Region": "eu-west-2", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-EU-WEST-2-Small", "Provider": "AWS", @@ -330,6 +408,12 @@ "Region": "eu-west-2", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-EU-WEST-3-c5.xlarge", + "Provider": "AWS", + "Region": "eu-west-3", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-EU-WEST-3-Small", "Provider": "AWS", @@ -360,6 +444,12 @@ "Region": "eu-west-3", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-EU-NORTH-1-c5.xlarge", + "Provider": "AWS", + "Region": "eu-north-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-EU-NORTH-1-Small", "Provider": "AWS", @@ -372,6 +462,12 @@ "Region": "eu-north-1", "BaseConfiguration": "c5.4xlarge" }, + { + "Name": "AWS-SA-EAST-1-c5.xlarge", + "Provider": "AWS", + "Region": "sa-east-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-SA-EAST-1-Small", "Provider": "AWS", @@ -402,6 +498,12 @@ "Region": "sa-east-1", "BaseConfiguration": "c5d.9xlarge" }, + { + "Name": "AWS-CA-CENTRAL-1-c5.xlarge", + "Provider": "AWS", + "Region": "ca-central-1", + "BaseConfiguration": "c5.xlarge" + }, { "Name": "AWS-CA-CENTRAL-1-m5d.2xl", "Provider": 
"AWS", diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/Makefile b/test/testdata/deployednettemplates/recipes/mainnet-model/Makefile new file mode 100644 index 0000000000..780b5931d9 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/Makefile @@ -0,0 +1,8 @@ +all: network_performance_rules + +network_performance_rules: generate_network_rules.js data/bandwidth.json data/countries.json data/latency.json configs/node.json configs/nonPartNode.json configs/relay.json + node generate_network_rules.js + ../../generate-recipe/generate_network.py --template ./network-tpl.json + +clean: + rm -f network_performance_rules diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/node.json b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/node.json new file mode 100644 index 0000000000..7749558800 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/node.json @@ -0,0 +1,22 @@ +{ + "APIToken": "{{APIToken}}", + "EnableBlockStats": false, + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }", + "AltConfigs": [ + { + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }", + "FractionApply": 0.2 + 
} + ] +} + diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/nonPartNode.json b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/nonPartNode.json new file mode 100644 index 0000000000..3825bb420b --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/nonPartNode.json @@ -0,0 +1,5 @@ +{ + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/configs/relay.json b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/relay.json new file mode 100644 index 0000000000..25bb6b5a26 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/configs/relay.json @@ -0,0 +1,11 @@ +{ + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/bandwidth.json b/test/testdata/deployednettemplates/recipes/mainnet-model/data/bandwidth.json new file mode 100644 index 0000000000..ff688163ab --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/data/bandwidth.json @@ -0,0 +1,698 @@ +[ + [ + "Singapore", + 218.07 + ], + [ + "Hong 
Kong", + 205.69 + ], + [ + "Romania", + 175.39 + ], + [ + "Thailand", + 173.41 + ], + [ + "Switzerland", + 170.67 + ], + [ + "Liechtenstein", + 164.47 + ], + [ + "Monaco", + 164.31 + ], + [ + "South Korea", + 159.98 + ], + [ + "France", + 158.44 + ], + [ + "Hungary", + 156.66 + ], + [ + "United States", + 156.61 + ], + [ + "Macau", + 151.86 + ], + [ + "Denmark", + 151.23 + ], + [ + "Sweden", + 150.73 + ], + [ + "Spain", + 145.73 + ], + [ + "Canada", + 140.40 + ], + [ + "China", + 139.76 + ], + [ + "Norway", + 137.72 + ], + [ + "Luxembourg", + 135.48 + ], + [ + "New Zealand", + 129.97 + ], + [ + "Andorra", + 129.69 + ], + [ + "Chile", + 128.95 + ], + [ + "Japan", + 127.09 + ], + [ + "Taiwan", + 125.46 + ], + [ + "Netherlands", + 120.46 + ], + [ + "United Arab Emirates", + 119.14 + ], + [ + "Portugal", + 117.44 + ], + [ + "Lithuania", + 117.13 + ], + [ + "Kuwait", + 113.12 + ], + [ + "Israel", + 111.32 + ], + [ + "Malta", + 110.67 + ], + [ + "Latvia", + 107.03 + ], + [ + "Poland", + 106.52 + ], + [ + "Germany", + 106.41 + ], + [ + "Finland", + 101.92 + ], + [ + "Panama", + 98.59 + ], + [ + "San Marino", + 97.48 + ], + [ + "Belgium", + 95.90 + ], + [ + "Barbados", + 95.36 + ], + [ + "Qatar", + 90.80 + ], + [ + "Malaysia", + 87.90 + ], + [ + "Ireland", + 86.41 + ], + [ + "Slovakia", + 84.79 + ], + [ + "Moldova", + 81.11 + ], + [ + "Austria", + 80.03 + ], + [ + "Slovenia", + 78.52 + ], + [ + "Russian Federation", + 75.91 + ], + [ + "United Kingdom", + 72.36 + ], + [ + "Estonia", + 71.21 + ], + [ + "Belarus", + 70.84 + ], + [ + "Saudi Arabia", + 70.54 + ], + [ + "Former Czechoslovakia", + 67.49 + ], + [ + "Brazil", + 66.73 + ], + [ + "Italy", + 66.10 + ], + [ + "Serbia", + 64.52 + ], + [ + "Trinidad and Tobago", + 62.00 + ], + [ + "Bulgaria", + 60.83 + ], + [ + "Jordan", + 59.97 + ], + [ + "Ukraine", + 59.13 + ], + [ + "Vietnam", + 55.20 + ], + [ + "Australia", + 54.55 + ], + [ + "Uruguay", + 53.57 + ], + [ + "Grenada", + 48.84 + ], + [ + "Kosovo", + 48.68 + ], + [ + 
"Kazakhstan", + 47.43 + ], + [ + "Montenegro", + 47.17 + ], + [ + "Ghana", + 46.65 + ], + [ + "Argentina", + 44.28 + ], + [ + "Mongolia", + 43.62 + ], + [ + "Saint Vincent and the Grenadines", + 43.21 + ], + [ + "India", + 43.04 + ], + [ + "Kyrgyzstan", + 42.29 + ], + [ + "Mexico", + 42.27 + ], + [ + "Bahamas", + 41.69 + ], + [ + "Paraguay", + 39.99 + ], + [ + "Costa Rica", + 37.72 + ], + [ + "Belize", + 37.39 + ], + [ + "Saint Lucia", + 37.39 + ], + [ + "Croatia", + 37.20 + ], + [ + "Albania", + 37.11 + ], + [ + "Colombia", + 36.63 + ], + [ + "Oman", + 36.63 + ], + [ + "Jamaica", + 35.75 + ], + [ + "Peru", + 35.31 + ], + [ + "Seychelles", + 34.64 + ], + [ + "Bosnia and Herzegovina", + 34.43 + ], + [ + "South Africa", + 33.98 + ], + [ + "Madagascar", + 33.61 + ], + [ + "Tajikistan", + 32.61 + ], + [ + "Laos", + 31.53 + ], + [ + "Egypt", + 31.38 + ], + [ + "North Macedonia", + 31.26 + ], + [ + "Dominica", + 31.20 + ], + [ + "Cyprus", + 30.59 + ], + [ + "Armenia", + 30.49 + ], + [ + "Sri Lanka", + 30.35 + ], + [ + "Uzbekistan", + 29.80 + ], + [ + "Bangladesh", + 29.06 + ], + [ + "Greece", + 28.91 + ], + [ + "Turkey", + 27.95 + ], + [ + "Iraq", + 27.82 + ], + [ + "Georgia", + 26.91 + ], + [ + "Ecuador", + 26.83 + ], + [ + "Dominican Republic", + 25.99 + ], + [ + "Saint Kitts and Nevis", + 25.52 + ], + [ + "Philippines", + 25.34 + ], + [ + "Guyana", + 25.19 + ], + [ + "Cambodia", + 23.63 + ], + [ + "CI", + 22.84 + ], + [ + "Indonesia", + 22.53 + ], + [ + "Azerbaijan", + 22.29 + ], + [ + "Rwanda", + 22.00 + ], + [ + "Maldives", + 21.95 + ], + [ + "Nepal", + 21.91 + ], + [ + "Senegal", + 21.27 + ], + [ + "Gabon", + 20.88 + ], + [ + "Myanmar", + 20.87 + ], + [ + "Cape Verde", + 20.85 + ], + [ + "Iran", + 20.58 + ], + [ + "Western Sahara", + 20.12 + ], + [ + "Togo", + 19.91 + ], + [ + "Mauritius", + 19.80 + ], + [ + "Fiji Islands", + 19.47 + ], + [ + "Morocco", + 19.01 + ], + [ + "Bolivia", + 18.47 + ], + [ + "Liberia", + 18.26 + ], + [ + "Mali", + 17.99 + ], + [ + 
"Somalia", + 17.88 + ], + [ + "Honduras", + 17.71 + ], + [ + "Nicaragua", + 17.36 + ], + [ + "Antigua and Barbuda", + 16.85 + ], + [ + "Congo", + 16.20 + ], + [ + "El Salvador", + 16.18 + ], + [ + "Bhutan", + 16.06 + ], + [ + "Palestine", + 15.52 + ], + [ + "Kenya", + 15.23 + ], + [ + "Tanzania", + 14.96 + ], + [ + "Guatemala", + 14.96 + ], + [ + "Angola", + 14.77 + ], + [ + "Zimbabwe", + 14.74 + ], + [ + "Namibia", + 14.63 + ], + [ + "Zambia", + 14.14 + ], + [ + "Guinea", + 13.88 + ], + [ + "Djibouti", + 13.48 + ], + [ + "Swaziland", + 13.25 + ], + [ + "Haiti", + 13.09 + ], + [ + "Papua New Guinea", + 13.08 + ], + [ + "Nigeria", + 12.61 + ], + [ + "Uganda", + 12.11 + ], + [ + "Sierra Leone", + 11.69 + ], + [ + "Suriname", + 11.39 + ], + [ + "Malawi", + 11.16 + ], + [ + "Burkina Faso", + 11.05 + ], + [ + "Libyan Arab Jamahiriya", + 10.91 + ], + [ + "Cameroon", + 10.27 + ], + [ + "Benin", + 10.24 + ], + [ + "Lebanon", + 10.24 + ], + [ + "The Democratic Republic of Congo", + 9.82 + ], + [ + "Pakistan", + 9.56 + ], + [ + "Tunisia", + 9.52 + ], + [ + "Botswana", + 8.81 + ], + [ + "Ethiopia", + 8.72 + ], + [ + "Mozambique", + 8.61 + ], + [ + "Syria", + 8.25 + ], + [ + "Afghanistan", + 8.20 + ], + [ + "Burundi", + 7.89 + ], + [ + "Gambia", + 7.79 + ], + [ + "Sudan", + 6.26 + ], + [ + "Venezuela", + 6.15 + ], + [ + "Mauritania", + 5.50 + ], + [ + "Cuba", + 5.09 + ], + [ + "Yemen", + 4.35 + ], + [ + "Algeria", + 3.92 + ], + [ + "Turkmenistan", + 3.56 + ] +] \ No newline at end of file diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/countries.json b/test/testdata/deployednettemplates/recipes/mainnet-model/data/countries.json new file mode 100644 index 0000000000..4e9ab04181 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/data/countries.json @@ -0,0 +1,1026 @@ +[ + { + "country": "Afghanistan", + "continent": "Asia Pacific" + }, + { + "country": "Albania", + "continent": "Europe" + }, + { + "country": "Algeria", + 
"continent": "Africa" + }, + { + "country": "American Samoa", + "continent": "Asia Pacific" + }, + { + "country": "Andorra", + "continent": "Europe" + }, + { + "country": "Angola", + "continent": "Africa" + }, + { + "country": "Anguilla", + "continent": "North America" + }, + { + "country": "Antarctica", + "continent": "Antarctica" + }, + { + "country": "Antigua and Barbuda", + "continent": "North America" + }, + { + "country": "Argentina", + "continent": "South America" + }, + { + "country": "Armenia", + "continent": "Asia Pacific" + }, + { + "country": "Aruba", + "continent": "North America" + }, + { + "country": "Australia", + "continent": "Australia" + }, + { + "country": "Austria", + "continent": "Europe" + }, + { + "country": "Azerbaijan", + "continent": "Asia Pacific" + }, + { + "country": "Bahamas", + "continent": "North America" + }, + { + "country": "Bahrain", + "continent": "Asia Pacific" + }, + { + "country": "Bangladesh", + "continent": "Asia Pacific" + }, + { + "country": "Barbados", + "continent": "North America" + }, + { + "country": "Belarus", + "continent": "Europe" + }, + { + "country": "Belgium", + "continent": "Europe" + }, + { + "country": "Belize", + "continent": "North America" + }, + { + "country": "Benin", + "continent": "Africa" + }, + { + "country": "Bermuda", + "continent": "North America" + }, + { + "country": "Bhutan", + "continent": "Asia Pacific" + }, + { + "country": "Bolivia", + "continent": "South America" + }, + { + "country": "Bosnia and Herzegovina", + "continent": "Europe" + }, + { + "country": "Botswana", + "continent": "Africa" + }, + { + "country": "Bouvet Island", + "continent": "Antarctica" + }, + { + "country": "Brazil", + "continent": "South America" + }, + { + "country": "British Indian Ocean Territory", + "continent": "Africa" + }, + { + "country": "Brunei", + "continent": "Asia Pacific" + }, + { + "country": "Bulgaria", + "continent": "Europe" + }, + { + "country": "Burkina Faso", + "continent": "Africa" + }, + { + 
"country": "Burundi", + "continent": "Africa" + }, + { + "country": "Cambodia", + "continent": "Asia Pacific" + }, + { + "country": "Cameroon", + "continent": "Africa" + }, + { + "country": "Canada", + "continent": "North America" + }, + { + "country": "Cape Verde", + "continent": "Africa" + }, + { + "country": "Cayman Islands", + "continent": "North America" + }, + { + "country": "Central African Republic", + "continent": "Africa" + }, + { + "country": "Chad", + "continent": "Africa" + }, + { + "country": "Chile", + "continent": "South America" + }, + { + "country": "China", + "continent": "Asia Pacific" + }, + { + "country": "Christmas Island", + "continent": "Asia Pacific" + }, + { + "country": "Cocos (Keeling) Islands", + "continent": "Asia Pacific" + }, + { + "country": "Colombia", + "continent": "South America" + }, + { + "country": "Comoros", + "continent": "Africa" + }, + { + "country": "Congo", + "continent": "Africa" + }, + { + "country": "Cook Islands", + "continent": "Asia Pacific" + }, + { + "country": "Costa Rica", + "continent": "North America" + }, + { + "country": "Croatia", + "continent": "Europe" + }, + { + "country": "Cuba", + "continent": "North America" + }, + { + "country": "Cyprus", + "continent": "Asia Pacific" + }, + { + "country": "Czech Republic", + "continent": "Europe" + }, + { + "country": "Denmark", + "continent": "Europe" + }, + { + "country": "Djibouti", + "continent": "Africa" + }, + { + "country": "Dominica", + "continent": "North America" + }, + { + "country": "Dominican Republic", + "continent": "North America" + }, + { + "country": "East Timor", + "continent": "Asia Pacific" + }, + { + "country": "Ecuador", + "continent": "South America" + }, + { + "country": "Egypt", + "continent": "Africa" + }, + { + "country": "El Salvador", + "continent": "North America" + }, + { + "country": "England", + "continent": "Europe" + }, + { + "country": "Equatorial Guinea", + "continent": "Africa" + }, + { + "country": "Eritrea", + "continent": 
"Africa" + }, + { + "country": "Estonia", + "continent": "Europe" + }, + { + "country": "Ethiopia", + "continent": "Africa" + }, + { + "country": "Falkland Islands", + "continent": "South America" + }, + { + "country": "Faroe Islands", + "continent": "Europe" + }, + { + "country": "Fiji Islands", + "continent": "Asia Pacific" + }, + { + "country": "Finland", + "continent": "Europe" + }, + { + "country": "France", + "continent": "Europe" + }, + { + "country": "French Guiana", + "continent": "South America" + }, + { + "country": "French Polynesia", + "continent": "Asia Pacific" + }, + { + "country": "French Southern territories", + "continent": "Antarctica" + }, + { + "country": "Gabon", + "continent": "Africa" + }, + { + "country": "Gambia", + "continent": "Africa" + }, + { + "country": "Georgia", + "continent": "Asia Pacific" + }, + { + "country": "Germany", + "continent": "Europe" + }, + { + "country": "Ghana", + "continent": "Africa" + }, + { + "country": "Gibraltar", + "continent": "Europe" + }, + { + "country": "Greece", + "continent": "Europe" + }, + { + "country": "Greenland", + "continent": "North America" + }, + { + "country": "Grenada", + "continent": "North America" + }, + { + "country": "Guadeloupe", + "continent": "North America" + }, + { + "country": "Guam", + "continent": "Asia Pacific" + }, + { + "country": "Guatemala", + "continent": "North America" + }, + { + "country": "Guinea", + "continent": "Africa" + }, + { + "country": "Guinea-Bissau", + "continent": "Africa" + }, + { + "country": "Guyana", + "continent": "South America" + }, + { + "country": "Haiti", + "continent": "North America" + }, + { + "country": "Heard Island and McDonald Islands", + "continent": "Antarctica" + }, + { + "country": "Holy See (Vatican City State)", + "continent": "Europe" + }, + { + "country": "Honduras", + "continent": "North America" + }, + { + "country": "Hong Kong", + "continent": "Asia Pacific" + }, + { + "country": "Hungary", + "continent": "Europe" + }, + { + 
"country": "Iceland", + "continent": "Europe" + }, + { + "country": "India", + "continent": "Asia Pacific" + }, + { + "country": "Indonesia", + "continent": "Asia Pacific" + }, + { + "country": "Iran", + "continent": "Asia Pacific" + }, + { + "country": "Iraq", + "continent": "Asia Pacific" + }, + { + "country": "Ireland", + "continent": "Europe" + }, + { + "country": "Israel", + "continent": "Asia Pacific" + }, + { + "country": "Italy", + "continent": "Europe" + }, + { + "country": "Ivory Coast", + "continent": "Africa" + }, + { + "country": "Jamaica", + "continent": "North America" + }, + { + "country": "Japan", + "continent": "Asia Pacific" + }, + { + "country": "Jordan", + "continent": "Asia Pacific" + }, + { + "country": "Kazakhstan", + "continent": "Asia Pacific" + }, + { + "country": "Kenya", + "continent": "Africa" + }, + { + "country": "Kiribati", + "continent": "Asia Pacific" + }, + { + "country": "Kuwait", + "continent": "Asia Pacific" + }, + { + "country": "Kyrgyzstan", + "continent": "Asia Pacific" + }, + { + "country": "Laos", + "continent": "Asia Pacific" + }, + { + "country": "Latvia", + "continent": "Europe" + }, + { + "country": "Lebanon", + "continent": "Asia Pacific" + }, + { + "country": "Lesotho", + "continent": "Africa" + }, + { + "country": "Liberia", + "continent": "Africa" + }, + { + "country": "Libyan Arab Jamahiriya", + "continent": "Africa" + }, + { + "country": "Liechtenstein", + "continent": "Europe" + }, + { + "country": "Lithuania", + "continent": "Europe" + }, + { + "country": "Luxembourg", + "continent": "Europe" + }, + { + "country": "Macao", + "continent": "Asia Pacific" + }, + { + "country": "North Macedonia", + "continent": "Europe" + }, + { + "country": "Madagascar", + "continent": "Africa" + }, + { + "country": "Malawi", + "continent": "Africa" + }, + { + "country": "Malaysia", + "continent": "Asia Pacific" + }, + { + "country": "Maldives", + "continent": "Asia Pacific" + }, + { + "country": "Mali", + "continent": "Africa" + 
}, + { + "country": "Malta", + "continent": "Europe" + }, + { + "country": "Marshall Islands", + "continent": "Asia Pacific" + }, + { + "country": "Martinique", + "continent": "North America" + }, + { + "country": "Mauritania", + "continent": "Africa" + }, + { + "country": "Mauritius", + "continent": "Africa" + }, + { + "country": "Mayotte", + "continent": "Africa" + }, + { + "country": "Mexico", + "continent": "North America" + }, + { + "country": "Micronesia, Federated States of", + "continent": "Asia Pacific" + }, + { + "country": "Moldova", + "continent": "Europe" + }, + { + "country": "Monaco", + "continent": "Europe" + }, + { + "country": "Mongolia", + "continent": "Asia Pacific" + }, + { + "country": "Montenegro", + "continent": "Europe" + }, + { + "country": "Montserrat", + "continent": "North America" + }, + { + "country": "Morocco", + "continent": "Africa" + }, + { + "country": "Mozambique", + "continent": "Africa" + }, + { + "country": "Myanmar", + "continent": "Asia Pacific" + }, + { + "country": "Namibia", + "continent": "Africa" + }, + { + "country": "Nauru", + "continent": "Asia Pacific" + }, + { + "country": "Nepal", + "continent": "Asia Pacific" + }, + { + "country": "Netherlands", + "continent": "Europe" + }, + { + "country": "Netherlands Antilles", + "continent": "North America" + }, + { + "country": "New Caledonia", + "continent": "Asia Pacific" + }, + { + "country": "New Zealand", + "continent": "Asia Pacific" + }, + { + "country": "Nicaragua", + "continent": "North America" + }, + { + "country": "Niger", + "continent": "Africa" + }, + { + "country": "Nigeria", + "continent": "Africa" + }, + { + "country": "Niue", + "continent": "Asia Pacific" + }, + { + "country": "Norfolk Island", + "continent": "Asia Pacific" + }, + { + "country": "North Korea", + "continent": "Asia Pacific" + }, + { + "country": "Northern Ireland", + "continent": "Europe" + }, + { + "country": "Northern Mariana Islands", + "continent": "Asia Pacific" + }, + { + "country": 
"Norway", + "continent": "Europe" + }, + { + "country": "Oman", + "continent": "Asia Pacific" + }, + { + "country": "Pakistan", + "continent": "Asia Pacific" + }, + { + "country": "Palau", + "continent": "Asia Pacific" + }, + { + "country": "Palestine", + "continent": "Asia Pacific" + }, + { + "country": "Panama", + "continent": "North America" + }, + { + "country": "Papua New Guinea", + "continent": "Asia Pacific" + }, + { + "country": "Paraguay", + "continent": "South America" + }, + { + "country": "Peru", + "continent": "South America" + }, + { + "country": "Philippines", + "continent": "Asia Pacific" + }, + { + "country": "Pitcairn", + "continent": "Asia Pacific" + }, + { + "country": "Poland", + "continent": "Europe" + }, + { + "country": "Portugal", + "continent": "Europe" + }, + { + "country": "Puerto Rico", + "continent": "North America" + }, + { + "country": "Qatar", + "continent": "Asia Pacific" + }, + { + "country": "Reunion", + "continent": "Africa" + }, + { + "country": "Romania", + "continent": "Europe" + }, + { + "country": "Russian Federation", + "continent": "Europe" + }, + { + "country": "Former USSR", + "continent": "Europe" + }, + { + "country": "Azerbaidjan", + "continent": "Europe" + }, + { + "country": "Macau", + "continent": "Asia Pacific" + }, + { + "country": "Svalbard and Jan Mayen Islands", + "continent": "Europe" + }, + { + "country": "Saint Tome (Sao Tome) and Principe", + "continent": "Africa" + }, + { + "country": "Serbia", + "continent": "Europe" + }, + { + "country": "Moldavia", + "continent": "Europe" + }, + { + "country": "CI", + "continent": "Africa" + }, + { + "country": "Rwanda", + "continent": "Africa" + }, + { + "country": "Saint Helena", + "continent": "Africa" + }, + { + "country": "Saint Kitts and Nevis", + "continent": "North America" + }, + { + "country": "Saint Lucia", + "continent": "North America" + }, + { + "country": "Saint Pierre and Miquelon", + "continent": "North America" + }, + { + "country": "Saint Vincent 
and the Grenadines", + "continent": "North America" + }, + { + "country": "Samoa", + "continent": "Asia Pacific" + }, + { + "country": "San Marino", + "continent": "Europe" + }, + { + "country": "Sao Tome and Principe", + "continent": "Africa" + }, + { + "country": "Saudi Arabia", + "continent": "Asia Pacific" + }, + { + "country": "Scotland", + "continent": "Europe" + }, + { + "country": "Senegal", + "continent": "Africa" + }, + { + "country": "Seychelles", + "continent": "Africa" + }, + { + "country": "Sierra Leone", + "continent": "Africa" + }, + { + "country": "Singapore", + "continent": "Asia Pacific" + }, + { + "country": "Former Czechoslovakia", + "continent": "Europe" + }, + { + "country": "Slovakia", + "continent": "Europe" + }, + { + "country": "Taiwan", + "continent": "Asia Pacific" + }, + { + "country": "Kosovo", + "continent": "Europe" + }, + { + "country": "Tadjikistan", + "continent": "Asia Pacific" + }, + { + "country": "Slovenia", + "continent": "Europe" + }, + { + "country": "Solomon Islands", + "continent": "Asia Pacific" + }, + { + "country": "Somalia", + "continent": "Africa" + }, + { + "country": "South Africa", + "continent": "Africa" + }, + { + "country": "South Georgia and the South Sandwich Islands", + "continent": "Antarctica" + }, + { + "country": "South Korea", + "continent": "Asia Pacific" + }, + { + "country": "South Sudan", + "continent": "Africa" + }, + { + "country": "Spain", + "continent": "Europe" + }, + { + "country": "Sri Lanka", + "continent": "Asia Pacific" + }, + { + "country": "Sudan", + "continent": "Africa" + }, + { + "country": "Suriname", + "continent": "South America" + }, + { + "country": "Svalbard and Jan Mayen", + "continent": "Europe" + }, + { + "country": "Swaziland", + "continent": "Africa" + }, + { + "country": "Sweden", + "continent": "Europe" + }, + { + "country": "Switzerland", + "continent": "Europe" + }, + { + "country": "Syria", + "continent": "Asia Pacific" + }, + { + "country": "Tajikistan", + 
"continent": "Asia Pacific" + }, + { + "country": "Tanzania", + "continent": "Africa" + }, + { + "country": "Thailand", + "continent": "Asia Pacific" + }, + { + "country": "The Democratic Republic of Congo", + "continent": "Africa" + }, + { + "country": "Togo", + "continent": "Africa" + }, + { + "country": "Tokelau", + "continent": "Asia Pacific" + }, + { + "country": "Tonga", + "continent": "Asia Pacific" + }, + { + "country": "Trinidad and Tobago", + "continent": "North America" + }, + { + "country": "Tunisia", + "continent": "Africa" + }, + { + "country": "Turkey", + "continent": "Asia Pacific" + }, + { + "country": "Turkmenistan", + "continent": "Asia Pacific" + }, + { + "country": "Turks and Caicos Islands", + "continent": "North America" + }, + { + "country": "Tuvalu", + "continent": "Asia Pacific" + }, + { + "country": "Uganda", + "continent": "Africa" + }, + { + "country": "Ukraine", + "continent": "Europe" + }, + { + "country": "United Arab Emirates", + "continent": "Asia Pacific" + }, + { + "country": "United Kingdom", + "continent": "Europe" + }, + { + "country": "United States", + "continent": "North America" + }, + { + "country": "United States Minor Outlying Islands", + "continent": "Asia Pacific" + }, + { + "country": "Uruguay", + "continent": "South America" + }, + { + "country": "Uzbekistan", + "continent": "Asia Pacific" + }, + { + "country": "Vanuatu", + "continent": "Asia Pacific" + }, + { + "country": "Venezuela", + "continent": "South America" + }, + { + "country": "Vietnam", + "continent": "Asia Pacific" + }, + { + "country": "Virgin Islands, British", + "continent": "North America" + }, + { + "country": "Virgin Islands, U.S.", + "continent": "North America" + }, + { + "country": "Wales", + "continent": "Europe" + }, + { + "country": "Wallis and Futuna", + "continent": "Asia Pacific" + }, + { + "country": "Western Sahara", + "continent": "Africa" + }, + { + "country": "Yemen", + "continent": "Asia Pacific" + }, + { + "country": "Yugoslavia", 
+ "continent": "Europe" + }, + { + "country": "Zambia", + "continent": "Africa" + }, + { + "country": "Zimbabwe", + "continent": "Africa" + } +] \ No newline at end of file diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/data/latency.json b/test/testdata/deployednettemplates/recipes/mainnet-model/data/latency.json new file mode 100644 index 0000000000..ae3f59ea9e --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/data/latency.json @@ -0,0 +1,127 @@ +[ + { + "source": "Europe", + "target": "Europe", + "latency": 16 + }, + { + "source": "Europe", + "target": "North America", + "latency": 39.7 + }, + { + "source": "Europe", + "target": "Australia", + "latency": 135 + }, + { + "source": "Europe", + "target": "Africa", + "latency": 84.5 + }, + { + "source": "Europe", + "target": "Asia Pacific", + "latency": 75.3 + }, + { + "source": "North America", + "target": "Europe", + "latency": 39.7 + }, + { + "source": "North America", + "target": "North America", + "latency": 22 + }, + { + "source": "North America", + "target": "Australia", + "latency": 101.5 + }, + { + "source": "North America", + "target": "Africa", + "latency": 124 + }, + { + "source": "North America", + "target": "Asia Pacific", + "latency": 120 + }, + { + "source": "Australia", + "target": "Europe", + "latency": 135.5 + }, + { + "source": "Australia", + "target": "North America", + "latency": 101.5 + }, + { + "source": "Australia", + "target": "Australia", + "latency": 23 + }, + { + "source": "Australia", + "target": "Africa", + "latency": 216.5 + }, + { + "source": "Australia", + "target": "Asia Pacific", + "latency": 47 + }, + { + "source": "Africa", + "target": "Europe", + "latency": 84.5 + }, + { + "source": "Africa", + "target": "North America", + "latency": 118 + }, + { + "source": "Africa", + "target": "Australia", + "latency": 216 + }, + { + "source": "Africa", + "target": "Africa", + "latency": 70 + }, + { + "source": "Africa", + "target": "Asia 
Pacific", + "latency": 202 + }, + { + "source": "Asia Pacific", + "target": "Europe", + "latency": 75 + }, + { + "source": "Asia Pacific", + "target": "North America", + "latency": 120 + }, + { + "source": "Asia Pacific", + "target": "Australia", + "latency": 47 + }, + { + "source": "Asia Pacific", + "target": "Africa", + "latency": 202 + }, + { + "source": "Asia Pacific", + "target": "Asia Pacific", + "latency": 32 + } +] diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generate_network_rules.js b/test/testdata/deployednettemplates/recipes/mainnet-model/generate_network_rules.js new file mode 100644 index 0000000000..8b8fc4bdf3 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/generate_network_rules.js @@ -0,0 +1,80 @@ +const fs = require('fs'); + +const RELAY_BANDWIDTH = 1000 +const SAME_REGION_RELAY_TO_RELAY_LATENCY = 10 +const CROSS_REGION_NODE_BANDWIDTH_FACTOR = 0.8 + +const countries = JSON.parse(fs.readFileSync('./data/countries.json')) +const countryBandwidths = JSON.parse(fs.readFileSync('./data/bandwidth.json')) +const latencies = JSON.parse(fs.readFileSync('./data/latency.json')) + +const continentToGroup = { + "North America": "us", + "Europe": "eu", + "Asia Pacific": "ap", + "Africa": "af", + "Australia": "au", +} + +var latencyMap = [] +var countryToContinent = [] +var continentBandwidths = [] + +latencies.forEach((latency) => { + if (!latencyMap[latency.source]) { + latencyMap[latency.source] = [] + } + latencyMap[latency.source][latency.target] = latency.latency +}) + +countries.forEach((country) => { + countryToContinent[country.country] = country.continent +}) + +countryBandwidths.forEach((countryBandwidth) => { + const continent = countryToContinent[countryBandwidth[0]] + if (!continent) { + console.log(countryBandwidth) + } + if(Object.keys(continentBandwidths).indexOf(continent) == -1) { + continentBandwidths[continent] = { + bandwidths: [] + } + + } + 
continentBandwidths[continent].bandwidths.push(countryBandwidth[1]) +}) + +const average = (data) => { + var sum = data.reduce(function(sum, value){ + return sum + value; + }, 0); + return sum / data.length +} + +var writer = fs.createWriteStream('./network_performance_rules', { + flags: 'w' +}) + +Object.keys(continentToGroup).forEach((source) => { + Object.keys(continentToGroup).forEach((target) => { + sourceGroup = continentToGroup[source] + targetGroup = continentToGroup[target] + const bandwidth = average(continentBandwidths[source]['bandwidths']) + const latency = latencyMap[source][target] + var relay_to_relay_latency + var node_bandwidth_factor + if (sourceGroup==targetGroup) { + relay_to_relay_latency = SAME_REGION_RELAY_TO_RELAY_LATENCY + node_bandwidth_factor = 1.0 + } else { + relay_to_relay_latency = latency + node_bandwidth_factor = CROSS_REGION_NODE_BANDWIDTH_FACTOR + } + writer.write(`${sourceGroup}-n ${targetGroup}-r ${Math.round(bandwidth*node_bandwidth_factor)} ${Math.round(latency)}\n`) + writer.write(`${sourceGroup}-r ${targetGroup}-n ${RELAY_BANDWIDTH} ${Math.round(latency)}\n`) + writer.write(`${sourceGroup}-r ${targetGroup}-r ${RELAY_BANDWIDTH} ${Math.round(relay_to_relay_latency)}\n`) + }) +}) + +writer.end() diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/genesis.json b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/genesis.json new file mode 100644 index 0000000000..c1f7c184e9 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/genesis.json @@ -0,0 +1,1013 @@ +{ + "NetworkName": "", + "VersionModifier": "", + "ConsensusProtocol": "", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 3000000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 0.5, 
+ "Online": true + }, + { + "Name": "Wallet5", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet6", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet7", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet8", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet9", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet10", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet11", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet12", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet13", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet14", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet15", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet16", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet17", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet18", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet19", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet20", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet21", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet22", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet23", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet24", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet25", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet26", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet27", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet28", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet29", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet30", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet31", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet32", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet33", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet34", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet35", + "Stake": 0.5, + "Online": true + 
}, + { + "Name": "Wallet36", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet37", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet38", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet39", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet40", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet41", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet42", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet43", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet44", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet45", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet46", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet47", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet48", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet49", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet50", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet51", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet52", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet53", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet54", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet55", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet56", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet57", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet58", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet59", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet60", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet61", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet62", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet63", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet64", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet65", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet66", + "Stake": 0.5, + "Online": true + }, + { + 
"Name": "Wallet67", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet68", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet69", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet70", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet71", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet72", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet73", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet74", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet75", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet76", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet77", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet78", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet79", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet80", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet81", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet82", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet83", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet84", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet85", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet86", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet87", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet88", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet89", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet90", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet91", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet92", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet93", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet94", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet95", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet96", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet97", + "Stake": 0.5, + "Online": true + }, + { + "Name": 
"Wallet98", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet99", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet100", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet101", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet102", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet103", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet104", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet105", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet106", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet107", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet108", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet109", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet110", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet111", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet112", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet113", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet114", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet115", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet116", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet117", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet118", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet119", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet120", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet121", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet122", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet123", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet124", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet125", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet126", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet127", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet128", + "Stake": 
0.5, + "Online": false + }, + { + "Name": "Wallet129", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet130", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet131", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet132", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet133", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet134", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet135", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet136", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet137", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet138", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet139", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet140", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet141", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet142", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet143", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet144", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet145", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet146", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet147", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet148", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet149", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet150", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet151", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet152", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet153", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet154", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet155", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet156", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet157", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet158", + "Stake": 0.5, + "Online": 
false + }, + { + "Name": "Wallet159", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet160", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet161", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet162", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet163", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet164", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet165", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet166", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet167", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet168", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet169", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet170", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet171", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet172", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet173", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet174", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet175", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet176", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet177", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet178", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet179", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet180", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet181", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet182", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet183", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet184", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet185", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet186", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet187", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet188", + "Stake": 0.5, + "Online": false + }, + { + 
"Name": "Wallet189", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet190", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet191", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet192", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet193", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet194", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet195", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet196", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet197", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet198", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet199", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet200", + "Stake": 0.5, + "Online": false + } + ], + "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "Comment": "" +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/net.json b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/net.json new file mode 100644 index 0000000000..45e48856b5 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/net.json @@ -0,0 +1,1959 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Group": "", + "Nodes": [ + { + "Name": "relay1", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, 
\"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R2", + "Group": "", + "Nodes": [ + { + "Name": "relay2", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R3", + "Group": "", + "Nodes": [ + { + "Name": "relay3", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R4", + "Group": "", + "Nodes": [ + { + "Name": "relay4", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": 
"{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R5", + "Group": "", + "Nodes": [ + { + "Name": "relay5", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R6", + "Group": "", + "Nodes": [ + { + "Name": "relay6", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, 
\"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R7", + "Group": "", + "Nodes": [ + { + "Name": "relay7", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R8", + "Group": "", + "Nodes": [ + { + "Name": "relay8", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R9", + "Group": "", + "Nodes": [ + { + "Name": "relay9", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + 
"APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R10", + "Group": "", + "Nodes": [ + { + "Name": "relay10", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R11", + "Group": "", + "Nodes": [ + { + "Name": "relay11", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R12", + "Group": "", + "Nodes": [ + { + "Name": "relay12", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R13", + "Group": "", + "Nodes": [ + { + "Name": "relay13", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R14", + "Group": "", + "Nodes": [ + { + "Name": 
"relay14", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R15", + "Group": "", + "Nodes": [ + { + "Name": "relay15", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R16", + "Group": "", + "Nodes": [ + { + "Name": "relay16", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R17", + "Group": "", + "Nodes": [ + { + "Name": "relay17", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R18", + "Group": "", + "Nodes": [ + { + "Name": "relay18", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 
4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R19", + "Group": "", + "Nodes": [ + { + "Name": "relay19", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R20", + "Group": "", + "Nodes": [ + { + "Name": "relay20", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R21", + "Group": "", + "Nodes": [ + { + "Name": "relay21", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + 
"EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R22", + "Group": "", + "Nodes": [ + { + "Name": "relay22", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R23", + "Group": "", + "Nodes": [ + { + "Name": "relay23", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, 
\"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R24", + "Group": "", + "Nodes": [ + { + "Name": "relay24", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R25", + "Group": "", + "Nodes": [ + { + "Name": "relay25", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N1", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + }, + { + "Name": "Wallet26", + "ParticipationOnly": false + }, + { + "Name": "Wallet51", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet76", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N2", + "Group": "", + "Nodes": [ + { + "Name": "node2", + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + }, + { + "Name": "Wallet27", + "ParticipationOnly": false + }, + { + "Name": "Wallet52", + "ParticipationOnly": false + }, + { + "Name": "Wallet77", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N3", + "Group": "", + "Nodes": [ + { + "Name": "node3", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + }, + { + "Name": "Wallet28", + "ParticipationOnly": false + }, + { + "Name": "Wallet53", + "ParticipationOnly": false + }, + { + "Name": "Wallet78", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N4", + "Group": "", + "Nodes": [ + { + "Name": "node4", + "Wallets": [ + { + "Name": "Wallet4", + "ParticipationOnly": false + }, + { + "Name": "Wallet29", + "ParticipationOnly": false + }, + { + "Name": "Wallet54", + "ParticipationOnly": false + }, + { + "Name": "Wallet79", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N5", + "Group": "", + "Nodes": [ + { + "Name": "node5", + "Wallets": [ + { + "Name": "Wallet5", + "ParticipationOnly": false + }, + { + "Name": "Wallet30", + "ParticipationOnly": false + }, + { + "Name": "Wallet55", + "ParticipationOnly": false + }, + { + "Name": "Wallet80", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N6", + "Group": "", + "Nodes": [ + { + "Name": "node6", + "Wallets": [ + { + "Name": "Wallet6", + "ParticipationOnly": false + }, + { + "Name": "Wallet31", + "ParticipationOnly": false + }, + { + "Name": "Wallet56", + "ParticipationOnly": false + }, + { + "Name": "Wallet81", + "ParticipationOnly": false + } + ], + "APIToken": 
"{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N7", + "Group": "", + "Nodes": [ + { + "Name": "node7", + "Wallets": [ + { + "Name": "Wallet7", + "ParticipationOnly": false + }, + { + "Name": "Wallet32", + "ParticipationOnly": false + }, + { + "Name": "Wallet57", + "ParticipationOnly": false + }, + { + "Name": "Wallet82", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N8", + "Group": "", + "Nodes": [ + { + "Name": "node8", + "Wallets": [ + { + "Name": "Wallet8", + "ParticipationOnly": false + }, + { + "Name": "Wallet33", + "ParticipationOnly": false + }, + { + "Name": "Wallet58", + "ParticipationOnly": false + }, + { + "Name": "Wallet83", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N9", + "Group": "", + "Nodes": [ + { + "Name": "node9", + "Wallets": [ + { + "Name": "Wallet9", + "ParticipationOnly": false + }, + { + "Name": "Wallet34", + "ParticipationOnly": false + }, + { + "Name": "Wallet59", + "ParticipationOnly": false + }, + { + "Name": "Wallet84", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N10", + "Group": "", + "Nodes": [ + { + "Name": "node10", + "Wallets": [ + { + "Name": "Wallet10", + "ParticipationOnly": false + }, + { + "Name": "Wallet35", + "ParticipationOnly": false + }, + { + "Name": "Wallet60", + "ParticipationOnly": false + }, + { + "Name": "Wallet85", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N11", + "Group": "", + 
"Nodes": [ + { + "Name": "node11", + "Wallets": [ + { + "Name": "Wallet11", + "ParticipationOnly": false + }, + { + "Name": "Wallet36", + "ParticipationOnly": false + }, + { + "Name": "Wallet61", + "ParticipationOnly": false + }, + { + "Name": "Wallet86", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N12", + "Group": "", + "Nodes": [ + { + "Name": "node12", + "Wallets": [ + { + "Name": "Wallet12", + "ParticipationOnly": false + }, + { + "Name": "Wallet37", + "ParticipationOnly": false + }, + { + "Name": "Wallet62", + "ParticipationOnly": false + }, + { + "Name": "Wallet87", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N13", + "Group": "", + "Nodes": [ + { + "Name": "node13", + "Wallets": [ + { + "Name": "Wallet13", + "ParticipationOnly": false + }, + { + "Name": "Wallet38", + "ParticipationOnly": false + }, + { + "Name": "Wallet63", + "ParticipationOnly": false + }, + { + "Name": "Wallet88", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": 
"{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N14", + "Group": "", + "Nodes": [ + { + "Name": "node14", + "Wallets": [ + { + "Name": "Wallet14", + "ParticipationOnly": false + }, + { + "Name": "Wallet39", + "ParticipationOnly": false + }, + { + "Name": "Wallet64", + "ParticipationOnly": false + }, + { + "Name": "Wallet89", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N15", + "Group": "", + "Nodes": [ + { + "Name": "node15", + "Wallets": [ + { + "Name": "Wallet15", + "ParticipationOnly": false + }, + { + "Name": "Wallet40", + "ParticipationOnly": false + }, + { + "Name": "Wallet65", + "ParticipationOnly": false + }, + { + "Name": "Wallet90", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N16", + "Group": "", + "Nodes": [ + { + "Name": "node16", + "Wallets": [ + { + "Name": "Wallet16", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet41", + "ParticipationOnly": false + }, + { + "Name": "Wallet66", + "ParticipationOnly": false + }, + { + "Name": "Wallet91", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N17", + "Group": "", + "Nodes": [ + { + "Name": "node17", + "Wallets": [ + { + "Name": "Wallet17", + "ParticipationOnly": false + }, + { + "Name": "Wallet42", + "ParticipationOnly": false + }, + { + "Name": "Wallet67", + "ParticipationOnly": false + }, + { + "Name": "Wallet92", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N18", + "Group": "", + "Nodes": [ + { + "Name": "node18", + "Wallets": [ + { + "Name": "Wallet18", + "ParticipationOnly": false + }, + { + "Name": "Wallet43", + "ParticipationOnly": false + }, + { + "Name": "Wallet68", + "ParticipationOnly": false + }, + { + "Name": "Wallet93", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N19", + "Group": "", + "Nodes": [ + { + "Name": "node19", + "Wallets": [ + { + "Name": "Wallet19", + "ParticipationOnly": false + }, + { + "Name": "Wallet44", + "ParticipationOnly": false + }, + { + "Name": "Wallet69", + "ParticipationOnly": false + }, + { + "Name": "Wallet94", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N20", + "Group": "", + "Nodes": [ + { + "Name": "node20", + "Wallets": [ + { + "Name": "Wallet20", + "ParticipationOnly": false + }, + { + "Name": "Wallet45", + "ParticipationOnly": false + }, + { + "Name": "Wallet70", + "ParticipationOnly": false + }, + { + "Name": "Wallet95", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N21", + "Group": "", + "Nodes": [ + { + "Name": "node21", + "Wallets": [ + { + "Name": "Wallet21", + "ParticipationOnly": false + }, + { + "Name": "Wallet46", + "ParticipationOnly": false + 
}, + { + "Name": "Wallet71", + "ParticipationOnly": false + }, + { + "Name": "Wallet96", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N22", + "Group": "", + "Nodes": [ + { + "Name": "node22", + "Wallets": [ + { + "Name": "Wallet22", + "ParticipationOnly": false + }, + { + "Name": "Wallet47", + "ParticipationOnly": false + }, + { + "Name": "Wallet72", + "ParticipationOnly": false + }, + { + "Name": "Wallet97", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N23", + "Group": "", + "Nodes": [ + { + "Name": "node23", + "Wallets": [ + { + "Name": "Wallet23", + "ParticipationOnly": false + }, + { + "Name": "Wallet48", + "ParticipationOnly": false + }, + { + "Name": "Wallet73", + "ParticipationOnly": false + }, + { + "Name": "Wallet98", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N24", + "Group": "", + "Nodes": [ + { + "Name": "node24", + "Wallets": [ + { + "Name": "Wallet24", + "ParticipationOnly": false + }, + { + "Name": "Wallet49", + "ParticipationOnly": false + }, + { + "Name": "Wallet74", + "ParticipationOnly": false + }, + { + "Name": "Wallet99", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N25", + "Group": "", + "Nodes": [ + { + "Name": "node25", + "Wallets": [ + { + "Name": "Wallet25", + "ParticipationOnly": false + }, + { + "Name": "Wallet50", + "ParticipationOnly": false + }, + { + "Name": "Wallet75", + "ParticipationOnly": false + }, + { + "Name": "Wallet100", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "NPN1", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet101", + "ParticipationOnly": false + }, + { + "Name": "Wallet111", + "ParticipationOnly": false + }, + { + "Name": "Wallet121", + "ParticipationOnly": false + }, + 
{ + "Name": "Wallet131", + "ParticipationOnly": false + }, + { + "Name": "Wallet141", + "ParticipationOnly": false + }, + { + "Name": "Wallet151", + "ParticipationOnly": false + }, + { + "Name": "Wallet161", + "ParticipationOnly": false + }, + { + "Name": "Wallet171", + "ParticipationOnly": false + }, + { + "Name": "Wallet181", + "ParticipationOnly": false + }, + { + "Name": "Wallet191", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN2", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode2", + "Wallets": [ + { + "Name": "Wallet102", + "ParticipationOnly": false + }, + { + "Name": "Wallet112", + "ParticipationOnly": false + }, + { + "Name": "Wallet122", + "ParticipationOnly": false + }, + { + "Name": "Wallet132", + "ParticipationOnly": false + }, + { + "Name": "Wallet142", + "ParticipationOnly": false + }, + { + "Name": "Wallet152", + "ParticipationOnly": false + }, + { + "Name": "Wallet162", + "ParticipationOnly": false + }, + { + "Name": "Wallet172", + "ParticipationOnly": false + }, + { + "Name": "Wallet182", + "ParticipationOnly": false + }, + { + "Name": "Wallet192", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN3", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode3", + "Wallets": [ + { + "Name": "Wallet103", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet113", + "ParticipationOnly": false + }, + { + "Name": "Wallet123", + "ParticipationOnly": false + }, + { + "Name": "Wallet133", + "ParticipationOnly": false + }, + { + "Name": "Wallet143", + "ParticipationOnly": false + }, + { + "Name": "Wallet153", + "ParticipationOnly": false + }, + { + "Name": "Wallet163", + "ParticipationOnly": false + }, + { + "Name": "Wallet173", + "ParticipationOnly": false + }, + { + "Name": "Wallet183", + "ParticipationOnly": false + }, + { + "Name": "Wallet193", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN4", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode4", + "Wallets": [ + { + "Name": "Wallet104", + "ParticipationOnly": false + }, + { + "Name": "Wallet114", + "ParticipationOnly": false + }, + { + "Name": "Wallet124", + "ParticipationOnly": false + }, + { + "Name": "Wallet134", + "ParticipationOnly": false + }, + { + "Name": "Wallet144", + "ParticipationOnly": false + }, + { + "Name": "Wallet154", + "ParticipationOnly": false + }, + { + "Name": "Wallet164", + "ParticipationOnly": false + }, + { + "Name": "Wallet174", + "ParticipationOnly": false + }, + { + "Name": "Wallet184", + "ParticipationOnly": false + }, + { + "Name": "Wallet194", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" 
+ } + ] + }, + { + "Name": "NPN5", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode5", + "Wallets": [ + { + "Name": "Wallet105", + "ParticipationOnly": false + }, + { + "Name": "Wallet115", + "ParticipationOnly": false + }, + { + "Name": "Wallet125", + "ParticipationOnly": false + }, + { + "Name": "Wallet135", + "ParticipationOnly": false + }, + { + "Name": "Wallet145", + "ParticipationOnly": false + }, + { + "Name": "Wallet155", + "ParticipationOnly": false + }, + { + "Name": "Wallet165", + "ParticipationOnly": false + }, + { + "Name": "Wallet175", + "ParticipationOnly": false + }, + { + "Name": "Wallet185", + "ParticipationOnly": false + }, + { + "Name": "Wallet195", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN6", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode6", + "Wallets": [ + { + "Name": "Wallet106", + "ParticipationOnly": false + }, + { + "Name": "Wallet116", + "ParticipationOnly": false + }, + { + "Name": "Wallet126", + "ParticipationOnly": false + }, + { + "Name": "Wallet136", + "ParticipationOnly": false + }, + { + "Name": "Wallet146", + "ParticipationOnly": false + }, + { + "Name": "Wallet156", + "ParticipationOnly": false + }, + { + "Name": "Wallet166", + "ParticipationOnly": false + }, + { + "Name": "Wallet176", + "ParticipationOnly": false + }, + { + "Name": "Wallet186", + "ParticipationOnly": false + }, + { + "Name": "Wallet196", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN7", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode7", + "Wallets": [ + { + "Name": "Wallet107", + "ParticipationOnly": false + }, + { + "Name": "Wallet117", + "ParticipationOnly": false + }, + { + "Name": "Wallet127", + "ParticipationOnly": false + }, + { + "Name": "Wallet137", + "ParticipationOnly": false + }, + { + "Name": "Wallet147", + "ParticipationOnly": false + }, + { + "Name": "Wallet157", + "ParticipationOnly": false + }, + { + "Name": "Wallet167", + "ParticipationOnly": false + }, + { + "Name": "Wallet177", + "ParticipationOnly": false + }, + { + "Name": "Wallet187", + "ParticipationOnly": false + }, + { + "Name": "Wallet197", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN8", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode8", + "Wallets": [ + { + "Name": "Wallet108", + "ParticipationOnly": false + }, + { + "Name": "Wallet118", + "ParticipationOnly": false + }, + { + "Name": "Wallet128", + "ParticipationOnly": false + }, + { + "Name": "Wallet138", + "ParticipationOnly": false + }, + { + "Name": "Wallet148", + "ParticipationOnly": false + }, + { + "Name": "Wallet158", + "ParticipationOnly": false + }, + { + "Name": "Wallet168", + "ParticipationOnly": false + }, + { + "Name": "Wallet178", + "ParticipationOnly": false + }, + { + "Name": "Wallet188", + "ParticipationOnly": false + }, + { + "Name": "Wallet198", + "ParticipationOnly": false + } + ], + "APIEndpoint": 
"{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN9", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode9", + "Wallets": [ + { + "Name": "Wallet109", + "ParticipationOnly": false + }, + { + "Name": "Wallet119", + "ParticipationOnly": false + }, + { + "Name": "Wallet129", + "ParticipationOnly": false + }, + { + "Name": "Wallet139", + "ParticipationOnly": false + }, + { + "Name": "Wallet149", + "ParticipationOnly": false + }, + { + "Name": "Wallet159", + "ParticipationOnly": false + }, + { + "Name": "Wallet169", + "ParticipationOnly": false + }, + { + "Name": "Wallet179", + "ParticipationOnly": false + }, + { + "Name": "Wallet189", + "ParticipationOnly": false + }, + { + "Name": "Wallet199", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN10", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode10", + "Wallets": [ + { + "Name": "Wallet110", + "ParticipationOnly": false + }, + { + "Name": "Wallet120", + "ParticipationOnly": false + }, + { + "Name": "Wallet130", + "ParticipationOnly": false + }, + { + "Name": "Wallet140", + "ParticipationOnly": false + }, + { + "Name": "Wallet150", + "ParticipationOnly": false + }, + { + "Name": "Wallet160", + "ParticipationOnly": false + }, + { + "Name": "Wallet170", + "ParticipationOnly": false + }, + { + "Name": "Wallet180", + "ParticipationOnly": false 
+ }, + { + "Name": "Wallet190", + "ParticipationOnly": false + }, + { + "Name": "Wallet200", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/generated/topology.json b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/topology.json new file mode 100644 index 0000000000..db766f1233 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/generated/topology.json @@ -0,0 +1,304 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Group": "us-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R2", + "Group": "us-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R3", + "Group": "us-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R4", + "Group": "us-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R5", + "Group": "us-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R6", + "Group": "eu-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R7", + "Group": "eu-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R8", + "Group": "eu-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R9", + "Group": "eu-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R10", + "Group": "eu-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R11", + "Group": "ap-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R12", + "Group": "ap-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R13", + "Group": "ap-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R14", + "Group": "ap-r", + 
"Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R15", + "Group": "ap-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R16", + "Group": "af-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R17", + "Group": "af-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R18", + "Group": "af-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R19", + "Group": "af-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R20", + "Group": "af-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R21", + "Group": "au-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R22", + "Group": "au-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R23", + "Group": "au-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R24", + "Group": "au-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "R25", + "Group": "au-r", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N1", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N2", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N3", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N4", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N5", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N6", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N7", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N8", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N9", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N10", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N11", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N12", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + 
"Name": "N13", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N14", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N15", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N16", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N17", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N18", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N19", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N20", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N21", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N22", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N23", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N24", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "N25", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN1", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN2", + "Group": "us-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN3", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN4", + "Group": "eu-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN5", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN6", + "Group": "ap-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN7", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN8", + "Group": "af-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN9", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + }, + { + "Name": "NPN10", + "Group": "au-n", + "Template": "AWS-US-WEST-1-c5.xlarge" + } + ] +} \ No newline at end of file diff 
--git a/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json new file mode 100644 index 0000000000..3ab40e1ce1 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/network-tpl.json @@ -0,0 +1,115 @@ +{ + "network": { + "wallets": 100, + "nodes": 25 + }, + "instances": { + "relays": { + "config": "./configs/relay.json", + "type": "c5.xlarge", + "count": 25 + }, + "participatingNodes": { + "config": "./configs/node.json", + "type": "c5.xlarge", + "count": 25 + }, + "nonParticipatingNodes": { + "config": "./configs/nonPartNode.json", + "type": "c5.xlarge", + "count": 10 + } + }, + "groups": [ + { + "name": "us-r", + "percent": { + "relays": 20, + "participatingNodes": 0, + "nonParticipatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "us-n", + "percent": { + "relays": 0, + "participatingNodes": 20, + "nonParticipatingNodes": 20 + }, + "region": "us-west-1" + }, + { + "name": "eu-r", + "percent": { + "relays": 20, + "participatingNodes": 0, + "nonParticipatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "eu-n", + "percent": { + "relays": 0, + "participatingNodes": 20, + "nonParticipatingNodes": 20 + }, + "region": "us-west-1" + }, + { + "name": "ap-r", + "percent": { + "relays": 20, + "participatingNodes": 0, + "nonParticipatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "ap-n", + "percent": { + "relays": 0, + "participatingNodes": 20, + "nonParticipatingNodes": 20 + }, + "region": "us-west-1" + }, + { + "name": "af-r", + "percent": { + "relays": 20, + "participatingNodes": 0, + "nonParticipatingNodes": 0 + }, + "region": "us-west-1" + }, + { + "name": "af-n", + "percent": { + "relays": 0, + "participatingNodes": 20, + "nonParticipatingNodes": 20 + }, + "region": "us-west-1" + }, + { + "name": "au-r", + "percent": { + "relays": 20, + "participatingNodes": 0, + "nonParticipatingNodes": 0 + }, + "region": 
"us-west-1" + }, + { + "name": "au-n", + "percent": { + "relays": 0, + "participatingNodes": 20, + "nonParticipatingNodes": 20 + }, + "region": "us-west-1" + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/mainnet-model/network_performance_rules b/test/testdata/deployednettemplates/recipes/mainnet-model/network_performance_rules new file mode 100644 index 0000000000..7193b24a6f --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/network_performance_rules @@ -0,0 +1,75 @@ +us-n us-r 46 22 +us-r us-n 1000 22 +us-r us-r 1000 10 +us-n eu-r 37 40 +us-r eu-n 1000 40 +us-r eu-r 1000 40 +us-n ap-r 37 120 +us-r ap-n 1000 120 +us-r ap-r 1000 120 +us-n af-r 37 124 +us-r af-n 1000 124 +us-r af-r 1000 124 +us-n au-r 37 102 +us-r au-n 1000 102 +us-r au-r 1000 102 +eu-n us-r 78 40 +eu-r us-n 1000 40 +eu-r us-r 1000 40 +eu-n eu-r 98 16 +eu-r eu-n 1000 16 +eu-r eu-r 1000 10 +eu-n ap-r 78 75 +eu-r ap-n 1000 75 +eu-r ap-r 1000 75 +eu-n af-r 78 85 +eu-r af-n 1000 85 +eu-r af-r 1000 85 +eu-n au-r 78 135 +eu-r au-n 1000 135 +eu-r au-r 1000 135 +ap-n us-r 47 120 +ap-r us-n 1000 120 +ap-r us-r 1000 120 +ap-n eu-r 47 75 +ap-r eu-n 1000 75 +ap-r eu-r 1000 75 +ap-n ap-r 59 32 +ap-r ap-n 1000 32 +ap-r ap-r 1000 10 +ap-n af-r 47 202 +ap-r af-n 1000 202 +ap-r af-r 1000 202 +ap-n au-r 47 47 +ap-r au-n 1000 47 +ap-r au-r 1000 47 +af-n us-r 13 118 +af-r us-n 1000 118 +af-r us-r 1000 118 +af-n eu-r 13 85 +af-r eu-n 1000 85 +af-r eu-r 1000 85 +af-n ap-r 13 202 +af-r ap-n 1000 202 +af-r ap-r 1000 202 +af-n af-r 16 70 +af-r af-n 1000 70 +af-r af-r 1000 10 +af-n au-r 13 216 +af-r au-n 1000 216 +af-r au-r 1000 216 +au-n us-r 44 102 +au-r us-n 1000 102 +au-r us-r 1000 102 +au-n eu-r 44 136 +au-r eu-n 1000 136 +au-r eu-r 1000 136 +au-n ap-r 44 47 +au-r ap-n 1000 47 +au-r ap-r 1000 47 +au-n af-r 44 217 +au-r af-n 1000 217 +au-r af-r 1000 217 +au-n au-r 55 23 +au-r au-n 1000 23 +au-r au-r 1000 10 diff --git 
a/test/testdata/deployednettemplates/recipes/mainnet-model/recipe.json b/test/testdata/deployednettemplates/recipes/mainnet-model/recipe.json new file mode 100644 index 0000000000..24f7b394e0 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/mainnet-model/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"generated/genesis.json", + "NetworkFile":"generated/net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "generated/topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/Makefile b/test/testdata/deployednettemplates/recipes/network-partition/Makefile new file mode 100644 index 0000000000..24226bc5b6 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/Makefile @@ -0,0 +1,12 @@ +PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json + +all: net.json genesis.json + +net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal + netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} + +genesis.json: ${GOPATH}/bin/netgoal + netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS} + +clean: + rm -f net.json genesis.json diff --git a/test/testdata/deployednettemplates/recipes/network-partition/gen_topology.py b/test/testdata/deployednettemplates/recipes/network-partition/gen_topology.py new file mode 100644 index 0000000000..3de7bea638 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/gen_topology.py @@ -0,0 +1,71 @@ +import random + +node_types = {"R":8, "N":20, "NPN":2} +node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"} +partitions = {"A":50, "B":20, "C":10, "D":10, "E":5, "F":5} +regions = [ + "AWS-US-EAST-2", + "AWS-US-WEST-1" +] + +def gen_topology(ranges): + f = open("topology.json", "w") + f.write("{ \"Hosts\":\n [") + node_groups = {} + + region_count = 
len(regions) + first = True + for x in node_types: + node_type = x + node_count = node_types[x] + region_size = node_size[x] + for i in range(node_count): + node_name = node_type + str(i+1) + region = regions[i%region_count] + # randomly assign the node to a partition + partition = get_partition(ranges) + node_groups.setdefault(partition,[]).append(node_name); + if (first ): + first = False + else: + f.write(",") + f.write ("\n {\n \"Name\": \"" + node_name + "\",\n \"Group\": \"" + partition + "\",\n \"Template\": \"" + region + region_size + "\"\n }" ) + + f.write("\n ]\n}\n") + f.close() + + for node_group in node_groups: + f = open("group_" + node_group + ".txt", "w") + for node in node_groups[node_group]: + f.write(node +"\n") + f.close() + + +def get_partition(ranges): + random_value = random.randint(1,100) + for partition_name in ranges: + partition_value = ranges[partition_name] + if random_value >= partition_value['start'] and random_value <= partition_value['end'] : + return partition_name + print("error, partition not found for random_value ", random_value) + exit(1) + +def get_ranges(): + ranges = {} + start_pos = 1; + for name, size in partitions.items(): + if (start_pos > 100) : + print("error, range exceeded 100") + exit(1) + end_pos = start_pos + size - 1 + ranges[name] = {"start": start_pos, "end": end_pos} + start_pos = end_pos + 1 + print(ranges) + return ranges + + +# create the group ranges based on group percent size +ranges = get_ranges() + +# gen the topology.json file based and assign groups +gen_topology(ranges) diff --git a/test/testdata/deployednettemplates/recipes/network-partition/genesis.json b/test/testdata/deployednettemplates/recipes/network-partition/genesis.json new file mode 100644 index 0000000000..c1f7c184e9 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/genesis.json @@ -0,0 +1,1013 @@ +{ + "NetworkName": "", + "VersionModifier": "", + "ConsensusProtocol": "", + "FirstPartKeyRound": 0, + 
"LastPartKeyRound": 3000000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet5", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet6", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet7", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet8", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet9", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet10", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet11", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet12", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet13", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet14", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet15", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet16", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet17", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet18", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet19", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet20", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet21", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet22", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet23", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet24", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet25", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet26", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet27", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet28", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet29", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet30", + "Stake": 0.5, + "Online": true + }, + { + "Name": 
"Wallet31", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet32", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet33", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet34", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet35", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet36", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet37", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet38", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet39", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet40", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet41", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet42", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet43", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet44", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet45", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet46", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet47", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet48", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet49", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet50", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet51", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet52", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet53", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet54", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet55", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet56", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet57", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet58", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet59", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet60", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet61", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet62", + 
"Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet63", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet64", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet65", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet66", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet67", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet68", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet69", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet70", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet71", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet72", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet73", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet74", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet75", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet76", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet77", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet78", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet79", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet80", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet81", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet82", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet83", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet84", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet85", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet86", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet87", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet88", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet89", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet90", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet91", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet92", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet93", + "Stake": 0.5, + 
"Online": true + }, + { + "Name": "Wallet94", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet95", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet96", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet97", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet98", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet99", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet100", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet101", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet102", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet103", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet104", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet105", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet106", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet107", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet108", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet109", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet110", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet111", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet112", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet113", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet114", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet115", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet116", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet117", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet118", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet119", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet120", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet121", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet122", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet123", + "Stake": 0.5, + "Online": false + }, + { + 
"Name": "Wallet124", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet125", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet126", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet127", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet128", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet129", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet130", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet131", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet132", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet133", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet134", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet135", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet136", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet137", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet138", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet139", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet140", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet141", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet142", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet143", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet144", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet145", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet146", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet147", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet148", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet149", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet150", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet151", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet152", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet153", + "Stake": 0.5, + "Online": false + }, + { + "Name": 
"Wallet154", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet155", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet156", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet157", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet158", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet159", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet160", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet161", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet162", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet163", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet164", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet165", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet166", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet167", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet168", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet169", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet170", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet171", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet172", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet173", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet174", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet175", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet176", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet177", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet178", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet179", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet180", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet181", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet182", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet183", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet184", + 
"Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet185", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet186", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet187", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet188", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet189", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet190", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet191", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet192", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet193", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet194", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet195", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet196", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet197", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet198", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet199", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet200", + "Stake": 0.5, + "Online": false + } + ], + "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "Comment": "" +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/net.json b/test/testdata/deployednettemplates/recipes/network-partition/net.json new file mode 100644 index 0000000000..3b2b5df750 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/net.json @@ -0,0 +1,2564 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Group": "", + "Nodes": [ + { + "Name": "relay1", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R2", + "Group": "", + "Nodes": [ + { + "Name": "relay2", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R3", + "Group": "", + "Nodes": [ + { + "Name": "relay3", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R4", + "Group": "", + "Nodes": [ + { + "Name": "relay4", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R5", + "Group": "", + "Nodes": [ + { + "Name": "relay5", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R6", + "Group": "", + "Nodes": [ + { + "Name": "relay6", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": 
false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R7", + "Group": "", + "Nodes": [ + { + "Name": "relay7", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R8", + "Group": "", + "Nodes": [ + { + "Name": "relay8", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, 
\"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N1", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node21", + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node41", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node61", + "Wallets": [ + { + "Name": "Wallet4", + 
"ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node81", + "Wallets": [ + { + "Name": "Wallet5", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N2", + "Group": "", + "Nodes": [ + { + "Name": "node2", + "Wallets": [ + { + "Name": "Wallet6", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node22", + "Wallets": [ + { + "Name": "Wallet7", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node42", + "Wallets": [ + { + "Name": "Wallet8", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node62", + "Wallets": [ + { + "Name": "Wallet9", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node82", + "Wallets": [ + { + "Name": "Wallet10", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N3", + "Group": "", + "Nodes": [ + { + "Name": "node3", + "Wallets": [ + { + "Name": "Wallet11", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": 
false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node23", + "Wallets": [ + { + "Name": "Wallet12", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node43", + "Wallets": [ + { + "Name": "Wallet13", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node63", + "Wallets": [ + { + "Name": "Wallet14", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }" + }, + { + "Name": "node83", + "Wallets": [ + { + "Name": "Wallet15", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N4", + "Group": "", + "Nodes": [ + { + "Name": "node4", + "Wallets": [ + { + "Name": "Wallet16", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node24", + "Wallets": [ + { + "Name": "Wallet17", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node44", + "Wallets": [ + { + "Name": "Wallet18", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node64", + "Wallets": [ + { + "Name": "Wallet19", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node84", + "Wallets": [ + { + "Name": "Wallet20", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N5", + "Group": "", + "Nodes": [ + { + "Name": "node5", + "Wallets": [ + { + "Name": "Wallet21", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node25", + "Wallets": [ + { + "Name": "Wallet22", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node45", + "Wallets": [ + { + "Name": "Wallet23", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node65", + "Wallets": [ + { + "Name": "Wallet24", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node85", + "Wallets": [ + { + "Name": "Wallet25", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N6", + "Group": "", + "Nodes": [ + { + "Name": "node6", + "Wallets": [ + { + "Name": "Wallet26", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node26", + "Wallets": [ + { + "Name": "Wallet27", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node46", + "Wallets": [ + { + "Name": "Wallet28", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node66", + "Wallets": [ + { + "Name": "Wallet29", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node86", + "Wallets": [ + { + "Name": "Wallet30", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N7", + "Group": "", + "Nodes": [ + { + "Name": "node7", + "Wallets": [ + { + "Name": "Wallet31", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node27", + "Wallets": [ + { + "Name": "Wallet32", + 
"ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node47", + "Wallets": [ + { + "Name": "Wallet33", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node67", + "Wallets": [ + { + "Name": "Wallet34", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node87", + "Wallets": [ + { + "Name": "Wallet35", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N8", + "Group": "", + "Nodes": [ + { + "Name": "node8", + "Wallets": [ + { + "Name": "Wallet36", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node28", + "Wallets": [ + { + "Name": "Wallet37", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node48", + "Wallets": [ + { + "Name": "Wallet38", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node68", + "Wallets": [ + { + "Name": 
"Wallet39", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node88", + "Wallets": [ + { + "Name": "Wallet40", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N9", + "Group": "", + "Nodes": [ + { + "Name": "node9", + "Wallets": [ + { + "Name": "Wallet41", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node29", + "Wallets": [ + { + "Name": "Wallet42", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + 
"MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node49", + "Wallets": [ + { + "Name": "Wallet43", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node69", + "Wallets": [ + { + "Name": "Wallet44", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node89", + "Wallets": [ + { + "Name": "Wallet45", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, 
\"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N10", + "Group": "", + "Nodes": [ + { + "Name": "node10", + "Wallets": [ + { + "Name": "Wallet46", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node30", + "Wallets": [ + { + "Name": "Wallet47", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node50", + "Wallets": [ + { + "Name": "Wallet48", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node70", + "Wallets": [ + { + "Name": "Wallet49", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + 
"EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node90", + "Wallets": [ + { + "Name": "Wallet50", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N11", + "Group": "", + "Nodes": [ + { + "Name": "node11", + "Wallets": [ + { + "Name": "Wallet51", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node31", + "Wallets": [ + { + "Name": "Wallet52", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }" + }, + { + "Name": "node51", + "Wallets": [ + { + "Name": "Wallet53", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node71", + "Wallets": [ + { + "Name": "Wallet54", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node91", + "Wallets": [ + { + "Name": "Wallet55", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N12", + "Group": "", + "Nodes": [ + { + "Name": "node12", + "Wallets": [ + { + "Name": "Wallet56", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node32", + "Wallets": [ + { + "Name": "Wallet57", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node52", + "Wallets": [ + { + "Name": "Wallet58", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node72", + "Wallets": [ + { + "Name": "Wallet59", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node92", + "Wallets": [ + { + "Name": "Wallet60", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N13", + "Group": "", + "Nodes": [ + { + "Name": "node13", + "Wallets": [ + { + "Name": "Wallet61", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node33", + "Wallets": [ + { + "Name": "Wallet62", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }" + }, + { + "Name": "node53", + "Wallets": [ + { + "Name": "Wallet63", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node73", + "Wallets": [ + { + "Name": "Wallet64", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node93", + "Wallets": [ + { + "Name": "Wallet65", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N14", + "Group": "", + "Nodes": [ + { + "Name": "node14", + "Wallets": [ + { + "Name": "Wallet66", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": 
false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node34", + "Wallets": [ + { + "Name": "Wallet67", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node54", + "Wallets": [ + { + "Name": "Wallet68", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node74", + "Wallets": [ + { + "Name": "Wallet69", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }" + }, + { + "Name": "node94", + "Wallets": [ + { + "Name": "Wallet70", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N15", + "Group": "", + "Nodes": [ + { + "Name": "node15", + "Wallets": [ + { + "Name": "Wallet71", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node35", + "Wallets": [ + { + "Name": "Wallet72", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node55", + "Wallets": [ + { + "Name": "Wallet73", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": 
false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node75", + "Wallets": [ + { + "Name": "Wallet74", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node95", + "Wallets": [ + { + "Name": "Wallet75", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N16", + "Group": "", + "Nodes": [ + { + "Name": "node16", + "Wallets": [ + { + "Name": "Wallet76", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node36", + "Wallets": [ + { + "Name": "Wallet77", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node56", + "Wallets": [ + { + "Name": "Wallet78", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node76", + "Wallets": [ + { + "Name": "Wallet79", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { 
+ "Name": "node96", + "Wallets": [ + { + "Name": "Wallet80", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N17", + "Group": "", + "Nodes": [ + { + "Name": "node17", + "Wallets": [ + { + "Name": "Wallet81", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node37", + "Wallets": [ + { + "Name": "Wallet82", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node57", + "Wallets": [ + { + "Name": "Wallet83", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node77", + "Wallets": [ + { + "Name": "Wallet84", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node97", + "Wallets": [ + { + "Name": "Wallet85", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N18", + "Group": "", + "Nodes": [ + { + "Name": "node18", + "Wallets": [ + { + "Name": "Wallet86", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node38", + "Wallets": [ + { + "Name": "Wallet87", + 
"ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node58", + "Wallets": [ + { + "Name": "Wallet88", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node78", + "Wallets": [ + { + "Name": "Wallet89", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node98", + "Wallets": [ + { + "Name": "Wallet90", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + 
"EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N19", + "Group": "", + "Nodes": [ + { + "Name": "node19", + "Wallets": [ + { + "Name": "Wallet91", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node39", + "Wallets": [ + { + "Name": "Wallet92", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node59", + "Wallets": [ + { + "Name": "Wallet93", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node79", + "Wallets": [ + { + "Name": "Wallet94", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node99", + "Wallets": [ + { + "Name": "Wallet95", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N20", + "Group": "", + "Nodes": [ + { + "Name": "node20", + "Wallets": [ + { + "Name": "Wallet96", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node40", + "Wallets": [ + { + "Name": "Wallet97", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": 
false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node60", + "Wallets": [ + { + "Name": "Wallet98", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node80", + "Wallets": [ + { + "Name": "Wallet99", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + }, + { + "Name": "node100", + "Wallets": [ + { + "Name": "Wallet100", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "NPN1", + "Group": 
"", + "Nodes": [ + { + "Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet101", + "ParticipationOnly": false + }, + { + "Name": "Wallet111", + "ParticipationOnly": false + }, + { + "Name": "Wallet121", + "ParticipationOnly": false + }, + { + "Name": "Wallet131", + "ParticipationOnly": false + }, + { + "Name": "Wallet141", + "ParticipationOnly": false + }, + { + "Name": "Wallet151", + "ParticipationOnly": false + }, + { + "Name": "Wallet161", + "ParticipationOnly": false + }, + { + "Name": "Wallet171", + "ParticipationOnly": false + }, + { + "Name": "Wallet181", + "ParticipationOnly": false + }, + { + "Name": "Wallet191", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN2", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode2", + "Wallets": [ + { + "Name": "Wallet102", + "ParticipationOnly": false + }, + { + "Name": "Wallet112", + "ParticipationOnly": false + }, + { + "Name": "Wallet122", + "ParticipationOnly": false + }, + { + "Name": "Wallet132", + "ParticipationOnly": false + }, + { + "Name": "Wallet142", + "ParticipationOnly": false + }, + { + "Name": "Wallet152", + "ParticipationOnly": false + }, + { + "Name": "Wallet162", + "ParticipationOnly": false + }, + { + "Name": "Wallet172", + "ParticipationOnly": false + }, + { + "Name": "Wallet182", + "ParticipationOnly": false + }, + { + "Name": "Wallet192", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN3", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode3", + "Wallets": [ + { + "Name": "Wallet103", + "ParticipationOnly": false + }, + { + "Name": "Wallet113", + "ParticipationOnly": false + }, + { + "Name": "Wallet123", + "ParticipationOnly": false + }, + { + "Name": "Wallet133", + "ParticipationOnly": false + }, + { + "Name": "Wallet143", + "ParticipationOnly": false + }, + { + "Name": "Wallet153", + "ParticipationOnly": false + }, + { + "Name": "Wallet163", + "ParticipationOnly": false + }, + { + "Name": "Wallet173", + "ParticipationOnly": false + }, + { + "Name": "Wallet183", + "ParticipationOnly": false + }, + { + "Name": "Wallet193", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN4", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode4", + "Wallets": [ + { + "Name": "Wallet104", + "ParticipationOnly": false + }, + { + "Name": "Wallet114", + "ParticipationOnly": false + }, + { + "Name": "Wallet124", + "ParticipationOnly": false + }, + { + "Name": "Wallet134", + "ParticipationOnly": false + }, + { + "Name": "Wallet144", + "ParticipationOnly": false + }, + { + "Name": "Wallet154", + "ParticipationOnly": false + }, + { + "Name": "Wallet164", + "ParticipationOnly": false + }, + { + "Name": "Wallet174", + "ParticipationOnly": false + }, + { + "Name": "Wallet184", + "ParticipationOnly": false + }, + { + "Name": "Wallet194", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + 
"EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN5", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode5", + "Wallets": [ + { + "Name": "Wallet105", + "ParticipationOnly": false + }, + { + "Name": "Wallet115", + "ParticipationOnly": false + }, + { + "Name": "Wallet125", + "ParticipationOnly": false + }, + { + "Name": "Wallet135", + "ParticipationOnly": false + }, + { + "Name": "Wallet145", + "ParticipationOnly": false + }, + { + "Name": "Wallet155", + "ParticipationOnly": false + }, + { + "Name": "Wallet165", + "ParticipationOnly": false + }, + { + "Name": "Wallet175", + "ParticipationOnly": false + }, + { + "Name": "Wallet185", + "ParticipationOnly": false + }, + { + "Name": "Wallet195", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN6", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode6", + "Wallets": [ + { + "Name": "Wallet106", + "ParticipationOnly": false + }, + { + "Name": "Wallet116", + "ParticipationOnly": false + }, + { + "Name": "Wallet126", + "ParticipationOnly": false + }, + { + "Name": "Wallet136", + "ParticipationOnly": false + }, + { + "Name": "Wallet146", + "ParticipationOnly": false + }, + { + "Name": "Wallet156", + "ParticipationOnly": false + }, + { + "Name": "Wallet166", + "ParticipationOnly": false + }, + { + "Name": "Wallet176", + "ParticipationOnly": false + }, + { + "Name": "Wallet186", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet196", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN7", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode7", + "Wallets": [ + { + "Name": "Wallet107", + "ParticipationOnly": false + }, + { + "Name": "Wallet117", + "ParticipationOnly": false + }, + { + "Name": "Wallet127", + "ParticipationOnly": false + }, + { + "Name": "Wallet137", + "ParticipationOnly": false + }, + { + "Name": "Wallet147", + "ParticipationOnly": false + }, + { + "Name": "Wallet157", + "ParticipationOnly": false + }, + { + "Name": "Wallet167", + "ParticipationOnly": false + }, + { + "Name": "Wallet177", + "ParticipationOnly": false + }, + { + "Name": "Wallet187", + "ParticipationOnly": false + }, + { + "Name": "Wallet197", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN8", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode8", + "Wallets": [ + { + "Name": "Wallet108", + "ParticipationOnly": false + }, + { + "Name": "Wallet118", + "ParticipationOnly": false + }, + { + "Name": "Wallet128", + "ParticipationOnly": false + }, + { + "Name": "Wallet138", + "ParticipationOnly": false + }, + { + "Name": "Wallet148", + "ParticipationOnly": false + }, + { + "Name": "Wallet158", + "ParticipationOnly": false + }, + { + "Name": "Wallet168", + "ParticipationOnly": 
false + }, + { + "Name": "Wallet178", + "ParticipationOnly": false + }, + { + "Name": "Wallet188", + "ParticipationOnly": false + }, + { + "Name": "Wallet198", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN9", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode9", + "Wallets": [ + { + "Name": "Wallet109", + "ParticipationOnly": false + }, + { + "Name": "Wallet119", + "ParticipationOnly": false + }, + { + "Name": "Wallet129", + "ParticipationOnly": false + }, + { + "Name": "Wallet139", + "ParticipationOnly": false + }, + { + "Name": "Wallet149", + "ParticipationOnly": false + }, + { + "Name": "Wallet159", + "ParticipationOnly": false + }, + { + "Name": "Wallet169", + "ParticipationOnly": false + }, + { + "Name": "Wallet179", + "ParticipationOnly": false + }, + { + "Name": "Wallet189", + "ParticipationOnly": false + }, + { + "Name": "Wallet199", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + }, + { + "Name": "NPN10", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode10", + "Wallets": [ + { + "Name": "Wallet110", + "ParticipationOnly": false + }, + { + "Name": "Wallet120", + "ParticipationOnly": false + }, + { + "Name": "Wallet130", + "ParticipationOnly": false + }, + { + "Name": "Wallet140", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet150", + "ParticipationOnly": false + }, + { + "Name": "Wallet160", + "ParticipationOnly": false + }, + { + "Name": "Wallet170", + "ParticipationOnly": false + }, + { + "Name": "Wallet180", + "ParticipationOnly": false + }, + { + "Name": "Wallet190", + "ParticipationOnly": false + }, + { + "Name": "Wallet200", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/node.json b/test/testdata/deployednettemplates/recipes/network-partition/node.json new file mode 100644 index 0000000000..4386fa3fd7 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/node.json @@ -0,0 +1,22 @@ +{ + "APIToken": "{{APIToken}}", + "EnableBlockStats": false, + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }", + "AltConfigs": [ + { + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true }", + "FractionApply": 0.2 + } + ] +} + diff --git a/test/testdata/deployednettemplates/recipes/network-partition/nonPartNode.json b/test/testdata/deployednettemplates/recipes/network-partition/nonPartNode.json new file mode 100644 index 0000000000..3825bb420b --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/nonPartNode.json @@ -0,0 +1,5 @@ +{ + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4 }" +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/recipe.json b/test/testdata/deployednettemplates/recipes/network-partition/recipe.json new file mode 100644 index 0000000000..a2f88f63b4 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/relay.json b/test/testdata/deployednettemplates/recipes/network-partition/relay.json new file mode 100644 index 0000000000..25bb6b5a26 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/relay.json @@ -0,0 +1,11 @@ +{ + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, 
\"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" +} diff --git a/test/testdata/deployednettemplates/recipes/network-partition/topology.json b/test/testdata/deployednettemplates/recipes/network-partition/topology.json new file mode 100644 index 0000000000..e050f67368 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/network-partition/topology.json @@ -0,0 +1,154 @@ +{ "Hosts": + [ + { + "Name": "R1", + "Group": "F", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R2", + "Group": "B", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "R3", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R4", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "R5", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R6", + "Group": "B", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "R7", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R8", + "Group": "B", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N1", + "Group": "B", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N2", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N3", + "Group": "B", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N4", + "Group": "B", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N5", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N6", + "Group": "E", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N7", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N8", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N9", + "Group": "B", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N10", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N11", + "Group": 
"E", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N12", + "Group": "B", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N13", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N14", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N15", + "Group": "B", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N16", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N17", + "Group": "B", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N18", + "Group": "A", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "N19", + "Group": "C", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N20", + "Group": "D", + "Template": "AWS-US-WEST-1-m5d.4xl" + }, + { + "Name": "NPN1", + "Group": "A", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN2", + "Group": "D", + "Template": "AWS-US-WEST-1-m5d.4xl" + } + ] +} diff --git a/test/testdata/nettemplates/TwoNodesOneRelay1000Accounts.json b/test/testdata/nettemplates/TwoNodesOneRelay1000Accounts.json new file mode 100644 index 0000000000..9e2c09a299 --- /dev/null +++ b/test/testdata/nettemplates/TwoNodesOneRelay1000Accounts.json @@ -0,0 +1,9026 @@ +{ + "Genesis": { + "NetworkName": "tbd", + "PartKeyDilution": 100, + "LastPartKeyRound": 20000, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet5", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet6", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet7", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet8", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet9", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet10", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet11", + 
"Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet12", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet13", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet14", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet15", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet16", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet17", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet18", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet19", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet20", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet21", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet22", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet23", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet24", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet25", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet26", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet27", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet28", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet29", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet30", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet31", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet32", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet33", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet34", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet35", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet36", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet37", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet38", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet39", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet40", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet41", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet42", + "Stake": 0.1, + 
"Online": true + }, + { + "Name": "Wallet43", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet44", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet45", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet46", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet47", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet48", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet49", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet50", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet51", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet52", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet53", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet54", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet55", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet56", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet57", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet58", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet59", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet60", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet61", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet62", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet63", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet64", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet65", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet66", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet67", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet68", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet69", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet70", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet71", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet72", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet73", + "Stake": 0.1, + "Online": true 
+ }, + { + "Name": "Wallet74", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet75", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet76", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet77", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet78", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet79", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet80", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet81", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet82", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet83", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet84", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet85", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet86", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet87", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet88", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet89", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet90", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet91", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet92", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet93", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet94", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet95", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet96", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet97", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet98", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet99", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet100", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet101", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet102", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet103", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet104", + "Stake": 0.1, + "Online": true + }, + { + 
"Name": "Wallet105", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet106", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet107", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet108", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet109", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet110", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet111", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet112", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet113", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet114", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet115", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet116", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet117", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet118", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet119", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet120", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet121", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet122", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet123", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet124", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet125", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet126", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet127", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet128", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet129", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet130", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet131", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet132", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet133", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet134", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet135", + "Stake": 0.1, + "Online": 
true + }, + { + "Name": "Wallet136", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet137", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet138", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet139", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet140", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet141", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet142", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet143", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet144", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet145", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet146", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet147", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet148", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet149", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet150", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet151", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet152", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet153", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet154", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet155", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet156", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet157", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet158", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet159", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet160", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet161", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet162", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet163", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet164", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet165", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet166", + "Stake": 
0.1, + "Online": true + }, + { + "Name": "Wallet167", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet168", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet169", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet170", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet171", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet172", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet173", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet174", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet175", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet176", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet177", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet178", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet179", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet180", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet181", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet182", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet183", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet184", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet185", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet186", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet187", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet188", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet189", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet190", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet191", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet192", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet193", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet194", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet195", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet196", + "Stake": 0.1, + "Online": true + }, + { + "Name": 
"Wallet197", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet198", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet199", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet200", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet201", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet202", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet203", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet204", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet205", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet206", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet207", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet208", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet209", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet210", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet211", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet212", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet213", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet214", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet215", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet216", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet217", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet218", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet219", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet220", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet221", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet222", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet223", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet224", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet225", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet226", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet227", + "Stake": 0.1, + "Online": true + 
}, + { + "Name": "Wallet228", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet229", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet230", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet231", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet232", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet233", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet234", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet235", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet236", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet237", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet238", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet239", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet240", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet241", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet242", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet243", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet244", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet245", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet246", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet247", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet248", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet249", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet250", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet251", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet252", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet253", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet254", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet255", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet256", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet257", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet258", + "Stake": 0.1, + 
"Online": true + }, + { + "Name": "Wallet259", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet260", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet261", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet262", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet263", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet264", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet265", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet266", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet267", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet268", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet269", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet270", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet271", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet272", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet273", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet274", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet275", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet276", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet277", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet278", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet279", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet280", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet281", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet282", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet283", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet284", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet285", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet286", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet287", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet288", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet289", + 
"Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet290", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet291", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet292", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet293", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet294", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet295", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet296", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet297", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet298", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet299", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet300", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet301", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet302", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet303", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet304", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet305", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet306", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet307", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet308", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet309", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet310", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet311", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet312", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet313", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet314", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet315", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet316", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet317", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet318", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet319", + "Stake": 0.1, + "Online": true + }, + { + "Name": 
"Wallet320", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet321", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet322", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet323", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet324", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet325", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet326", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet327", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet328", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet329", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet330", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet331", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet332", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet333", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet334", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet335", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet336", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet337", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet338", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet339", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet340", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet341", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet342", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet343", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet344", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet345", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet346", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet347", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet348", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet349", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet350", + "Stake": 0.1, + "Online": true + 
}, + { + "Name": "Wallet351", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet352", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet353", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet354", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet355", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet356", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet357", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet358", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet359", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet360", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet361", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet362", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet363", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet364", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet365", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet366", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet367", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet368", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet369", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet370", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet371", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet372", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet373", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet374", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet375", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet376", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet377", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet378", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet379", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet380", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet381", + "Stake": 0.1, + 
"Online": true + }, + { + "Name": "Wallet382", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet383", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet384", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet385", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet386", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet387", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet388", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet389", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet390", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet391", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet392", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet393", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet394", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet395", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet396", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet397", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet398", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet399", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet400", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet401", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet402", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet403", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet404", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet405", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet406", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet407", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet408", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet409", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet410", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet411", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet412", + 
"Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet413", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet414", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet415", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet416", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet417", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet418", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet419", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet420", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet421", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet422", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet423", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet424", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet425", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet426", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet427", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet428", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet429", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet430", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet431", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet432", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet433", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet434", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet435", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet436", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet437", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet438", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet439", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet440", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet441", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet442", + "Stake": 0.1, + "Online": true + }, + { + "Name": 
"Wallet443", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet444", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet445", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet446", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet447", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet448", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet449", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet450", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet451", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet452", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet453", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet454", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet455", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet456", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet457", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet458", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet459", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet460", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet461", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet462", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet463", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet464", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet465", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet466", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet467", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet468", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet469", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet470", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet471", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet472", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet473", + "Stake": 0.1, + "Online": true + 
}, + { + "Name": "Wallet474", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet475", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet476", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet477", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet478", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet479", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet480", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet481", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet482", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet483", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet484", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet485", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet486", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet487", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet488", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet489", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet490", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet491", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet492", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet493", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet494", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet495", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet496", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet497", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet498", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet499", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet500", + "Stake": 0.1, + "Online": true + }, + { + "Name": "Wallet501", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet502", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet503", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet504", + "Stake": 0.1, 
+ "Online": false + }, + { + "Name": "Wallet505", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet506", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet507", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet508", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet509", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet510", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet511", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet512", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet513", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet514", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet515", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet516", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet517", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet518", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet519", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet520", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet521", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet522", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet523", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet524", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet525", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet526", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet527", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet528", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet529", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet530", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet531", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet532", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet533", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet534", + "Stake": 0.1, + "Online": false + 
}, + { + "Name": "Wallet535", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet536", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet537", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet538", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet539", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet540", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet541", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet542", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet543", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet544", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet545", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet546", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet547", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet548", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet549", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet550", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet551", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet552", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet553", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet554", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet555", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet556", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet557", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet558", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet559", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet560", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet561", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet562", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet563", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet564", + "Stake": 0.1, + "Online": false + }, + { + "Name": 
"Wallet565", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet566", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet567", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet568", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet569", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet570", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet571", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet572", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet573", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet574", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet575", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet576", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet577", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet578", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet579", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet580", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet581", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet582", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet583", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet584", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet585", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet586", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet587", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet588", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet589", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet590", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet591", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet592", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet593", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet594", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet595", + 
"Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet596", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet597", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet598", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet599", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet600", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet601", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet602", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet603", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet604", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet605", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet606", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet607", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet608", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet609", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet610", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet611", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet612", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet613", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet614", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet615", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet616", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet617", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet618", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet619", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet620", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet621", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet622", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet623", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet624", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet625", + "Stake": 0.1, + 
"Online": false + }, + { + "Name": "Wallet626", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet627", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet628", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet629", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet630", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet631", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet632", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet633", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet634", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet635", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet636", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet637", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet638", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet639", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet640", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet641", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet642", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet643", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet644", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet645", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet646", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet647", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet648", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet649", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet650", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet651", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet652", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet653", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet654", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet655", + "Stake": 0.1, + "Online": false + 
}, + { + "Name": "Wallet656", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet657", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet658", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet659", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet660", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet661", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet662", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet663", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet664", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet665", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet666", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet667", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet668", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet669", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet670", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet671", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet672", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet673", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet674", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet675", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet676", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet677", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet678", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet679", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet680", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet681", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet682", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet683", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet684", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet685", + "Stake": 0.1, + "Online": false + }, + { + "Name": 
"Wallet686", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet687", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet688", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet689", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet690", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet691", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet692", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet693", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet694", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet695", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet696", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet697", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet698", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet699", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet700", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet701", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet702", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet703", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet704", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet705", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet706", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet707", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet708", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet709", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet710", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet711", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet712", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet713", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet714", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet715", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet716", + 
"Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet717", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet718", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet719", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet720", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet721", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet722", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet723", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet724", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet725", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet726", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet727", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet728", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet729", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet730", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet731", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet732", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet733", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet734", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet735", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet736", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet737", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet738", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet739", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet740", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet741", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet742", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet743", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet744", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet745", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet746", + "Stake": 0.1, + 
"Online": false + }, + { + "Name": "Wallet747", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet748", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet749", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet750", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet751", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet752", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet753", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet754", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet755", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet756", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet757", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet758", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet759", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet760", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet761", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet762", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet763", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet764", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet765", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet766", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet767", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet768", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet769", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet770", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet771", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet772", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet773", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet774", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet775", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet776", + "Stake": 0.1, + "Online": false + 
}, + { + "Name": "Wallet777", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet778", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet779", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet780", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet781", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet782", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet783", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet784", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet785", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet786", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet787", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet788", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet789", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet790", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet791", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet792", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet793", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet794", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet795", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet796", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet797", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet798", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet799", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet800", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet801", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet802", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet803", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet804", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet805", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet806", + "Stake": 0.1, + "Online": false + }, + { + "Name": 
"Wallet807", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet808", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet809", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet810", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet811", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet812", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet813", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet814", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet815", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet816", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet817", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet818", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet819", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet820", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet821", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet822", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet823", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet824", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet825", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet826", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet827", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet828", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet829", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet830", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet831", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet832", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet833", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet834", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet835", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet836", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet837", + 
"Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet838", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet839", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet840", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet841", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet842", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet843", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet844", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet845", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet846", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet847", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet848", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet849", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet850", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet851", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet852", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet853", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet854", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet855", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet856", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet857", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet858", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet859", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet860", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet861", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet862", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet863", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet864", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet865", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet866", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet867", + "Stake": 0.1, + 
"Online": false + }, + { + "Name": "Wallet868", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet869", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet870", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet871", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet872", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet873", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet874", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet875", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet876", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet877", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet878", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet879", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet880", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet881", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet882", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet883", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet884", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet885", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet886", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet887", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet888", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet889", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet890", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet891", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet892", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet893", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet894", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet895", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet896", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet897", + "Stake": 0.1, + "Online": false + 
}, + { + "Name": "Wallet898", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet899", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet900", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet901", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet902", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet903", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet904", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet905", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet906", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet907", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet908", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet909", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet910", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet911", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet912", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet913", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet914", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet915", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet916", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet917", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet918", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet919", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet920", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet921", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet922", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet923", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet924", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet925", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet926", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet927", + "Stake": 0.1, + "Online": false + }, + { + "Name": 
"Wallet928", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet929", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet930", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet931", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet932", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet933", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet934", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet935", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet936", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet937", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet938", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet939", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet940", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet941", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet942", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet943", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet944", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet945", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet946", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet947", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet948", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet949", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet950", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet951", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet952", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet953", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet954", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet955", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet956", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet957", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet958", + 
"Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet959", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet960", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet961", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet962", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet963", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet964", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet965", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet966", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet967", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet968", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet969", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet970", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet971", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet972", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet973", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet974", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet975", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet976", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet977", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet978", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet979", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet980", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet981", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet982", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet983", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet984", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet985", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet986", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet987", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet988", + "Stake": 0.1, + 
"Online": false + }, + { + "Name": "Wallet989", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet990", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet991", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet992", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet993", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet994", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet995", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet996", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet997", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet998", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet999", + "Stake": 0.1, + "Online": false + }, + { + "Name": "Wallet1000", + "Stake": 0.1, + "Online": false + } + ] + }, + "Nodes": [ + { + "Name": "Relay", + "IsRelay": true, + "Wallets": [] + }, + { + "Name": "Node1", + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + }, + { + "Name": "Wallet2", + "ParticipationOnly": false + }, + { + "Name": "Wallet3", + "ParticipationOnly": false + }, + { + "Name": "Wallet4", + "ParticipationOnly": false + }, + { + "Name": "Wallet5", + "ParticipationOnly": false + }, + { + "Name": "Wallet6", + "ParticipationOnly": false + }, + { + "Name": "Wallet7", + "ParticipationOnly": false + }, + { + "Name": "Wallet8", + "ParticipationOnly": false + }, + { + "Name": "Wallet9", + "ParticipationOnly": false + }, + { + "Name": "Wallet10", + "ParticipationOnly": false + }, + { + "Name": "Wallet11", + "ParticipationOnly": false + }, + { + "Name": "Wallet12", + "ParticipationOnly": false + }, + { + "Name": "Wallet13", + "ParticipationOnly": false + }, + { + "Name": "Wallet14", + "ParticipationOnly": false + }, + { + "Name": "Wallet15", + "ParticipationOnly": false + }, + { + "Name": "Wallet16", + "ParticipationOnly": false + }, + { + "Name": "Wallet17", + "ParticipationOnly": false + }, + { + "Name": "Wallet18", + "ParticipationOnly": 
false + }, + { + "Name": "Wallet19", + "ParticipationOnly": false + }, + { + "Name": "Wallet20", + "ParticipationOnly": false + }, + { + "Name": "Wallet21", + "ParticipationOnly": false + }, + { + "Name": "Wallet22", + "ParticipationOnly": false + }, + { + "Name": "Wallet23", + "ParticipationOnly": false + }, + { + "Name": "Wallet24", + "ParticipationOnly": false + }, + { + "Name": "Wallet25", + "ParticipationOnly": false + }, + { + "Name": "Wallet26", + "ParticipationOnly": false + }, + { + "Name": "Wallet27", + "ParticipationOnly": false + }, + { + "Name": "Wallet28", + "ParticipationOnly": false + }, + { + "Name": "Wallet29", + "ParticipationOnly": false + }, + { + "Name": "Wallet30", + "ParticipationOnly": false + }, + { + "Name": "Wallet31", + "ParticipationOnly": false + }, + { + "Name": "Wallet32", + "ParticipationOnly": false + }, + { + "Name": "Wallet33", + "ParticipationOnly": false + }, + { + "Name": "Wallet34", + "ParticipationOnly": false + }, + { + "Name": "Wallet35", + "ParticipationOnly": false + }, + { + "Name": "Wallet36", + "ParticipationOnly": false + }, + { + "Name": "Wallet37", + "ParticipationOnly": false + }, + { + "Name": "Wallet38", + "ParticipationOnly": false + }, + { + "Name": "Wallet39", + "ParticipationOnly": false + }, + { + "Name": "Wallet40", + "ParticipationOnly": false + }, + { + "Name": "Wallet41", + "ParticipationOnly": false + }, + { + "Name": "Wallet42", + "ParticipationOnly": false + }, + { + "Name": "Wallet43", + "ParticipationOnly": false + }, + { + "Name": "Wallet44", + "ParticipationOnly": false + }, + { + "Name": "Wallet45", + "ParticipationOnly": false + }, + { + "Name": "Wallet46", + "ParticipationOnly": false + }, + { + "Name": "Wallet47", + "ParticipationOnly": false + }, + { + "Name": "Wallet48", + "ParticipationOnly": false + }, + { + "Name": "Wallet49", + "ParticipationOnly": false + }, + { + "Name": "Wallet50", + "ParticipationOnly": false + }, + { + "Name": "Wallet51", + "ParticipationOnly": false + }, + { + 
"Name": "Wallet52", + "ParticipationOnly": false + }, + { + "Name": "Wallet53", + "ParticipationOnly": false + }, + { + "Name": "Wallet54", + "ParticipationOnly": false + }, + { + "Name": "Wallet55", + "ParticipationOnly": false + }, + { + "Name": "Wallet56", + "ParticipationOnly": false + }, + { + "Name": "Wallet57", + "ParticipationOnly": false + }, + { + "Name": "Wallet58", + "ParticipationOnly": false + }, + { + "Name": "Wallet59", + "ParticipationOnly": false + }, + { + "Name": "Wallet60", + "ParticipationOnly": false + }, + { + "Name": "Wallet61", + "ParticipationOnly": false + }, + { + "Name": "Wallet62", + "ParticipationOnly": false + }, + { + "Name": "Wallet63", + "ParticipationOnly": false + }, + { + "Name": "Wallet64", + "ParticipationOnly": false + }, + { + "Name": "Wallet65", + "ParticipationOnly": false + }, + { + "Name": "Wallet66", + "ParticipationOnly": false + }, + { + "Name": "Wallet67", + "ParticipationOnly": false + }, + { + "Name": "Wallet68", + "ParticipationOnly": false + }, + { + "Name": "Wallet69", + "ParticipationOnly": false + }, + { + "Name": "Wallet70", + "ParticipationOnly": false + }, + { + "Name": "Wallet71", + "ParticipationOnly": false + }, + { + "Name": "Wallet72", + "ParticipationOnly": false + }, + { + "Name": "Wallet73", + "ParticipationOnly": false + }, + { + "Name": "Wallet74", + "ParticipationOnly": false + }, + { + "Name": "Wallet75", + "ParticipationOnly": false + }, + { + "Name": "Wallet76", + "ParticipationOnly": false + }, + { + "Name": "Wallet77", + "ParticipationOnly": false + }, + { + "Name": "Wallet78", + "ParticipationOnly": false + }, + { + "Name": "Wallet79", + "ParticipationOnly": false + }, + { + "Name": "Wallet80", + "ParticipationOnly": false + }, + { + "Name": "Wallet81", + "ParticipationOnly": false + }, + { + "Name": "Wallet82", + "ParticipationOnly": false + }, + { + "Name": "Wallet83", + "ParticipationOnly": false + }, + { + "Name": "Wallet84", + "ParticipationOnly": false + }, + { + "Name": "Wallet85", 
+ "ParticipationOnly": false + }, + { + "Name": "Wallet86", + "ParticipationOnly": false + }, + { + "Name": "Wallet87", + "ParticipationOnly": false + }, + { + "Name": "Wallet88", + "ParticipationOnly": false + }, + { + "Name": "Wallet89", + "ParticipationOnly": false + }, + { + "Name": "Wallet90", + "ParticipationOnly": false + }, + { + "Name": "Wallet91", + "ParticipationOnly": false + }, + { + "Name": "Wallet92", + "ParticipationOnly": false + }, + { + "Name": "Wallet93", + "ParticipationOnly": false + }, + { + "Name": "Wallet94", + "ParticipationOnly": false + }, + { + "Name": "Wallet95", + "ParticipationOnly": false + }, + { + "Name": "Wallet96", + "ParticipationOnly": false + }, + { + "Name": "Wallet97", + "ParticipationOnly": false + }, + { + "Name": "Wallet98", + "ParticipationOnly": false + }, + { + "Name": "Wallet99", + "ParticipationOnly": false + }, + { + "Name": "Wallet100", + "ParticipationOnly": false + }, + { + "Name": "Wallet101", + "ParticipationOnly": false + }, + { + "Name": "Wallet102", + "ParticipationOnly": false + }, + { + "Name": "Wallet103", + "ParticipationOnly": false + }, + { + "Name": "Wallet104", + "ParticipationOnly": false + }, + { + "Name": "Wallet105", + "ParticipationOnly": false + }, + { + "Name": "Wallet106", + "ParticipationOnly": false + }, + { + "Name": "Wallet107", + "ParticipationOnly": false + }, + { + "Name": "Wallet108", + "ParticipationOnly": false + }, + { + "Name": "Wallet109", + "ParticipationOnly": false + }, + { + "Name": "Wallet110", + "ParticipationOnly": false + }, + { + "Name": "Wallet111", + "ParticipationOnly": false + }, + { + "Name": "Wallet112", + "ParticipationOnly": false + }, + { + "Name": "Wallet113", + "ParticipationOnly": false + }, + { + "Name": "Wallet114", + "ParticipationOnly": false + }, + { + "Name": "Wallet115", + "ParticipationOnly": false + }, + { + "Name": "Wallet116", + "ParticipationOnly": false + }, + { + "Name": "Wallet117", + "ParticipationOnly": false + }, + { + "Name": "Wallet118", 
+ "ParticipationOnly": false + }, + { + "Name": "Wallet119", + "ParticipationOnly": false + }, + { + "Name": "Wallet120", + "ParticipationOnly": false + }, + { + "Name": "Wallet121", + "ParticipationOnly": false + }, + { + "Name": "Wallet122", + "ParticipationOnly": false + }, + { + "Name": "Wallet123", + "ParticipationOnly": false + }, + { + "Name": "Wallet124", + "ParticipationOnly": false + }, + { + "Name": "Wallet125", + "ParticipationOnly": false + }, + { + "Name": "Wallet126", + "ParticipationOnly": false + }, + { + "Name": "Wallet127", + "ParticipationOnly": false + }, + { + "Name": "Wallet128", + "ParticipationOnly": false + }, + { + "Name": "Wallet129", + "ParticipationOnly": false + }, + { + "Name": "Wallet130", + "ParticipationOnly": false + }, + { + "Name": "Wallet131", + "ParticipationOnly": false + }, + { + "Name": "Wallet132", + "ParticipationOnly": false + }, + { + "Name": "Wallet133", + "ParticipationOnly": false + }, + { + "Name": "Wallet134", + "ParticipationOnly": false + }, + { + "Name": "Wallet135", + "ParticipationOnly": false + }, + { + "Name": "Wallet136", + "ParticipationOnly": false + }, + { + "Name": "Wallet137", + "ParticipationOnly": false + }, + { + "Name": "Wallet138", + "ParticipationOnly": false + }, + { + "Name": "Wallet139", + "ParticipationOnly": false + }, + { + "Name": "Wallet140", + "ParticipationOnly": false + }, + { + "Name": "Wallet141", + "ParticipationOnly": false + }, + { + "Name": "Wallet142", + "ParticipationOnly": false + }, + { + "Name": "Wallet143", + "ParticipationOnly": false + }, + { + "Name": "Wallet144", + "ParticipationOnly": false + }, + { + "Name": "Wallet145", + "ParticipationOnly": false + }, + { + "Name": "Wallet146", + "ParticipationOnly": false + }, + { + "Name": "Wallet147", + "ParticipationOnly": false + }, + { + "Name": "Wallet148", + "ParticipationOnly": false + }, + { + "Name": "Wallet149", + "ParticipationOnly": false + }, + { + "Name": "Wallet150", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet151", + "ParticipationOnly": false + }, + { + "Name": "Wallet152", + "ParticipationOnly": false + }, + { + "Name": "Wallet153", + "ParticipationOnly": false + }, + { + "Name": "Wallet154", + "ParticipationOnly": false + }, + { + "Name": "Wallet155", + "ParticipationOnly": false + }, + { + "Name": "Wallet156", + "ParticipationOnly": false + }, + { + "Name": "Wallet157", + "ParticipationOnly": false + }, + { + "Name": "Wallet158", + "ParticipationOnly": false + }, + { + "Name": "Wallet159", + "ParticipationOnly": false + }, + { + "Name": "Wallet160", + "ParticipationOnly": false + }, + { + "Name": "Wallet161", + "ParticipationOnly": false + }, + { + "Name": "Wallet162", + "ParticipationOnly": false + }, + { + "Name": "Wallet163", + "ParticipationOnly": false + }, + { + "Name": "Wallet164", + "ParticipationOnly": false + }, + { + "Name": "Wallet165", + "ParticipationOnly": false + }, + { + "Name": "Wallet166", + "ParticipationOnly": false + }, + { + "Name": "Wallet167", + "ParticipationOnly": false + }, + { + "Name": "Wallet168", + "ParticipationOnly": false + }, + { + "Name": "Wallet169", + "ParticipationOnly": false + }, + { + "Name": "Wallet170", + "ParticipationOnly": false + }, + { + "Name": "Wallet171", + "ParticipationOnly": false + }, + { + "Name": "Wallet172", + "ParticipationOnly": false + }, + { + "Name": "Wallet173", + "ParticipationOnly": false + }, + { + "Name": "Wallet174", + "ParticipationOnly": false + }, + { + "Name": "Wallet175", + "ParticipationOnly": false + }, + { + "Name": "Wallet176", + "ParticipationOnly": false + }, + { + "Name": "Wallet177", + "ParticipationOnly": false + }, + { + "Name": "Wallet178", + "ParticipationOnly": false + }, + { + "Name": "Wallet179", + "ParticipationOnly": false + }, + { + "Name": "Wallet180", + "ParticipationOnly": false + }, + { + "Name": "Wallet181", + "ParticipationOnly": false + }, + { + "Name": "Wallet182", + "ParticipationOnly": false + }, + { + "Name": "Wallet183", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet184", + "ParticipationOnly": false + }, + { + "Name": "Wallet185", + "ParticipationOnly": false + }, + { + "Name": "Wallet186", + "ParticipationOnly": false + }, + { + "Name": "Wallet187", + "ParticipationOnly": false + }, + { + "Name": "Wallet188", + "ParticipationOnly": false + }, + { + "Name": "Wallet189", + "ParticipationOnly": false + }, + { + "Name": "Wallet190", + "ParticipationOnly": false + }, + { + "Name": "Wallet191", + "ParticipationOnly": false + }, + { + "Name": "Wallet192", + "ParticipationOnly": false + }, + { + "Name": "Wallet193", + "ParticipationOnly": false + }, + { + "Name": "Wallet194", + "ParticipationOnly": false + }, + { + "Name": "Wallet195", + "ParticipationOnly": false + }, + { + "Name": "Wallet196", + "ParticipationOnly": false + }, + { + "Name": "Wallet197", + "ParticipationOnly": false + }, + { + "Name": "Wallet198", + "ParticipationOnly": false + }, + { + "Name": "Wallet199", + "ParticipationOnly": false + }, + { + "Name": "Wallet200", + "ParticipationOnly": false + }, + { + "Name": "Wallet201", + "ParticipationOnly": false + }, + { + "Name": "Wallet202", + "ParticipationOnly": false + }, + { + "Name": "Wallet203", + "ParticipationOnly": false + }, + { + "Name": "Wallet204", + "ParticipationOnly": false + }, + { + "Name": "Wallet205", + "ParticipationOnly": false + }, + { + "Name": "Wallet206", + "ParticipationOnly": false + }, + { + "Name": "Wallet207", + "ParticipationOnly": false + }, + { + "Name": "Wallet208", + "ParticipationOnly": false + }, + { + "Name": "Wallet209", + "ParticipationOnly": false + }, + { + "Name": "Wallet210", + "ParticipationOnly": false + }, + { + "Name": "Wallet211", + "ParticipationOnly": false + }, + { + "Name": "Wallet212", + "ParticipationOnly": false + }, + { + "Name": "Wallet213", + "ParticipationOnly": false + }, + { + "Name": "Wallet214", + "ParticipationOnly": false + }, + { + "Name": "Wallet215", + "ParticipationOnly": false + }, + { + "Name": "Wallet216", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet217", + "ParticipationOnly": false + }, + { + "Name": "Wallet218", + "ParticipationOnly": false + }, + { + "Name": "Wallet219", + "ParticipationOnly": false + }, + { + "Name": "Wallet220", + "ParticipationOnly": false + }, + { + "Name": "Wallet221", + "ParticipationOnly": false + }, + { + "Name": "Wallet222", + "ParticipationOnly": false + }, + { + "Name": "Wallet223", + "ParticipationOnly": false + }, + { + "Name": "Wallet224", + "ParticipationOnly": false + }, + { + "Name": "Wallet225", + "ParticipationOnly": false + }, + { + "Name": "Wallet226", + "ParticipationOnly": false + }, + { + "Name": "Wallet227", + "ParticipationOnly": false + }, + { + "Name": "Wallet228", + "ParticipationOnly": false + }, + { + "Name": "Wallet229", + "ParticipationOnly": false + }, + { + "Name": "Wallet230", + "ParticipationOnly": false + }, + { + "Name": "Wallet231", + "ParticipationOnly": false + }, + { + "Name": "Wallet232", + "ParticipationOnly": false + }, + { + "Name": "Wallet233", + "ParticipationOnly": false + }, + { + "Name": "Wallet234", + "ParticipationOnly": false + }, + { + "Name": "Wallet235", + "ParticipationOnly": false + }, + { + "Name": "Wallet236", + "ParticipationOnly": false + }, + { + "Name": "Wallet237", + "ParticipationOnly": false + }, + { + "Name": "Wallet238", + "ParticipationOnly": false + }, + { + "Name": "Wallet239", + "ParticipationOnly": false + }, + { + "Name": "Wallet240", + "ParticipationOnly": false + }, + { + "Name": "Wallet241", + "ParticipationOnly": false + }, + { + "Name": "Wallet242", + "ParticipationOnly": false + }, + { + "Name": "Wallet243", + "ParticipationOnly": false + }, + { + "Name": "Wallet244", + "ParticipationOnly": false + }, + { + "Name": "Wallet245", + "ParticipationOnly": false + }, + { + "Name": "Wallet246", + "ParticipationOnly": false + }, + { + "Name": "Wallet247", + "ParticipationOnly": false + }, + { + "Name": "Wallet248", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet249", + "ParticipationOnly": false + }, + { + "Name": "Wallet250", + "ParticipationOnly": false + }, + { + "Name": "Wallet251", + "ParticipationOnly": false + }, + { + "Name": "Wallet252", + "ParticipationOnly": false + }, + { + "Name": "Wallet253", + "ParticipationOnly": false + }, + { + "Name": "Wallet254", + "ParticipationOnly": false + }, + { + "Name": "Wallet255", + "ParticipationOnly": false + }, + { + "Name": "Wallet256", + "ParticipationOnly": false + }, + { + "Name": "Wallet257", + "ParticipationOnly": false + }, + { + "Name": "Wallet258", + "ParticipationOnly": false + }, + { + "Name": "Wallet259", + "ParticipationOnly": false + }, + { + "Name": "Wallet260", + "ParticipationOnly": false + }, + { + "Name": "Wallet261", + "ParticipationOnly": false + }, + { + "Name": "Wallet262", + "ParticipationOnly": false + }, + { + "Name": "Wallet263", + "ParticipationOnly": false + }, + { + "Name": "Wallet264", + "ParticipationOnly": false + }, + { + "Name": "Wallet265", + "ParticipationOnly": false + }, + { + "Name": "Wallet266", + "ParticipationOnly": false + }, + { + "Name": "Wallet267", + "ParticipationOnly": false + }, + { + "Name": "Wallet268", + "ParticipationOnly": false + }, + { + "Name": "Wallet269", + "ParticipationOnly": false + }, + { + "Name": "Wallet270", + "ParticipationOnly": false + }, + { + "Name": "Wallet271", + "ParticipationOnly": false + }, + { + "Name": "Wallet272", + "ParticipationOnly": false + }, + { + "Name": "Wallet273", + "ParticipationOnly": false + }, + { + "Name": "Wallet274", + "ParticipationOnly": false + }, + { + "Name": "Wallet275", + "ParticipationOnly": false + }, + { + "Name": "Wallet276", + "ParticipationOnly": false + }, + { + "Name": "Wallet277", + "ParticipationOnly": false + }, + { + "Name": "Wallet278", + "ParticipationOnly": false + }, + { + "Name": "Wallet279", + "ParticipationOnly": false + }, + { + "Name": "Wallet280", + "ParticipationOnly": false + }, + { + "Name": "Wallet281", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet282", + "ParticipationOnly": false + }, + { + "Name": "Wallet283", + "ParticipationOnly": false + }, + { + "Name": "Wallet284", + "ParticipationOnly": false + }, + { + "Name": "Wallet285", + "ParticipationOnly": false + }, + { + "Name": "Wallet286", + "ParticipationOnly": false + }, + { + "Name": "Wallet287", + "ParticipationOnly": false + }, + { + "Name": "Wallet288", + "ParticipationOnly": false + }, + { + "Name": "Wallet289", + "ParticipationOnly": false + }, + { + "Name": "Wallet290", + "ParticipationOnly": false + }, + { + "Name": "Wallet291", + "ParticipationOnly": false + }, + { + "Name": "Wallet292", + "ParticipationOnly": false + }, + { + "Name": "Wallet293", + "ParticipationOnly": false + }, + { + "Name": "Wallet294", + "ParticipationOnly": false + }, + { + "Name": "Wallet295", + "ParticipationOnly": false + }, + { + "Name": "Wallet296", + "ParticipationOnly": false + }, + { + "Name": "Wallet297", + "ParticipationOnly": false + }, + { + "Name": "Wallet298", + "ParticipationOnly": false + }, + { + "Name": "Wallet299", + "ParticipationOnly": false + }, + { + "Name": "Wallet300", + "ParticipationOnly": false + }, + { + "Name": "Wallet301", + "ParticipationOnly": false + }, + { + "Name": "Wallet302", + "ParticipationOnly": false + }, + { + "Name": "Wallet303", + "ParticipationOnly": false + }, + { + "Name": "Wallet304", + "ParticipationOnly": false + }, + { + "Name": "Wallet305", + "ParticipationOnly": false + }, + { + "Name": "Wallet306", + "ParticipationOnly": false + }, + { + "Name": "Wallet307", + "ParticipationOnly": false + }, + { + "Name": "Wallet308", + "ParticipationOnly": false + }, + { + "Name": "Wallet309", + "ParticipationOnly": false + }, + { + "Name": "Wallet310", + "ParticipationOnly": false + }, + { + "Name": "Wallet311", + "ParticipationOnly": false + }, + { + "Name": "Wallet312", + "ParticipationOnly": false + }, + { + "Name": "Wallet313", + "ParticipationOnly": false + }, + { + "Name": "Wallet314", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet315", + "ParticipationOnly": false + }, + { + "Name": "Wallet316", + "ParticipationOnly": false + }, + { + "Name": "Wallet317", + "ParticipationOnly": false + }, + { + "Name": "Wallet318", + "ParticipationOnly": false + }, + { + "Name": "Wallet319", + "ParticipationOnly": false + }, + { + "Name": "Wallet320", + "ParticipationOnly": false + }, + { + "Name": "Wallet321", + "ParticipationOnly": false + }, + { + "Name": "Wallet322", + "ParticipationOnly": false + }, + { + "Name": "Wallet323", + "ParticipationOnly": false + }, + { + "Name": "Wallet324", + "ParticipationOnly": false + }, + { + "Name": "Wallet325", + "ParticipationOnly": false + }, + { + "Name": "Wallet326", + "ParticipationOnly": false + }, + { + "Name": "Wallet327", + "ParticipationOnly": false + }, + { + "Name": "Wallet328", + "ParticipationOnly": false + }, + { + "Name": "Wallet329", + "ParticipationOnly": false + }, + { + "Name": "Wallet330", + "ParticipationOnly": false + }, + { + "Name": "Wallet331", + "ParticipationOnly": false + }, + { + "Name": "Wallet332", + "ParticipationOnly": false + }, + { + "Name": "Wallet333", + "ParticipationOnly": false + }, + { + "Name": "Wallet334", + "ParticipationOnly": false + }, + { + "Name": "Wallet335", + "ParticipationOnly": false + }, + { + "Name": "Wallet336", + "ParticipationOnly": false + }, + { + "Name": "Wallet337", + "ParticipationOnly": false + }, + { + "Name": "Wallet338", + "ParticipationOnly": false + }, + { + "Name": "Wallet339", + "ParticipationOnly": false + }, + { + "Name": "Wallet340", + "ParticipationOnly": false + }, + { + "Name": "Wallet341", + "ParticipationOnly": false + }, + { + "Name": "Wallet342", + "ParticipationOnly": false + }, + { + "Name": "Wallet343", + "ParticipationOnly": false + }, + { + "Name": "Wallet344", + "ParticipationOnly": false + }, + { + "Name": "Wallet345", + "ParticipationOnly": false + }, + { + "Name": "Wallet346", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet347", + "ParticipationOnly": false + }, + { + "Name": "Wallet348", + "ParticipationOnly": false + }, + { + "Name": "Wallet349", + "ParticipationOnly": false + }, + { + "Name": "Wallet350", + "ParticipationOnly": false + }, + { + "Name": "Wallet351", + "ParticipationOnly": false + }, + { + "Name": "Wallet352", + "ParticipationOnly": false + }, + { + "Name": "Wallet353", + "ParticipationOnly": false + }, + { + "Name": "Wallet354", + "ParticipationOnly": false + }, + { + "Name": "Wallet355", + "ParticipationOnly": false + }, + { + "Name": "Wallet356", + "ParticipationOnly": false + }, + { + "Name": "Wallet357", + "ParticipationOnly": false + }, + { + "Name": "Wallet358", + "ParticipationOnly": false + }, + { + "Name": "Wallet359", + "ParticipationOnly": false + }, + { + "Name": "Wallet360", + "ParticipationOnly": false + }, + { + "Name": "Wallet361", + "ParticipationOnly": false + }, + { + "Name": "Wallet362", + "ParticipationOnly": false + }, + { + "Name": "Wallet363", + "ParticipationOnly": false + }, + { + "Name": "Wallet364", + "ParticipationOnly": false + }, + { + "Name": "Wallet365", + "ParticipationOnly": false + }, + { + "Name": "Wallet366", + "ParticipationOnly": false + }, + { + "Name": "Wallet367", + "ParticipationOnly": false + }, + { + "Name": "Wallet368", + "ParticipationOnly": false + }, + { + "Name": "Wallet369", + "ParticipationOnly": false + }, + { + "Name": "Wallet370", + "ParticipationOnly": false + }, + { + "Name": "Wallet371", + "ParticipationOnly": false + }, + { + "Name": "Wallet372", + "ParticipationOnly": false + }, + { + "Name": "Wallet373", + "ParticipationOnly": false + }, + { + "Name": "Wallet374", + "ParticipationOnly": false + }, + { + "Name": "Wallet375", + "ParticipationOnly": false + }, + { + "Name": "Wallet376", + "ParticipationOnly": false + }, + { + "Name": "Wallet377", + "ParticipationOnly": false + }, + { + "Name": "Wallet378", + "ParticipationOnly": false + }, + { + "Name": "Wallet379", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet380", + "ParticipationOnly": false + }, + { + "Name": "Wallet381", + "ParticipationOnly": false + }, + { + "Name": "Wallet382", + "ParticipationOnly": false + }, + { + "Name": "Wallet383", + "ParticipationOnly": false + }, + { + "Name": "Wallet384", + "ParticipationOnly": false + }, + { + "Name": "Wallet385", + "ParticipationOnly": false + }, + { + "Name": "Wallet386", + "ParticipationOnly": false + }, + { + "Name": "Wallet387", + "ParticipationOnly": false + }, + { + "Name": "Wallet388", + "ParticipationOnly": false + }, + { + "Name": "Wallet389", + "ParticipationOnly": false + }, + { + "Name": "Wallet390", + "ParticipationOnly": false + }, + { + "Name": "Wallet391", + "ParticipationOnly": false + }, + { + "Name": "Wallet392", + "ParticipationOnly": false + }, + { + "Name": "Wallet393", + "ParticipationOnly": false + }, + { + "Name": "Wallet394", + "ParticipationOnly": false + }, + { + "Name": "Wallet395", + "ParticipationOnly": false + }, + { + "Name": "Wallet396", + "ParticipationOnly": false + }, + { + "Name": "Wallet397", + "ParticipationOnly": false + }, + { + "Name": "Wallet398", + "ParticipationOnly": false + }, + { + "Name": "Wallet399", + "ParticipationOnly": false + }, + { + "Name": "Wallet400", + "ParticipationOnly": false + }, + { + "Name": "Wallet401", + "ParticipationOnly": false + }, + { + "Name": "Wallet402", + "ParticipationOnly": false + }, + { + "Name": "Wallet403", + "ParticipationOnly": false + }, + { + "Name": "Wallet404", + "ParticipationOnly": false + }, + { + "Name": "Wallet405", + "ParticipationOnly": false + }, + { + "Name": "Wallet406", + "ParticipationOnly": false + }, + { + "Name": "Wallet407", + "ParticipationOnly": false + }, + { + "Name": "Wallet408", + "ParticipationOnly": false + }, + { + "Name": "Wallet409", + "ParticipationOnly": false + }, + { + "Name": "Wallet410", + "ParticipationOnly": false + }, + { + "Name": "Wallet411", + "ParticipationOnly": false + }, + { + "Name": "Wallet412", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet413", + "ParticipationOnly": false + }, + { + "Name": "Wallet414", + "ParticipationOnly": false + }, + { + "Name": "Wallet415", + "ParticipationOnly": false + }, + { + "Name": "Wallet416", + "ParticipationOnly": false + }, + { + "Name": "Wallet417", + "ParticipationOnly": false + }, + { + "Name": "Wallet418", + "ParticipationOnly": false + }, + { + "Name": "Wallet419", + "ParticipationOnly": false + }, + { + "Name": "Wallet420", + "ParticipationOnly": false + }, + { + "Name": "Wallet421", + "ParticipationOnly": false + }, + { + "Name": "Wallet422", + "ParticipationOnly": false + }, + { + "Name": "Wallet423", + "ParticipationOnly": false + }, + { + "Name": "Wallet424", + "ParticipationOnly": false + }, + { + "Name": "Wallet425", + "ParticipationOnly": false + }, + { + "Name": "Wallet426", + "ParticipationOnly": false + }, + { + "Name": "Wallet427", + "ParticipationOnly": false + }, + { + "Name": "Wallet428", + "ParticipationOnly": false + }, + { + "Name": "Wallet429", + "ParticipationOnly": false + }, + { + "Name": "Wallet430", + "ParticipationOnly": false + }, + { + "Name": "Wallet431", + "ParticipationOnly": false + }, + { + "Name": "Wallet432", + "ParticipationOnly": false + }, + { + "Name": "Wallet433", + "ParticipationOnly": false + }, + { + "Name": "Wallet434", + "ParticipationOnly": false + }, + { + "Name": "Wallet435", + "ParticipationOnly": false + }, + { + "Name": "Wallet436", + "ParticipationOnly": false + }, + { + "Name": "Wallet437", + "ParticipationOnly": false + }, + { + "Name": "Wallet438", + "ParticipationOnly": false + }, + { + "Name": "Wallet439", + "ParticipationOnly": false + }, + { + "Name": "Wallet440", + "ParticipationOnly": false + }, + { + "Name": "Wallet441", + "ParticipationOnly": false + }, + { + "Name": "Wallet442", + "ParticipationOnly": false + }, + { + "Name": "Wallet443", + "ParticipationOnly": false + }, + { + "Name": "Wallet444", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet445", + "ParticipationOnly": false + }, + { + "Name": "Wallet446", + "ParticipationOnly": false + }, + { + "Name": "Wallet447", + "ParticipationOnly": false + }, + { + "Name": "Wallet448", + "ParticipationOnly": false + }, + { + "Name": "Wallet449", + "ParticipationOnly": false + }, + { + "Name": "Wallet450", + "ParticipationOnly": false + }, + { + "Name": "Wallet451", + "ParticipationOnly": false + }, + { + "Name": "Wallet452", + "ParticipationOnly": false + }, + { + "Name": "Wallet453", + "ParticipationOnly": false + }, + { + "Name": "Wallet454", + "ParticipationOnly": false + }, + { + "Name": "Wallet455", + "ParticipationOnly": false + }, + { + "Name": "Wallet456", + "ParticipationOnly": false + }, + { + "Name": "Wallet457", + "ParticipationOnly": false + }, + { + "Name": "Wallet458", + "ParticipationOnly": false + }, + { + "Name": "Wallet459", + "ParticipationOnly": false + }, + { + "Name": "Wallet460", + "ParticipationOnly": false + }, + { + "Name": "Wallet461", + "ParticipationOnly": false + }, + { + "Name": "Wallet462", + "ParticipationOnly": false + }, + { + "Name": "Wallet463", + "ParticipationOnly": false + }, + { + "Name": "Wallet464", + "ParticipationOnly": false + }, + { + "Name": "Wallet465", + "ParticipationOnly": false + }, + { + "Name": "Wallet466", + "ParticipationOnly": false + }, + { + "Name": "Wallet467", + "ParticipationOnly": false + }, + { + "Name": "Wallet468", + "ParticipationOnly": false + }, + { + "Name": "Wallet469", + "ParticipationOnly": false + }, + { + "Name": "Wallet470", + "ParticipationOnly": false + }, + { + "Name": "Wallet471", + "ParticipationOnly": false + }, + { + "Name": "Wallet472", + "ParticipationOnly": false + }, + { + "Name": "Wallet473", + "ParticipationOnly": false + }, + { + "Name": "Wallet474", + "ParticipationOnly": false + }, + { + "Name": "Wallet475", + "ParticipationOnly": false + }, + { + "Name": "Wallet476", + "ParticipationOnly": false + }, + { + "Name": "Wallet477", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet478", + "ParticipationOnly": false + }, + { + "Name": "Wallet479", + "ParticipationOnly": false + }, + { + "Name": "Wallet480", + "ParticipationOnly": false + }, + { + "Name": "Wallet481", + "ParticipationOnly": false + }, + { + "Name": "Wallet482", + "ParticipationOnly": false + }, + { + "Name": "Wallet483", + "ParticipationOnly": false + }, + { + "Name": "Wallet484", + "ParticipationOnly": false + }, + { + "Name": "Wallet485", + "ParticipationOnly": false + }, + { + "Name": "Wallet486", + "ParticipationOnly": false + }, + { + "Name": "Wallet487", + "ParticipationOnly": false + }, + { + "Name": "Wallet488", + "ParticipationOnly": false + }, + { + "Name": "Wallet489", + "ParticipationOnly": false + }, + { + "Name": "Wallet490", + "ParticipationOnly": false + }, + { + "Name": "Wallet491", + "ParticipationOnly": false + }, + { + "Name": "Wallet492", + "ParticipationOnly": false + }, + { + "Name": "Wallet493", + "ParticipationOnly": false + }, + { + "Name": "Wallet494", + "ParticipationOnly": false + }, + { + "Name": "Wallet495", + "ParticipationOnly": false + }, + { + "Name": "Wallet496", + "ParticipationOnly": false + }, + { + "Name": "Wallet497", + "ParticipationOnly": false + }, + { + "Name": "Wallet498", + "ParticipationOnly": false + }, + { + "Name": "Wallet499", + "ParticipationOnly": false + }, + { + "Name": "Wallet500", + "ParticipationOnly": false + } + ] + }, + { + "Name": "Node2", + "Wallets": [ + { + "Name": "Wallet501", + "ParticipationOnly": false + }, + { + "Name": "Wallet502", + "ParticipationOnly": false + }, + { + "Name": "Wallet503", + "ParticipationOnly": false + }, + { + "Name": "Wallet504", + "ParticipationOnly": false + }, + { + "Name": "Wallet505", + "ParticipationOnly": false + }, + { + "Name": "Wallet506", + "ParticipationOnly": false + }, + { + "Name": "Wallet507", + "ParticipationOnly": false + }, + { + "Name": "Wallet508", + "ParticipationOnly": false + }, + { + "Name": "Wallet509", + "ParticipationOnly": false + }, + 
{ + "Name": "Wallet510", + "ParticipationOnly": false + }, + { + "Name": "Wallet511", + "ParticipationOnly": false + }, + { + "Name": "Wallet512", + "ParticipationOnly": false + }, + { + "Name": "Wallet513", + "ParticipationOnly": false + }, + { + "Name": "Wallet514", + "ParticipationOnly": false + }, + { + "Name": "Wallet515", + "ParticipationOnly": false + }, + { + "Name": "Wallet516", + "ParticipationOnly": false + }, + { + "Name": "Wallet517", + "ParticipationOnly": false + }, + { + "Name": "Wallet518", + "ParticipationOnly": false + }, + { + "Name": "Wallet519", + "ParticipationOnly": false + }, + { + "Name": "Wallet520", + "ParticipationOnly": false + }, + { + "Name": "Wallet521", + "ParticipationOnly": false + }, + { + "Name": "Wallet522", + "ParticipationOnly": false + }, + { + "Name": "Wallet523", + "ParticipationOnly": false + }, + { + "Name": "Wallet524", + "ParticipationOnly": false + }, + { + "Name": "Wallet525", + "ParticipationOnly": false + }, + { + "Name": "Wallet526", + "ParticipationOnly": false + }, + { + "Name": "Wallet527", + "ParticipationOnly": false + }, + { + "Name": "Wallet528", + "ParticipationOnly": false + }, + { + "Name": "Wallet529", + "ParticipationOnly": false + }, + { + "Name": "Wallet530", + "ParticipationOnly": false + }, + { + "Name": "Wallet531", + "ParticipationOnly": false + }, + { + "Name": "Wallet532", + "ParticipationOnly": false + }, + { + "Name": "Wallet533", + "ParticipationOnly": false + }, + { + "Name": "Wallet534", + "ParticipationOnly": false + }, + { + "Name": "Wallet535", + "ParticipationOnly": false + }, + { + "Name": "Wallet536", + "ParticipationOnly": false + }, + { + "Name": "Wallet537", + "ParticipationOnly": false + }, + { + "Name": "Wallet538", + "ParticipationOnly": false + }, + { + "Name": "Wallet539", + "ParticipationOnly": false + }, + { + "Name": "Wallet540", + "ParticipationOnly": false + }, + { + "Name": "Wallet541", + "ParticipationOnly": false + }, + { + "Name": "Wallet542", + "ParticipationOnly": 
false + }, + { + "Name": "Wallet543", + "ParticipationOnly": false + }, + { + "Name": "Wallet544", + "ParticipationOnly": false + }, + { + "Name": "Wallet545", + "ParticipationOnly": false + }, + { + "Name": "Wallet546", + "ParticipationOnly": false + }, + { + "Name": "Wallet547", + "ParticipationOnly": false + }, + { + "Name": "Wallet548", + "ParticipationOnly": false + }, + { + "Name": "Wallet549", + "ParticipationOnly": false + }, + { + "Name": "Wallet550", + "ParticipationOnly": false + }, + { + "Name": "Wallet551", + "ParticipationOnly": false + }, + { + "Name": "Wallet552", + "ParticipationOnly": false + }, + { + "Name": "Wallet553", + "ParticipationOnly": false + }, + { + "Name": "Wallet554", + "ParticipationOnly": false + }, + { + "Name": "Wallet555", + "ParticipationOnly": false + }, + { + "Name": "Wallet556", + "ParticipationOnly": false + }, + { + "Name": "Wallet557", + "ParticipationOnly": false + }, + { + "Name": "Wallet558", + "ParticipationOnly": false + }, + { + "Name": "Wallet559", + "ParticipationOnly": false + }, + { + "Name": "Wallet560", + "ParticipationOnly": false + }, + { + "Name": "Wallet561", + "ParticipationOnly": false + }, + { + "Name": "Wallet562", + "ParticipationOnly": false + }, + { + "Name": "Wallet563", + "ParticipationOnly": false + }, + { + "Name": "Wallet564", + "ParticipationOnly": false + }, + { + "Name": "Wallet565", + "ParticipationOnly": false + }, + { + "Name": "Wallet566", + "ParticipationOnly": false + }, + { + "Name": "Wallet567", + "ParticipationOnly": false + }, + { + "Name": "Wallet568", + "ParticipationOnly": false + }, + { + "Name": "Wallet569", + "ParticipationOnly": false + }, + { + "Name": "Wallet570", + "ParticipationOnly": false + }, + { + "Name": "Wallet571", + "ParticipationOnly": false + }, + { + "Name": "Wallet572", + "ParticipationOnly": false + }, + { + "Name": "Wallet573", + "ParticipationOnly": false + }, + { + "Name": "Wallet574", + "ParticipationOnly": false + }, + { + "Name": "Wallet575", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet576", + "ParticipationOnly": false + }, + { + "Name": "Wallet577", + "ParticipationOnly": false + }, + { + "Name": "Wallet578", + "ParticipationOnly": false + }, + { + "Name": "Wallet579", + "ParticipationOnly": false + }, + { + "Name": "Wallet580", + "ParticipationOnly": false + }, + { + "Name": "Wallet581", + "ParticipationOnly": false + }, + { + "Name": "Wallet582", + "ParticipationOnly": false + }, + { + "Name": "Wallet583", + "ParticipationOnly": false + }, + { + "Name": "Wallet584", + "ParticipationOnly": false + }, + { + "Name": "Wallet585", + "ParticipationOnly": false + }, + { + "Name": "Wallet586", + "ParticipationOnly": false + }, + { + "Name": "Wallet587", + "ParticipationOnly": false + }, + { + "Name": "Wallet588", + "ParticipationOnly": false + }, + { + "Name": "Wallet589", + "ParticipationOnly": false + }, + { + "Name": "Wallet590", + "ParticipationOnly": false + }, + { + "Name": "Wallet591", + "ParticipationOnly": false + }, + { + "Name": "Wallet592", + "ParticipationOnly": false + }, + { + "Name": "Wallet593", + "ParticipationOnly": false + }, + { + "Name": "Wallet594", + "ParticipationOnly": false + }, + { + "Name": "Wallet595", + "ParticipationOnly": false + }, + { + "Name": "Wallet596", + "ParticipationOnly": false + }, + { + "Name": "Wallet597", + "ParticipationOnly": false + }, + { + "Name": "Wallet598", + "ParticipationOnly": false + }, + { + "Name": "Wallet599", + "ParticipationOnly": false + }, + { + "Name": "Wallet600", + "ParticipationOnly": false + }, + { + "Name": "Wallet601", + "ParticipationOnly": false + }, + { + "Name": "Wallet602", + "ParticipationOnly": false + }, + { + "Name": "Wallet603", + "ParticipationOnly": false + }, + { + "Name": "Wallet604", + "ParticipationOnly": false + }, + { + "Name": "Wallet605", + "ParticipationOnly": false + }, + { + "Name": "Wallet606", + "ParticipationOnly": false + }, + { + "Name": "Wallet607", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet608", + "ParticipationOnly": false + }, + { + "Name": "Wallet609", + "ParticipationOnly": false + }, + { + "Name": "Wallet610", + "ParticipationOnly": false + }, + { + "Name": "Wallet611", + "ParticipationOnly": false + }, + { + "Name": "Wallet612", + "ParticipationOnly": false + }, + { + "Name": "Wallet613", + "ParticipationOnly": false + }, + { + "Name": "Wallet614", + "ParticipationOnly": false + }, + { + "Name": "Wallet615", + "ParticipationOnly": false + }, + { + "Name": "Wallet616", + "ParticipationOnly": false + }, + { + "Name": "Wallet617", + "ParticipationOnly": false + }, + { + "Name": "Wallet618", + "ParticipationOnly": false + }, + { + "Name": "Wallet619", + "ParticipationOnly": false + }, + { + "Name": "Wallet620", + "ParticipationOnly": false + }, + { + "Name": "Wallet621", + "ParticipationOnly": false + }, + { + "Name": "Wallet622", + "ParticipationOnly": false + }, + { + "Name": "Wallet623", + "ParticipationOnly": false + }, + { + "Name": "Wallet624", + "ParticipationOnly": false + }, + { + "Name": "Wallet625", + "ParticipationOnly": false + }, + { + "Name": "Wallet626", + "ParticipationOnly": false + }, + { + "Name": "Wallet627", + "ParticipationOnly": false + }, + { + "Name": "Wallet628", + "ParticipationOnly": false + }, + { + "Name": "Wallet629", + "ParticipationOnly": false + }, + { + "Name": "Wallet630", + "ParticipationOnly": false + }, + { + "Name": "Wallet631", + "ParticipationOnly": false + }, + { + "Name": "Wallet632", + "ParticipationOnly": false + }, + { + "Name": "Wallet633", + "ParticipationOnly": false + }, + { + "Name": "Wallet634", + "ParticipationOnly": false + }, + { + "Name": "Wallet635", + "ParticipationOnly": false + }, + { + "Name": "Wallet636", + "ParticipationOnly": false + }, + { + "Name": "Wallet637", + "ParticipationOnly": false + }, + { + "Name": "Wallet638", + "ParticipationOnly": false + }, + { + "Name": "Wallet639", + "ParticipationOnly": false + }, + { + "Name": "Wallet640", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet641", + "ParticipationOnly": false + }, + { + "Name": "Wallet642", + "ParticipationOnly": false + }, + { + "Name": "Wallet643", + "ParticipationOnly": false + }, + { + "Name": "Wallet644", + "ParticipationOnly": false + }, + { + "Name": "Wallet645", + "ParticipationOnly": false + }, + { + "Name": "Wallet646", + "ParticipationOnly": false + }, + { + "Name": "Wallet647", + "ParticipationOnly": false + }, + { + "Name": "Wallet648", + "ParticipationOnly": false + }, + { + "Name": "Wallet649", + "ParticipationOnly": false + }, + { + "Name": "Wallet650", + "ParticipationOnly": false + }, + { + "Name": "Wallet651", + "ParticipationOnly": false + }, + { + "Name": "Wallet652", + "ParticipationOnly": false + }, + { + "Name": "Wallet653", + "ParticipationOnly": false + }, + { + "Name": "Wallet654", + "ParticipationOnly": false + }, + { + "Name": "Wallet655", + "ParticipationOnly": false + }, + { + "Name": "Wallet656", + "ParticipationOnly": false + }, + { + "Name": "Wallet657", + "ParticipationOnly": false + }, + { + "Name": "Wallet658", + "ParticipationOnly": false + }, + { + "Name": "Wallet659", + "ParticipationOnly": false + }, + { + "Name": "Wallet660", + "ParticipationOnly": false + }, + { + "Name": "Wallet661", + "ParticipationOnly": false + }, + { + "Name": "Wallet662", + "ParticipationOnly": false + }, + { + "Name": "Wallet663", + "ParticipationOnly": false + }, + { + "Name": "Wallet664", + "ParticipationOnly": false + }, + { + "Name": "Wallet665", + "ParticipationOnly": false + }, + { + "Name": "Wallet666", + "ParticipationOnly": false + }, + { + "Name": "Wallet667", + "ParticipationOnly": false + }, + { + "Name": "Wallet668", + "ParticipationOnly": false + }, + { + "Name": "Wallet669", + "ParticipationOnly": false + }, + { + "Name": "Wallet670", + "ParticipationOnly": false + }, + { + "Name": "Wallet671", + "ParticipationOnly": false + }, + { + "Name": "Wallet672", + "ParticipationOnly": false + }, + { + "Name": "Wallet673", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet674", + "ParticipationOnly": false + }, + { + "Name": "Wallet675", + "ParticipationOnly": false + }, + { + "Name": "Wallet676", + "ParticipationOnly": false + }, + { + "Name": "Wallet677", + "ParticipationOnly": false + }, + { + "Name": "Wallet678", + "ParticipationOnly": false + }, + { + "Name": "Wallet679", + "ParticipationOnly": false + }, + { + "Name": "Wallet680", + "ParticipationOnly": false + }, + { + "Name": "Wallet681", + "ParticipationOnly": false + }, + { + "Name": "Wallet682", + "ParticipationOnly": false + }, + { + "Name": "Wallet683", + "ParticipationOnly": false + }, + { + "Name": "Wallet684", + "ParticipationOnly": false + }, + { + "Name": "Wallet685", + "ParticipationOnly": false + }, + { + "Name": "Wallet686", + "ParticipationOnly": false + }, + { + "Name": "Wallet687", + "ParticipationOnly": false + }, + { + "Name": "Wallet688", + "ParticipationOnly": false + }, + { + "Name": "Wallet689", + "ParticipationOnly": false + }, + { + "Name": "Wallet690", + "ParticipationOnly": false + }, + { + "Name": "Wallet691", + "ParticipationOnly": false + }, + { + "Name": "Wallet692", + "ParticipationOnly": false + }, + { + "Name": "Wallet693", + "ParticipationOnly": false + }, + { + "Name": "Wallet694", + "ParticipationOnly": false + }, + { + "Name": "Wallet695", + "ParticipationOnly": false + }, + { + "Name": "Wallet696", + "ParticipationOnly": false + }, + { + "Name": "Wallet697", + "ParticipationOnly": false + }, + { + "Name": "Wallet698", + "ParticipationOnly": false + }, + { + "Name": "Wallet699", + "ParticipationOnly": false + }, + { + "Name": "Wallet700", + "ParticipationOnly": false + }, + { + "Name": "Wallet701", + "ParticipationOnly": false + }, + { + "Name": "Wallet702", + "ParticipationOnly": false + }, + { + "Name": "Wallet703", + "ParticipationOnly": false + }, + { + "Name": "Wallet704", + "ParticipationOnly": false + }, + { + "Name": "Wallet705", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet706", + "ParticipationOnly": false + }, + { + "Name": "Wallet707", + "ParticipationOnly": false + }, + { + "Name": "Wallet708", + "ParticipationOnly": false + }, + { + "Name": "Wallet709", + "ParticipationOnly": false + }, + { + "Name": "Wallet710", + "ParticipationOnly": false + }, + { + "Name": "Wallet711", + "ParticipationOnly": false + }, + { + "Name": "Wallet712", + "ParticipationOnly": false + }, + { + "Name": "Wallet713", + "ParticipationOnly": false + }, + { + "Name": "Wallet714", + "ParticipationOnly": false + }, + { + "Name": "Wallet715", + "ParticipationOnly": false + }, + { + "Name": "Wallet716", + "ParticipationOnly": false + }, + { + "Name": "Wallet717", + "ParticipationOnly": false + }, + { + "Name": "Wallet718", + "ParticipationOnly": false + }, + { + "Name": "Wallet719", + "ParticipationOnly": false + }, + { + "Name": "Wallet720", + "ParticipationOnly": false + }, + { + "Name": "Wallet721", + "ParticipationOnly": false + }, + { + "Name": "Wallet722", + "ParticipationOnly": false + }, + { + "Name": "Wallet723", + "ParticipationOnly": false + }, + { + "Name": "Wallet724", + "ParticipationOnly": false + }, + { + "Name": "Wallet725", + "ParticipationOnly": false + }, + { + "Name": "Wallet726", + "ParticipationOnly": false + }, + { + "Name": "Wallet727", + "ParticipationOnly": false + }, + { + "Name": "Wallet728", + "ParticipationOnly": false + }, + { + "Name": "Wallet729", + "ParticipationOnly": false + }, + { + "Name": "Wallet730", + "ParticipationOnly": false + }, + { + "Name": "Wallet731", + "ParticipationOnly": false + }, + { + "Name": "Wallet732", + "ParticipationOnly": false + }, + { + "Name": "Wallet733", + "ParticipationOnly": false + }, + { + "Name": "Wallet734", + "ParticipationOnly": false + }, + { + "Name": "Wallet735", + "ParticipationOnly": false + }, + { + "Name": "Wallet736", + "ParticipationOnly": false + }, + { + "Name": "Wallet737", + "ParticipationOnly": false + }, + { + "Name": "Wallet738", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet739", + "ParticipationOnly": false + }, + { + "Name": "Wallet740", + "ParticipationOnly": false + }, + { + "Name": "Wallet741", + "ParticipationOnly": false + }, + { + "Name": "Wallet742", + "ParticipationOnly": false + }, + { + "Name": "Wallet743", + "ParticipationOnly": false + }, + { + "Name": "Wallet744", + "ParticipationOnly": false + }, + { + "Name": "Wallet745", + "ParticipationOnly": false + }, + { + "Name": "Wallet746", + "ParticipationOnly": false + }, + { + "Name": "Wallet747", + "ParticipationOnly": false + }, + { + "Name": "Wallet748", + "ParticipationOnly": false + }, + { + "Name": "Wallet749", + "ParticipationOnly": false + }, + { + "Name": "Wallet750", + "ParticipationOnly": false + }, + { + "Name": "Wallet751", + "ParticipationOnly": false + }, + { + "Name": "Wallet752", + "ParticipationOnly": false + }, + { + "Name": "Wallet753", + "ParticipationOnly": false + }, + { + "Name": "Wallet754", + "ParticipationOnly": false + }, + { + "Name": "Wallet755", + "ParticipationOnly": false + }, + { + "Name": "Wallet756", + "ParticipationOnly": false + }, + { + "Name": "Wallet757", + "ParticipationOnly": false + }, + { + "Name": "Wallet758", + "ParticipationOnly": false + }, + { + "Name": "Wallet759", + "ParticipationOnly": false + }, + { + "Name": "Wallet760", + "ParticipationOnly": false + }, + { + "Name": "Wallet761", + "ParticipationOnly": false + }, + { + "Name": "Wallet762", + "ParticipationOnly": false + }, + { + "Name": "Wallet763", + "ParticipationOnly": false + }, + { + "Name": "Wallet764", + "ParticipationOnly": false + }, + { + "Name": "Wallet765", + "ParticipationOnly": false + }, + { + "Name": "Wallet766", + "ParticipationOnly": false + }, + { + "Name": "Wallet767", + "ParticipationOnly": false + }, + { + "Name": "Wallet768", + "ParticipationOnly": false + }, + { + "Name": "Wallet769", + "ParticipationOnly": false + }, + { + "Name": "Wallet770", + "ParticipationOnly": false + }, + { + "Name": "Wallet771", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet772", + "ParticipationOnly": false + }, + { + "Name": "Wallet773", + "ParticipationOnly": false + }, + { + "Name": "Wallet774", + "ParticipationOnly": false + }, + { + "Name": "Wallet775", + "ParticipationOnly": false + }, + { + "Name": "Wallet776", + "ParticipationOnly": false + }, + { + "Name": "Wallet777", + "ParticipationOnly": false + }, + { + "Name": "Wallet778", + "ParticipationOnly": false + }, + { + "Name": "Wallet779", + "ParticipationOnly": false + }, + { + "Name": "Wallet780", + "ParticipationOnly": false + }, + { + "Name": "Wallet781", + "ParticipationOnly": false + }, + { + "Name": "Wallet782", + "ParticipationOnly": false + }, + { + "Name": "Wallet783", + "ParticipationOnly": false + }, + { + "Name": "Wallet784", + "ParticipationOnly": false + }, + { + "Name": "Wallet785", + "ParticipationOnly": false + }, + { + "Name": "Wallet786", + "ParticipationOnly": false + }, + { + "Name": "Wallet787", + "ParticipationOnly": false + }, + { + "Name": "Wallet788", + "ParticipationOnly": false + }, + { + "Name": "Wallet789", + "ParticipationOnly": false + }, + { + "Name": "Wallet790", + "ParticipationOnly": false + }, + { + "Name": "Wallet791", + "ParticipationOnly": false + }, + { + "Name": "Wallet792", + "ParticipationOnly": false + }, + { + "Name": "Wallet793", + "ParticipationOnly": false + }, + { + "Name": "Wallet794", + "ParticipationOnly": false + }, + { + "Name": "Wallet795", + "ParticipationOnly": false + }, + { + "Name": "Wallet796", + "ParticipationOnly": false + }, + { + "Name": "Wallet797", + "ParticipationOnly": false + }, + { + "Name": "Wallet798", + "ParticipationOnly": false + }, + { + "Name": "Wallet799", + "ParticipationOnly": false + }, + { + "Name": "Wallet800", + "ParticipationOnly": false + }, + { + "Name": "Wallet801", + "ParticipationOnly": false + }, + { + "Name": "Wallet802", + "ParticipationOnly": false + }, + { + "Name": "Wallet803", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet804", + "ParticipationOnly": false + }, + { + "Name": "Wallet805", + "ParticipationOnly": false + }, + { + "Name": "Wallet806", + "ParticipationOnly": false + }, + { + "Name": "Wallet807", + "ParticipationOnly": false + }, + { + "Name": "Wallet808", + "ParticipationOnly": false + }, + { + "Name": "Wallet809", + "ParticipationOnly": false + }, + { + "Name": "Wallet810", + "ParticipationOnly": false + }, + { + "Name": "Wallet811", + "ParticipationOnly": false + }, + { + "Name": "Wallet812", + "ParticipationOnly": false + }, + { + "Name": "Wallet813", + "ParticipationOnly": false + }, + { + "Name": "Wallet814", + "ParticipationOnly": false + }, + { + "Name": "Wallet815", + "ParticipationOnly": false + }, + { + "Name": "Wallet816", + "ParticipationOnly": false + }, + { + "Name": "Wallet817", + "ParticipationOnly": false + }, + { + "Name": "Wallet818", + "ParticipationOnly": false + }, + { + "Name": "Wallet819", + "ParticipationOnly": false + }, + { + "Name": "Wallet820", + "ParticipationOnly": false + }, + { + "Name": "Wallet821", + "ParticipationOnly": false + }, + { + "Name": "Wallet822", + "ParticipationOnly": false + }, + { + "Name": "Wallet823", + "ParticipationOnly": false + }, + { + "Name": "Wallet824", + "ParticipationOnly": false + }, + { + "Name": "Wallet825", + "ParticipationOnly": false + }, + { + "Name": "Wallet826", + "ParticipationOnly": false + }, + { + "Name": "Wallet827", + "ParticipationOnly": false + }, + { + "Name": "Wallet828", + "ParticipationOnly": false + }, + { + "Name": "Wallet829", + "ParticipationOnly": false + }, + { + "Name": "Wallet830", + "ParticipationOnly": false + }, + { + "Name": "Wallet831", + "ParticipationOnly": false + }, + { + "Name": "Wallet832", + "ParticipationOnly": false + }, + { + "Name": "Wallet833", + "ParticipationOnly": false + }, + { + "Name": "Wallet834", + "ParticipationOnly": false + }, + { + "Name": "Wallet835", + "ParticipationOnly": false + }, + { + "Name": "Wallet836", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet837", + "ParticipationOnly": false + }, + { + "Name": "Wallet838", + "ParticipationOnly": false + }, + { + "Name": "Wallet839", + "ParticipationOnly": false + }, + { + "Name": "Wallet840", + "ParticipationOnly": false + }, + { + "Name": "Wallet841", + "ParticipationOnly": false + }, + { + "Name": "Wallet842", + "ParticipationOnly": false + }, + { + "Name": "Wallet843", + "ParticipationOnly": false + }, + { + "Name": "Wallet844", + "ParticipationOnly": false + }, + { + "Name": "Wallet845", + "ParticipationOnly": false + }, + { + "Name": "Wallet846", + "ParticipationOnly": false + }, + { + "Name": "Wallet847", + "ParticipationOnly": false + }, + { + "Name": "Wallet848", + "ParticipationOnly": false + }, + { + "Name": "Wallet849", + "ParticipationOnly": false + }, + { + "Name": "Wallet850", + "ParticipationOnly": false + }, + { + "Name": "Wallet851", + "ParticipationOnly": false + }, + { + "Name": "Wallet852", + "ParticipationOnly": false + }, + { + "Name": "Wallet853", + "ParticipationOnly": false + }, + { + "Name": "Wallet854", + "ParticipationOnly": false + }, + { + "Name": "Wallet855", + "ParticipationOnly": false + }, + { + "Name": "Wallet856", + "ParticipationOnly": false + }, + { + "Name": "Wallet857", + "ParticipationOnly": false + }, + { + "Name": "Wallet858", + "ParticipationOnly": false + }, + { + "Name": "Wallet859", + "ParticipationOnly": false + }, + { + "Name": "Wallet860", + "ParticipationOnly": false + }, + { + "Name": "Wallet861", + "ParticipationOnly": false + }, + { + "Name": "Wallet862", + "ParticipationOnly": false + }, + { + "Name": "Wallet863", + "ParticipationOnly": false + }, + { + "Name": "Wallet864", + "ParticipationOnly": false + }, + { + "Name": "Wallet865", + "ParticipationOnly": false + }, + { + "Name": "Wallet866", + "ParticipationOnly": false + }, + { + "Name": "Wallet867", + "ParticipationOnly": false + }, + { + "Name": "Wallet868", + "ParticipationOnly": false + }, + { + "Name": "Wallet869", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet870", + "ParticipationOnly": false + }, + { + "Name": "Wallet871", + "ParticipationOnly": false + }, + { + "Name": "Wallet872", + "ParticipationOnly": false + }, + { + "Name": "Wallet873", + "ParticipationOnly": false + }, + { + "Name": "Wallet874", + "ParticipationOnly": false + }, + { + "Name": "Wallet875", + "ParticipationOnly": false + }, + { + "Name": "Wallet876", + "ParticipationOnly": false + }, + { + "Name": "Wallet877", + "ParticipationOnly": false + }, + { + "Name": "Wallet878", + "ParticipationOnly": false + }, + { + "Name": "Wallet879", + "ParticipationOnly": false + }, + { + "Name": "Wallet880", + "ParticipationOnly": false + }, + { + "Name": "Wallet881", + "ParticipationOnly": false + }, + { + "Name": "Wallet882", + "ParticipationOnly": false + }, + { + "Name": "Wallet883", + "ParticipationOnly": false + }, + { + "Name": "Wallet884", + "ParticipationOnly": false + }, + { + "Name": "Wallet885", + "ParticipationOnly": false + }, + { + "Name": "Wallet886", + "ParticipationOnly": false + }, + { + "Name": "Wallet887", + "ParticipationOnly": false + }, + { + "Name": "Wallet888", + "ParticipationOnly": false + }, + { + "Name": "Wallet889", + "ParticipationOnly": false + }, + { + "Name": "Wallet890", + "ParticipationOnly": false + }, + { + "Name": "Wallet891", + "ParticipationOnly": false + }, + { + "Name": "Wallet892", + "ParticipationOnly": false + }, + { + "Name": "Wallet893", + "ParticipationOnly": false + }, + { + "Name": "Wallet894", + "ParticipationOnly": false + }, + { + "Name": "Wallet895", + "ParticipationOnly": false + }, + { + "Name": "Wallet896", + "ParticipationOnly": false + }, + { + "Name": "Wallet897", + "ParticipationOnly": false + }, + { + "Name": "Wallet898", + "ParticipationOnly": false + }, + { + "Name": "Wallet899", + "ParticipationOnly": false + }, + { + "Name": "Wallet900", + "ParticipationOnly": false + }, + { + "Name": "Wallet901", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet902", + "ParticipationOnly": false + }, + { + "Name": "Wallet903", + "ParticipationOnly": false + }, + { + "Name": "Wallet904", + "ParticipationOnly": false + }, + { + "Name": "Wallet905", + "ParticipationOnly": false + }, + { + "Name": "Wallet906", + "ParticipationOnly": false + }, + { + "Name": "Wallet907", + "ParticipationOnly": false + }, + { + "Name": "Wallet908", + "ParticipationOnly": false + }, + { + "Name": "Wallet909", + "ParticipationOnly": false + }, + { + "Name": "Wallet910", + "ParticipationOnly": false + }, + { + "Name": "Wallet911", + "ParticipationOnly": false + }, + { + "Name": "Wallet912", + "ParticipationOnly": false + }, + { + "Name": "Wallet913", + "ParticipationOnly": false + }, + { + "Name": "Wallet914", + "ParticipationOnly": false + }, + { + "Name": "Wallet915", + "ParticipationOnly": false + }, + { + "Name": "Wallet916", + "ParticipationOnly": false + }, + { + "Name": "Wallet917", + "ParticipationOnly": false + }, + { + "Name": "Wallet918", + "ParticipationOnly": false + }, + { + "Name": "Wallet919", + "ParticipationOnly": false + }, + { + "Name": "Wallet920", + "ParticipationOnly": false + }, + { + "Name": "Wallet921", + "ParticipationOnly": false + }, + { + "Name": "Wallet922", + "ParticipationOnly": false + }, + { + "Name": "Wallet923", + "ParticipationOnly": false + }, + { + "Name": "Wallet924", + "ParticipationOnly": false + }, + { + "Name": "Wallet925", + "ParticipationOnly": false + }, + { + "Name": "Wallet926", + "ParticipationOnly": false + }, + { + "Name": "Wallet927", + "ParticipationOnly": false + }, + { + "Name": "Wallet928", + "ParticipationOnly": false + }, + { + "Name": "Wallet929", + "ParticipationOnly": false + }, + { + "Name": "Wallet930", + "ParticipationOnly": false + }, + { + "Name": "Wallet931", + "ParticipationOnly": false + }, + { + "Name": "Wallet932", + "ParticipationOnly": false + }, + { + "Name": "Wallet933", + "ParticipationOnly": false + }, + { + "Name": "Wallet934", + "ParticipationOnly": false + }, 
+ { + "Name": "Wallet935", + "ParticipationOnly": false + }, + { + "Name": "Wallet936", + "ParticipationOnly": false + }, + { + "Name": "Wallet937", + "ParticipationOnly": false + }, + { + "Name": "Wallet938", + "ParticipationOnly": false + }, + { + "Name": "Wallet939", + "ParticipationOnly": false + }, + { + "Name": "Wallet940", + "ParticipationOnly": false + }, + { + "Name": "Wallet941", + "ParticipationOnly": false + }, + { + "Name": "Wallet942", + "ParticipationOnly": false + }, + { + "Name": "Wallet943", + "ParticipationOnly": false + }, + { + "Name": "Wallet944", + "ParticipationOnly": false + }, + { + "Name": "Wallet945", + "ParticipationOnly": false + }, + { + "Name": "Wallet946", + "ParticipationOnly": false + }, + { + "Name": "Wallet947", + "ParticipationOnly": false + }, + { + "Name": "Wallet948", + "ParticipationOnly": false + }, + { + "Name": "Wallet949", + "ParticipationOnly": false + }, + { + "Name": "Wallet950", + "ParticipationOnly": false + }, + { + "Name": "Wallet951", + "ParticipationOnly": false + }, + { + "Name": "Wallet952", + "ParticipationOnly": false + }, + { + "Name": "Wallet953", + "ParticipationOnly": false + }, + { + "Name": "Wallet954", + "ParticipationOnly": false + }, + { + "Name": "Wallet955", + "ParticipationOnly": false + }, + { + "Name": "Wallet956", + "ParticipationOnly": false + }, + { + "Name": "Wallet957", + "ParticipationOnly": false + }, + { + "Name": "Wallet958", + "ParticipationOnly": false + }, + { + "Name": "Wallet959", + "ParticipationOnly": false + }, + { + "Name": "Wallet960", + "ParticipationOnly": false + }, + { + "Name": "Wallet961", + "ParticipationOnly": false + }, + { + "Name": "Wallet962", + "ParticipationOnly": false + }, + { + "Name": "Wallet963", + "ParticipationOnly": false + }, + { + "Name": "Wallet964", + "ParticipationOnly": false + }, + { + "Name": "Wallet965", + "ParticipationOnly": false + }, + { + "Name": "Wallet966", + "ParticipationOnly": false + }, + { + "Name": "Wallet967", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet968", + "ParticipationOnly": false + }, + { + "Name": "Wallet969", + "ParticipationOnly": false + }, + { + "Name": "Wallet970", + "ParticipationOnly": false + }, + { + "Name": "Wallet971", + "ParticipationOnly": false + }, + { + "Name": "Wallet972", + "ParticipationOnly": false + }, + { + "Name": "Wallet973", + "ParticipationOnly": false + }, + { + "Name": "Wallet974", + "ParticipationOnly": false + }, + { + "Name": "Wallet975", + "ParticipationOnly": false + }, + { + "Name": "Wallet976", + "ParticipationOnly": false + }, + { + "Name": "Wallet977", + "ParticipationOnly": false + }, + { + "Name": "Wallet978", + "ParticipationOnly": false + }, + { + "Name": "Wallet979", + "ParticipationOnly": false + }, + { + "Name": "Wallet980", + "ParticipationOnly": false + }, + { + "Name": "Wallet981", + "ParticipationOnly": false + }, + { + "Name": "Wallet982", + "ParticipationOnly": false + }, + { + "Name": "Wallet983", + "ParticipationOnly": false + }, + { + "Name": "Wallet984", + "ParticipationOnly": false + }, + { + "Name": "Wallet985", + "ParticipationOnly": false + }, + { + "Name": "Wallet986", + "ParticipationOnly": false + }, + { + "Name": "Wallet987", + "ParticipationOnly": false + }, + { + "Name": "Wallet988", + "ParticipationOnly": false + }, + { + "Name": "Wallet989", + "ParticipationOnly": false + }, + { + "Name": "Wallet990", + "ParticipationOnly": false + }, + { + "Name": "Wallet991", + "ParticipationOnly": false + }, + { + "Name": "Wallet992", + "ParticipationOnly": false + }, + { + "Name": "Wallet993", + "ParticipationOnly": false + }, + { + "Name": "Wallet994", + "ParticipationOnly": false + }, + { + "Name": "Wallet995", + "ParticipationOnly": false + }, + { + "Name": "Wallet996", + "ParticipationOnly": false + }, + { + "Name": "Wallet997", + "ParticipationOnly": false + }, + { + "Name": "Wallet998", + "ParticipationOnly": false + }, + { + "Name": "Wallet999", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet1000", + "ParticipationOnly": false + } + ] + } + ] + } \ No newline at end of file diff --git a/tools/network/cloudflare/cloudflare.go b/tools/network/cloudflare/cloudflare.go index 80c4f13544..122fe00c22 100644 --- a/tools/network/cloudflare/cloudflare.go +++ b/tools/network/cloudflare/cloudflare.go @@ -103,7 +103,7 @@ func (d *DNS) ClearSRVRecord(ctx context.Context, name string, target string, se return err } if len(entries) == 0 { - fmt.Printf("No SRV entry for '%s'='%s'.\n", name, target) + fmt.Printf("No SRV entry for '[%s.%s.]%s'='%s'.\n", service, protocol, name, target) return nil } diff --git a/tools/teal/examples/limitorder.sh b/tools/teal/examples/limitorder.sh index 09e0a5360a..5d6b7bb95b 100755 --- a/tools/teal/examples/limitorder.sh +++ b/tools/teal/examples/limitorder.sh @@ -5,7 +5,7 @@ goal asset create -d . --creator G5PM2K5RIEHHO7ZKR2ZTQDYY6DVBYOMGOFZMMNGJCW4BYNM # > Issued transaction from account G5PM2K5RIEHHO7ZKR2ZTQDYY6DVBYOMGOFZMMNGJCW4BYNMT7HC4HTZIDU, txid JH7M5L43YLQ5DTRIVVBUUB2E4BFE7TPVAPPEGCUVNYSFRLT55Z3Q (fee 1000) # > Transaction JH7M5L43YLQ5DTRIVVBUUB2E4BFE7TPVAPPEGCUVNYSFRLT55Z3Q still pending as of round 148369 # > Transaction JH7M5L43YLQ5DTRIVVBUUB2E4BFE7TPVAPPEGCUVNYSFRLT55Z3Q committed in round 148371 -goal asset info --creator G5PM2K5RIEHHO7ZKR2ZTQDYY6DVBYOMGOFZMMNGJCW4BYNMT7HC4HTZIDU -d . --asset e.g.Coin +goal asset info --creator G5PM2K5RIEHHO7ZKR2ZTQDYY6DVBYOMGOFZMMNGJCW4BYNMT7HC4HTZIDU -d . 
--unitname e.g.Coin # > Asset ID: 39 # > Creator: G5PM2K5RIEHHO7ZKR2ZTQDYY6DVBYOMGOFZMMNGJCW4BYNMT7HC4HTZIDU # > Asset name: diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 3230094023..21ec4454f6 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -96,6 +96,14 @@ func (f *Filter) MarshalBinary() ([]byte, error) { return data, nil } +// BinaryMarshalLength returns the length of a binary marshaled filter (in bytes) using the +// optimal configuration for the given number of elements with the desired false positive rate. +func BinaryMarshalLength(numElements int, falsePositiveRate float64) int64 { + sizeBits, _ := Optimal(numElements, falsePositiveRate) + filterBytes := int64((sizeBits + 7) / 8) // convert bits -> bytes. + return filterBytes + 8 // add 8: 4 bytes for the prefix array plus 4 bytes for the numHashes uint32 +} + // UnmarshalBinary restores the state of the filter from raw data func UnmarshalBinary(data []byte) (*Filter, error) { f := &Filter{} diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go index 9f7486b504..f755fb62af 100644 --- a/util/bloom/bloom_test.go +++ b/util/bloom/bloom_test.go @@ -12,6 +12,8 @@ import ( "log" "math" "testing" + + "github.com/stretchr/testify/require" ) func TestBitset(t *testing.T) { @@ -228,3 +230,20 @@ func TestEmptyFilter(t *testing.T) { f.Test([]byte{1, 2, 3, 4, 5}) } } + +// TestBinaryMarshalLength tests various sizes of bloom filters and ensures that the encoded binary +// size is equal to the one reported by BinaryMarshalLength. 
+func TestBinaryMarshalLength(t *testing.T) { + for _, elementCount := range []int{2, 16, 1024, 32768, 5101, 100237, 144539} { + for _, falsePositiveRate := range []float64{0.2, 0.1, 0.01, 0.001, 0.00001, 0.0000001} { + sizeBits, numHashes := Optimal(elementCount, falsePositiveRate) + filter := New(sizeBits, numHashes, 1234) + require.NotNil(t, filter) + bytes, err := filter.MarshalBinary() + require.NoError(t, err) + require.NotZero(t, len(bytes)) + calculatedBytesLength := BinaryMarshalLength(elementCount, falsePositiveRate) + require.Equal(t, calculatedBytesLength, int64(len(bytes))) + } + } +} diff --git a/util/db/dbutil_test.go b/util/db/dbutil_test.go index 0cb7af8a56..49717619a5 100644 --- a/util/db/dbutil_test.go +++ b/util/db/dbutil_test.go @@ -418,3 +418,47 @@ func TestSetSynchronousMode(t *testing.T) { } } } + +// TestReadingWhileWriting tests the SQLite behaviour when we're using two transactions, writing with one and reading from the other. +// it demonstrates that at any time before we're calling Commit, the database content can be read, and it contains its pre-transaction +value. 
+func TestReadingWhileWriting(t *testing.T) { + writeAcc, err := MakeAccessor("fn.db", false, false) + require.NoError(t, err) + defer os.Remove("fn.db") + defer os.Remove("fn.db-shm") + defer os.Remove("fn.db-wal") + defer writeAcc.Close() + readAcc, err := MakeAccessor("fn.db", true, false) + require.NoError(t, err) + defer readAcc.Close() + _, err = writeAcc.Handle.Exec("CREATE TABLE foo (a INTEGER, b INTEGER)") + require.NoError(t, err) + + _, err = writeAcc.Handle.Exec("INSERT INTO foo(a,b) VALUES (1,1)") + require.NoError(t, err) + + var count int + err = readAcc.Handle.QueryRow("SELECT count(*) FROM foo").Scan(&count) + require.NoError(t, err) + require.Equal(t, 1, count) + + err = writeAcc.Atomic(func(ctx context.Context, tx *sql.Tx) error { + _, err = tx.Exec("INSERT INTO foo(a,b) VALUES (2,2)") + if err != nil { + return err + } + err = readAcc.Handle.QueryRow("SELECT count(*) FROM foo").Scan(&count) + if err != nil { + return err + } + return nil + }) + require.NoError(t, err) + // this should be 1, since it was queried before the commit. + require.Equal(t, 1, count) + err = readAcc.Handle.QueryRow("SELECT count(*) FROM foo").Scan(&count) + require.NoError(t, err) + require.Equal(t, 2, count) + +} diff --git a/util/metrics/counter.go b/util/metrics/counter.go index 8641b1f4be..2cf614d491 100644 --- a/util/metrics/counter.go +++ b/util/metrics/counter.go @@ -21,6 +21,7 @@ import ( "strconv" "strings" "sync/atomic" + "time" ) // MakeCounter create a new counter with the provided name and description. @@ -36,6 +37,11 @@ func MakeCounter(metric MetricName) *Counter { return c } +// NewCounter is a shortcut to MakeCounter in one shorter line. 
+func NewCounter(name, desc string) *Counter { + return MakeCounter(MetricName{Name: name, Description: desc}) +} + // Register registers the counter with the default/specific registry func (counter *Counter) Register(reg *Registry) { if reg == nil { @@ -99,6 +105,12 @@ func (counter *Counter) AddUint64(x uint64, labels map[string]string) { } } +// AddMicrosecondsSince increases counter by microseconds between Time t and now. +// Fastest if labels is nil +func (counter *Counter) AddMicrosecondsSince(t time.Time, labels map[string]string) { + counter.AddUint64(uint64(time.Now().Sub(t).Microseconds()), labels) +} + func (counter *Counter) fastAddUint64(x uint64) { if atomic.AddUint64(&counter.intValue, x) == x { // What we just added is the whole value, this diff --git a/util/process_common.go b/util/process_common.go new file mode 100644 index 0000000000..ae74344470 --- /dev/null +++ b/util/process_common.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +// +build !windows + +package util + +import ( + "syscall" +) + +// KillProcess kills a running OS process +func KillProcess(pid int, sig syscall.Signal) error { + return syscall.Kill(pid, sig) +} diff --git a/util/process_windows.go b/util/process_windows.go new file mode 100644 index 0000000000..9bb6703253 --- /dev/null +++ b/util/process_windows.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +// +build windows + +package util + +import ( + "os" + "unsafe" + + "golang.org/x/sys/windows" +) + +// KillProcess kills a running OS process +func KillProcess(pid int, _ os.Signal) error { + + p, err := os.FindProcess(pid) + if err == nil { + + for _, v := range getChildrenProcesses(pid) { + _ = v.Kill() + } + + err = p.Kill() + } + return err +} + +func getChildrenProcesses(parentPid int) []*os.Process { + out := []*os.Process{} + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0)) + if err == nil { + var pe32 windows.ProcessEntry32 + + defer windows.CloseHandle(snap) + + pe32.Size = uint32(unsafe.Sizeof(pe32)) + if err := windows.Process32First(snap, &pe32); err == nil { + for { + if pe32.ParentProcessID == uint32(parentPid) { + p, err := os.FindProcess(int(pe32.ProcessID)) + if err == nil { + out = append(out, p) + } + } + if err = windows.Process32Next(snap, &pe32); err != nil { + break + } + } + } + } + return out +} diff --git a/util/util.go b/util/util.go index 2959e4904f..7dcb5ba310 100644 --- a/util/util.go +++ b/util/util.go @@ -14,6 +14,8 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . 
+// +build !windows + package util import ( @@ -37,3 +39,24 @@ func RaiseRlimit(amount uint64) error { } return nil } + +// Getrusage gets file descriptors usage statistics +func Getrusage(who int, rusage *syscall.Rusage) (err error) { + err = syscall.Getrusage(who, rusage) + return +} + +// GetCurrentProcessTimes gets current process kernel and usermode times +func GetCurrentProcessTimes() (utime int64, stime int64, err error) { + var usage syscall.Rusage + + err = syscall.Getrusage(syscall.RUSAGE_SELF, &usage) + if err == nil { + utime = usage.Utime.Nano() + stime = usage.Stime.Nano() + } else { + utime = 0 + stime = 0 + } + return +} diff --git a/util/util_windows.go b/util/util_windows.go new file mode 100644 index 0000000000..a43de1e3be --- /dev/null +++ b/util/util_windows.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2020 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package util + +import ( + "errors" + "syscall" + "time" +) + +/* misc */ + +// RaiseRlimit increases the number of file descriptors we can have +func RaiseRlimit(_ uint64) error { + return nil +} + +// Getrusage gets file descriptors usage statistics +func Getrusage(who int, rusage *syscall.Rusage) (err error) { + if rusage != nil { + *rusage = syscall.Rusage{} + err = nil + } else { + err = errors.New("invalid parameter") + } + return +} + +// GetCurrentProcessTimes gets current process kernel and usermode times +func GetCurrentProcessTimes() (utime int64, stime int64, err error) { + var Ktime, Utime syscall.Filetime + var handle syscall.Handle + + handle, err = syscall.GetCurrentProcess() + if err == nil { + err = syscall.GetProcessTimes(handle, nil, nil, &Ktime, &Utime) + } + if err == nil { + utime = filetimeToDuration(&Utime).Nanoseconds() + stime = filetimeToDuration(&Ktime).Nanoseconds() + } else { + utime = 0 + stime = 0 + } + return +} + +func filetimeToDuration(ft *syscall.Filetime) time.Duration { + n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals + return time.Duration(n * 100) +}