(NOBIDS) improve fetching of burn page data
peterbitfly committed Nov 14, 2023
1 parent 2107f54 commit de229eb
Showing 2 changed files with 95 additions and 44 deletions.
66 changes: 66 additions & 0 deletions db/bigtable.go
@@ -2635,6 +2635,58 @@ func (bigtable *Bigtable) GetAggregatedValidatorIncomeDetailsHistory(validators
return resultContainer.res, nil
}

// GetTotalValidatorIncomeDetailsHistory returns the total validator income for a given range of epochs
// It is considerably faster than fetching the individual income for each validator and aggregating it
// startEpoch & endEpoch are inclusive
func (bigtable *Bigtable) GetTotalValidatorIncomeDetailsHistory(startEpoch uint64, endEpoch uint64) (map[uint64]*itypes.ValidatorEpochIncome, error) {
tmr := time.AfterFunc(REPORT_TIMEOUT, func() {
logger.WithFields(logrus.Fields{
"startEpoch": startEpoch,
"endEpoch": endEpoch,
}).Warnf("%s call took longer than %v", utils.GetCurrentFuncName(), REPORT_TIMEOUT)
})
defer tmr.Stop()

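// fall back to epoch 0 if the range is inverted (e.g. via an unsigned underflow in the caller)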
if startEpoch > endEpoch {
startEpoch = 0
}

ctx, cancel := context.WithTimeout(context.Background(), time.Second*180)
defer cancel()

res := make(map[uint64]*itypes.ValidatorEpochIncome, endEpoch-startEpoch+1)

filter := gcp_bigtable.LimitRows(int64(endEpoch - startEpoch + 1))

rowRange := bigtable.getTotalIncomeEpochRanges(startEpoch, endEpoch)
err := bigtable.tableValidatorsHistory.ReadRows(ctx, rowRange, func(r gcp_bigtable.Row) bool {
keySplit := strings.Split(r.Key(), ":")
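// row keys have the form <chainId>:<column>:<reversedPaddedEpoch>, so keySplit[2] holds the reversed epoch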

epoch, err := strconv.ParseUint(keySplit[2], 10, 64)
if err != nil {
logger.Errorf("error parsing epoch from row key %v: %v", r.Key(), err)
return false
}

for _, ri := range r[STATS_COLUMN_FAMILY] {
incomeDetails := &itypes.ValidatorEpochIncome{}
err = proto.Unmarshal(ri.Value, incomeDetails)
if err != nil {
logger.Errorf("error decoding validator income data for row %v: %v", r.Key(), err)
return false
}

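// the parsed value is the reversed epoch, so subtract it from MAX_EPOCH to recover the actual epoch number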
res[MAX_EPOCH-epoch] = incomeDetails
}
return true
}, filter)

if err != nil {
return nil, err
}
return res, nil
}

// Deletes all block data from bigtable
func (bigtable *Bigtable) DeleteEpoch(epoch uint64) error {
// TODO: Implement
@@ -2663,6 +2715,20 @@ func (bigtable *Bigtable) getValidatorsEpochRanges(validatorIndices []uint64, pr
return ranges
}

func (bigtable *Bigtable) getTotalIncomeEpochRanges(startEpoch uint64, endEpoch uint64) gcp_bigtable.RowRange {
if endEpoch > math.MaxInt64 {
endEpoch = 0
}
if endEpoch < startEpoch { // handle overflows
startEpoch = 0
}

rangeEnd := fmt.Sprintf("%s:%s:%s%s", bigtable.chainId, SUM_COLUMN, bigtable.reversedPaddedEpoch(startEpoch), "\x00")
rangeStart := fmt.Sprintf("%s:%s:%s", bigtable.chainId, SUM_COLUMN, bigtable.reversedPaddedEpoch(endEpoch))

return gcp_bigtable.NewRange(rangeStart, rangeEnd)
}

func (bigtable *Bigtable) getValidatorsEpochSlotRanges(validatorIndices []uint64, prefix string, startEpoch uint64, endEpoch uint64) gcp_bigtable.RowRangeList {

if endEpoch > math.MaxInt64 {
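For orientation, here is a minimal, self-contained sketch of how reversed, zero-padded epoch keys turn the inclusive epoch range from getTotalIncomeEpochRanges into a lexicographic Bigtable row range. The constants, the padding width and the exact key layout below are illustrative assumptions, not the repository's actual values.

package main

import "fmt"

// Illustrative assumptions only; the real constants and helpers live in db/bigtable.go.
const (
    maxEpoch  = 999999999
    chainID   = "1"
    sumColumn = "sum"
)

// reversedPaddedEpoch subtracts the epoch from a large constant and zero-pads the result,
// so newer epochs sort lexicographically before older ones.
func reversedPaddedEpoch(epoch uint64) string {
    return fmt.Sprintf("%09d", maxEpoch-epoch)
}

func main() {
    startEpoch, endEpoch := uint64(240000), uint64(240010)

    // Row keys have the shape "<chainId>:<column>:<reversedPaddedEpoch>". Because the
    // epoch is reversed, the range starts at endEpoch (the newest row) and ends just
    // past startEpoch (the oldest row); the trailing null byte keeps the exclusive
    // range end from cutting off the startEpoch row itself.
    rangeStart := fmt.Sprintf("%s:%s:%s", chainID, sumColumn, reversedPaddedEpoch(endEpoch))
    rangeEnd := fmt.Sprintf("%s:%s:%s%s", chainID, sumColumn, reversedPaddedEpoch(startEpoch), "\x00")

    fmt.Println("range start:", rangeStart) // 1:sum:999759989
    fmt.Println("range end:  ", rangeEnd)   // 1:sum:999759999 followed by 0x00
    // Combined with LimitRows(endEpoch-startEpoch+1), the read scans exactly one
    // pre-aggregated row per epoch instead of one row per validator per epoch.
}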
73 changes: 29 additions & 44 deletions services/services.go
@@ -1518,9 +1518,18 @@ func getBurnPageData() (*types.BurnPageData, error) {
data := &types.BurnPageData{}
start := time.Now()

latestEpoch := LatestEpoch()
latestFinalizedEpoch := LatestFinalizedEpoch()
latestBlock := LatestEth1BlockNumber()

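// compute the lookback epochs, guarding against uint64 underflow on young chains (the subtraction would wrap around)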
lookbackEpoch := latestFinalizedEpoch - 10
if lookbackEpoch > latestFinalizedEpoch {
lookbackEpoch = 0
}
lookbackDayEpoch := latestFinalizedEpoch - utils.EpochsPerDay()
if lookbackDayEpoch > latestFinalizedEpoch {
lookbackDayEpoch = 0
}

// Check that the db has at least one entry (it will error otherwise anyway)
burnedFeesCount := 0
if err := db.ReaderDb.Get(&burnedFeesCount, "SELECT COUNT(*) FROM chart_series WHERE indicator = 'BURNED_FEES'"); err != nil {
@@ -1541,12 +1550,6 @@ func getBurnPageData() (*types.BurnPageData, error) {
}

cutOffEpoch := utils.TimeToEpoch(cutOff)
// logger.Infof("cutoff epoch: %v", cutOffEpoch)
// var blockLastHour uint64
// db.ReaderDb.Get(&blockLastHour, "select ")

// var blockLastDay uint64
// db.ReaderDb.Get(&blockLastDay)

additionalBurned := float64(0)
err := db.ReaderDb.Get(&additionalBurned, "SELECT COALESCE(SUM(exec_base_fee_per_gas::numeric * exec_gas_used::numeric), 0) AS burnedfees FROM blocks WHERE epoch > $1", cutOffEpoch)
@@ -1556,49 +1559,33 @@ func getBurnPageData() (*types.BurnPageData, error) {
// logger.Infof("additonal burn: %v", additionalBurned)
data.TotalBurned += additionalBurned

err = db.ReaderDb.Get(&data.BurnRate1h, "SELECT COALESCE(SUM(exec_base_fee_per_gas::numeric * exec_gas_used::numeric) / 60, 0) AS burnedfees FROM blocks WHERE epoch > $1", latestEpoch-10)
err = db.ReaderDb.Get(&data.BurnRate1h, "SELECT COALESCE(SUM(exec_base_fee_per_gas::numeric * exec_gas_used::numeric) / 60, 0) AS burnedfees FROM blocks WHERE epoch > $1", lookbackEpoch)
if err != nil {
return nil, fmt.Errorf("error retrieving burn rate (1h) from blocks table: %v", err)
}

// err = db.ReaderDb.Get(&data.Emission, "select total_rewards_wei as emission from eth_store_stats order by day desc limit 1")
// if err != nil {
// return nil, fmt.Errorf("error retrieving emission (24h): %v", err)
// }

// swap this for GetEpochIncomeHistory in the future

validators, err := db.GetValidatorIndices()
if err != nil {
return nil, err
}

income, err := db.BigtableClient.GetValidatorIncomeDetailsHistory(validators, latestEpoch-10, latestEpoch)
income, err := db.BigtableClient.GetTotalValidatorIncomeDetailsHistory(lookbackEpoch, latestFinalizedEpoch)
if err != nil {
logger.WithError(err).Error("error getting validator income history")
}

total := &itypes.ValidatorEpochIncome{}

for _, epochs := range income {
// logger.Infof("epochs: %+v", epochs)
for _, details := range epochs {
// logger.Infof("income: %+v", details)
total.AttestationHeadReward += details.AttestationHeadReward
total.AttestationSourceReward += details.AttestationSourceReward
total.AttestationSourcePenalty += details.AttestationSourcePenalty
total.AttestationTargetReward += details.AttestationTargetReward
total.AttestationTargetPenalty += details.AttestationTargetPenalty
total.FinalityDelayPenalty += details.FinalityDelayPenalty
total.ProposerSlashingInclusionReward += details.ProposerSlashingInclusionReward
total.ProposerAttestationInclusionReward += details.ProposerAttestationInclusionReward
total.ProposerSyncInclusionReward += details.ProposerSyncInclusionReward
total.SyncCommitteeReward += details.SyncCommitteeReward
total.SyncCommitteePenalty += details.SyncCommitteePenalty
total.SlashingReward += details.SlashingReward
total.SlashingPenalty += details.SlashingPenalty
total.TxFeeRewardWei = utils.AddBigInts(total.TxFeeRewardWei, details.TxFeeRewardWei)
}
for _, details := range income {
total.AttestationHeadReward += details.AttestationHeadReward
total.AttestationSourceReward += details.AttestationSourceReward
total.AttestationSourcePenalty += details.AttestationSourcePenalty
total.AttestationTargetReward += details.AttestationTargetReward
total.AttestationTargetPenalty += details.AttestationTargetPenalty
total.FinalityDelayPenalty += details.FinalityDelayPenalty
total.ProposerSlashingInclusionReward += details.ProposerSlashingInclusionReward
total.ProposerAttestationInclusionReward += details.ProposerAttestationInclusionReward
total.ProposerSyncInclusionReward += details.ProposerSyncInclusionReward
total.SyncCommitteeReward += details.SyncCommitteeReward
total.SyncCommitteePenalty += details.SyncCommitteePenalty
total.SlashingReward += details.SlashingReward
total.SlashingPenalty += details.SlashingPenalty
total.TxFeeRewardWei = utils.AddBigInts(total.TxFeeRewardWei, details.TxFeeRewardWei)
}

rewards := decimal.NewFromBigInt(new(big.Int).SetBytes(total.TxFeeRewardWei), 0)
@@ -1627,12 +1614,12 @@ func getBurnPageData() (*types.BurnPageData, error) {
logger.Infof("burn rate per min: %v inflation per min: %v emission: %v", data.BurnRate1h, rewards.InexactFloat64(), data.Emission)
// logger.Infof("calculated emission: %v", data.Emission)

err = db.ReaderDb.Get(&data.BurnRate24h, "select COALESCE(SUM(exec_base_fee_per_gas::numeric * exec_gas_used::numeric) / (60 * 24), 0) as burnedfees from blocks where epoch >= $1", latestEpoch-utils.EpochsPerDay())
err = db.ReaderDb.Get(&data.BurnRate24h, "select COALESCE(SUM(exec_base_fee_per_gas::numeric * exec_gas_used::numeric) / (60 * 24), 0) as burnedfees from blocks where epoch >= $1", lookbackDayEpoch)
if err != nil {
return nil, fmt.Errorf("error retrieving burn rate (24h) from blocks table: %v", err)
}

err = db.ReaderDb.Get(&data.BlockUtilization, "select avg(exec_gas_used::numeric * 100 / exec_gas_limit) from blocks where epoch >= $1 and exec_gas_used > 0 and exec_gas_limit > 0", latestEpoch-utils.EpochsPerDay())
err = db.ReaderDb.Get(&data.BlockUtilization, "select avg(exec_gas_used::numeric * 100 / exec_gas_limit) from blocks where epoch >= $1 and exec_gas_used > 0 and exec_gas_limit > 0", lookbackDayEpoch)
if err != nil {
return nil, fmt.Errorf("error retrieving block utilization from blocks table: %v", err)
}
@@ -1642,8 +1629,6 @@ func getBurnPageData() (*types.BurnPageData, error) {
return nil, err
}

// db.BigAdminClient

data.Blocks = make([]*types.BurnPageDataBlock, 0, 1000)
for _, blk := range blocks {


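To make the shape change concrete, here is a small, self-contained sketch (using a simplified stand-in for itypes.ValidatorEpochIncome, not the actual types) that contrasts the old per-validator aggregation with the new per-epoch aggregation the burn page now uses:

package main

import "fmt"

// income is a simplified stand-in for itypes.ValidatorEpochIncome; only a few fields are shown.
type income struct {
    AttestationHeadReward   uint64
    AttestationSourceReward uint64
    AttestationTargetReward uint64
}

// Old shape: one entry per validator per epoch, so the burn page had to run a nested
// loop over every validator's epoch map (potentially hundreds of thousands of validators).
func sumPerValidator(perValidator map[uint64]map[uint64]*income) income {
    total := income{}
    for _, epochs := range perValidator {
        for _, details := range epochs {
            total.AttestationHeadReward += details.AttestationHeadReward
            total.AttestationSourceReward += details.AttestationSourceReward
            total.AttestationTargetReward += details.AttestationTargetReward
        }
    }
    return total
}

// New shape: GetTotalValidatorIncomeDetailsHistory returns one entry per epoch that is
// already summed across all validators, so a single flat loop over a handful of epochs suffices.
func sumPerEpoch(perEpoch map[uint64]*income) income {
    total := income{}
    for _, details := range perEpoch {
        total.AttestationHeadReward += details.AttestationHeadReward
        total.AttestationSourceReward += details.AttestationSourceReward
        total.AttestationTargetReward += details.AttestationTargetReward
    }
    return total
}

func main() {
    perEpoch := map[uint64]*income{
        240000: {AttestationHeadReward: 100, AttestationSourceReward: 200, AttestationTargetReward: 300},
        240001: {AttestationHeadReward: 110, AttestationSourceReward: 210, AttestationTargetReward: 310},
    }
    fmt.Printf("total: %+v\n", sumPerEpoch(perEpoch))
}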