incentives.go
package concentrated_liquidity

import (
	"bytes"
	"fmt"
	"strconv"
	"time"

	sdkprefix "github.com/cosmos/cosmos-sdk/store/prefix"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/query"
	"golang.org/x/exp/slices"

	"github.com/osmosis-labs/osmosis/osmoutils"
	"github.com/osmosis-labs/osmosis/osmoutils/accum"
	"github.com/osmosis-labs/osmosis/v16/x/concentrated-liquidity/math"
	"github.com/osmosis-labs/osmosis/v16/x/concentrated-liquidity/model"
	"github.com/osmosis-labs/osmosis/v16/x/concentrated-liquidity/types"
	gammtypes "github.com/osmosis-labs/osmosis/v16/x/gamm/types"
)
// createUptimeAccumulators creates accumulator objects in store for each supported uptime for the given poolId.
// The accumulators are initialized with the default (zero) values.
func (k Keeper) createUptimeAccumulators(ctx sdk.Context, poolId uint64) error {
for uptimeIndex := range types.SupportedUptimes {
err := accum.MakeAccumulator(ctx.KVStore(k.storeKey), types.KeyUptimeAccumulator(poolId, uint64(uptimeIndex)))
if err != nil {
return err
}
}
return nil
}
// getUptimeTrackerValues extracts the values of an array of uptime trackers
func getUptimeTrackerValues(uptimeTrackers []model.UptimeTracker) []sdk.DecCoins {
trackerValues := []sdk.DecCoins{}
for _, uptimeTracker := range uptimeTrackers {
trackerValues = append(trackerValues, uptimeTracker.UptimeGrowthOutside)
}
return trackerValues
}
// GetUptimeAccumulators gets the uptime accumulator objects for the given poolId
// Returns error if accumulator for the given poolId does not exist.
func (k Keeper) GetUptimeAccumulators(ctx sdk.Context, poolId uint64) ([]accum.AccumulatorObject, error) {
accums := make([]accum.AccumulatorObject, len(types.SupportedUptimes))
for uptimeIndex := range types.SupportedUptimes {
acc, err := accum.GetAccumulator(ctx.KVStore(k.storeKey), types.KeyUptimeAccumulator(poolId, uint64(uptimeIndex)))
if err != nil {
return []accum.AccumulatorObject{}, err
}
accums[uptimeIndex] = acc
}
return accums, nil
}
// GetUptimeAccumulatorValues gets the accumulator values for the supported uptimes for the given poolId
// Returns error if accumulator for the given poolId does not exist.
func (k Keeper) GetUptimeAccumulatorValues(ctx sdk.Context, poolId uint64) ([]sdk.DecCoins, error) {
uptimeAccums, err := k.GetUptimeAccumulators(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
uptimeValues := []sdk.DecCoins{}
for _, uptimeAccum := range uptimeAccums {
uptimeValues = append(uptimeValues, uptimeAccum.GetValue())
}
return uptimeValues, nil
}
// getInitialUptimeGrowthOppositeDirectionOfLastTraversalForTick returns an array of the initial values
// of uptime growth opposite the direction of last traversal for each supported uptime for a given tick.
// This value depends on the provided tick's location relative to the current tick. If the provided tick
// is greater than the current tick, then the value is zero. Otherwise, the value is the value of the
// current global spread reward growth.
//
// Similar to spread factors, by convention the value is chosen as if all of the uptime (seconds per liquidity) to date has
// occurred below the tick.
// Returns error if the pool with the given id does not exist or if fails to get any of the uptime accumulators.
func (k Keeper) getInitialUptimeGrowthOppositeDirectionOfLastTraversalForTick(ctx sdk.Context, poolId uint64, tick int64) ([]sdk.DecCoins, error) {
pool, err := k.getPoolById(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
currentTick := pool.GetCurrentTick()
if currentTick >= tick {
uptimeAccumulatorValues, err := k.GetUptimeAccumulatorValues(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
return uptimeAccumulatorValues, nil
}
// If currentTick < tick, we return len(SupportedUptimes) empty DecCoins
emptyUptimeValues := []sdk.DecCoins{}
for range types.SupportedUptimes {
emptyUptimeValues = append(emptyUptimeValues, emptyCoins)
}
return emptyUptimeValues, nil
}
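// Illustrative sketch (not part of the original file): the seeding convention above reduced to a
// pure function over hypothetical inputs. Ticks at or below the current tick are seeded with the
// full global growth to date; ticks above it are seeded with empty growth.
// nolint: unused
func exampleInitialUptimeGrowthSeed(currentTick, tick int64, globalGrowth []sdk.DecCoins) []sdk.DecCoins {
	if currentTick >= tick {
		// By convention, all growth to date is treated as having occurred below the tick.
		return globalGrowth
	}
	// Otherwise, seed every supported uptime with empty growth.
	return make([]sdk.DecCoins, len(globalGrowth))
}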
// prepareBalancerPoolAsFullRange finds the canonical Balancer pool that corresponds to the given CL poolId and,
// if it exists, adds the number of full range shares it qualifies for to the CL pool uptime accumulators.
// This is functionally equivalent to treating the Balancer pool shares as a single full range position on the CL pool,
// but just for the purposes of incentives. The Balancer pool liquidity is not actually traded against in CL pool swaps.
// The given uptime accumulators are mutated to reflect the added full range shares.
//
// If no canonical Balancer pool exists, this function is a no-op.
//
// Returns the Balancer pool ID if it exists (otherwise 0), and number of full range shares it qualifies for.
// Returns error if a canonical pool ID exists but there is an issue when retrieving the pool assets for this pool.
//
// CONTRACT: canonical Balancer pool has the same denoms as the CL pool and is an even-weighted 2-asset pool.
// CONTRACT: the caller validates that the pool with the given id exists.
// CONTRACT: caller is responsible for the uptimeAccums to be up-to-date.
// CONTRACT: uptimeAccums are associated with the given pool id.
func (k Keeper) prepareBalancerPoolAsFullRange(ctx sdk.Context, clPoolId uint64, uptimeAccums []accum.AccumulatorObject) (uint64, sdk.Dec, error) {
// Get CL pool from ID
clPool, err := k.getPoolById(ctx, clPoolId)
if err != nil {
return 0, sdk.ZeroDec(), err
}
// We let this check fail quietly if no canonical Balancer pool ID exists.
canonicalBalancerPoolId, _ := k.gammKeeper.GetLinkedBalancerPoolID(ctx, clPoolId)
if canonicalBalancerPoolId == 0 {
return 0, sdk.ZeroDec(), nil
}
// Get total balancer pool liquidity (denominated in pool coins)
totalBalancerPoolLiquidity, err := k.gammKeeper.GetTotalPoolLiquidity(ctx, canonicalBalancerPoolId)
if err != nil {
return 0, sdk.ZeroDec(), err
}
// Get total balancer shares for Balancer pool
totalBalancerPoolShares, err := k.gammKeeper.GetTotalPoolShares(ctx, canonicalBalancerPoolId)
if err != nil {
return 0, sdk.ZeroDec(), err
}
// Get total shares bonded on the longest lockup duration for Balancer pool
longestDuration, err := k.poolIncentivesKeeper.GetLongestLockableDuration(ctx)
if err != nil {
return 0, sdk.ZeroDec(), err
}
bondedShares := k.lockupKeeper.GetLockedDenom(ctx, gammtypes.GetPoolShareDenom(canonicalBalancerPoolId), longestDuration)
// We fail quietly if the Balancer pool has no bonded shares.
if bondedShares.IsZero() {
return 0, sdk.ZeroDec(), nil
}
// Calculate portion of Balancer pool shares that are bonded
bondedShareRatio := bondedShares.ToDec().Quo(totalBalancerPoolShares.ToDec())
// Calculate rough number of assets in Balancer pool that are bonded
balancerPoolLiquidity := sdk.NewCoins()
for _, liquidityToken := range totalBalancerPoolLiquidity {
// Rounding behavior is not critical here, but for simplicity we do bankers multiplication then truncate.
bondedLiquidityAmount := liquidityToken.Amount.ToDec().Mul(bondedShareRatio).TruncateInt()
balancerPoolLiquidity = balancerPoolLiquidity.Add(sdk.NewCoin(liquidityToken.Denom, bondedLiquidityAmount))
}
// Validate Balancer pool liquidity. These properties should already be guaranteed by the caller,
// but we check them anyway as an additional guardrail in case migration link validation is ever
// relaxed in the future.
// Note that we check denom compatibility later, and pool weights technically do not matter as they
// are analogous to changing the spot price, which is handled by our lower bounding.
if len(balancerPoolLiquidity) != 2 {
return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity}
}
// We ensure that the asset ordering is correct when passing Balancer assets into the CL pool.
var asset0Amount, asset1Amount sdk.Int
if balancerPoolLiquidity[0].Denom == clPool.GetToken0() {
asset0Amount = balancerPoolLiquidity[0].Amount
asset1Amount = balancerPoolLiquidity[1].Amount
// Ensure second denom matches (bal1 -> CL1)
if balancerPoolLiquidity[1].Denom != clPool.GetToken1() {
return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity}
}
} else {
asset0Amount = balancerPoolLiquidity[1].Amount
asset1Amount = balancerPoolLiquidity[0].Amount
// Ensure second denom matches (bal1 -> CL0)
if balancerPoolLiquidity[1].Denom != clPool.GetToken0() {
return 0, sdk.ZeroDec(), types.ErrInvalidBalancerPoolLiquidityError{ClPoolId: clPoolId, BalancerPoolId: canonicalBalancerPoolId, BalancerPoolLiquidity: balancerPoolLiquidity}
}
}
// Calculate the amount of liquidity the Balancer amounts qualify in the CL pool. Note that since we use the CL spot price, this is
// safe against prices drifting apart between the two pools (we take the lower bound on the qualifying liquidity in this case).
// The `sqrtPriceLowerTick` and `sqrtPriceUpperTick` fields are set to the appropriate values for a full range position.
qualifyingFullRangeSharesPreDiscount := math.GetLiquidityFromAmounts(clPool.GetCurrentSqrtPrice(), types.MinSqrtPrice, types.MaxSqrtPrice, asset0Amount, asset1Amount)
// Get discount ratio from governance-set discount rate. Note that the case we check for is technically impossible, but we include
// the check as a guardrail anyway. Specifically, we error if the discount ratio is not in [0, 1].
// Note that discount rate is the amount that is being discounted by (e.g. 0.05 for a 5% discount), while discount ratio is what
// we multiply by to apply the discount (e.g. 0.95 for a 5% discount).
balancerSharesDiscountRatio := sdk.OneDec().Sub(k.GetParams(ctx).BalancerSharesRewardDiscount)
if !balancerSharesDiscountRatio.GTE(sdk.ZeroDec()) || !balancerSharesDiscountRatio.LTE(sdk.OneDec()) {
return 0, sdk.ZeroDec(), types.InvalidDiscountRateError{DiscountRate: k.GetParams(ctx).BalancerSharesRewardDiscount}
}
// Apply discount rate to qualifying full range shares
qualifyingFullRangeShares := balancerSharesDiscountRatio.Mul(qualifyingFullRangeSharesPreDiscount)
// Create a temporary position record on all uptime accumulators with this amount. We expect this to be cleared later
// with `claimAndResetFullRangeBalancerPool`
// Add full range equivalent shares to each uptime accumulator.
// Note that we expect spot price divergence between the CL and balancer pools to be handled by `GetLiquidityFromAmounts`
// returning a lower bound on qualifying liquidity.
for uptimeIndex := range uptimeAccums {
balancerPositionName := string(types.KeyBalancerFullRange(clPoolId, canonicalBalancerPoolId, uint64(uptimeIndex)))
err := uptimeAccums[uptimeIndex].NewPosition(balancerPositionName, qualifyingFullRangeShares, nil)
if err != nil {
return 0, sdk.ZeroDec(), err
}
}
return canonicalBalancerPoolId, qualifyingFullRangeShares, nil
}
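// Illustrative sketch (not part of the original file): the bonded-share and discount arithmetic
// above with hypothetical numbers. Suppose a Balancer pool has 1000 total shares, 250 of which are
// bonded for the longest lockable duration, 10_000 units of each asset, and the governance-set
// discount rate is 0.05 (a 5% discount).
// nolint: unused
func exampleBalancerFullRangeShareDiscount() sdk.Dec {
	totalShares := sdk.NewDec(1000)
	bondedShares := sdk.NewDec(250)
	assetBalance := sdk.NewDec(10000)
	discountRate := sdk.MustNewDecFromStr("0.05")

	// Portion of the Balancer pool that is bonded: 250 / 1000 = 0.25.
	bondedShareRatio := bondedShares.Quo(totalShares)
	// Rough bonded liquidity per asset: 10_000 * 0.25 = 2_500 (truncated).
	bondedLiquidityPerAsset := assetBalance.Mul(bondedShareRatio).TruncateInt()
	_ = bondedLiquidityPerAsset
	// Discount ratio applied to the qualifying full range shares: 1 - 0.05 = 0.95.
	discountRatio := sdk.OneDec().Sub(discountRate)
	// A hypothetical pre-discount qualifying liquidity of 4_000 becomes 3_800 after the discount.
	return discountRatio.Mul(sdk.NewDec(4000))
}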
// claimAndResetFullRangeBalancerPool claims rewards for the "full range" shares corresponding to the given Balancer pool, and
// then deletes the record from the uptime accumulators. It adds the claimed rewards to the gauge corresponding to the longest duration
// lock on the Balancer pool. Importantly, this is a dynamic check such that if a longer duration lock is added in the future, it will
// begin using that lock. The given uptime accumulators are mutated to reflect the claimed rewards.
//
// Returns the number of coins that were claimed and distributed.
// Returns error if either reward claiming, record deletion or adding to the gauge fails.
// CONTRACT: the caller validates that the pool with the given id exists.
// CONTRACT: caller is responsible for the uptimeAccums to be up-to-date.
// CONTRACT: uptimeAccums are associated with the given pool id.
func (k Keeper) claimAndResetFullRangeBalancerPool(ctx sdk.Context, clPoolId uint64, balPoolId uint64, uptimeAccums []accum.AccumulatorObject) (sdk.Coins, error) {
// Get CL pool from ID. This also serves as an early pool existence check.
clPool, err := k.getPoolById(ctx, clPoolId)
if err != nil {
return sdk.Coins{}, err
}
// Get longest lockup period for pool
longestDuration, err := k.poolIncentivesKeeper.GetLongestLockableDuration(ctx)
if err != nil {
return sdk.Coins{}, err
}
// Get gauge corresponding to the longest lockup period
gaugeId, err := k.poolIncentivesKeeper.GetPoolGaugeId(ctx, balPoolId, longestDuration)
if err != nil {
return sdk.Coins{}, err
}
// Claim rewards on each uptime accumulator. Delete each record after claiming.
totalRewards := sdk.NewCoins()
for uptimeIndex := range uptimeAccums {
// Generate key for the record on the current uptime accumulator
balancerPositionName := string(types.KeyBalancerFullRange(clPoolId, balPoolId, uint64(uptimeIndex)))
// Ensure that the given balancer pool has a record on the given uptime accumulator.
// We expect this to have been set in a prior call to `prepareBalancerPoolAsFullRange`, which
// should precede all calls of `claimAndResetFullRangeBalancerPool`
recordExists, err := uptimeAccums[uptimeIndex].HasPosition(balancerPositionName)
if err != nil {
return sdk.Coins{}, err
}
if !recordExists {
return sdk.Coins{}, types.BalancerRecordNotFoundError{ClPoolId: clPoolId, BalancerPoolId: balPoolId, UptimeIndex: uint64(uptimeIndex)}
}
// Remove shares from record so it gets cleared when rewards are claimed.
// Note that we expect these shares to be correctly updated in a prior call to `prepareBalancerPoolAsFullRange`.
numShares, err := uptimeAccums[uptimeIndex].GetPositionSize(balancerPositionName)
if err != nil {
return sdk.Coins{}, err
}
err = uptimeAccums[uptimeIndex].RemoveFromPosition(balancerPositionName, numShares)
if err != nil {
return sdk.Coins{}, err
}
// Claim rewards and log the amount claimed to be added to the relevant gauge later
claimedRewards, _, err := uptimeAccums[uptimeIndex].ClaimRewards(balancerPositionName)
if err != nil {
return sdk.Coins{}, err
}
totalRewards = totalRewards.Add(claimedRewards...)
// Ensure record was deleted
recordExists, err = uptimeAccums[uptimeIndex].HasPosition(balancerPositionName)
if err != nil {
return sdk.Coins{}, err
}
if recordExists {
return sdk.Coins{}, types.BalancerRecordNotClearedError{ClPoolId: clPoolId, BalancerPoolId: balPoolId, UptimeIndex: uint64(uptimeIndex)}
}
}
// After claiming accrued rewards from all uptime accumulators, add the total claimed amount to the
// Balancer pool's longest duration gauge. To avoid unnecessarily triggering gauge-related listeners,
// we only run this if there are nonzero rewards.
if !totalRewards.Empty() {
err = k.incentivesKeeper.AddToGaugeRewards(ctx, clPool.GetIncentivesAddress(), totalRewards, gaugeId)
if err != nil {
return sdk.Coins{}, err
}
}
return totalRewards, nil
}
// updatePoolUptimeAccumulatorsToNow syncs all uptime accumulators that are refetched from state for the given
// pool id to be up to date for the given pool. Updates the pool's last liquidity update time with
// the current block time and writes the updated pool to state.
// Specifically, it gets the time elapsed since the last update and divides it
// by the qualifying liquidity on the active tick. It then adds this value to the
// respective accumulator and updates relevant time trackers accordingly.
// WARNING: this method may mutate the pool, make sure to refetch the pool after calling this method.
// Note: the following are the differences of this function from updateGivenPoolUptimeAccumulatorsToNow:
// * this function fetches the uptime accumulators from state.
// * this function fetches a pool from state by id.
// updateGivenPoolUptimeAccumulatorsToNow is used in swaps for performance reasons to minimize state reads.
// updatePoolUptimeAccumulatorsToNow is used in all other cases.
func (k Keeper) updatePoolUptimeAccumulatorsToNow(ctx sdk.Context, poolId uint64) error {
pool, err := k.getPoolById(ctx, poolId)
if err != nil {
return err
}
uptimeAccums, err := k.GetUptimeAccumulators(ctx, poolId)
if err != nil {
return err
}
if err := k.updateGivenPoolUptimeAccumulatorsToNow(ctx, pool, uptimeAccums); err != nil {
return err
}
return nil
}
// updateGivenPoolUptimeAccumulatorsToNow syncs all given uptime accumulators for a given pool id
// Updates the pool last liquidity update time with the current block time and writes the updated pool to state.
// If last liquidity update happened in the current block, this function is a no-op.
// Specifically, it gets the time elapsed since the last update and divides it
// by the qualifying liquidity for each uptime. It then adds this value to the
// respective accumulator and updates relevant time trackers accordingly.
// CONTRACT: the caller validates that the pool with the given id exists.
// CONTRACT: given uptimeAccums are associated with the given pool id.
// CONTRACT: caller is responsible for the uptimeAccums to be up-to-date.
// WARNING: this method may mutate the pool, make sure to refetch the pool after calling this method.
// Note: the following are the differences of this function from updatePoolUptimeAccumulatorsToNow:
// * this function does not refetch the uptime accumulators from state.
// * this function operates on the given pool directly, instead of fetching it from state.
// This is to avoid unnecessary state reads during swaps for performance reasons.
func (k Keeper) updateGivenPoolUptimeAccumulatorsToNow(ctx sdk.Context, pool types.ConcentratedPoolExtension, uptimeAccums []accum.AccumulatorObject) error {
if pool == nil {
return types.ErrPoolNil
}
// Since our base unit of time is nanoseconds, we divide with truncation by 10^9 (written 10e8 in Go's
// numeric notation, i.e. 10 * 10^8) to get time elapsed in seconds
timeElapsedNanoSec := sdk.NewDec(int64(ctx.BlockTime().Sub(pool.GetLastLiquidityUpdate())))
timeElapsedSec := timeElapsedNanoSec.Quo(sdk.NewDec(10e8))
// If no time has elapsed, this function is a no-op
if timeElapsedSec.Equal(sdk.ZeroDec()) {
return nil
}
if timeElapsedSec.LT(sdk.ZeroDec()) {
return types.TimeElapsedNotPositiveError{TimeElapsed: timeElapsedSec}
}
poolId := pool.GetId()
// Set up canonical balancer pool as a full range position for the purposes of incentives.
// Note that this function fails quietly if no canonical balancer pool exists and only errors
// if it does exist and there is a lower level inconsistency.
balancerPoolId, qualifyingBalancerShares, err := k.prepareBalancerPoolAsFullRange(ctx, poolId, uptimeAccums)
if err != nil {
return err
}
// Get relevant pool-level values
poolIncentiveRecords, err := k.GetAllIncentiveRecordsForPool(ctx, poolId)
if err != nil {
return err
}
// We optimistically assume that all liquidity on the active tick qualifies and handle
// uptime-related checks in forfeiting logic.
qualifyingLiquidity := pool.GetLiquidity().Add(qualifyingBalancerShares)
for uptimeIndex := range uptimeAccums {
// Get relevant uptime-level values
curUptimeDuration := types.SupportedUptimes[uptimeIndex]
// If there is no share to be incentivized for the current uptime accumulator, we leave it unchanged
if qualifyingLiquidity.LT(sdk.OneDec()) {
continue
}
incentivesToAddToCurAccum, updatedPoolRecords, err := calcAccruedIncentivesForAccum(ctx, curUptimeDuration, qualifyingLiquidity, timeElapsedSec, poolIncentiveRecords)
if err != nil {
return err
}
// Emit incentives to current uptime accumulator
uptimeAccums[uptimeIndex].AddToAccumulator(incentivesToAddToCurAccum)
// Update pool records (stored in state after loop)
poolIncentiveRecords = updatedPoolRecords
}
// Update pool incentive records and LastLiquidityUpdate time in state to reflect emitted incentives
err = k.setMultipleIncentiveRecords(ctx, poolIncentiveRecords)
if err != nil {
return err
}
pool.SetLastLiquidityUpdate(ctx.BlockTime())
err = k.setPool(ctx, pool)
if err != nil {
return err
}
// Claim and clear the balancer full range shares from the current pool's uptime accumulators.
// This is to avoid having to update accumulators every time the canonical balancer pool changes state.
// Even though this exposes CL LPs to getting immediately diluted by a large Balancer position, this would
// require a lot of capital to be tied up in a two week bond, which is a viable tradeoff given the relative
// simplicity of this approach.
if balancerPoolId != 0 {
_, err := k.claimAndResetFullRangeBalancerPool(ctx, poolId, balancerPoolId, uptimeAccums)
if err != nil {
return err
}
}
return nil
}
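// Illustrative sketch (not part of the original file): the nanosecond-to-second conversion used
// above with hypothetical timestamps. A time.Duration is an int64 count of nanoseconds, so dividing
// by 10^9 (10e8 above) yields elapsed seconds.
// nolint: unused
func exampleElapsedSeconds(lastUpdate, blockTime time.Time) sdk.Dec {
	elapsedNanoSec := sdk.NewDec(int64(blockTime.Sub(lastUpdate)))
	// E.g. 12.5 seconds elapsed -> 12_500_000_000 ns / 1_000_000_000 = 12.5.
	return elapsedNanoSec.Quo(sdk.NewDec(10e8))
}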
// calcAccruedIncentivesForAccum calculates IncentivesPerLiquidity to be added to an accum.
// This function is non-mutative. It operates on and returns an updated _copy_ of the passed in incentives records.
// Returns the IncentivesPerLiquidity value and an updated list of IncentiveRecords that
// reflect emitted incentives
// Returns error if the qualifying liquidity/time elapsed are zero.
func calcAccruedIncentivesForAccum(ctx sdk.Context, accumUptime time.Duration, liquidityInAccum sdk.Dec, timeElapsed sdk.Dec, poolIncentiveRecords []types.IncentiveRecord) (sdk.DecCoins, []types.IncentiveRecord, error) {
if !liquidityInAccum.IsPositive() || !timeElapsed.IsPositive() {
return sdk.DecCoins{}, []types.IncentiveRecord{}, types.QualifyingLiquidityOrTimeElapsedNotPositiveError{QualifyingLiquidity: liquidityInAccum, TimeElapsed: timeElapsed}
}
copyPoolIncentiveRecords := make([]types.IncentiveRecord, len(poolIncentiveRecords))
copy(copyPoolIncentiveRecords, poolIncentiveRecords)
incentivesToAddToCurAccum := sdk.NewDecCoins()
for incentiveIndex, incentiveRecord := range copyPoolIncentiveRecords {
// We consider all incentives matching the current uptime that began emitting before the current blocktime
if incentiveRecord.IncentiveRecordBody.StartTime.UTC().Before(ctx.BlockTime().UTC()) && incentiveRecord.MinUptime == accumUptime {
// Total amount emitted = time elapsed * emission
totalEmittedAmount := timeElapsed.Mul(incentiveRecord.IncentiveRecordBody.EmissionRate)
// Incentives to emit per unit of qualifying liquidity = total emitted / liquidityInAccum
// Note that we truncate to ensure we do not overdistribute incentives
incentivesPerLiquidity := totalEmittedAmount.QuoTruncate(liquidityInAccum)
emittedIncentivesPerLiquidity := sdk.NewDecCoinFromDec(incentiveRecord.IncentiveDenom, incentivesPerLiquidity)
// Ensure that we only emit if there are enough incentives remaining to be emitted
remainingRewards := poolIncentiveRecords[incentiveIndex].IncentiveRecordBody.RemainingAmount
// if total amount emitted does not exceed remaining rewards,
if totalEmittedAmount.LTE(remainingRewards) {
incentivesToAddToCurAccum = incentivesToAddToCurAccum.Add(emittedIncentivesPerLiquidity)
// Update incentive record to reflect the incentives that were emitted
remainingRewards = remainingRewards.Sub(totalEmittedAmount)
// Each incentive record should only be modified once
copyPoolIncentiveRecords[incentiveIndex].IncentiveRecordBody.RemainingAmount = remainingRewards
} else {
// If there are not enough incentives remaining to be emitted, we emit the remaining rewards.
// When the returned records are set in state, all records with remaining rewards of zero will be cleared.
remainingIncentivesPerLiquidity := remainingRewards.QuoTruncate(liquidityInAccum)
emittedIncentivesPerLiquidity = sdk.NewDecCoinFromDec(incentiveRecord.IncentiveDenom, remainingIncentivesPerLiquidity)
incentivesToAddToCurAccum = incentivesToAddToCurAccum.Add(emittedIncentivesPerLiquidity)
copyPoolIncentiveRecords[incentiveIndex].IncentiveRecordBody.RemainingAmount = sdk.ZeroDec()
}
}
}
return incentivesToAddToCurAccum, copyPoolIncentiveRecords, nil
}
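// Illustrative sketch (not part of the original file): the per-liquidity emission arithmetic above
// with hypothetical numbers. With an emission rate of 10 tokens/sec, 60 seconds elapsed, and 1_000
// units of qualifying liquidity, 600 tokens are emitted in total and the accumulator grows by
// 600 / 1000 = 0.6 tokens per unit of liquidity (truncated to avoid overdistribution).
// nolint: unused
func exampleIncentivesPerLiquidity() sdk.Dec {
	emissionRate := sdk.NewDec(10) // tokens emitted per second
	timeElapsed := sdk.NewDec(60)  // seconds since the last update
	liquidity := sdk.NewDec(1000)  // qualifying liquidity in the accumulator

	totalEmitted := timeElapsed.Mul(emissionRate) // 600 tokens
	return totalEmitted.QuoTruncate(liquidity)    // 0.6 tokens per liquidity unit
}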
// findUptimeIndex finds the uptime index for the passed in min uptime.
// Returns error if uptime index cannot be found.
func findUptimeIndex(uptime time.Duration) (int, error) {
index := slices.IndexFunc(types.SupportedUptimes, func(e time.Duration) bool { return e == uptime })
if index == -1 {
return index, types.InvalidUptimeIndexError{MinUptime: uptime, SupportedUptimes: types.SupportedUptimes}
}
return index, nil
}
// setIncentiveRecord sets the passed in incentive record in state
// Errors if the incentive record has an unsupported min uptime.
func (k Keeper) setIncentiveRecord(ctx sdk.Context, incentiveRecord types.IncentiveRecord) error {
store := ctx.KVStore(k.storeKey)
incentiveCreator, err := sdk.AccAddressFromBech32(incentiveRecord.IncentiveCreatorAddr)
if err != nil {
return err
}
uptimeIndex, err := findUptimeIndex(incentiveRecord.MinUptime)
if err != nil {
return err
}
key := types.KeyIncentiveRecord(incentiveRecord.PoolId, uptimeIndex, incentiveRecord.IncentiveDenom, incentiveCreator)
incentiveRecordBody := types.IncentiveRecordBody{
RemainingAmount: incentiveRecord.IncentiveRecordBody.RemainingAmount,
EmissionRate: incentiveRecord.IncentiveRecordBody.EmissionRate,
StartTime: incentiveRecord.IncentiveRecordBody.StartTime,
}
// If the remaining amount is zero and the record already exists in state, we delete the record from state.
// If it's zero and the record doesn't exist in state, we do a no-op.
// In all other cases, we update the record in state
if store.Has(key) && incentiveRecordBody.RemainingAmount.IsZero() {
store.Delete(key)
} else if incentiveRecordBody.RemainingAmount.GT(sdk.ZeroDec()) {
osmoutils.MustSet(store, key, &incentiveRecordBody)
}
return nil
}
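// Illustrative sketch (not part of the original file): the write semantics of setIncentiveRecord
// reduced to a pure decision over hypothetical inputs. Returns "delete", "set", or "noop".
// nolint: unused
func exampleIncentiveRecordWriteAction(recordExistsInStore bool, remainingAmount sdk.Dec) string {
	switch {
	case recordExistsInStore && remainingAmount.IsZero():
		return "delete" // exhausted record is pruned from state
	case remainingAmount.GT(sdk.ZeroDec()):
		return "set" // record with remaining rewards is (re)written
	default:
		return "noop" // zero-amount record that was never in state is skipped
	}
}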
// setMultipleIncentiveRecords sets multiple incentive records in state
func (k Keeper) setMultipleIncentiveRecords(ctx sdk.Context, incentiveRecords []types.IncentiveRecord) error {
for _, incentiveRecord := range incentiveRecords {
err := k.setIncentiveRecord(ctx, incentiveRecord)
if err != nil {
return err
}
}
return nil
}
// GetIncentiveRecord gets the incentive record corresponding to the passed in values from store
func (k Keeper) GetIncentiveRecord(ctx sdk.Context, poolId uint64, denom string, minUptime time.Duration, incentiveCreator sdk.AccAddress) (types.IncentiveRecord, error) {
store := ctx.KVStore(k.storeKey)
incentiveBodyStruct := types.IncentiveRecordBody{}
uptimeIndex, err := findUptimeIndex(minUptime)
if err != nil {
return types.IncentiveRecord{}, err
}
key := types.KeyIncentiveRecord(poolId, uptimeIndex, denom, incentiveCreator)
found, err := osmoutils.Get(store, key, &incentiveBodyStruct)
if err != nil {
return types.IncentiveRecord{}, err
}
if !found {
return types.IncentiveRecord{}, types.IncentiveRecordNotFoundError{PoolId: poolId, IncentiveDenom: denom, MinUptime: minUptime, IncentiveCreatorStr: incentiveCreator.String()}
}
return types.IncentiveRecord{
PoolId: poolId,
IncentiveDenom: denom,
IncentiveCreatorAddr: incentiveCreator.String(),
MinUptime: minUptime,
IncentiveRecordBody: incentiveBodyStruct,
}, nil
}
// GetAllIncentiveRecordsForPool gets all the incentive records for poolId
// Returns error if it is unable to retrieve records.
func (k Keeper) GetAllIncentiveRecordsForPool(ctx sdk.Context, poolId uint64) ([]types.IncentiveRecord, error) {
return osmoutils.GatherValuesFromStorePrefixWithKeyParser(ctx.KVStore(k.storeKey), types.KeyPoolIncentiveRecords(poolId), ParseFullIncentiveRecordFromBz)
}
// GetIncentiveRecordSerialized gets incentive records based on limit set by pagination request.
func (k Keeper) GetIncentiveRecordSerialized(ctx sdk.Context, poolId uint64, pagination *query.PageRequest) ([]types.IncentiveRecord, *query.PageResponse, error) {
incentivesRecordStore := sdkprefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPoolIncentiveRecords(poolId))
incentiveRecords := []types.IncentiveRecord{}
pageRes, err := query.Paginate(incentivesRecordStore, pagination, func(key, value []byte) error {
parts := bytes.Split(key, []byte(types.KeySeparator))
minUptimeIndex, err := strconv.ParseUint(string(parts[1]), 10, 64)
if err != nil {
return err
}
denom := string(parts[2])
incentiveCreator, err := sdk.AccAddressFromBech32(string(parts[3]))
if err != nil {
return err
}
incRecord, err := k.GetIncentiveRecord(ctx, poolId, denom, types.SupportedUptimes[minUptimeIndex], incentiveCreator)
if err != nil {
return err
}
incentiveRecords = append(incentiveRecords, incRecord)
return nil
})
if err != nil {
return nil, nil, err
}
return incentiveRecords, pageRes, err
}
// getAllIncentiveRecordsForUptime gets all the incentive records for the given poolId and minUptime
// Returns error if the passed in uptime is not supported or it is unable to retrieve records.
func (k Keeper) getAllIncentiveRecordsForUptime(ctx sdk.Context, poolId uint64, minUptime time.Duration) ([]types.IncentiveRecord, error) {
// Ensure pool exists in state
_, err := k.getPoolById(ctx, poolId)
if err != nil {
return []types.IncentiveRecord{}, err
}
uptimeIndex, err := findUptimeIndex(minUptime)
if err != nil {
return []types.IncentiveRecord{}, err
}
return osmoutils.GatherValuesFromStorePrefixWithKeyParser(ctx.KVStore(k.storeKey), types.KeyUptimeIncentiveRecords(poolId, uptimeIndex), ParseFullIncentiveRecordFromBz)
}
// GetUptimeGrowthInsideRange returns the uptime growth within the given tick range for all supported uptimes.
// UptimeGrowthInside tracks the incentives accrued by a specific LP within a pool. It keeps track of the cumulative amount of incentives
// collected by a specific LP within a pool. This function also measures the growth of incentives accrued by a particular LP since the last
// time incentives were collected.
// WARNING: this method may mutate the pool, make sure to refetch the pool after calling this method.
// The mutation occurs in the call to GetTickInfo().
func (k Keeper) GetUptimeGrowthInsideRange(ctx sdk.Context, poolId uint64, lowerTick int64, upperTick int64) ([]sdk.DecCoins, error) {
pool, err := k.getPoolById(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
// Get global uptime accumulator values
globalUptimeValues, err := k.GetUptimeAccumulatorValues(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
// Get current, lower, and upper ticks
currentTick := pool.GetCurrentTick()
lowerTickInfo, err := k.GetTickInfo(ctx, poolId, lowerTick)
if err != nil {
return []sdk.DecCoins{}, err
}
upperTickInfo, err := k.GetTickInfo(ctx, poolId, upperTick)
if err != nil {
return []sdk.DecCoins{}, err
}
// Calculate uptime growth between lower and upper ticks
// Note that we regard "within range" to mean [lowerTick, upperTick),
// inclusive of lowerTick and exclusive of upperTick.
lowerTickUptimeValues := getUptimeTrackerValues(lowerTickInfo.UptimeTrackers.List)
upperTickUptimeValues := getUptimeTrackerValues(upperTickInfo.UptimeTrackers.List)
// If current tick is below range, we subtract uptime growth of upper tick from that of lower tick
if currentTick < lowerTick {
return osmoutils.SubDecCoinArrays(lowerTickUptimeValues, upperTickUptimeValues)
} else if currentTick < upperTick {
// If current tick is within range, we subtract uptime growth of lower and upper tick from global growth
globalMinusUpper, err := osmoutils.SubDecCoinArrays(globalUptimeValues, upperTickUptimeValues)
if err != nil {
return []sdk.DecCoins{}, err
}
return osmoutils.SubDecCoinArrays(globalMinusUpper, lowerTickUptimeValues)
} else {
// If current tick is above range, we subtract uptime growth of lower tick from that of upper tick
return osmoutils.SubDecCoinArrays(upperTickUptimeValues, lowerTickUptimeValues)
}
}
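// Illustrative sketch (not part of the original file): the three-case growth-inside calculation
// above for a single uptime, using plain sdk.Dec values instead of DecCoins. The inputs are
// hypothetical; the real implementation operates on arrays of DecCoins via osmoutils.SubDecCoinArrays.
// nolint: unused
func exampleUptimeGrowthInside(currentTick, lowerTick, upperTick int64, global, lowerOutside, upperOutside sdk.Dec) sdk.Dec {
	if currentTick < lowerTick {
		// Current tick below the range: growth inside = lower outside - upper outside.
		return lowerOutside.Sub(upperOutside)
	} else if currentTick < upperTick {
		// Current tick within the range: growth inside = global - lower outside - upper outside.
		return global.Sub(lowerOutside).Sub(upperOutside)
	}
	// Current tick above the range: growth inside = upper outside - lower outside.
	return upperOutside.Sub(lowerOutside)
}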
// GetUptimeGrowthOutsideRange returns the uptime growth outside the given tick range for all supported uptimes.
// UptimeGrowthOutside tracks the incentives accrued by the entire pool. It keeps track of the cumulative amount of incentives collected
// by a specific pool since the last time incentives were accrued.
// We use this function to calculate the total amount of incentives owed to the LPs when they withdraw their liquidity or when they
// attempt to claim their incentives.
// When LPs are ready to claim their incentives we calculate it using: (the LP's number of shares) * (uptimeGrowthOutside - uptimeGrowthInside)
func (k Keeper) GetUptimeGrowthOutsideRange(ctx sdk.Context, poolId uint64, lowerTick int64, upperTick int64) ([]sdk.DecCoins, error) {
globalUptimeValues, err := k.GetUptimeAccumulatorValues(ctx, poolId)
if err != nil {
return []sdk.DecCoins{}, err
}
uptimeGrowthInside, err := k.GetUptimeGrowthInsideRange(ctx, poolId, lowerTick, upperTick)
if err != nil {
return []sdk.DecCoins{}, err
}
return osmoutils.SubDecCoinArrays(globalUptimeValues, uptimeGrowthInside)
}
// initOrUpdatePositionUptimeAccumulators either initializes or updates liquidity for uptime position accumulators for every supported uptime.
// It syncs the uptime accumulators to the current block time. If this is a new position, it creates a new position accumulator for every supported uptime accumulator.
// If this is an existing position, it updates the existing position accumulator for every supported uptime accumulator.
// Returns error if:
// - fails to update global uptime accumulators
// - fails to get global uptime accumulators
// - fails to calculate uptime growth inside range
// - fails to calculate uptime growth outside range
// - fails to determine if position accumulator is new or existing
// - fails to create/update position uptime accumulators
// WARNING: this method may mutate the pool, make sure to refetch the pool after calling this method.
func (k Keeper) initOrUpdatePositionUptimeAccumulators(ctx sdk.Context, poolId uint64, liquidity sdk.Dec, owner sdk.AccAddress, lowerTick, upperTick int64, liquidityDelta sdk.Dec, positionId uint64) error {
// We update accumulators _prior_ to any position-related updates to ensure
// past rewards aren't distributed to new liquidity. We also update pool's
// LastLiquidityUpdate here.
err := k.updatePoolUptimeAccumulatorsToNow(ctx, poolId)
if err != nil {
return err
}
// Get uptime accumulators for every supported uptime.
uptimeAccumulators, err := k.GetUptimeAccumulators(ctx, poolId)
if err != nil {
return err
}
globalUptimeGrowthInsideRange, err := k.GetUptimeGrowthInsideRange(ctx, poolId, lowerTick, upperTick)
if err != nil {
return err
}
globalUptimeGrowthOutsideRange, err := k.GetUptimeGrowthOutsideRange(ctx, poolId, lowerTick, upperTick)
if err != nil {
return err
}
// Loop through uptime accums for all supported uptimes on the pool and init or update position's records
positionName := string(types.KeyPositionId(positionId))
for uptimeIndex, curUptimeAccum := range uptimeAccumulators {
// If a record does not exist for this uptime accumulator, create a new position.
// Otherwise, add to existing record.
recordExists, err := curUptimeAccum.HasPosition(positionName)
if err != nil {
return err
}
if !recordExists {
// Liquidity cannot be negative for a new position
if !liquidityDelta.IsPositive() {
return types.NonPositiveLiquidityForNewPositionError{LiquidityDelta: liquidityDelta, PositionId: positionId}
}
// Since the position should only be entitled to uptime growth within its range, we checkpoint globalUptimeGrowthInsideRange as
// its accumulator's init value. During the claiming (or, equivalently, position updating) process, we ensure that incentives are
// not overpaid.
err = curUptimeAccum.NewPositionIntervalAccumulation(positionName, liquidity, globalUptimeGrowthInsideRange[uptimeIndex], emptyOptions)
if err != nil {
return err
}
} else {
// Prep accum since we claim rewards first under the hood before any update (otherwise we would overpay)
err = updatePositionToInitValuePlusGrowthOutside(curUptimeAccum, positionName, globalUptimeGrowthOutsideRange[uptimeIndex])
if err != nil {
return err
}
// Note that even though "unclaimed rewards" accrue in the accumulator prior to reaching minUptime, since position withdrawal
// and incentive collection are only allowed when current time is past minUptime these rewards are not accessible until then.
err = curUptimeAccum.UpdatePositionIntervalAccumulation(positionName, liquidityDelta, globalUptimeGrowthInsideRange[uptimeIndex])
if err != nil {
return err
}
}
}
return nil
}
// updateAccumAndClaimRewards claims and returns the rewards that `positionKey` is entitled to, updating the accumulator's value before
// and after claiming to ensure that rewards are never overdistributed.
// CONTRACT: position accumulator value prior to this call is equal to the growth inside the position at the time of last update.
// Returns error if:
// - fails to prepare position accumulator
// - fails to claim rewards
// - fails to check if position record exists
// - fails to update position accumulator with the current growth inside the position
func updateAccumAndClaimRewards(accum accum.AccumulatorObject, positionKey string, growthOutside sdk.DecCoins) (sdk.Coins, sdk.DecCoins, error) {
// Set the position's accumulator value to its initial value at creation time plus the growth outside at this moment.
err := updatePositionToInitValuePlusGrowthOutside(accum, positionKey, growthOutside)
if err != nil {
return sdk.Coins{}, sdk.DecCoins{}, err
}
// Claim rewards, set the unclaimed rewards to zero, and update the position's accumulator value to reflect the current accumulator value.
// Removes the position state from accum if remaining liquidity is zero for the position.
incentivesClaimedCurrAccum, dust, err := accum.ClaimRewards(positionKey)
if err != nil {
return sdk.Coins{}, sdk.DecCoins{}, err
}
// Check if position record was deleted after claiming rewards.
hasPosition, err := accum.HasPosition(positionKey)
if err != nil {
return sdk.Coins{}, sdk.DecCoins{}, err
}
// If position still exists, we update the position's accumulator value to be the current accumulator value minus the growth outside.
if hasPosition {
// The position accumulator value must always be equal to the growth inside at the time of last update.
// Since this is the time we update the accumulator, we must subtract the growth outside from the global accumulator value
// to get growth inside at the current block time.
currentGrowthInsideForPosition := accum.GetValue().Sub(growthOutside)
err := accum.SetPositionIntervalAccumulation(positionKey, currentGrowthInsideForPosition)
if err != nil {
return sdk.Coins{}, sdk.DecCoins{}, err
}
}
return incentivesClaimedCurrAccum, dust, nil
}
// moveRewardsToNewPositionAndDeleteOldAcc claims the rewards from the old position and moves them to the new position.
// Deletes the position tracker associated with the old position name.
// The positions must be associated with the given accumulator.
// The given growth outside the positions range is used for claim rewards accounting.
// The rewards are moved as "unclaimed rewards" to the new position.
// Returns nil on success. Error otherwise.
// NOTE: It is only used by fungifyChargedPosition which we disabled for launch.
// nolint: unused
func moveRewardsToNewPositionAndDeleteOldAcc(ctx sdk.Context, accum accum.AccumulatorObject, oldPositionName, newPositionName string, growthOutside sdk.DecCoins) error {
if oldPositionName == newPositionName {
return types.ModifySamePositionAccumulatorError{PositionAccName: oldPositionName}
}
hasPosition, err := accum.HasPosition(oldPositionName)
if err != nil {
return err
}
if !hasPosition {
return fmt.Errorf("position %s does not exist", oldPositionName)
}
if err := updatePositionToInitValuePlusGrowthOutside(accum, oldPositionName, growthOutside); err != nil {
return err
}
unclaimedRewards, err := accum.DeletePosition(oldPositionName)
if err != nil {
return err
}
err = accum.AddToUnclaimedRewards(newPositionName, unclaimedRewards)
if err != nil {
return err
}
// Ensure that the new position's accumulator value is the growth inside.
currentGrowthInsideForPosition := accum.GetValue().Sub(growthOutside)
err = accum.SetPositionIntervalAccumulation(newPositionName, currentGrowthInsideForPosition)
if err != nil {
return err
}
return nil
}
// claimAllIncentivesForPosition claims and returns all the incentives for a given position.
// It claims all the incentives that the position is eligible for and forfeits the rest by redepositing them back
// into the accumulator (effectively redistributing them to the other LPs).
//
// Returns the amount of successfully claimed incentives and the amount of forfeited incentives.
// Returns error if the position/uptime accumulators don't exist, or if there is an issue that arises while claiming.
func (k Keeper) claimAllIncentivesForPosition(ctx sdk.Context, positionId uint64) (sdk.Coins, sdk.Coins, error) {
// Retrieve the position with the given ID.
position, err := k.GetPosition(ctx, positionId)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
err = k.updatePoolUptimeAccumulatorsToNow(ctx, position.PoolId)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// Compute the age of the position.
positionAge := ctx.BlockTime().Sub(position.JoinTime)
if positionAge < 0 {
return sdk.Coins{}, sdk.Coins{}, types.NegativeDurationError{Duration: positionAge}
}
// Retrieve the uptime accumulators for the position's pool.
uptimeAccumulators, err := k.GetUptimeAccumulators(ctx, position.PoolId)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// Compute uptime growth outside of the range between lower tick and upper tick
uptimeGrowthOutside, err := k.GetUptimeGrowthOutsideRange(ctx, position.PoolId, position.LowerTick, position.UpperTick)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// Create a variable to hold the name of the position.
positionName := string(types.KeyPositionId(positionId))
// Create variables to hold the total collected and forfeited incentives for the position.
collectedIncentivesForPosition := sdk.Coins{}
forfeitedIncentivesForPosition := sdk.DecCoins{}
supportedUptimes := types.SupportedUptimes
// Loop through each uptime accumulator for the pool.
for uptimeIndex, uptimeAccum := range uptimeAccumulators {
// Check if the accumulator contains the position.
hasPosition, err := uptimeAccum.HasPosition(positionName)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// If the accumulator contains the position, claim the position's incentives.
if hasPosition {
collectedIncentivesForUptime, dust, err := updateAccumAndClaimRewards(uptimeAccum, positionName, uptimeGrowthOutside[uptimeIndex])
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// If the claimed incentives are forfeited, deposit them back into the accumulator to be distributed
// to other qualifying positions.
if positionAge < supportedUptimes[uptimeIndex] {
totalSharesAccum, err := uptimeAccum.GetTotalShares()
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
if totalSharesAccum.IsZero() {
pool, err := k.getPoolById(ctx, position.PoolId)
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
// If totalSharesAccum is zero, then there are no other qualifying positions to distribute the forfeited
// incentives to. This might happen if this is the last position in the pool and it is being withdrawn.
// Therefore, we send the forfeited amount to the community pool in this case.
err = k.communityPoolKeeper.FundCommunityPool(ctx, collectedIncentivesForUptime, pool.GetIncentivesAddress())
if err != nil {
return sdk.Coins{}, sdk.Coins{}, err
}
forfeitedIncentivesForPosition = forfeitedIncentivesForPosition.Add(sdk.NewDecCoinsFromCoins(collectedIncentivesForUptime...)...)
continue
}
var forfeitedIncentivesPerShare sdk.DecCoins
for _, coin := range collectedIncentivesForUptime {
// Forfeited incentives per share to add back to the accumulator = (collected amount + dust) / totalSharesAccum
forfeitedIncentivesPerShare = append(forfeitedIncentivesPerShare, sdk.NewDecCoinFromDec(coin.Denom, coin.Amount.ToDec().Add(dust.AmountOf(coin.Denom)).Quo(totalSharesAccum)))
// convert to DecCoin to merge back with dust.
forfeitedIncentivesForPosition = forfeitedIncentivesForPosition.Add(sdk.NewDecCoinFromDec(coin.Denom, coin.Amount.ToDec().Add(dust.AmountOf(coin.Denom))))
}
uptimeAccum.AddToAccumulator(forfeitedIncentivesPerShare)
continue
}
collectedIncentivesForPosition = collectedIncentivesForPosition.Add(collectedIncentivesForUptime...)
}
}
totalForfeited, _ := forfeitedIncentivesForPosition.TruncateDecimal()
return collectedIncentivesForPosition, totalForfeited, nil
}
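// Illustrative sketch (not part of the original file): the forfeiture redistribution arithmetic
// above with hypothetical numbers. A position that has not met the minimum uptime forfeits its
// claimed incentives; the forfeited amount (plus dust) is divided by the accumulator's total shares
// and added back so that remaining qualifying positions pick it up pro rata.
// nolint: unused
func exampleForfeitedIncentivesPerShare(collected sdk.Int, dust sdk.Dec, totalShares sdk.Dec) sdk.Dec {
	// E.g. 90 tokens collected + 0.5 dust over 500 total shares = 0.181 tokens added per share.
	return collected.ToDec().Add(dust).Quo(totalShares)
}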
func (k Keeper) GetClaimableIncentives(ctx sdk.Context, positionId uint64) (sdk.Coins, sdk.Coins, error) {
// Since this is a query, we don't want to modify the state and therefore use a cache context.
cacheCtx, _ := ctx.CacheContext()
return k.claimAllIncentivesForPosition(cacheCtx, positionId)
}
// collectIncentives collects incentives for all uptime accumulators for the specified position id.
//
// Upon successful collection, it bank sends the incentives from the pool address to the owner and returns the collected coins.