diff --git a/mempool/iterator.go b/mempool/iterator.go index b7a75b91c..3caf5736e 100644 --- a/mempool/iterator.go +++ b/mempool/iterator.go @@ -35,17 +35,24 @@ type EVMMempoolIterator struct { /** Chain Params **/ bondDenom string - chainID *big.Int /** Blockchain Access **/ blockchain *Blockchain + chainID *big.Int } // NewEVMMempoolIterator creates a new unified iterator over EVM and Cosmos transactions. // It combines iterators from both transaction pools and selects transactions based on fee priority. // Returns nil if both iterators are empty or nil. The bondDenom parameter specifies the native // token denomination for fee comparisons, and chainId is used for EVM transaction conversion. -func NewEVMMempoolIterator(evmIterator *miner.TransactionsByPriceAndNonce, cosmosIterator mempool.Iterator, logger log.Logger, txConfig client.TxConfig, bondDenom string, chainID *big.Int, blockchain *Blockchain) mempool.Iterator { +func NewEVMMempoolIterator( + evmIterator *miner.TransactionsByPriceAndNonce, + cosmosIterator mempool.Iterator, + logger log.Logger, + txConfig client.TxConfig, + bondDenom string, + blockchain *Blockchain, +) mempool.Iterator { // Check if we have any transactions at all hasEVM := evmIterator != nil && !evmIterator.Empty() hasCosmos := cosmosIterator != nil && cosmosIterator.Tx() != nil @@ -64,8 +71,8 @@ func NewEVMMempoolIterator(evmIterator *miner.TransactionsByPriceAndNonce, cosmo logger: logger, txConfig: txConfig, bondDenom: bondDenom, - chainID: chainID, blockchain: blockchain, + chainID: blockchain.Config().ChainID, } } diff --git a/mempool/mempool.go b/mempool/mempool.go index a470dffb0..0bb347c64 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -4,7 +4,9 @@ import ( "context" "errors" "fmt" + "math/big" "sync" + "time" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -57,6 +59,7 @@ type ( blockchain *Blockchain blockGasLimit uint64 // Block gas limit from consensus parameters minTip *uint256.Int + chainID *big.Int /** Verification **/ anteHandler sdk.AnteHandler @@ -192,6 +195,7 @@ func NewExperimentalEVMMempool( minTip: config.MinTip, anteHandler: config.AnteHandler, operateExclusively: config.OperateExclusively, + chainID: blockchain.Config().ChainID, reapList: NewReapList(txEncoder), } @@ -386,21 +390,55 @@ func (m *ExperimentalEVMMempool) ReapNewValidTxs(maxBytes uint64, maxGas uint64) // Select returns a unified iterator over both EVM and Cosmos transactions. // The iterator prioritizes transactions based on their fees and manages proper -// sequencing. The i parameter contains transaction hashes to exclude from selection. -func (m *ExperimentalEVMMempool) Select(goCtx context.Context, i [][]byte) sdkmempool.Iterator { +// sequencing. The txs parameter contains transactions from cometbft consensus. +func (m *ExperimentalEVMMempool) Select(ctx context.Context, txs [][]byte) sdkmempool.Iterator { m.mtx.Lock() defer m.mtx.Unlock() - ctx := sdk.UnwrapSDKContext(goCtx) - // Wait for the legacypool to Reset at >= blockHeight (this may have - // already happened), to ensure all txs in pending pool are valid. - m.legacyTxPool.WaitForReorgHeight(ctx, ctx.BlockHeight()) + return m.selectAfterReorg(ctx, txs) +} - evmIterator, cosmosIterator := m.getIterators(goCtx, i) +// SelectBy iterates through transactions until the provided filter function returns false. +// It uses the same unified iterator as Select but allows early termination based on +// custom criteria defined by the filter function. 
+func (m *ExperimentalEVMMempool) SelectBy(ctx context.Context, txs [][]byte, filter func(sdk.Tx) bool) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
-	combinedIterator := NewEVMMempoolIterator(evmIterator, cosmosIterator, m.logger, m.txConfig, m.vmKeeper.GetEvmCoinInfo(ctx).Denom, m.blockchain.Config().ChainID, m.blockchain)
+	iterator := m.selectAfterReorg(ctx, txs)
-	return combinedIterator
+	for iterator != nil && filter(iterator.Tx()) {
+		iterator = iterator.Next()
+	}
+}
+
+// selectAfterReorg selects transactions from the EVM and Cosmos mempools after a (partial) reorg has completed.
+// Note that the txs argument is currently unused by both the EVM and Cosmos mempools.
+func (m *ExperimentalEVMMempool) selectAfterReorg(ctx context.Context, txs [][]byte) sdkmempool.Iterator {
+	// Maximum time to wait for a reorg to complete. If the reorg doesn't finish
+	// within this window, we select only the subset of txs that has already been
+	// re-checked. This keeps consensus live and prevents the mempool from becoming
+	// a bottleneck under transaction congestion or slow re-checks.
+	const maxReorgWait = 300 * time.Millisecond
+
+	var (
+		sdkCtx      = sdk.UnwrapSDKContext(ctx)
+		denom       = m.vmKeeper.GetEvmCoinInfo(sdkCtx).Denom
+		blockHeight = sdkCtx.BlockHeight()
+	)
+
+	m.legacyTxPool.WaitForReorgHeight(ctx, blockHeight, maxReorgWait)
+
+	evmIterator, cosmosIterator := m.getIterators(ctx, txs)
+
+	return NewEVMMempoolIterator(
+		evmIterator,
+		cosmosIterator,
+		m.logger,
+		m.txConfig,
+		denom,
+		m.blockchain,
+	)
 }
 // CountTx returns the total number of transactions in both EVM and Cosmos pools.
@@ -501,23 +539,6 @@ func (m *ExperimentalEVMMempool) shouldRemoveFromEVMPool(hash common.Hash, reaso
 	return true
 }
-// SelectBy iterates through transactions until the provided filter function returns false.
-// It uses the same unified iterator as Select but allows early termination based on
-// custom criteria defined by the filter function.
-func (m *ExperimentalEVMMempool) SelectBy(goCtx context.Context, i [][]byte, f func(sdk.Tx) bool) {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	ctx := sdk.UnwrapSDKContext(goCtx)
-
-	evmIterator, cosmosIterator := m.getIterators(goCtx, i)
-
-	combinedIterator := NewEVMMempoolIterator(evmIterator, cosmosIterator, m.logger, m.txConfig, m.vmKeeper.GetEvmCoinInfo(ctx).Denom, m.blockchain.Config().ChainID, m.blockchain)
-
-	for combinedIterator != nil && f(combinedIterator.Tx()) {
-		combinedIterator = combinedIterator.Next()
-	}
-}
-
 // SetEventBus sets CometBFT event bus to listen for new block header event.
 func (m *ExperimentalEVMMempool) SetEventBus(eventBus *cmttypes.EventBus) {
 	if m.HasEventBus() {
@@ -576,9 +597,10 @@ func (m *ExperimentalEVMMempool) getEVMMessage(tx sdk.Tx) (*evmtypes.MsgEthereum
 // getIterators prepares iterators over pending EVM and Cosmos transactions.
 // It configures EVM transactions with proper base fee filtering and priority ordering,
 // while setting up the Cosmos iterator with the provided exclusion list.
-func (m *ExperimentalEVMMempool) getIterators(goCtx context.Context, i [][]byte) (*miner.TransactionsByPriceAndNonce, sdkmempool.Iterator) { +func (m *ExperimentalEVMMempool) getIterators(goCtx context.Context, txs [][]byte) (*miner.TransactionsByPriceAndNonce, sdkmempool.Iterator) { ctx := sdk.UnwrapSDKContext(goCtx) baseFee := m.vmKeeper.GetBaseFee(ctx) + var baseFeeUint *uint256.Int if baseFee != nil { baseFeeUint = uint256.MustFromBig(baseFee) @@ -593,10 +615,11 @@ func (m *ExperimentalEVMMempool) getIterators(goCtx context.Context, i [][]byte) OnlyPlainTxs: true, OnlyBlobTxs: false, } + evmPendingTxes := m.txPool.Pending(pendingFilter) - orderedEVMPendingTxes := miner.NewTransactionsByPriceAndNonce(nil, evmPendingTxes, baseFee) + evmIterator := miner.NewTransactionsByPriceAndNonce(nil, evmPendingTxes, baseFee) - cosmosPendingTxes := m.cosmosPool.Select(ctx, i) + cosmosIterator := m.cosmosPool.Select(ctx, txs) - return orderedEVMPendingTxes, cosmosPendingTxes + return evmIterator, cosmosIterator } diff --git a/mempool/txpool/legacypool/legacypool.go b/mempool/txpool/legacypool/legacypool.go index 537276a63..583bc6bd5 100644 --- a/mempool/txpool/legacypool/legacypool.go +++ b/mempool/txpool/legacypool/legacypool.go @@ -308,15 +308,16 @@ type LegacyPool struct { all *lookup // All transactions to allow lookups priced *pricedList // All transactions sorted by price - reqResetCh chan *txpoolResetRequest - reqPromoteCh chan *accountSet - queueTxEventCh chan *types.Transaction - reorgDoneCh chan chan struct{} - reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop - reorgSubscriptionCh chan struct{} // notifies the reorg loop that a subscriber wants to wait on nextDone - wg sync.WaitGroup // tracks loop, scheduleReorgLoop - initDoneCh chan struct{} // is closed once the pool is initialized (for tests) - latestReorgHeight atomic.Int64 // Latest height that the reorg loop has completed + reqResetCh chan *txpoolResetRequest + reqPromoteCh chan *accountSet + queueTxEventCh chan *types.Transaction + reorgDoneCh chan chan struct{} + reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop + reorgSubscriptionCh chan struct{} // notifies the reorg loop that a subscriber wants to wait on nextDone + reorgCancelRequested atomic.Bool // indicates that the reorg loop should be cancelled + wg sync.WaitGroup // tracks loop, scheduleReorgLoop + initDoneCh chan struct{} // is closed once the pool is initialized (for tests) + latestReorgHeight atomic.Int64 // Latest height that the reorg loop has completed changesSinceReorg int // A counter for how many drops we've performed in-between reorg. 
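The new `reorgCancelRequested` field introduces a cooperative-cancellation pattern: long-running reorg work polls an `atomic.Bool` between cheap units of work and bails out early once a waiter has given up, leaving the remainder for a later pass. A minimal, self-contained sketch of that pattern (the names, timings, and string accounts below are illustrative, not taken from the pool):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// worker processes accounts one by one and polls the cancellation flag between
// items, mirroring how promoteExecutables/demoteUnexecutables can stop early
// when a waiter times out; unprocessed accounts can be resubmitted later.
func worker(cancelRequested *atomic.Bool, accounts []string) (processed []string) {
	for i, acct := range accounts {
		time.Sleep(10 * time.Millisecond) // stand-in for an expensive per-account recheck
		processed = append(processed, acct)

		// cheap poll between items
		if cancelRequested.Load() && i < len(accounts)-1 {
			return processed
		}
	}
	return processed
}

func main() {
	var cancel atomic.Bool
	go func() {
		time.Sleep(25 * time.Millisecond)
		cancel.CompareAndSwap(false, true) // request cancellation exactly once
	}()

	fmt.Println("processed before cancellation:", worker(&cancel, []string{"a", "b", "c", "d", "e"}))
}
```

The CompareAndSwap mirrors how `cancelReorgIteration` avoids logging the same request twice, and, as in `runReorg`, the flag is meant to be cleared at the start of each reorg iteration.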
@@ -350,23 +351,25 @@ func New(config Config, chain BlockChain, opts ...Option) *LegacyPool { // Create the transaction pool with its initial settings pool := &LegacyPool{ - config: config, - chain: chain, - chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), - all: newLookup(), - rechecker: newNopRechecker(), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan *types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}), - reorgSubscriptionCh: make(chan struct{}), - initDoneCh: make(chan struct{}), + config: config, + chain: chain, + chainconfig: chain.Config(), + signer: types.LatestSigner(chain.Config()), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + all: newLookup(), + rechecker: newNopRechecker(), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), + reorgSubscriptionCh: make(chan struct{}), + reorgCancelRequested: atomic.Bool{}, + initDoneCh: make(chan struct{}), } + pool.priced = newPricedList(pool.all) pool.latestReorgHeight.Store(0) @@ -601,6 +604,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] if filter.OnlyBlobTxs { return nil } + pool.mu.Lock() defer pool.mu.Unlock() @@ -615,8 +619,17 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] if filter.BaseFee != nil { baseFeeBig = filter.BaseFee.ToBig() } + + currentHeight := pool.currentHead.Load().Number.Int64() pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) + for addr, list := range pool.pending { + // skip accounts that were NOT yet re-checked in demoteUnexecutables due to + // reorg cancellation (timeout) + if list.getLastCheckedHeight() < currentHeight { + continue + } + txs := list.Flatten() // If the miner requests tip enforcement, cap the lists now @@ -628,6 +641,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] } } } + if len(txs) > 0 { lazies := make([]*txpool.LazyTransaction, len(txs)) for i := 0; i < len(txs); i++ { @@ -645,6 +659,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] pending[addr] = lazies } } + return pending } @@ -1370,9 +1385,10 @@ func (pool *LegacyPool) scheduleReorgLoop() { // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*SortedMap) { - defer func(t0 time.Time) { - reorgDurationTimer.Update(time.Since(t0)) - }(time.Now()) + // reset the cancel requested flag + pool.reorgCancelRequested.Store(false) + + defer func(start time.Time) { reorgDurationTimer.UpdateSince(start) }(time.Now()) defer close(done) var promoteAddrs []common.Address @@ -1385,6 +1401,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // the flatten operation can be avoided. 
promoteAddrs = dirtyAccounts.flatten() } + pool.mu.Lock() if reset != nil { // Reset from the old head to the new, rescheduling any reorged transactions @@ -1404,14 +1421,24 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, } } - // Check for pending transactions for every account that sent new ones - promoted := pool.promoteExecutables(promoteAddrs, reset) + // Check for pending transactions for every account that sent new ones. + // If reorg cancelled, submit leftovers (possiblePromotions) for another re-check + promoted, possiblePromotions := pool.promoteExecutables(promoteAddrs, reset) + if possiblePromotions != nil { + defer pool.requestPromoteExecutables(possiblePromotions) + } // If a new block appeared, validate the pool of pending transactions. This will // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset != nil { - pool.demoteUnexecutables() + height := int64(0) + if reset.newHead != nil { + height = reset.newHead.Number.Int64() + } + + pool.demoteUnexecutables(height) + if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) @@ -1490,16 +1517,16 @@ func (pool *LegacyPool) resetInternalState(newHead *types.Header, reinject types // promoteExecutables moves transactions that have become processable from the // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. -func (pool *LegacyPool) promoteExecutables(accounts []common.Address, reset *txpoolResetRequest) []*types.Transaction { - // Track the promoted transactions to broadcast them at once - var promoted []*types.Transaction - +func (pool *LegacyPool) promoteExecutables( + accounts []common.Address, + reset *txpoolResetRequest, +) (promoted []*types.Transaction, leftovers *accountSet) { // Get a branch of the latest pending context for recheck ctx, write := pool.rechecker.GetContext() // Iterate over all accounts and promote any executable transactions gasLimit := pool.currentHead.Load().GasLimit - for _, addr := range accounts { + for i, addr := range accounts { list := pool.queue[addr] if list == nil { continue // Just in case someone calls with a non existing account @@ -1520,6 +1547,12 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address, reset *txp log.Trace("Removed unpayable queued transactions", "count", len(costDrops)) queuedNofundsMeter.Mark(int64(len(costDrops))) + // if reorg cancellation requested, return the promoted txs and leftovers + // we want to place this check here as checks/filters above are "cheap" + if pool.reorgCancelRequested.Load() && i < len(accounts)-1 { + return promoted, newAccountSet(pool.signer, accounts[i+1:]...) 
+ } + // Drop all transactions that now fail the pools RecheckTxFn // // Note this is happening after the nonce removal above since this @@ -1581,7 +1614,8 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address, reset *txp } } } - return promoted + + return promoted, nil } // truncatePending removes transactions from the pending queue if the pool is above the @@ -1716,7 +1750,7 @@ func (pool *LegacyPool) truncateQueue() { // Note: transactions are not marked as removed in the priced list because re-heaping // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function -func (pool *LegacyPool) demoteUnexecutables() { +func (pool *LegacyPool) demoteUnexecutables(height int64) { // Iterate over all accounts and demote any non-executable transactions gasLimit := pool.currentHead.Load().GasLimit for addr, list := range pool.pending { @@ -1798,6 +1832,15 @@ func (pool *LegacyPool) demoteUnexecutables() { if _, ok := pool.queue[addr]; !ok { pool.reserver.Release(addr) } + } else { + // set the last checked height for the list + // this is used to return only checked accounts in Pending method + list.setLastCheckedHeight(height) + } + + // if reorg cancellation requested, return early + if pool.reorgCancelRequested.Load() { + return } } } @@ -2071,7 +2114,10 @@ func (pool *LegacyPool) Clear() { // WaitForReorgHeight blocks until the reorg loop has reset at a head with // height >= height. If the context is cancelled or the pool is shutting down, // this will also return. -func (pool *LegacyPool) WaitForReorgHeight(ctx context.Context, height int64) { +func (pool *LegacyPool) WaitForReorgHeight(ctx context.Context, height int64, timeout time.Duration) { + ctx, cancel := reorgContext(ctx, timeout) + defer cancel() + for pool.latestReorgHeight.Load() < height { // reorg loop has not run at the target height, subscribe to the // outcome of the next reorg loop iteration to know when to check again @@ -2089,11 +2135,20 @@ func (pool *LegacyPool) WaitForReorgHeight(ctx context.Context, height int64) { select { case <-sub: case <-ctx.Done(): + pool.cancelReorgIteration() return } } } +// cancelReorgIteration cancels the current reorg loop iteration in order to return +// a subset of "ready" txs as fast as possible +func (pool *LegacyPool) cancelReorgIteration() { + if pool.reorgCancelRequested.CompareAndSwap(false, true) { + log.Debug("Reorg cancellation requested") + } +} + // SubscribeToNextReorg returns a channel that will close when the next reorg // loop completes. An error is returned if the loop is shutting down. func (pool *LegacyPool) SubscribeToNextReorg() (chan struct{}, error) { @@ -2125,6 +2180,32 @@ func (pool *LegacyPool) markTxRemoved(tx *types.Transaction) { } } +// reorgContext returns a context with deadline that is either the original context's deadline or the timeout, +// whichever is earlier. For original context with deadline, we subtract to reserve it for other operations. +func reorgContext(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if timeout == 0 { + return ctx, func() {} + } + + dl, ok := ctx.Deadline() + if !ok { + return context.WithTimeout(ctx, timeout) + } + + // if ctx already has deadline, we pick the earlier one, but subtract some time from the origin + // to reserve some time for other operations. 
+ const reserveThreshold = 100 * time.Millisecond + + dl = dl.Add(-reserveThreshold) + ourDL := time.Now().Add(timeout) + + if ourDL.Before(dl) { + dl = ourDL + } + + return context.WithDeadline(ctx, dl) +} + // tolerateRecheckErr returns nil if err is an error string that should be // ignored from recheck, i.e. we do not want to drop txs from the mempool if we // have received specific errors from recheck. diff --git a/mempool/txpool/legacypool/legacypool_test.go b/mempool/txpool/legacypool/legacypool_test.go index f94822ade..2e2765b01 100644 --- a/mempool/txpool/legacypool/legacypool_test.go +++ b/mempool/txpool/legacypool/legacypool_test.go @@ -41,6 +41,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/evm/mempool/txpool" @@ -2694,7 +2695,7 @@ func TestWaitForReorgHeight(t *testing.T) { go func() { defer wg.Done() ctx := context.Background() - pool.WaitForReorgHeight(ctx, 5) + pool.WaitForReorgHeight(ctx, 5, 0) waitCompleted.Store(true) }() @@ -2743,7 +2744,7 @@ func TestWaitForReorgHeight(t *testing.T) { go func() { defer wg.Done() ctx := context.Background() - pool.WaitForReorgHeight(ctx, 10) + pool.WaitForReorgHeight(ctx, 10, 0) waitCompleted.Store(true) }() @@ -2785,7 +2786,7 @@ func TestWaitForReorgHeight(t *testing.T) { wg.Add(1) go func(id int) { defer wg.Done() - pool.WaitForReorgHeight(context.Background(), 7) + pool.WaitForReorgHeight(context.Background(), 7, 0) }(i) } @@ -2884,6 +2885,68 @@ func TestPromoteExecutablesRecheckTx(t *testing.T) { pool.mu.RUnlock() } +func TestPromoteExecutablesCancelReorg(t *testing.T) { + // ARRANGE + // Given a pool with txs + pool, rechecker, _ := setupPool() + defer pool.Close() + + const txsCount = 10 + + // Create for different account + keyGen := func() *ecdsa.PrivateKey { + key, err := crypto.GenerateKey() + require.NoError(t, err) + return key + } + + txs := make([]*types.Transaction, 0, txsCount) + + for i := 0; i < txsCount; i++ { + tx := transaction(uint64(i), 100000, keyGen()) + from, _ := deriveSender(tx) + txs = append(txs, tx) + + testAddBalance(pool, from, big.NewInt(100000000000000)) + } + + // Given a recheck fn that imitates tx processing on cosmos-sdk side + const ( + recheckDuration = 120 * time.Millisecond + maxReorgWait = 2 * recheckDuration + ) + + recheck := func(ctx sdk.Context, tx *types.Transaction) (sdk.Context, error) { + txHash := tx.Hash().Hex() + + t.Logf("start: recheck: tx %s", txHash) + time.Sleep(recheckDuration) + t.Logf("end: recheck: tx %s", txHash) + return ctx, nil + } + + rechecker.SetRecheck(recheck) + + // ACT + go func() { + // Add all txs, should trigger requestPromoteExecutables + errs := pool.Add(txs, false) + require.NoError(t, errors.Join(errs...)) + }() + + // Wait for the reorg to complete + pool.WaitForReorgHeight(context.Background(), 1, maxReorgWait) + + // ASSERT + // if true, that means that ONGOING cancellation is in progress + // because each reorg loop starts with reorgCancelRequested set to false + checkReorgCancelRequested := func() bool { + return pool.reorgCancelRequested.Load() + } + + require.Eventually(t, checkReorgCancelRequested, 3*maxReorgWait, maxReorgWait/10) +} + // TestDemoteUnexecutablesRecheckTx tests that demoteUnexecutables properly // removes a transaction from all pools if it fails the RecheckTxFn and that // subsequent nonces are moved back into the queued pool. 
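The deadline handling added in `reorgContext` (and exercised by `TestPromoteExecutablesCancelReorg` above) reduces to one rule: wait at most `timeout`, but never past the caller's own deadline minus a small reserve. A stand-alone sketch of that rule, re-implemented here purely for illustration (the helper name and constants are assumptions, not the pool's API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// boundedWaitCtx mirrors the reorgContext rule: with no parent deadline, just
// apply the timeout; otherwise use the earlier of (parent deadline - reserve)
// and (now + timeout), so follow-up work keeps some headroom.
func boundedWaitCtx(parent context.Context, timeout, reserve time.Duration) (context.Context, context.CancelFunc) {
	if timeout == 0 {
		return parent, func() {}
	}
	parentDL, ok := parent.Deadline()
	if !ok {
		return context.WithTimeout(parent, timeout)
	}
	dl := parentDL.Add(-reserve)
	if ours := time.Now().Add(timeout); ours.Before(dl) {
		dl = ours
	}
	return context.WithDeadline(parent, dl)
}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	ctx, cancelWait := boundedWaitCtx(parent, 300*time.Millisecond, 100*time.Millisecond)
	defer cancelWait()

	dl, _ := ctx.Deadline()
	fmt.Println("effective wait:", time.Until(dl).Round(10*time.Millisecond)) // ~300ms: the timeout is the tighter bound
}
```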
@@ -3005,7 +3068,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) { // Benchmark the speed of pool validation b.ResetTimer() for i := 0; i < b.N; i++ { - pool.demoteUnexecutables() + pool.demoteUnexecutables(1) } } diff --git a/mempool/txpool/legacypool/list.go b/mempool/txpool/legacypool/list.go index 7ddab01b9..20896f9d2 100644 --- a/mempool/txpool/legacypool/list.go +++ b/mempool/txpool/legacypool/list.go @@ -313,6 +313,8 @@ type list struct { costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) totalcost *uint256.Int // Total cost of all transactions in the list + + lastCheckedHeight int64 // the height at which the list was last checked } // newList creates a new transaction list for maintaining nonce-indexable fast, @@ -542,6 +544,18 @@ func (l *list) subTotalCost(txs []*types.Transaction) { } } +func (l *list) setLastCheckedHeight(height int64) { + if height == 0 { + return + } + + l.lastCheckedHeight = height +} + +func (l *list) getLastCheckedHeight() int64 { + return l.lastCheckedHeight +} + // priceHeap is a heap.Interface implementation over transactions for retrieving // price-sorted transactions to discard when the pool fills up. If baseFee is set // then the heap is sorted based on the effective tip based on the given base fee.
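To tie the list.go change back to `Pending`: `lastCheckedHeight` is what lets `Pending` skip accounts that a cancelled reorg never got around to re-checking in `demoteUnexecutables`. A toy sketch of that interaction with a simplified stand-in for the per-account list (all names below are illustrative):

```go
package main

import "fmt"

// acctList is a stripped-down stand-in for legacypool's per-account list,
// keeping only the lastCheckedHeight bookkeeping added in this change.
type acctList struct {
	txs               []string
	lastCheckedHeight int64
}

func (l *acctList) setLastCheckedHeight(h int64) {
	if h == 0 {
		return // height 0 means "unknown"; keep the previous value
	}
	l.lastCheckedHeight = h
}

// pending returns only accounts whose lists were re-checked at (or after) the
// current height, mirroring the skip added to LegacyPool.Pending.
func pending(lists map[string]*acctList, currentHeight int64) map[string][]string {
	out := make(map[string][]string, len(lists))
	for addr, l := range lists {
		if l.lastCheckedHeight < currentHeight {
			continue // not re-checked since the last (possibly cancelled) reorg
		}
		out[addr] = l.txs
	}
	return out
}

func main() {
	lists := map[string]*acctList{
		"alice": {txs: []string{"tx1", "tx2"}},
		"bob":   {txs: []string{"tx3"}},
	}
	lists["alice"].setLastCheckedHeight(5) // re-checked at height 5
	// bob's account was skipped by a cancelled reorg, so his height stays 0

	fmt.Println(pending(lists, 5)) // map[alice:[tx1 tx2]]
}
```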