From 40064c04e04c88230adb0b6fc18a5bb186a39412 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 10:45:38 -0500 Subject: [PATCH 01/19] tmp --- tests/systemtests/main_test.go | 130 +++++------ tests/systemtests/mempool/test_broadcast.go | 233 ++++++++++++++++++++ tests/systemtests/mempool/test_suite.go | 68 ++++++ 3 files changed, 367 insertions(+), 64 deletions(-) create mode 100644 tests/systemtests/mempool/test_broadcast.go diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go index 50ab0c319..824f899d4 100644 --- a/tests/systemtests/main_test.go +++ b/tests/systemtests/main_test.go @@ -5,9 +5,6 @@ package systemtests import ( "testing" - "github.com/cosmos/evm/tests/systemtests/accountabstraction" - "github.com/cosmos/evm/tests/systemtests/chainupgrade" - "github.com/cosmos/evm/tests/systemtests/eip712" "github.com/cosmos/evm/tests/systemtests/mempool" "github.com/cosmos/evm/tests/systemtests/suite" @@ -18,66 +15,71 @@ func TestMain(m *testing.M) { systemtests.RunTests(m) } -/* - * Mempool Tests - */ -func TestMempoolTxsOrdering(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) +// +///* +// * Mempool Tests +// */ +//func TestMempoolTxsOrdering(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) +//} +// +//func TestMempoolTxsReplacement(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) +//} +// +//func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) +//} +// +//func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) +//} +// +//func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) +//} +// +//func TestMempoolTxRebroadcasting(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxRebroadcasting) +//} + 
+func TestMempoolTxBroadcasting(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) } -func TestMempoolTxsReplacement(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) -} - -func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) -} - -func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) -} - -func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) -} - -func TestMempoolTxRebroadcasting(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxRebroadcasting) -} - -func TestMinimumGasPricesZero(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero) -} - -func TestMempoolCosmosTxsCompatibility(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) -} - -/* - * EIP-712 Tests - */ -func TestEIP712BankSend(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) -} - -func TestEIP712BankSendWithBalanceCheck(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) -} - -func TestEIP712MultipleBankSends(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) -} - -/* - * Account Abstraction Tests - */ -func TestAccountAbstractionEIP7702(t *testing.T) { - suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) -} - -/* - * Chain Upgrade Tests - */ -func TestChainUpgrade(t *testing.T) { - suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) -} +//func TestMinimumGasPricesZero(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero) +//} +// +//func TestMempoolCosmosTxsCompatibility(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) +//} +// +///* +// * EIP-712 Tests +// */ +//func 
TestEIP712BankSend(t *testing.T) { +// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) +//} +// +//func TestEIP712BankSendWithBalanceCheck(t *testing.T) { +// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) +//} +// +//func TestEIP712MultipleBankSends(t *testing.T) { +// suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) +//} +// +///* +// * Account Abstraction Tests +// */ +//func TestAccountAbstractionEIP7702(t *testing.T) { +// suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) +//} +// +///* +// * Chain Upgrade Tests +// */ +//func TestChainUpgrade(t *testing.T) { +// suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) +//} diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go new file mode 100644 index 000000000..5bf54f064 --- /dev/null +++ b/tests/systemtests/mempool/test_broadcast.go @@ -0,0 +1,233 @@ +//go:build system_test + +package mempool + +import ( + "context" + "fmt" + "slices" + "testing" + "time" + + "github.com/test-go/testify/require" + + "github.com/cosmos/evm/tests/systemtests/suite" +) + +// RunTxBroadcasting tests that transactions are broadcast to other nodes via mempool gossip +// before blocks are committed. This verifies that the mempool rebroadcast functionality works +// correctly and transactions propagate through the network via the mempool gossip protocol, +// not just via block propagation. +// +// The test uses a slower block time (5 seconds) to ensure we have enough time to verify +// that transactions appear in other nodes' mempools before a block is produced. 
+func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { + testCases := []struct { + name string + actions []func(*TestSuite, *TestContext) + }{ + { + name: "tx broadcast to other nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Verify the transaction appears in other nodes' mempools BEFORE any block is committed + // This proves transactions are broadcast via mempool gossip, not just block propagation + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + for _, nodeIdx := range []int{1, 2, 3} { + func(nodeIdx int) { + nodeID := s.Node(nodeIdx) + found := false + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to %s within %s - mempool gossip may not be working", + tx1.TxHash, nodeID, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + if err != nil { + // Retry on error + continue + } + + if slices.Contains(pendingTxs, tx1.TxHash) { + t.Logf("✓ Transaction %s successfully broadcast to %s", tx1.TxHash, nodeID) + found = true + } + } + } + }(nodeIdx) + } + + // Now set expected state and let the transaction commit normally + ctx.SetExpPendingTxs(tx1) + }, + func(s *TestSuite, ctx *TestContext) { + // Test with nonce-gapped transactions to verify rebroadcast/promotion + signer := s.Acc(0) + + // Send tx with nonce 2 to node1 (creating a gap since current nonce is 1) + tx3, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send 
tx with nonce 2") + + // This transaction should be queued on node1, not pending + maxWaitTime := 2 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + queuedOnNode1 := false + for !queuedOnNode1 { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not queued on node1 within %s", + tx3.TxHash, maxWaitTime, + )) + case <-ticker.C: + _, queuedTxs, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + + if slices.Contains(queuedTxs, tx3.TxHash) { + t.Logf("✓ Transaction %s is queued on node1 (as expected due to nonce gap)", tx3.TxHash) + queuedOnNode1 = true + } + } + } + + // Verify the queued transaction is NOT broadcast to other nodes + // (queued txs should not be gossiped) + time.Sleep(1 * time.Second) // Give some time for any potential gossip + + for _, nodeIdx := range []int{0, 2, 3} { + nodeID := s.Node(nodeIdx) + pendingTxs, queuedTxs, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + require.NoError(t, err, "failed to get txpool content from %s", nodeID) + + require.NotContains(t, pendingTxs, tx3.TxHash, + "queued transaction should not be in pending pool of %s", nodeID) + require.NotContains(t, queuedTxs, tx3.TxHash, + "queued transaction should not be broadcast to %s", nodeID) + } + + // Now send the missing transaction (nonce 1) + tx2, err := s.SendTx(t, s.Node(2), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx with nonce 1") + + // tx2 should be broadcast to all nodes + // tx3 should be promoted to pending on node1 and then broadcast to all nodes + maxWaitTime = 3 * time.Second + ticker2 := time.NewTicker(checkInterval) + defer ticker2.Stop() + + for _, nodeIdx := range []int{0, 1, 3} { + func(nodeIdx int) { + nodeID := s.Node(nodeIdx) + 
foundTx2 := false + foundTx3 := false + + timeoutCtx2, cancel2 := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel2() + + for !foundTx2 || !foundTx3 { + select { + case <-timeoutCtx2.Done(): + if !foundTx2 { + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to %s within %s", + tx2.TxHash, nodeID, maxWaitTime, + )) + } + if !foundTx3 { + require.FailNow(t, fmt.Sprintf( + "transaction %s (promoted from queued) was not broadcast to %s within %s", + tx3.TxHash, nodeID, maxWaitTime, + )) + } + case <-ticker2.C: + pendingTxs, _, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + + if !foundTx2 && slices.Contains(pendingTxs, tx2.TxHash) { + t.Logf("✓ Transaction %s broadcast to %s", tx2.TxHash, nodeID) + foundTx2 = true + } + + if !foundTx3 && slices.Contains(pendingTxs, tx3.TxHash) { + t.Logf("✓ Transaction %s (promoted) broadcast to %s", tx3.TxHash, nodeID) + foundTx3 = true + } + } + } + }(nodeIdx) + } + + ctx.SetExpPendingTxs(tx2, tx3) + }, + }, + }, + } + + testOptions := []*suite.TestOptions{ + { + Description: "EVM LegacyTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: false, + }, + { + Description: "EVM DynamicFeeTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: true, + }, + } + + s := NewTestSuite(base) + + // First, setup the chain with default configuration + s.SetupTest(t) + + // Now modify the consensus timeout to slow down block production + // This gives us time to verify broadcasting happens before blocks are committed + s.ModifyConsensusTimeout(t, "5s") + + for _, to := range testOptions { + s.SetOptions(to) + for _, tc := range testCases { + testName := fmt.Sprintf(tc.name, to.Description) + t.Run(testName, func(t *testing.T) { + ctx := NewTestContext() + s.BeforeEachCase(t, ctx) + for _, action := range tc.actions { + action(s, ctx) + // NOTE: We don't call AfterEachAction here because we're manually + // checking the mempool state in the action functions + } + 
s.AfterEachCase(t, ctx) + }) + } + } +} \ No newline at end of file diff --git a/tests/systemtests/mempool/test_suite.go b/tests/systemtests/mempool/test_suite.go index c77620262..187119ece 100644 --- a/tests/systemtests/mempool/test_suite.go +++ b/tests/systemtests/mempool/test_suite.go @@ -3,9 +3,14 @@ package mempool import ( + "fmt" + "os" + "path/filepath" "testing" "time" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" "github.com/stretchr/testify/require" "github.com/cosmos/evm/tests/systemtests/suite" @@ -91,3 +96,66 @@ func (c *TestContext) PromoteExpTxs(count int) { c.ExpPending = append(c.ExpPending, promoted...) c.ExpQueued = c.ExpQueued[count:] } + +// ModifyConsensusTimeout modifies the consensus timeout_commit in the config.toml +// for all nodes and restarts the chain with the new configuration. +func (s *TestSuite) ModifyConsensusTimeout(t *testing.T, timeout string) { + t.Helper() + + // Stop the chain if running + if s.ChainStarted { + s.ResetChain(t) + } + + // Modify config.toml for each node + for i := 0; i < s.NodesCount(); i++ { + nodeDir := s.NodeDir(i) + configPath := filepath.Join(nodeDir, "config", "config.toml") + + err := editToml(configPath, func(doc *tomledit.Document) { + setValue(doc, timeout, "consensus", "timeout_commit") + }) + require.NoError(t, err, "failed to modify config.toml for node %d", i) + } + + // Restart the chain with modified config + s.StartChain(t, suite.DefaultNodeArgs()...) 
+ s.AwaitNBlocks(t, 2) +} + +// editToml is a helper to edit TOML files +func editToml(filename string, f func(doc *tomledit.Document)) error { + tomlFile, err := os.OpenFile(filename, os.O_RDWR, 0o600) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + defer tomlFile.Close() + + doc, err := tomledit.Parse(tomlFile) + if err != nil { + return fmt.Errorf("failed to parse toml: %w", err) + } + + f(doc) + + if _, err := tomlFile.Seek(0, 0); err != nil { + return fmt.Errorf("failed to seek: %w", err) + } + if err := tomlFile.Truncate(0); err != nil { + return fmt.Errorf("failed to truncate: %w", err) + } + if err := tomledit.Format(tomlFile, doc); err != nil { + return fmt.Errorf("failed to format: %w", err) + } + + return nil +} + +// setValue sets a value in a TOML document +func setValue(doc *tomledit.Document, newVal string, xpath ...string) { + e := doc.First(xpath...) + if e == nil { + panic(fmt.Sprintf("not found: %v", xpath)) + } + e.Value = parser.MustValue(fmt.Sprintf("%q", newVal)) +} From ec95da6eb7f19b16f684762b70b1b3df15fc515f Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 10:57:15 -0500 Subject: [PATCH 02/19] a --- tests/systemtests/mempool/test_broadcast.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 5bf54f064..aa44485d2 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -212,7 +212,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Now modify the consensus timeout to slow down block production // This gives us time to verify broadcasting happens before blocks are committed - s.ModifyConsensusTimeout(t, "5s") + //s.ModifyConsensusTimeout(t, "5s") for _, to := range testOptions { s.SetOptions(to) @@ -230,4 +230,4 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }) } } -} \ No newline at end of file 
+} From 5916befe9dbfb599c67ed278161bb4d6e4457aed Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 11:01:55 -0500 Subject: [PATCH 03/19] Revert "a" This reverts commit ed90439768f372281c10763c0c8b948e51d99cbb. --- tests/systemtests/mempool/test_broadcast.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index aa44485d2..5bf54f064 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -212,7 +212,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Now modify the consensus timeout to slow down block production // This gives us time to verify broadcasting happens before blocks are committed - //s.ModifyConsensusTimeout(t, "5s") + s.ModifyConsensusTimeout(t, "5s") for _, to := range testOptions { s.SetOptions(to) @@ -230,4 +230,4 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }) } } -} +} \ No newline at end of file From 1c585024a44e781f685e0d8bd9211a8e275414b9 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 11:08:36 -0500 Subject: [PATCH 04/19] remove rebroadcast test --- tests/systemtests/main_test.go | 124 +++++++++---------- tests/systemtests/mempool/test_exceptions.go | 84 ------------- 2 files changed, 61 insertions(+), 147 deletions(-) diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go index 824f899d4..8705a0fab 100644 --- a/tests/systemtests/main_test.go +++ b/tests/systemtests/main_test.go @@ -3,6 +3,9 @@ package systemtests import ( + "github.com/cosmos/evm/tests/systemtests/accountabstraction" + "github.com/cosmos/evm/tests/systemtests/chainupgrade" + "github.com/cosmos/evm/tests/systemtests/eip712" "testing" "github.com/cosmos/evm/tests/systemtests/mempool" @@ -15,71 +18,66 @@ func TestMain(m *testing.M) { systemtests.RunTests(m) } -// -///* -// * Mempool Tests -// */ -//func TestMempoolTxsOrdering(t 
*testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) -//} -// -//func TestMempoolTxsReplacement(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) -//} -// -//func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) -//} -// -//func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) -//} -// -//func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) -//} -// -//func TestMempoolTxRebroadcasting(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxRebroadcasting) -//} +/* +* Mempool Tests + */ +func TestMempoolTxsOrdering(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) +} + +func TestMempoolTxsReplacement(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) +} + +func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) +} + +func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) +} + +func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) +} func TestMempoolTxBroadcasting(t *testing.T) { suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) } -//func TestMinimumGasPricesZero(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero) -//} -// -//func TestMempoolCosmosTxsCompatibility(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) -//} -// -///* -// * EIP-712 Tests -// */ -//func TestEIP712BankSend(t *testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) -//} -// -//func TestEIP712BankSendWithBalanceCheck(t 
*testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) -//} -// -//func TestEIP712MultipleBankSends(t *testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) -//} -// -///* -// * Account Abstraction Tests -// */ -//func TestAccountAbstractionEIP7702(t *testing.T) { -// suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) -//} -// -///* -// * Chain Upgrade Tests -// */ -//func TestChainUpgrade(t *testing.T) { -// suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) -//} +func TestMinimumGasPricesZero(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero) +} + +func TestMempoolCosmosTxsCompatibility(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) +} + +/* +* EIP-712 Tests + */ +func TestEIP712BankSend(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) +} + +func TestEIP712BankSendWithBalanceCheck(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) +} + +func TestEIP712MultipleBankSends(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) +} + +/* +* Account Abstraction Tests + */ +func TestAccountAbstractionEIP7702(t *testing.T) { + suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) +} + +/* +* Chain Upgrade Tests + */ +func TestChainUpgrade(t *testing.T) { + suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) +} diff --git a/tests/systemtests/mempool/test_exceptions.go b/tests/systemtests/mempool/test_exceptions.go index 0a2063e0b..459231ebc 100644 --- a/tests/systemtests/mempool/test_exceptions.go +++ b/tests/systemtests/mempool/test_exceptions.go @@ -10,90 +10,6 @@ import ( "github.com/test-go/testify/require" ) -func RunTxRebroadcasting(t *testing.T, base *suite.BaseTestSuite) { - testCases := []struct { - name string - actions []func(*TestSuite, *TestContext) - }{ - { - name: "ordering of pending txs %s", - actions: []func(*TestSuite, 
*TestContext){ - func(s *TestSuite, ctx *TestContext) { - signer := s.Acc(0) - - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx2, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx3, err := s.SendTx(t, s.Node(2), signer.ID, 2, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // Skip tx4 with nonce 3 - - tx5, err := s.SendTx(t, s.Node(3), signer.ID, 4, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx6, err := s.SendTx(t, s.Node(0), signer.ID, 5, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // At AfterEachAction hook, we will check expected queued txs are not broadcasted. - ctx.SetExpPendingTxs(tx1, tx2, tx3) - ctx.SetExpQueuedTxs(tx5, tx6) - }, - func(s *TestSuite, ctx *TestContext) { - // Wait for 3 blocks. - // It is because tx1, tx2, tx3 are sent to different nodes, tx3 needs maximum 3 blocks to be committed. - // e.g. node3 is 1st proposer -> tx3 will tale 1 block to be committed. - // e.g. node3 is 3rd proposer -> tx3 will take 3 blocks to be committed. - s.AwaitNBlocks(t, 3) - - // current nonce is 3. - // so, we should set nonce idx to 0. - nonce3Idx := uint64(0) - - signer := s.Acc(0) - - tx4, err := s.SendTx(t, s.Node(2), signer.ID, nonce3Idx, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // At AfterEachAction hook, we will check expected pending txs are broadcasted. 
- ctx.SetExpPendingTxs(tx4) - ctx.PromoteExpTxs(2) - }, - }, - }, - } - - testOptions := []*suite.TestOptions{ - { - Description: "EVM LegacyTx", - TxType: suite.TxTypeEVM, - IsDynamicFeeTx: false, - }, - } - - s := NewTestSuite(base) - s.SetupTest(t) - - for _, to := range testOptions { - s.SetOptions(to) - for _, tc := range testCases { - testName := fmt.Sprintf(tc.name, to.Description) - t.Run(testName, func(t *testing.T) { - ctx := NewTestContext() - s.BeforeEachCase(t, ctx) - for _, action := range tc.actions { - action(s, ctx) - s.AfterEachAction(t, ctx) - } - s.AfterEachCase(t, ctx) - }) - } - } -} - func RunMinimumGasPricesZero(t *testing.T, base *suite.BaseTestSuite) { testCases := []struct { name string From 887cb85e61d0fc63a9533d80a810c0ea381034e0 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 11:26:10 -0500 Subject: [PATCH 05/19] a --- mempool/mempool.go | 11 ++ rpc/backend/call_tx.go | 6 + tests/systemtests/main_test.go | 107 +++++++------- tests/systemtests/mempool/test_broadcast.go | 151 ++++++++++++++++++++ 4 files changed, 222 insertions(+), 53 deletions(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index 15b45b285..62a1d5540 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/holiman/uint256" @@ -424,6 +425,16 @@ func (m *ExperimentalEVMMempool) HasEventBus() bool { return m.eventBus != nil } +// Has returns true if the transaction with the given hash is already in the mempool. +// This checks both the legacy pool and the main tx pool for EVM transactions. +func (m *ExperimentalEVMMempool) Has(hash common.Hash) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + // Check both the legacy pool and the tx pool + return m.legacyTxPool.Has(hash) || m.txPool.Has(hash) +} + // Close unsubscribes from the CometBFT event bus and shuts down the mempool. 
func (m *ExperimentalEVMMempool) Close() error { var errs []error diff --git a/rpc/backend/call_tx.go b/rpc/backend/call_tx.go index 61dfc0b34..a47b9181b 100644 --- a/rpc/backend/call_tx.go +++ b/rpc/backend/call_tx.go @@ -148,6 +148,12 @@ func (b *Backend) SendRawTransaction(data hexutil.Bytes) (common.Hash, error) { txHash := ethereumTx.AsTransaction().Hash() + // Check if transaction is already in the mempool before broadcasting + // This is important for user-submitted transactions via JSON-RPC to provide proper error feedback + if b.Mempool != nil && b.Mempool.Has(txHash) { + return txHash, fmt.Errorf("transaction %s already known", txHash.Hex()) + } + syncCtx := b.ClientCtx.WithBroadcastMode(flags.BroadcastSync) rsp, err := syncCtx.BroadcastTx(txBytes) if rsp != nil && rsp.Code != 0 { diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go index 8705a0fab..2753ec654 100644 --- a/tests/systemtests/main_test.go +++ b/tests/systemtests/main_test.go @@ -3,9 +3,6 @@ package systemtests import ( - "github.com/cosmos/evm/tests/systemtests/accountabstraction" - "github.com/cosmos/evm/tests/systemtests/chainupgrade" - "github.com/cosmos/evm/tests/systemtests/eip712" "testing" "github.com/cosmos/evm/tests/systemtests/mempool" @@ -21,28 +18,32 @@ func TestMain(m *testing.M) { /* * Mempool Tests */ -func TestMempoolTxsOrdering(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) -} - -func TestMempoolTxsReplacement(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) -} - -func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) -} - -func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) -} - -func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) -} - -func 
TestMempoolTxBroadcasting(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) +//func TestMempoolTxsOrdering(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) +//} +// +//func TestMempoolTxsReplacement(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) +//} +// +//func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) +//} +// +//func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) +//} +// +//func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) +//} +// +//func TestMempoolTxBroadcasting(t *testing.T) { +// suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) +//} + +func TestMempoolTxDuplicateHandling(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxDuplicateHandling) } func TestMinimumGasPricesZero(t *testing.T) { @@ -53,31 +54,31 @@ func TestMempoolCosmosTxsCompatibility(t *testing.T) { suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) } -/* -* EIP-712 Tests - */ -func TestEIP712BankSend(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) -} - -func TestEIP712BankSendWithBalanceCheck(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) -} - -func TestEIP712MultipleBankSends(t *testing.T) { - suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) -} - -/* -* Account Abstraction Tests - */ -func TestAccountAbstractionEIP7702(t *testing.T) { - suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) -} - -/* -* Chain Upgrade Tests - */ -func TestChainUpgrade(t *testing.T) { - suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) -} +///* +//* EIP-712 Tests +// */ +//func TestEIP712BankSend(t *testing.T) { +// suite.RunWithSharedSuite(t, 
eip712.RunEIP712BankSend) +//} +// +//func TestEIP712BankSendWithBalanceCheck(t *testing.T) { +// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) +//} +// +//func TestEIP712MultipleBankSends(t *testing.T) { +// suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) +//} +// +///* +//* Account Abstraction Tests +// */ +//func TestAccountAbstractionEIP7702(t *testing.T) { +// suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) +//} +// +///* +//* Chain Upgrade Tests +// */ +//func TestChainUpgrade(t *testing.T) { +// suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) +//} diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 5bf54f064..3611b4b88 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -14,6 +14,157 @@ import ( "github.com/cosmos/evm/tests/systemtests/suite" ) +// RunTxDuplicateHandling tests that duplicate transactions are properly rejected when submitted via JSON-RPC. +// +// IMPORTANT: This test currently FAILS because ErrAlreadyKnown is silently converted to success in check_tx.go. +// The fix needs to be implemented on the RPC side to distinguish between: +// - User-submitted duplicates (JSON-RPC) -> MUST return error +// - Internal rebroadcast/gossip -> Should be silent (current behavior is correct for this) +// +// When a duplicate transaction is sent via JSON-RPC, the txpool correctly returns ErrAlreadyKnown, +// but CheckTx converts it to success. The RPC handler should intercept this and return an appropriate +// error to the user instead. 
+func RunTxDuplicateHandling(t *testing.T, base *suite.BaseTestSuite) { + testCases := []struct { + name string + actions []func(*TestSuite, *TestContext) + }{ + { + name: "duplicate tx handling %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Verify the transaction is in the pool + pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) + require.NoError(t, err) + require.Contains(t, pendingTxs, tx1.TxHash, "transaction should be in pending pool") + + // Send the SAME transaction again to the same node via JSON-RPC + // This SHOULD return an error - users need to know when they're sending duplicates + // Currently this test will FAIL because ErrAlreadyKnown is silently converted to success in check_tx.go + // TODO: Fix this on the RPC side to return proper error for user-submitted duplicates + tx1Duplicate, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC MUST return error (currently fails - needs RPC-side fix)") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + // If we got an error (correct behavior), tx1Duplicate will be nil or have empty hash + // If no error (current broken behavior), verify no duplication occurred + if err == nil { + require.Equal(t, tx1.TxHash, tx1Duplicate.TxHash, "duplicate tx should have same hash") + + // Verify the transaction is still in the pool (not duplicated) + pendingTxs, _, err = s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) + require.NoError(t, err) + require.Contains(t, pendingTxs, tx1.TxHash, "transaction should still be in pending pool") + + // Count occurrences - should be exactly 1 + count := 0 + for _, hash := 
range pendingTxs { + if hash == tx1.TxHash { + count++ + } + } + require.Equal(t, 1, count, "transaction should appear exactly once in pending pool, not duplicated") + } + + t.Logf("✓ Duplicate transaction correctly rejected with error from JSON-RPC") + + ctx.SetExpPendingTxs(tx1) + }, + func(s *TestSuite, ctx *TestContext) { + // Test re-gossiping scenario: send duplicate to different node after broadcast + signer := s.Acc(0) + + // Send transaction to node0 + tx2, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Wait for it to be broadcast to node1 + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + found := false + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to node1 within %s", + tx2.TxHash, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + if slices.Contains(pendingTxs, tx2.TxHash) { + t.Logf("✓ Transaction %s broadcast to node1", tx2.TxHash) + found = true + } + } + } + + // Now try to send the same transaction to node1 via JSON-RPC + // Even though node1 already has it (from gossip), sending it again via JSON-RPC should error + // This is user-submitted, not internal rebroadcast + tx2Duplicate, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC should return error even on different node") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + if err == nil { + require.Equal(t, tx2.TxHash, tx2Duplicate.TxHash, "duplicate tx should have same hash") + } + + t.Logf("✓ JSON-RPC 
correctly rejects duplicate transaction that node1 already has from gossip") + + ctx.SetExpPendingTxs(tx2) + }, + }, + }, + } + + testOptions := []*suite.TestOptions{ + { + Description: "EVM LegacyTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: false, + }, + { + Description: "EVM DynamicFeeTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: true, + }, + } + + s := NewTestSuite(base) + s.SetupTest(t) + + for _, to := range testOptions { + s.SetOptions(to) + for _, tc := range testCases { + testName := fmt.Sprintf(tc.name, to.Description) + t.Run(testName, func(t *testing.T) { + ctx := NewTestContext() + s.BeforeEachCase(t, ctx) + for _, action := range tc.actions { + action(s, ctx) + } + s.AfterEachCase(t, ctx) + }) + } + } +} + // RunTxBroadcasting tests that transactions are broadcast to other nodes via mempool gossip // before blocks are committed. This verifies that the mempool rebroadcast functionality works // correctly and transactions propagate through the network via the mempool gossip protocol, From d0ba70d5dde4f9f7e89076549eea0ef74e4c58c6 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 11:34:19 -0500 Subject: [PATCH 06/19] merge duplicate with broadcast test --- tests/systemtests/main_test.go | 105 +++++---- tests/systemtests/mempool/test_broadcast.go | 237 +++++++------------- 2 files changed, 136 insertions(+), 206 deletions(-) diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go index 2753ec654..4b2609c99 100644 --- a/tests/systemtests/main_test.go +++ b/tests/systemtests/main_test.go @@ -3,6 +3,9 @@ package systemtests import ( + "github.com/cosmos/evm/tests/systemtests/accountabstraction" + "github.com/cosmos/evm/tests/systemtests/chainupgrade" + "github.com/cosmos/evm/tests/systemtests/eip712" "testing" "github.com/cosmos/evm/tests/systemtests/mempool" @@ -18,32 +21,28 @@ func TestMain(m *testing.M) { /* * Mempool Tests */ -//func TestMempoolTxsOrdering(t *testing.T) { -// suite.RunWithSharedSuite(t, 
mempool.RunTxsOrdering) -//} -// -//func TestMempoolTxsReplacement(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) -//} -// -//func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) -//} -// -//func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) -//} -// -//func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) -//} -// -//func TestMempoolTxBroadcasting(t *testing.T) { -// suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) -//} - -func TestMempoolTxDuplicateHandling(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunTxDuplicateHandling) +func TestMempoolTxsOrdering(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsOrdering) +} + +func TestMempoolTxsReplacement(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacement) +} + +func TestMempoolTxsReplacementWithCosmosTx(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx) +} + +func TestMempoolMixedTxsReplacementEVMAndCosmos(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementEVMAndCosmos) +} + +func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee) +} + +func TestMempoolTxBroadcasting(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting) } func TestMinimumGasPricesZero(t *testing.T) { @@ -54,31 +53,31 @@ func TestMempoolCosmosTxsCompatibility(t *testing.T) { suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) } -///* -//* EIP-712 Tests +// /* +// * EIP-712 Tests // */ -//func TestEIP712BankSend(t *testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) -//} -// -//func TestEIP712BankSendWithBalanceCheck(t 
*testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) -//} -// -//func TestEIP712MultipleBankSends(t *testing.T) { -// suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) -//} -// -///* -//* Account Abstraction Tests -// */ -//func TestAccountAbstractionEIP7702(t *testing.T) { -// suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) -//} -// -///* -//* Chain Upgrade Tests -// */ -//func TestChainUpgrade(t *testing.T) { -// suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) -//} +func TestEIP712BankSend(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend) +} + +func TestEIP712BankSendWithBalanceCheck(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712BankSendWithBalanceCheck) +} + +func TestEIP712MultipleBankSends(t *testing.T) { + suite.RunWithSharedSuite(t, eip712.RunEIP712MultipleBankSends) +} + +/* +* Account Abstraction Tests + */ +func TestAccountAbstractionEIP7702(t *testing.T) { + suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702) +} + +/* +* Chain Upgrade Tests + */ +func TestChainUpgrade(t *testing.T) { + suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) +} diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 3611b4b88..24f715032 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -14,161 +14,13 @@ import ( "github.com/cosmos/evm/tests/systemtests/suite" ) -// RunTxDuplicateHandling tests that duplicate transactions are properly rejected when submitted via JSON-RPC. +// RunTxBroadcasting tests transaction broadcasting and duplicate handling: // -// IMPORTANT: This test currently FAILS because ErrAlreadyKnown is silently converted to success in check_tx.go. 
-// The fix needs to be implemented on the RPC side to distinguish between: -// - User-submitted duplicates (JSON-RPC) -> MUST return error -// - Internal rebroadcast/gossip -> Should be silent (current behavior is correct for this) +// 1. Broadcasting: Verifies transactions are broadcast to other nodes via mempool gossip +// before blocks are committed, proving the mempool rebroadcast functionality works correctly. // -// When a duplicate transaction is sent via JSON-RPC, the txpool correctly returns ErrAlreadyKnown, -// but CheckTx converts it to success. The RPC handler should intercept this and return an appropriate -// error to the user instead. -func RunTxDuplicateHandling(t *testing.T, base *suite.BaseTestSuite) { - testCases := []struct { - name string - actions []func(*TestSuite, *TestContext) - }{ - { - name: "duplicate tx handling %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - signer := s.Acc(0) - - // Send transaction to node0 - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx to node0") - - // Verify the transaction is in the pool - pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) - require.NoError(t, err) - require.Contains(t, pendingTxs, tx1.TxHash, "transaction should be in pending pool") - - // Send the SAME transaction again to the same node via JSON-RPC - // This SHOULD return an error - users need to know when they're sending duplicates - // Currently this test will FAIL because ErrAlreadyKnown is silently converted to success in check_tx.go - // TODO: Fix this on the RPC side to return proper error for user-submitted duplicates - tx1Duplicate, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.Error(t, err, "duplicate tx via JSON-RPC MUST return error (currently fails - needs RPC-side fix)") - require.Contains(t, err.Error(), "already known", "error should 
indicate transaction is already known") - - // If we got an error (correct behavior), tx1Duplicate will be nil or have empty hash - // If no error (current broken behavior), verify no duplication occurred - if err == nil { - require.Equal(t, tx1.TxHash, tx1Duplicate.TxHash, "duplicate tx should have same hash") - - // Verify the transaction is still in the pool (not duplicated) - pendingTxs, _, err = s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) - require.NoError(t, err) - require.Contains(t, pendingTxs, tx1.TxHash, "transaction should still be in pending pool") - - // Count occurrences - should be exactly 1 - count := 0 - for _, hash := range pendingTxs { - if hash == tx1.TxHash { - count++ - } - } - require.Equal(t, 1, count, "transaction should appear exactly once in pending pool, not duplicated") - } - - t.Logf("✓ Duplicate transaction correctly rejected with error from JSON-RPC") - - ctx.SetExpPendingTxs(tx1) - }, - func(s *TestSuite, ctx *TestContext) { - // Test re-gossiping scenario: send duplicate to different node after broadcast - signer := s.Acc(0) - - // Send transaction to node0 - tx2, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx to node0") - - // Wait for it to be broadcast to node1 - maxWaitTime := 3 * time.Second - checkInterval := 100 * time.Millisecond - - timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) - defer cancel() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - found := false - for !found { - select { - case <-timeoutCtx.Done(): - require.FailNow(t, fmt.Sprintf( - "transaction %s was not broadcast to node1 within %s", - tx2.TxHash, maxWaitTime, - )) - case <-ticker.C: - pendingTxs, _, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) - if err != nil { - continue - } - if slices.Contains(pendingTxs, tx2.TxHash) { - t.Logf("✓ Transaction %s broadcast to node1", tx2.TxHash) - found = true - 
} - } - } - - // Now try to send the same transaction to node1 via JSON-RPC - // Even though node1 already has it (from gossip), sending it again via JSON-RPC should error - // This is user-submitted, not internal rebroadcast - tx2Duplicate, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.Error(t, err, "duplicate tx via JSON-RPC should return error even on different node") - require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") - - if err == nil { - require.Equal(t, tx2.TxHash, tx2Duplicate.TxHash, "duplicate tx should have same hash") - } - - t.Logf("✓ JSON-RPC correctly rejects duplicate transaction that node1 already has from gossip") - - ctx.SetExpPendingTxs(tx2) - }, - }, - }, - } - - testOptions := []*suite.TestOptions{ - { - Description: "EVM LegacyTx", - TxType: suite.TxTypeEVM, - IsDynamicFeeTx: false, - }, - { - Description: "EVM DynamicFeeTx", - TxType: suite.TxTypeEVM, - IsDynamicFeeTx: true, - }, - } - - s := NewTestSuite(base) - s.SetupTest(t) - - for _, to := range testOptions { - s.SetOptions(to) - for _, tc := range testCases { - testName := fmt.Sprintf(tc.name, to.Description) - t.Run(testName, func(t *testing.T) { - ctx := NewTestContext() - s.BeforeEachCase(t, ctx) - for _, action := range tc.actions { - action(s, ctx) - } - s.AfterEachCase(t, ctx) - }) - } - } -} - -// RunTxBroadcasting tests that transactions are broadcast to other nodes via mempool gossip -// before blocks are committed. This verifies that the mempool rebroadcast functionality works -// correctly and transactions propagate through the network via the mempool gossip protocol, -// not just via block propagation. +// 2. Duplicate Handling: Verifies that duplicate transactions submitted via JSON-RPC are +// properly rejected with "already known" error, while internal gossip/rebroadcast remains silent. 
// // The test uses a slower block time (5 seconds) to ensure we have enough time to verify // that transactions appear in other nodes' mempools before a block is produced. @@ -341,6 +193,85 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }, }, }, + { + name: "duplicate tx rejected on same node %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Verify the transaction is in the pool + pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) + require.NoError(t, err) + require.Contains(t, pendingTxs, tx1.TxHash, "transaction should be in pending pool") + + // Send the SAME transaction again to the same node via JSON-RPC + // This should return an error - users need to know when they're sending duplicates + _, err = s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC must return error") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + t.Logf("✓ Duplicate transaction correctly rejected with 'already known' error") + + ctx.SetExpPendingTxs(tx1) + }, + }, + }, + { + name: "duplicate tx rejected after gossip %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 4, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Wait for it to be broadcast to node1 via gossip + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer 
ticker.Stop() + + found := false + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to node1 within %s", + tx1.TxHash, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + if slices.Contains(pendingTxs, tx1.TxHash) { + t.Logf("✓ Transaction %s broadcast to node1 via gossip", tx1.TxHash) + found = true + } + } + } + + // Now try to send the same transaction to node1 via JSON-RPC + // Even though node1 already has it from gossip, JSON-RPC submission should error + _, err = s.SendTx(t, s.Node(1), signer.ID, 4, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC should return error even after gossip") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + t.Logf("✓ JSON-RPC correctly rejects duplicate that node already has from gossip") + + ctx.SetExpPendingTxs(tx1) + }, + }, + }, } testOptions := []*suite.TestOptions{ From 41ffdb73398fd1273c6718c3ae4d56acc8020c2f Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 11:42:37 -0500 Subject: [PATCH 07/19] use error type --- rpc/backend/call_tx.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/backend/call_tx.go b/rpc/backend/call_tx.go index a47b9181b..b505d3510 100644 --- a/rpc/backend/call_tx.go +++ b/rpc/backend/call_tx.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc/status" "github.com/cosmos/evm/mempool" + "github.com/cosmos/evm/mempool/txpool" rpctypes "github.com/cosmos/evm/rpc/types" evmtypes "github.com/cosmos/evm/x/vm/types" @@ -151,7 +152,7 @@ func (b *Backend) SendRawTransaction(data hexutil.Bytes) (common.Hash, error) { // Check if transaction is already in the mempool before broadcasting // This is important for user-submitted transactions via JSON-RPC to provide proper error feedback if b.Mempool != nil && 
b.Mempool.Has(txHash) { - return txHash, fmt.Errorf("transaction %s already known", txHash.Hex()) + return txHash, txpool.ErrAlreadyKnown } syncCtx := b.ClientCtx.WithBroadcastMode(flags.BroadcastSync) From 65f36a450452d81818cfe0524a87fc2220f10a72 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 12:38:57 -0500 Subject: [PATCH 08/19] explanation docs --- tests/systemtests/mempool/test_broadcast.go | 98 ++++++++++++++++----- 1 file changed, 75 insertions(+), 23 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 24f715032..203d44f25 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -14,33 +14,48 @@ import ( "github.com/cosmos/evm/tests/systemtests/suite" ) -// RunTxBroadcasting tests transaction broadcasting and duplicate handling: +// RunTxBroadcasting tests transaction broadcasting and duplicate handling in a multi-node network. // -// 1. Broadcasting: Verifies transactions are broadcast to other nodes via mempool gossip -// before blocks are committed, proving the mempool rebroadcast functionality works correctly. +// This test verifies two critical aspects of transaction propagation: // -// 2. Duplicate Handling: Verifies that duplicate transactions submitted via JSON-RPC are -// properly rejected with "already known" error, while internal gossip/rebroadcast remains silent. +// 1. Mempool Broadcasting: Transactions submitted to one node are gossiped to all other nodes +// via the mempool gossip protocol BEFORE blocks are produced. // -// The test uses a slower block time (5 seconds) to ensure we have enough time to verify -// that transactions appear in other nodes' mempools before a block is produced. +// 2. Duplicate Detection: The RPC layer properly rejects duplicate transactions submitted +// by users via JSON-RPC (returning txpool.ErrAlreadyKnown), while internal gossip remains silent. 
+// +// The test uses a 5-second consensus timeout to create a larger window for verifying that +// transactions appear in other nodes' mempools before blocks are committed. func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { testCases := []struct { name string actions []func(*TestSuite, *TestContext) }{ { + // Scenario 1: Basic Broadcasting and Transaction Promotion + // + // This scenario verifies that: + // 1. Transactions are gossiped to all nodes BEFORE blocks are committed + // 2. Queued transactions (with nonce gaps) are NOT gossiped + // 3. When gaps are filled, queued txs are promoted and then gossiped + // + // Broadcasting Flow: + // User -> JSON-RPC -> Mempool (pending) -> Gossip to peers + // When nonce gap filled: Queued -> Promoted to pending -> Gossiped name: "tx broadcast to other nodes %s", actions: []func(*TestSuite, *TestContext){ func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with nonce 0 to node0 + // Expected: tx is added to node0's pending pool (nonce is correct) signer := s.Acc(0) // Send transaction to node0 tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") - // Verify the transaction appears in other nodes' mempools BEFORE any block is committed - // This proves transactions are broadcast via mempool gossip, not just block propagation + // Step 2: Verify tx appears in nodes 1, 2, 3 mempools within 3 seconds + // Expected: tx is gossiped to all nodes BEFORE any block is committed (5s timeout) + // This proves mempool gossip works, not just block propagation maxWaitTime := 3 * time.Second checkInterval := 100 * time.Millisecond @@ -82,14 +97,17 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { ctx.SetExpPendingTxs(tx1) }, func(s *TestSuite, ctx *TestContext) { - // Test with nonce-gapped transactions to verify rebroadcast/promotion + // Step 3: Send tx with nonce 2 to node1 (creating a nonce gap) + // Current 
nonce is 1 (after previous tx), so nonce 2 creates a gap + // Expected: tx is added to node1's QUEUED pool (not pending due to gap) signer := s.Acc(0) // Send tx with nonce 2 to node1 (creating a gap since current nonce is 1) tx3, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx with nonce 2") - // This transaction should be queued on node1, not pending + // Step 4: Verify tx is in node1's QUEUED pool (not pending) + // Queued txs cannot execute until the nonce gap is filled maxWaitTime := 2 * time.Second checkInterval := 100 * time.Millisecond @@ -120,8 +138,9 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { } } - // Verify the queued transaction is NOT broadcast to other nodes - // (queued txs should not be gossiped) + // Step 5: Verify queued tx is NOT gossiped to other nodes + // Queued txs should stay local until they become pending (executable) + // This prevents network spam from non-executable transactions time.Sleep(1 * time.Second) // Give some time for any potential gossip for _, nodeIdx := range []int{0, 2, 3} { @@ -135,12 +154,16 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { "queued transaction should not be broadcast to %s", nodeID) } - // Now send the missing transaction (nonce 1) + // Step 6: Send tx with nonce 1 to node2 (filling the gap) + // Expected: tx is added to node2's pending pool and gossiped tx2, err := s.SendTx(t, s.Node(2), signer.ID, 1, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx with nonce 1") - // tx2 should be broadcast to all nodes - // tx3 should be promoted to pending on node1 and then broadcast to all nodes + // Step 7: Verify BOTH txs appear in all nodes' pending pools + // - tx2 (nonce=1) should be gossiped immediately (it's pending) + // - tx3 (nonce=2) should be promoted from queued to pending on node1 + // - Promoted tx3 should then be gossiped to all other nodes + // This proves 
queued txs get rebroadcast when promoted maxWaitTime = 3 * time.Second ticker2 := time.NewTicker(checkInterval) defer ticker2.Stop() @@ -194,22 +217,33 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }, }, { + // Scenario 2: Duplicate Detection on Same Node + // + // This scenario verifies that when a user submits the same transaction twice + // to the same node via JSON-RPC, the second submission returns an error. + // + // Error Handling: + // RPC Layer: Checks mempool.Has(txHash) -> returns txpool.ErrAlreadyKnown + // Users get immediate error feedback (not silent failure) name: "duplicate tx rejected on same node %s", actions: []func(*TestSuite, *TestContext){ func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with nonce 3 to node0 + // Expected: tx is accepted and added to pending pool signer := s.Acc(0) // Send transaction to node0 tx1, err := s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") - // Verify the transaction is in the pool + // Step 2: Verify tx is in node0's pending pool pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) require.NoError(t, err) require.Contains(t, pendingTxs, tx1.TxHash, "transaction should be in pending pool") - // Send the SAME transaction again to the same node via JSON-RPC - // This should return an error - users need to know when they're sending duplicates + // Step 3: Send the SAME transaction again to node0 via JSON-RPC + // Expected: Error returned (txpool.ErrAlreadyKnown) + // Users must receive error feedback for duplicate submissions _, err = s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC must return error") require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") @@ -221,16 +255,32 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }, }, { + // Scenario 3: 
Duplicate Detection After Gossip + // + // This scenario verifies that even when a node receives a transaction via + // internal gossip (not user submission), attempting to submit that same + // transaction again via JSON-RPC returns an error. + // + // Flow: + // 1. User submits tx to node0 -> added to mempool -> gossiped to node1 + // 2. User tries to submit same tx to node1 via JSON-RPC + // 3. RPC layer detects duplicate (mempool.Has) and returns error + // + // This ensures duplicate detection works regardless of how the node + // originally received the transaction (user submission vs gossip). name: "duplicate tx rejected after gossip %s", actions: []func(*TestSuite, *TestContext){ func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with nonce 4 to node0 + // Expected: tx is accepted, added to pending, and gossiped signer := s.Acc(0) // Send transaction to node0 tx1, err := s.SendTx(t, s.Node(0), signer.ID, 4, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") - // Wait for it to be broadcast to node1 via gossip + // Step 2: Wait for tx to be gossiped to node1 + // Expected: tx appears in node1's pending pool within 3 seconds maxWaitTime := 3 * time.Second checkInterval := 100 * time.Millisecond @@ -260,8 +310,10 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { } } - // Now try to send the same transaction to node1 via JSON-RPC - // Even though node1 already has it from gossip, JSON-RPC submission should error + // Step 3: Try to send the SAME transaction to node1 via JSON-RPC + // Expected: Error returned (txpool.ErrAlreadyKnown) + // Even though node1 received it via gossip (not user submission), + // the RPC layer should still detect and reject the duplicate _, err = s.SendTx(t, s.Node(1), signer.ID, 4, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC should return error even after gossip") require.Contains(t, err.Error(), "already known", "error should indicate 
transaction is already known") @@ -312,4 +364,4 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { }) } } -} \ No newline at end of file +} From df850173b4afb3a87aef3b0265d90f746f4f6023 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 12:39:06 -0500 Subject: [PATCH 09/19] uncomment now? --- tests/systemtests/mempool/test_replacement.go | 104 +++++++++--------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/tests/systemtests/mempool/test_replacement.go b/tests/systemtests/mempool/test_replacement.go index f705e5079..a140c1b17 100644 --- a/tests/systemtests/mempool/test_replacement.go +++ b/tests/systemtests/mempool/test_replacement.go @@ -16,47 +16,47 @@ func RunTxsReplacement(t *testing.T, base *suite.BaseTestSuite) { name string actions []func(*TestSuite, *TestContext) }{ - // Note: These test cases are unstable in the GitHub CI environment. - // When running it locally, please uncomment it and run the test. - // - // { - // name: "single pending tx submitted to same nodes %s", - // actions: []func(*TestSuite, *TestContext){ - // func(s *TestSuite, ctx *TestContext) { - // signer := s.Acc(0) - // _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - - // ctx.SetExpPendingTxs(tx2) - // }, - // }, - // }, - // { - // name: "multiple pending txs submitted to same nodes %s", - // actions: []func(*TestSuite, *TestContext){ - // func(s *TestSuite, ctx *TestContext) { - // signer := s.Acc(0) - // _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - - // _, err = s.SendTx(t, s.Node(0), 
signer.ID, 1, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // tx4, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - - // _, err = s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // tx6, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - - // ctx.SetExpPendingTxs(tx2, tx4, tx6) - // }, - // }, - // }, + //Note: These test cases are unstable in the GitHub CI environment. + //When running it locally, please uncomment it and run the test. + + { + name: "single pending tx submitted to same nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + ctx.SetExpPendingTxs(tx2) + }, + }, + }, + { + name: "multiple pending txs submitted to same nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + signer := s.Acc(0) + _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + _, err = s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + tx4, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + _, err = s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) + 
require.NoError(t, err, "failed to send tx") + tx6, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + ctx.SetExpPendingTxs(tx2, tx4, tx6) + }, + }, + }, { name: "single queued tx %s", actions: []func(*TestSuite, *TestContext){ @@ -278,17 +278,17 @@ func RunMixedTxsReplacementEVMAndCosmos(t *testing.T, base *suite.BaseTestSuite) // This test case is non-deterministic for unknown reason. // We need to manually test this case, find the reason, and enable this test case. // - // name: "single pending tx (high prio cosmos tx first) %s", - // actions: []func(*TestSuite, *TestContext){ - // func(s *TestSuite, ctx *TestContext) { - // tx1, err := s.SendCosmosTx(t, s.Node(0), "acc0", 0, s.BaseFeeMultiplier(20), nil) - // require.NoError(t, err, "failed to send tx") - // _, err = s.SendEthTx(t, s.Node(1), "acc0", 0, s.BaseFeeMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - - // ctx.SetExpPendingTxs(tx1) - // }, - // }, + name: "single pending tx (high prio cosmos tx first) %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + tx1, err := s.SendCosmosTx(t, s.Node(0), "acc0", 0, s.BaseFeeMultiplier(20), nil) + require.NoError(t, err, "failed to send tx") + _, err = s.SendEthTx(t, s.Node(1), "acc0", 0, s.BaseFeeMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + + ctx.SetExpPendingTxs(tx1) + }, + }, }, { name: "single queued tx (low prio evm tx first) %s", From 3b72d8ee418f06a631315f60f4fb801ad58b35af Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 12:45:24 -0500 Subject: [PATCH 10/19] no --- tests/systemtests/mempool/test_replacement.go | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/tests/systemtests/mempool/test_replacement.go b/tests/systemtests/mempool/test_replacement.go index a140c1b17..3d59f917a 100644 --- a/tests/systemtests/mempool/test_replacement.go 
+++ b/tests/systemtests/mempool/test_replacement.go @@ -153,52 +153,52 @@ func RunTxsReplacementWithCosmosTx(t *testing.T, base *suite.BaseTestSuite) { name string actions []func(*TestSuite, *TestContext) }{ - { - name: "single pending tx submitted to same nodes %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. - // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. - // After modifying CheckTxHandler, we can also modify this test case - // : high prio cosmos tx should replace low prio evm tx. - signer := s.Acc(0) - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - ctx.SetExpPendingTxs(tx1) - }, - }, - }, - { - name: "multiple pending txs submitted to same nodes %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. - // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. - // After modifying CheckTxHandler, we can also modify this test case - // : high prio cosmos tx should replace low prio evm tx. 
- signer := s.Acc(0) - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - tx3, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - _, err = s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - tx5, err := s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - _, err = s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - ctx.SetExpPendingTxs(tx1, tx3, tx5) - }, - }, - }, + //{ + // name: "single pending tx submitted to same nodes %s", + // actions: []func(*TestSuite, *TestContext){ + // func(s *TestSuite, ctx *TestContext) { + // // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. + // // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. + // // After modifying CheckTxHandler, we can also modify this test case + // // : high prio cosmos tx should replace low prio evm tx. + // signer := s.Acc(0) + // tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + // + // ctx.SetExpPendingTxs(tx1) + // }, + // }, + //}, + //{ + // name: "multiple pending txs submitted to same nodes %s", + // actions: []func(*TestSuite, *TestContext){ + // func(s *TestSuite, ctx *TestContext) { + // // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. 
+ // // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. + // // After modifying CheckTxHandler, we can also modify this test case + // // : high prio cosmos tx should replace low prio evm tx. + // signer := s.Acc(0) + // tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + // + // tx3, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // _, err = s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + // + // tx5, err := s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // _, err = s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + // + // ctx.SetExpPendingTxs(tx1, tx3, tx5) + // }, + // }, + //}, } testOptions := []*suite.TestOptions{ From 827b2d9a94aae42be4f706bcbf663a3eb7af67c5 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 12:47:35 -0500 Subject: [PATCH 11/19] revert --- tests/systemtests/mempool/test_replacement.go | 104 +++++++++--------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/tests/systemtests/mempool/test_replacement.go b/tests/systemtests/mempool/test_replacement.go index 3d59f917a..c70c1f012 100644 --- a/tests/systemtests/mempool/test_replacement.go +++ b/tests/systemtests/mempool/test_replacement.go @@ -16,47 +16,47 @@ func RunTxsReplacement(t *testing.T, base *suite.BaseTestSuite) { name string actions []func(*TestSuite, *TestContext) }{ - //Note: These test cases are unstable in the GitHub CI environment. 
- //When running it locally, please uncomment it and run the test. - - { - name: "single pending tx submitted to same nodes %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - signer := s.Acc(0) - _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - ctx.SetExpPendingTxs(tx2) - }, - }, - }, - { - name: "multiple pending txs submitted to same nodes %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - signer := s.Acc(0) - _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - _, err = s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - tx4, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - _, err = s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - tx6, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) - require.NoError(t, err, "failed to send tx") - - ctx.SetExpPendingTxs(tx2, tx4, tx6) - }, - }, - }, + // Note: These test cases are unstable in the GitHub CI environment. + // When running it locally, please uncomment it and run the test. 
+ // + // { + // name: "single pending tx submitted to same nodes %s", + // actions: []func(*TestSuite, *TestContext){ + // func(s *TestSuite, ctx *TestContext) { + // signer := s.Acc(0) + // _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + + // ctx.SetExpPendingTxs(tx2) + // }, + // }, + // }, + // { + // name: "multiple pending txs submitted to same nodes %s", + // actions: []func(*TestSuite, *TestContext){ + // func(s *TestSuite, ctx *TestContext) { + // signer := s.Acc(0) + // _, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // tx2, err := s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + + // _, err = s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // tx4, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + + // _, err = s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + // tx6, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) + // require.NoError(t, err, "failed to send tx") + + // ctx.SetExpPendingTxs(tx2, tx4, tx6) + // }, + // }, + // }, { name: "single queued tx %s", actions: []func(*TestSuite, *TestContext){ @@ -278,17 +278,17 @@ func RunMixedTxsReplacementEVMAndCosmos(t *testing.T, base *suite.BaseTestSuite) // This test case is non-deterministic for unknown reason. // We need to manually test this case, find the reason, and enable this test case. 
// - name: "single pending tx (high prio cosmos tx first) %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - tx1, err := s.SendCosmosTx(t, s.Node(0), "acc0", 0, s.BaseFeeMultiplier(20), nil) - require.NoError(t, err, "failed to send tx") - _, err = s.SendEthTx(t, s.Node(1), "acc0", 0, s.BaseFeeMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - ctx.SetExpPendingTxs(tx1) - }, - }, + // name: "single pending tx (high prio cosmos tx first) %s", + // actions: []func(*TestSuite, *TestContext){ + // func(s *TestSuite, ctx *TestContext) { + // tx1, err := s.SendCosmosTx(t, s.Node(0), "acc0", 0, s.BaseFeeMultiplier(20), nil) + // require.NoError(t, err, "failed to send tx") + // _, err = s.SendEthTx(t, s.Node(1), "acc0", 0, s.BaseFeeMultiplier(10), nil) + // require.NoError(t, err, "failed to send tx") + + // ctx.SetExpPendingTxs(tx1) + // }, + // }, }, { name: "single queued tx (low prio evm tx first) %s", From 43029eb089fc289bbd7f1bee45ca97b5d20321f3 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 12:55:25 -0500 Subject: [PATCH 12/19] test fix --- tests/systemtests/mempool/test_broadcast.go | 31 +++++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 203d44f25..668172418 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -237,9 +237,34 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { require.NoError(t, err, "failed to send tx to node0") // Step 2: Verify tx is in node0's pending pool - pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) - require.NoError(t, err) - require.Contains(t, pendingTxs, tx1.TxHash, "transaction should be in pending pool") + // Poll for the transaction to appear (it should be fast, but we need to wait for async processing) + maxWaitTime := 2 * 
time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + found := false + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not found in node0's pending pool within %s", + tx1.TxHash, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + if slices.Contains(pendingTxs, tx1.TxHash) { + found = true + } + } + } // Step 3: Send the SAME transaction again to node0 via JSON-RPC // Expected: Error returned (txpool.ErrAlreadyKnown) From e5b4a5213b540c1c18ed69339ec076c1d474515d Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 13:40:58 -0500 Subject: [PATCH 13/19] yo --- tests/systemtests/mempool/test_broadcast.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 668172418..60cb636a3 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -232,13 +232,15 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Expected: tx is accepted and added to pending pool signer := s.Acc(0) + s.AwaitNBlocks(t, 1) + // Send transaction to node0 tx1, err := s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") // Step 2: Verify tx is in node0's pending pool // Poll for the transaction to appear (it should be fast, but we need to wait for async processing) - maxWaitTime := 2 * time.Second + maxWaitTime := 3 * time.Second checkInterval := 100 * time.Millisecond timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) From 97183ebcb332a83e08d4e75d0fef6de864086415 Mon Sep 17 00:00:00 2001 From: 
Vlad Date: Fri, 12 Dec 2025 13:51:32 -0500 Subject: [PATCH 14/19] undo --- tests/systemtests/mempool/test_replacement.go | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/tests/systemtests/mempool/test_replacement.go b/tests/systemtests/mempool/test_replacement.go index c70c1f012..f705e5079 100644 --- a/tests/systemtests/mempool/test_replacement.go +++ b/tests/systemtests/mempool/test_replacement.go @@ -153,52 +153,52 @@ func RunTxsReplacementWithCosmosTx(t *testing.T, base *suite.BaseTestSuite) { name string actions []func(*TestSuite, *TestContext) }{ - //{ - // name: "single pending tx submitted to same nodes %s", - // actions: []func(*TestSuite, *TestContext){ - // func(s *TestSuite, ctx *TestContext) { - // // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. - // // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. - // // After modifying CheckTxHandler, we can also modify this test case - // // : high prio cosmos tx should replace low prio evm tx. - // signer := s.Acc(0) - // tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - // - // ctx.SetExpPendingTxs(tx1) - // }, - // }, - //}, - //{ - // name: "multiple pending txs submitted to same nodes %s", - // actions: []func(*TestSuite, *TestContext){ - // func(s *TestSuite, ctx *TestContext) { - // // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. - // // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. - // // After modifying CheckTxHandler, we can also modify this test case - // // : high prio cosmos tx should replace low prio evm tx. 
- // signer := s.Acc(0) - // tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - // - // tx3, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // _, err = s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - // - // tx5, err := s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) - // require.NoError(t, err, "failed to send tx") - // _, err = s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) - // require.NoError(t, err, "failed to send tx") - // - // ctx.SetExpPendingTxs(tx1, tx3, tx5) - // }, - // }, - //}, + { + name: "single pending tx submitted to same nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. + // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. + // After modifying CheckTxHandler, we can also modify this test case + // : high prio cosmos tx should replace low prio evm tx. + signer := s.Acc(0) + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + ctx.SetExpPendingTxs(tx1) + }, + }, + }, + { + name: "multiple pending txs submitted to same nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + // NOTE: Currently EVMD cannot handle tx reordering correctly when cosmos tx is used. 
+ // It is because of CheckTxHandler cannot handle errors from SigVerificationDecorator properly. + // After modifying CheckTxHandler, we can also modify this test case + // : high prio cosmos tx should replace low prio evm tx. + signer := s.Acc(0) + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + tx3, err := s.SendTx(t, s.Node(0), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + _, err = s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + tx5, err := s.SendTx(t, s.Node(0), signer.ID, 2, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx") + _, err = s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(20), big.NewInt(1)) + require.NoError(t, err, "failed to send tx") + + ctx.SetExpPendingTxs(tx1, tx3, tx5) + }, + }, + }, } testOptions := []*suite.TestOptions{ From 58b3077f158ee923282e04b80a04fba41e1a846d Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 13:58:23 -0500 Subject: [PATCH 15/19] it takes an index! 
--- tests/systemtests/mempool/test_broadcast.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 60cb636a3..140d8a1d7 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -228,14 +228,14 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { name: "duplicate tx rejected on same node %s", actions: []func(*TestSuite, *TestContext){ func(s *TestSuite, ctx *TestContext) { - // Step 1: Send tx with nonce 3 to node0 + // Step 1: Send tx with the current nonce to node0 // Expected: tx is accepted and added to pending pool signer := s.Acc(0) s.AwaitNBlocks(t, 1) // Send transaction to node0 - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") // Step 2: Verify tx is in node0's pending pool @@ -271,7 +271,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Step 3: Send the SAME transaction again to node0 via JSON-RPC // Expected: Error returned (txpool.ErrAlreadyKnown) // Users must receive error feedback for duplicate submissions - _, err = s.SendTx(t, s.Node(0), signer.ID, 3, s.GasPriceMultiplier(10), nil) + _, err = s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC must return error") require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") @@ -298,12 +298,12 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { name: "duplicate tx rejected after gossip %s", actions: []func(*TestSuite, *TestContext){ func(s *TestSuite, ctx *TestContext) { - // Step 1: Send tx with nonce 4 to node0 + // Step 1: Send tx with the current nonce to node0 // Expected: tx is accepted, added 
to pending, and gossiped signer := s.Acc(0) // Send transaction to node0 - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 4, s.GasPriceMultiplier(10), nil) + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") // Step 2: Wait for tx to be gossiped to node1 @@ -341,7 +341,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Expected: Error returned (txpool.ErrAlreadyKnown) // Even though node1 received it via gossip (not user submission), // the RPC layer should still detect and reject the duplicate - _, err = s.SendTx(t, s.Node(1), signer.ID, 4, s.GasPriceMultiplier(10), nil) + _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC should return error even after gossip") require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") From 23e6cf1b6f7784bac1687e624c3323acb76bf44b Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 14:14:29 -0500 Subject: [PATCH 16/19] Ensure that broadcast tests happen within a single block --- tests/systemtests/mempool/test_broadcast.go | 27 +++++++++++++++++++-- tests/systemtests/mempool/test_suite.go | 10 ++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index 140d8a1d7..bf642a58e 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -102,7 +102,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Expected: tx is added to node1's QUEUED pool (not pending due to gap) signer := s.Acc(0) - // Send tx with nonce 2 to node1 (creating a gap since current nonce is 1) + // Send tx with nonce +2 to node1 (creating a gap since current nonce is 1) tx3, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(10), nil) 
require.NoError(t, err, "failed to send tx with nonce 2") @@ -154,7 +154,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { "queued transaction should not be broadcast to %s", nodeID) } - // Step 6: Send tx with nonce 1 to node2 (filling the gap) + // Step 6: Send tx with nonce +1 to node2 (filling the gap) // Expected: tx is added to node2's pending pool and gossiped tx2, err := s.SendTx(t, s.Node(2), signer.ID, 1, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx with nonce 1") @@ -380,13 +380,36 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { for _, tc := range testCases { testName := fmt.Sprintf(tc.name, to.Description) t.Run(testName, func(t *testing.T) { + // Await a block before starting the test case (ensures clean state) + s.AwaitNBlocks(t, 1) + ctx := NewTestContext() s.BeforeEachCase(t, ctx) + + // Capture the initial block height - no blocks should be produced during the test case + initialHeight := s.GetCurrentBlockHeight(t, "node0") + t.Logf("Test case starting at block height %d", initialHeight) + + // Execute all test actions (broadcasting, mempool checks, etc.) 
for _, action := range tc.actions { action(s, ctx) // NOTE: We don't call AfterEachAction here because we're manually // checking the mempool state in the action functions } + + // Verify no blocks were produced during the test case + // All broadcasting and mempool checks should happen within a single block period + currentHeight := s.GetCurrentBlockHeight(t, "node0") + require.Equal(t, initialHeight, currentHeight, + "No blocks should be produced during test case execution - expected height %d but got %d", + initialHeight, currentHeight) + t.Logf("✓ Test case completed at same block height %d (no blocks produced)", currentHeight) + + // Now await a block to allow transactions to commit + s.AwaitNBlocks(t, 1) + t.Logf("Awaited block for transaction commits") + + // Verify transactions committed successfully s.AfterEachCase(t, ctx) }) } diff --git a/tests/systemtests/mempool/test_suite.go b/tests/systemtests/mempool/test_suite.go index 187119ece..4558b667e 100644 --- a/tests/systemtests/mempool/test_suite.go +++ b/tests/systemtests/mempool/test_suite.go @@ -31,6 +31,16 @@ func (s *TestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { s.BaseTestSuite.SetupTest(t, nodeStartArgs...) 
} +// GetCurrentBlockHeight returns the current block height from the specified node +func (s *TestSuite) GetCurrentBlockHeight(t *testing.T, nodeID string) uint64 { + t.Helper() + account := s.EthAccount("acc0") + ctx, cli, _ := s.EthClient.Setup(nodeID, account) + blockNumber, err := cli.BlockNumber(ctx) + require.NoError(t, err, "failed to get block number from %s", nodeID) + return blockNumber +} + // BeforeEach resets the expected mempool state and retrieves the current base fee before each test case func (s *TestSuite) BeforeEachCase(t *testing.T, ctx *TestContext) { ctx.Reset() From ee285038463d712eab16e7d0df4dc22e0b019082 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 14:18:12 -0500 Subject: [PATCH 17/19] xd --- tests/systemtests/mempool/test_broadcast.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index bf642a58e..a13c32063 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -232,8 +232,6 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Expected: tx is accepted and added to pending pool signer := s.Acc(0) - s.AwaitNBlocks(t, 1) - // Send transaction to node0 tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.NoError(t, err, "failed to send tx to node0") From af02b82bec6d574dae5660010c1f8a1a30c1d2e3 Mon Sep 17 00:00:00 2001 From: Vlad Date: Fri, 12 Dec 2025 14:28:50 -0500 Subject: [PATCH 18/19] clean --- mempool/mempool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index 62a1d5540..914196a8c 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -431,8 +431,8 @@ func (m *ExperimentalEVMMempool) Has(hash common.Hash) bool { m.mtx.Lock() defer m.mtx.Unlock() - // Check both the legacy pool and the tx pool - return m.legacyTxPool.Has(hash) || m.txPool.Has(hash) + // 
Check the tx pool
+	return m.txPool.Has(hash)
 }
 
 // Close unsubscribes from the CometBFT event bus and shuts down the mempool.

From 5b86d5a23ba2555b8e2170ef32af9b85c19e9b31 Mon Sep 17 00:00:00 2001
From: Vlad
Date: Fri, 12 Dec 2025 15:47:35 -0500
Subject: [PATCH 19/19] fix comment

---
 mempool/mempool.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mempool/mempool.go b/mempool/mempool.go
index 914196a8c..9b6612614 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -426,7 +426,7 @@ func (m *ExperimentalEVMMempool) HasEventBus() bool {
 }
 
 // Has returns true if the transaction with the given hash is already in the mempool.
-// This checks both the legacy pool and the main tx pool for EVM transactions.
+// This checks the tx pool for EVM transactions; the tx pool iterates through all registered subpools (currently only the legacy pool).
 func (m *ExperimentalEVMMempool) Has(hash common.Hash) bool {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()