Skip to content
36 changes: 22 additions & 14 deletions crates/anvil-polkadot/src/api_server/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1868,20 +1868,28 @@ async fn create_online_client(
substrate_service: &Service,
rpc_client: RpcClient,
) -> Result<OnlineClient<SrcChainConfig>> {
let genesis_block_number = substrate_service.genesis_block_number.try_into().map_err(|_| {
Error::InternalError(format!(
"Genesis block number {} is too large for u32 (max: {})",
substrate_service.genesis_block_number,
u32::MAX
))
})?;

let Some(genesis_hash) = substrate_service.client.hash(genesis_block_number).ok().flatten()
else {
return Err(Error::InternalError(format!(
"Genesis hash not found for genesis block number {}",
substrate_service.genesis_block_number
)));
// In fork mode, use the checkpoint hash directly to avoid lazy loading issues
// In normal mode, get the hash from the genesis block number
let genesis_hash = if let Some(checkpoint_hash) = substrate_service.checkpoint_hash {
// Fork mode: use the actual checkpoint block hash
checkpoint_hash
} else {
// Normal mode: get hash from genesis block number
let genesis_block_number = substrate_service.genesis_block_number.try_into().map_err(|_| {
Error::InternalError(format!(
"Genesis block number {} is too large for u32 (max: {})",
substrate_service.genesis_block_number,
u32::MAX
))
})?;

let Some(hash) = substrate_service.client.hash(genesis_block_number).ok().flatten() else {
return Err(Error::InternalError(format!(
"Genesis hash not found for genesis block number {}",
substrate_service.genesis_block_number
)));
};
hash
};

let Ok(runtime_version) = substrate_service.client.runtime_version_at(genesis_hash) else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ impl<Block: BlockT + DeserializeOwned> Backend<Block> {
}

#[inline]
fn fork_checkpoint(&self) -> Option<&Block::Header> {
/// Returns the fork checkpoint header when this backend was created in fork mode,
/// or `None` when running a normal (non-forked) chain.
///
/// NOTE(review): `fork_config` appears to be an `Option<(_, Block::Header)>` where the
/// second tuple element is the checkpoint header — confirm against the field definition,
/// which is outside this diff hunk.
pub fn fork_checkpoint(&self) -> Option<&Block::Header> {
self.fork_config.as_ref().map(|(_, checkpoint)| checkpoint)
}
}
Expand Down
44 changes: 41 additions & 3 deletions crates/anvil-polkadot/src/substrate_node/service/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,10 @@ pub struct Service {
pub mining_engine: Arc<MiningEngine>,
pub storage_overrides: Arc<Mutex<StorageOverrides>>,
pub genesis_block_number: u64,
pub fork_url: Option<String>,
/// Hash of the checkpoint block when in fork mode. Used to avoid lazy loading issues
/// when fetching metadata at startup.
pub checkpoint_hash: Option<Hash>,
}

type CreateInherentDataProviders = Box<
Expand Down Expand Up @@ -94,8 +98,6 @@ fn create_manual_seal_inherent_data_providers(
Err(e) => return futures::future::ready(Err(Box::new(e))),
};

println!("nex block num {}", next_block_number);

let id = client
.runtime_api()
.parachain_id(current_para_head.hash())
Expand Down Expand Up @@ -138,6 +140,36 @@ fn create_manual_seal_inherent_data_providers(
// This helps with allowing greater block production velocity per relay chain slot.
backend.inject_relay_slot_info(current_para_head.hash(), (slot_in_state, 0));

// Read the DMQ MQC head from parachain storage to avoid "DMQ head mismatch" errors
// The storage key is: twox_128("ParachainSystem") + twox_128("LastDmqMqcHead")
let pallet_prefix = polkadot_sdk::sp_core::twox_128(b"ParachainSystem");
let storage_prefix = polkadot_sdk::sp_core::twox_128(b"LastDmqMqcHead");
let mut dmq_storage_key = Vec::new();
dmq_storage_key.extend_from_slice(&pallet_prefix);
dmq_storage_key.extend_from_slice(&storage_prefix);

// Read the MessageQueueChain from storage and extract its head hash
use polkadot_sdk::sc_client_api::StorageProvider;
let dmq_mqc_head = client
.storage(
current_para_head.hash(),
&polkadot_sdk::sc_client_api::StorageKey(dmq_storage_key),
)
.ok()
.flatten()
.and_then(|encoded_data| {
// MessageQueueChain is just a wrapper around a Hash, decode it
// The MessageQueueChain stores the head as the last 32 bytes
if encoded_data.0.len() >= 32 {
let mut hash_bytes = [0u8; 32];
hash_bytes.copy_from_slice(&encoded_data.0[encoded_data.0.len() - 32..]);
Some(polkadot_sdk::cumulus_primitives_core::relay_chain::Hash::from(hash_bytes))
} else {
None
}
})
.unwrap_or_default(); // Use default (zeros) if we can't read it

let mocked_parachain = MockValidationDataInherentDataProvider::<()> {
current_para_block: next_block_number,
para_id,
Expand All @@ -148,6 +180,10 @@ fn create_manual_seal_inherent_data_providers(
relay_offset: last_rc_block_number + 1,
current_para_block_head,
additional_key_values: Some(additional_key_values),
xcm_config: polkadot_sdk::cumulus_client_parachain_inherent::MockXcmConfig {
starting_dmq_mqc_head: dmq_mqc_head,
starting_hrmp_mqc_heads: Default::default(),
},
..Default::default()
};

Expand Down Expand Up @@ -268,12 +304,14 @@ pub fn new(
Service {
spawn_handle: task_manager.spawn_handle(),
client,
backend,
backend: backend.clone(),
tx_pool: transaction_pool,
rpc_handlers,
mining_engine,
storage_overrides,
genesis_block_number: anvil_config.get_genesis_number(),
fork_url: anvil_config.eth_rpc_url.clone(),
checkpoint_hash: backend.fork_checkpoint().map(|h| h.hash()),
},
task_manager,
))
Expand Down
60 changes: 59 additions & 1 deletion crates/anvil-polkadot/tests/it/forking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use anvil_polkadot::{
api_server::revive_conversions::ReviveAddress,
config::{AnvilNodeConfig, ForkChoice, SubstrateNodeConfig},
};
use polkadot_sdk::pallet_revive::evm::Account;
use polkadot_sdk::{pallet_revive::evm::Account, sp_blockchain::HeaderBackend, sp_core::H256};

/// Tests that forking preserves state from the source chain and allows local modifications
#[tokio::test(flavor = "multi_thread")]
Expand Down Expand Up @@ -413,6 +413,64 @@ async fn test_fork_from_negative_block_number() {
assert_eq!(fork_new_block, 4, "Forked node should be at block 4");
}

/// Integration test: fork from live Westend AssetHub at a fixed block height and
/// verify (1) the node starts, (2) the reported best block equals the fork point, and
/// (3) the locally stored block hash at the fork point matches the hash reported by
/// the remote AssetHub RPC.
///
/// NOTE(review): this test depends on a live public endpoint
/// (`westend-asset-hub-rpc.polkadot.io`) being reachable and still serving block
/// 13268000 — it will fail offline or if the endpoint prunes/changes. Consider
/// gating it behind `#[ignore]` or a network-tests feature; confirm against the
/// crate's CI policy for the other fork tests in this file.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_from_westend_assethub() {
// Step 1: Set a specific block height to fork from
// NOTE(review): `fork_block_number`'s integer type is inferred from the
// `assert_eq!` against `best_block_number()` and the `block_hash(...)` call
// below; the explicit `as u64` cast at Step 2 may be redundant — confirm.
let fork_block_number = 13268000;
let assethub_rpc_url = "https://westend-asset-hub-rpc.polkadot.io".to_string();

// Step 2: Create a forked node from Westend AssetHub at the specific block
let fork_config = AnvilNodeConfig::test_config()
.with_port(0)
.with_eth_rpc_url(Some(assethub_rpc_url.clone()))
.with_fork_block_number(Some(fork_block_number as u64));

let fork_substrate_config = SubstrateNodeConfig::new(&fork_config);

// Step 3: Verify the forked node can start successfully
let mut fork_node = match TestNode::new(fork_config.clone(), fork_substrate_config).await {
Ok(node) => node,
Err(e) => {
// Startup failure is the primary regression this test guards against.
panic!("Failed to start forked node from AssetHub: {e}");
}
};

// Step 4: Get the initial block number from the fork
let fork_initial_block = fork_node.best_block_number().await;

// Verify the fork started at the expected block number
assert_eq!(
fork_initial_block, fork_block_number,
"Fork should start from block {fork_block_number}"
);

// Step 5: Query AssetHub to get expected block hash
// Uses the backend's fork RPC client (populated only in fork mode) so the
// expected hash comes from the same endpoint the node forked from.
let rpc_client = fork_node
.service
.backend
.rpc()
.expect("Fork mode should have RPC client configured");

let expected_block_hash: H256 = rpc_client
.block_hash(Some(fork_block_number))
.expect("Failed to get block hash from AssetHub RPC")
.expect("Block not found on AssetHub");

// Step 6: Get the Substrate block hash from the forked node's client
// `hash()` comes from the `HeaderBackend` trait imported at the top of the file.
let fork_substrate_hash = fork_node
.service
.client
.hash(fork_initial_block)
.expect("Failed to get block hash")
.expect("Block should exist");

// Step 7: Verify the fork's Substrate block hash matches the expected hash from AssetHub
// A mismatch would indicate the fork imported a checkpoint block that diverges
// from the canonical remote chain (e.g. re-hashed instead of copied verbatim).
assert_eq!(
fork_substrate_hash, expected_block_hash,
"Fork Substrate block hash should match AssetHub block hash at block {fork_block_number}"
);
}

/// Tests that forking preserves contract state from source chain and that multiple contract
/// instances maintain independent storage
#[tokio::test(flavor = "multi_thread")]
Expand Down
Loading