Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
78 changes: 73 additions & 5 deletions consensus/types/src/chain_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1179,7 +1179,8 @@ impl ChainSpec {
epoch: Epoch::new(419072),
max_blobs_per_block: 21,
},
]),
])
.expect("mainnet blob schedule has unique epochs"),
min_epochs_for_data_column_sidecars_requests:
default_min_epochs_for_data_column_sidecars_requests(),
max_data_columns_by_root_request: default_data_columns_by_root_request(),
Expand Down Expand Up @@ -1584,18 +1585,31 @@ impl<'de> Deserialize<'de> for BlobSchedule {
D: Deserializer<'de>,
{
let vec = Vec::<BlobParameters>::deserialize(deserializer)?;
Ok(BlobSchedule::new(vec))
BlobSchedule::new(vec).map_err(serde::de::Error::custom)
}
}

impl BlobSchedule {
pub fn new(mut vec: Vec<BlobParameters>) -> Self {
pub fn new(mut vec: Vec<BlobParameters>) -> Result<Self, String> {
// reverse sort by epoch
vec.sort_by(|a, b| b.epoch.cmp(&a.epoch));
Self {

// Validate that all epochs are unique
for i in 0..vec.len() {
for j in (i + 1)..vec.len() {
if vec[i].epoch == vec[j].epoch {
return Err(format!(
"Duplicate epoch in blob schedule: epoch {}",
vec[i].epoch
));
}
}
}

Ok(Self {
schedule: vec,
skip_serializing: false,
}
})
}

pub fn is_empty(&self) -> bool {
Expand Down Expand Up @@ -1667,6 +1681,60 @@ impl IntoIterator for BlobSchedule {
}
}

#[cfg(test)]
mod blob_schedule_tests {
    use super::*;

    #[test]
    fn test_unique_epochs_accepted() {
        // Distinct epochs: construction must succeed.
        let entry = |e, m| BlobParameters {
            epoch: Epoch::new(e),
            max_blobs_per_block: m,
        };
        assert!(BlobSchedule::new(vec![entry(10, 6), entry(20, 12)]).is_ok());
    }

    #[test]
    fn test_duplicate_epochs_rejected() {
        // Two entries sharing an epoch: construction must fail with a
        // descriptive error message.
        let entry = |e, m| BlobParameters {
            epoch: Epoch::new(e),
            max_blobs_per_block: m,
        };
        let err = BlobSchedule::new(vec![entry(10, 6), entry(10, 12)])
            .expect_err("duplicate epochs must be rejected");
        assert!(err.contains("Duplicate epoch in blob schedule"));
    }

    #[test]
    fn test_empty_schedule() {
        // An empty schedule is valid: there is nothing to conflict.
        assert!(BlobSchedule::new(Vec::new()).is_ok());
    }

    #[test]
    fn test_single_entry() {
        // A single entry trivially has a unique epoch.
        let only = BlobParameters {
            epoch: Epoch::new(10),
            max_blobs_per_block: 6,
        };
        assert!(BlobSchedule::new(vec![only]).is_ok());
    }
}

/// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON).
///
/// Fields relevant to hard forks after Altair should be optional so that we can continue
Expand Down
3 changes: 2 additions & 1 deletion consensus/types/src/fork_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,8 @@ mod tests {
spec.deneb_fork_epoch = Some(Epoch::new(4));
spec.electra_fork_epoch = Some(Epoch::new(5));
spec.fulu_fork_epoch = Some(Epoch::new(6));
spec.blob_schedule = BlobSchedule::new(blob_parameters);
spec.blob_schedule = BlobSchedule::new(blob_parameters)
.expect("test blob schedule has unique epochs");
spec
}

Expand Down