#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>
#include <memory>
#include <random>
#include <unordered_map>
#include <vector>

#include "barretenberg/avm_fuzzer/mutations/basic_types/field.hpp"
#include "barretenberg/common/serialize.hpp"
#include "barretenberg/vm2/common/field.hpp"
#include "barretenberg/vm2/common/memory_types.hpp"
#include "barretenberg/vm2/constraining/testing/check_relation.hpp"
#include "barretenberg/vm2/generated/columns.hpp"
#include "barretenberg/vm2/generated/relations/memory.hpp"
#include "barretenberg/vm2/simulation/events/event_emitter.hpp"
#include "barretenberg/vm2/simulation/events/memory_event.hpp"
#include "barretenberg/vm2/simulation/events/range_check_event.hpp"
#include "barretenberg/vm2/simulation/gadgets/memory.hpp"
#include "barretenberg/vm2/simulation/gadgets/range_check.hpp"
#include "barretenberg/vm2/tooling/debugger.hpp"
#include "barretenberg/vm2/tracegen/execution_trace.hpp"
#include "barretenberg/vm2/tracegen/memory_trace.hpp"
#include "barretenberg/vm2/tracegen/precomputed_trace.hpp"
#include "barretenberg/vm2/tracegen/range_check_trace.hpp"
#include "barretenberg/vm2/tracegen/test_trace_container.hpp"
| 24 | +#include "barretenberg/vm2/tracegen/test_trace_container.hpp" |
| 25 | + |
| 26 | +using namespace bb::avm2::simulation; |
| 27 | +using namespace bb::avm2::tracegen; |
| 28 | +using namespace bb::avm2::constraining; |
| 29 | + |
| 30 | +using bb::avm2::FF; |
| 31 | +using bb::avm2::MemoryAddress; |
| 32 | +using bb::avm2::MemoryTag; |
| 33 | +using bb::avm2::MemoryValue; |
| 34 | + |
| 35 | +using memory_rel = bb::avm2::memory<FF>; |
| 36 | + |
// All memory tags in a fixed cycle order, used when mutating a value's tag:
// "upcast" steps one position forward in this array (wrapping U128 -> FF) and
// "downcast" steps one position backward.
// NOTE(review): assumes the MemoryTag enum's numeric values match the indices
// of this array — confirm against memory_types.hpp.
const std::array<MemoryTag, 7> memory_tags = {
    MemoryTag::FF, MemoryTag::U1, MemoryTag::U8, MemoryTag::U16, MemoryTag::U32, MemoryTag::U64, MemoryTag::U128,
};
| 41 | + |
| 42 | +struct MemoryFuzzerInput { |
| 43 | + uint8_t num_of_entries_input = 1; // The number of read/write operations to perform |
| 44 | + uint64_t read_write_encoding = 0; // Bitmask: 1 = write, 0 = read |
| 45 | + uint64_t upcast_encoding = 0; // Bitmask: 1 = upcast on write |
| 46 | + uint64_t downcast_encoding = 0; // Bitmask: 1 = downcast on read |
| 47 | + uint64_t selection_encoding = 0; // element selection |
| 48 | + uint8_t space_ids = 0; // |
| 49 | + |
| 50 | + std::array<MemoryValue, 16> init_memory_values{}; |
| 51 | + std::array<MemoryAddress, 16> memory_addresses{}; |
| 52 | + |
| 53 | + MemoryFuzzerInput() = default; |
| 54 | + |
| 55 | + void to_buffer(uint8_t* buffer) const |
| 56 | + { |
| 57 | + size_t offset = 0; |
| 58 | + std::memcpy(buffer + offset, &num_of_entries_input, sizeof(num_of_entries_input)); |
| 59 | + offset += sizeof(num_of_entries_input); |
| 60 | + std::memcpy(buffer + offset, &read_write_encoding, sizeof(read_write_encoding)); |
| 61 | + offset += sizeof(read_write_encoding); |
| 62 | + std::memcpy(buffer + offset, &upcast_encoding, sizeof(upcast_encoding)); |
| 63 | + offset += sizeof(upcast_encoding); |
| 64 | + std::memcpy(buffer + offset, &downcast_encoding, sizeof(downcast_encoding)); |
| 65 | + offset += sizeof(downcast_encoding); |
| 66 | + std::memcpy(buffer + offset, &selection_encoding, sizeof(selection_encoding)); |
| 67 | + offset += sizeof(selection_encoding); |
| 68 | + std::memcpy(buffer + offset, &space_ids, sizeof(space_ids)); |
| 69 | + offset += sizeof(space_ids); |
| 70 | + std::memcpy(buffer + offset, &init_memory_values[0], sizeof(MemoryValue) * init_memory_values.size()); |
| 71 | + offset += sizeof(MemoryValue) * init_memory_values.size(); |
| 72 | + std::memcpy(buffer + offset, &memory_addresses[0], sizeof(MemoryAddress) * memory_addresses.size()); |
| 73 | + } |
| 74 | + |
| 75 | + MemoryFuzzerInput static from_buffer(const uint8_t* buffer) |
| 76 | + { |
| 77 | + MemoryFuzzerInput input; |
| 78 | + size_t offset = 0; |
| 79 | + std::memcpy(&input.num_of_entries_input, buffer + offset, sizeof(input.num_of_entries_input)); |
| 80 | + offset += sizeof(input.num_of_entries_input); |
| 81 | + std::memcpy(&input.read_write_encoding, buffer + offset, sizeof(input.read_write_encoding)); |
| 82 | + offset += sizeof(input.read_write_encoding); |
| 83 | + std::memcpy(&input.upcast_encoding, buffer + offset, sizeof(input.upcast_encoding)); |
| 84 | + offset += sizeof(input.upcast_encoding); |
| 85 | + std::memcpy(&input.downcast_encoding, buffer + offset, sizeof(input.downcast_encoding)); |
| 86 | + offset += sizeof(input.downcast_encoding); |
| 87 | + std::memcpy(&input.selection_encoding, buffer + offset, sizeof(input.selection_encoding)); |
| 88 | + offset += sizeof(input.selection_encoding); |
| 89 | + std::memcpy(&input.space_ids, buffer + offset, sizeof(input.space_ids)); |
| 90 | + offset += sizeof(input.space_ids); |
| 91 | + std::memcpy( |
| 92 | + &input.init_memory_values[0], buffer + offset, sizeof(MemoryValue) * input.init_memory_values.size()); |
| 93 | + offset += sizeof(MemoryValue) * input.init_memory_values.size(); |
| 94 | + std::memcpy(&input.memory_addresses[0], buffer + offset, sizeof(MemoryAddress) * input.memory_addresses.size()); |
| 95 | + |
| 96 | + return input; |
| 97 | + } |
| 98 | +}; |
| 99 | + |
extern "C" {
// Extra coverage counter handed to libFuzzer via the dedicated section:
// LLVMFuzzerTestOneInput stores the requested entry count here so the fuzzer
// treats inputs that exercise more memory operations as coverage progress.
__attribute__((section("__libfuzzer_extra_counters"))) uint8_t num_of_entries = 0;
}
| 103 | + |
| 104 | +std::vector<MemoryValue> generate_memory_values(const MemoryFuzzerInput& input) |
| 105 | +{ |
| 106 | + std::vector<MemoryValue> values; |
| 107 | + values.reserve(num_of_entries); |
| 108 | + |
| 109 | + // Place initial values |
| 110 | + for (const auto& val : input.init_memory_values) { |
| 111 | + values.emplace_back(val); |
| 112 | + } |
| 113 | + |
| 114 | + // Generate additional values based on encodings |
| 115 | + for (size_t i = input.init_memory_values.size(); i < num_of_entries; ++i) { |
| 116 | + auto entry_idx = (input.selection_encoding >> i) % values.size(); |
| 117 | + auto entry_value = values[entry_idx]; |
| 118 | + |
| 119 | + FF modified_value = entry_value.as_ff() + input.init_memory_values[i % input.init_memory_values.size()].as_ff(); |
| 120 | + |
| 121 | + auto should_upcast = (input.upcast_encoding >> i) & 1; |
| 122 | + auto should_downcast = (input.downcast_encoding >> i) & 1; |
| 123 | + if (should_upcast == 1) { |
| 124 | + // Upcast logic (example: change tag to a larger type) |
| 125 | + auto new_tag_index = (static_cast<uint8_t>(entry_value.get_tag()) + 1) % memory_tags.size(); |
| 126 | + auto memory_tag = memory_tags[new_tag_index]; |
| 127 | + entry_value = MemoryValue::from_tag_truncating(memory_tag, modified_value); |
| 128 | + } |
| 129 | + if (should_downcast == 1) { |
| 130 | + // Downcast logic (example: change tag to a smaller type) |
| 131 | + auto new_tag_index = (static_cast<uint8_t>(entry_value.get_tag()) - 1) % memory_tags.size(); |
| 132 | + auto memory_tag = memory_tags[new_tag_index]; |
| 133 | + entry_value = MemoryValue::from_tag_truncating(memory_tag, modified_value); |
| 134 | + } |
| 135 | + values.emplace_back(entry_value); |
| 136 | + } |
| 137 | + return values; |
| 138 | +} |
| 139 | + |
| 140 | +std::vector<MemoryAddress> generate_memory_addresses(const MemoryFuzzerInput& input) |
| 141 | +{ |
| 142 | + std::vector<MemoryAddress> addresses; |
| 143 | + addresses.reserve(num_of_entries); |
| 144 | + |
| 145 | + // Place initial addresses |
| 146 | + for (const auto& addr : input.memory_addresses) { |
| 147 | + addresses.emplace_back(addr); |
| 148 | + } |
| 149 | + |
| 150 | + for (size_t i = 0; i < num_of_entries; ++i) { |
| 151 | + // Select addresses in a round-robin fashion |
| 152 | + auto addr = input.memory_addresses[i % input.memory_addresses.size()]; |
| 153 | + addresses.emplace_back(addr + addr); |
| 154 | + } |
| 155 | + return addresses; |
| 156 | +} |
| 157 | + |
| 158 | +extern "C" size_t LLVMFuzzerCustomMutator(uint8_t* data, size_t size, size_t, unsigned int seed) |
| 159 | +{ |
| 160 | + if (size < sizeof(MemoryFuzzerInput)) { |
| 161 | + // Initialize with default input |
| 162 | + MemoryFuzzerInput input; |
| 163 | + input.to_buffer(data); |
| 164 | + return sizeof(MemoryFuzzerInput); |
| 165 | + } |
| 166 | + |
| 167 | + std::mt19937 rng(seed); |
| 168 | + MemoryFuzzerInput input = MemoryFuzzerInput::from_buffer(data); |
| 169 | + std::uniform_int_distribution<int> mutation_dist(0, 7); |
| 170 | + int mutation_choice = mutation_dist(rng); |
| 171 | + |
| 172 | + switch (mutation_choice) { |
| 173 | + case 0: { |
| 174 | + // Modify num_of_entries |
| 175 | + std::uniform_int_distribution<int> num_entries_dist(-8, 8); |
| 176 | + int new_val = static_cast<int>(input.num_of_entries_input) + num_entries_dist(rng); |
| 177 | + input.num_of_entries_input = static_cast<uint8_t>(std::clamp(new_val, 0, 63)); |
| 178 | + break; |
| 179 | + } |
| 180 | + case 1: { |
| 181 | + // Toggle a rw at a certain entry |
| 182 | + std::uniform_int_distribution<size_t> entry_dist(0, input.num_of_entries_input - 1); |
| 183 | + size_t entry_idx = entry_dist(rng); |
| 184 | + input.read_write_encoding ^= (1ULL << entry_idx); |
| 185 | + break; |
| 186 | + } |
| 187 | + case 2: { |
| 188 | + // Toggle upcast for a random entry |
| 189 | + std::uniform_int_distribution<size_t> entry_dist(0, input.num_of_entries_input - 1); |
| 190 | + size_t entry_idx = entry_dist(rng); |
| 191 | + input.upcast_encoding ^= (1ULL << entry_idx); |
| 192 | + break; |
| 193 | + } |
| 194 | + case 3: { |
| 195 | + // Toggle downcast for a random entry |
| 196 | + std::uniform_int_distribution<size_t> entry_dist(0, input.num_of_entries_input - 1); |
| 197 | + size_t entry_idx = entry_dist(rng); |
| 198 | + input.downcast_encoding ^= (1ULL << entry_idx); |
| 199 | + break; |
| 200 | + } |
| 201 | + case 4: { |
| 202 | + // Toggle selection encoding for a random entry |
| 203 | + std::uniform_int_distribution<size_t> entry_dist(0, input.num_of_entries_input - 1); |
| 204 | + size_t entry_idx = entry_dist(rng); |
| 205 | + input.selection_encoding ^= (1ULL << entry_idx); |
| 206 | + break; |
| 207 | + } |
| 208 | + case 5: { |
| 209 | + // Modify a random initial memory value |
| 210 | + std::uniform_int_distribution<size_t> value_dist(0, input.init_memory_values.size() - 1); |
| 211 | + size_t value_idx = value_dist(rng); |
| 212 | + // Random Tag from memory_tags |
| 213 | + std::uniform_int_distribution<size_t> tag_dist(0, memory_tags.size() - 1); |
| 214 | + size_t tag_idx = tag_dist(rng); |
| 215 | + std::uniform_int_distribution<uint64_t> dist(0, std::numeric_limits<uint64_t>::max()); |
| 216 | + |
| 217 | + std::array<uint64_t, 4> limbs; |
| 218 | + for (size_t i = 0; i < 4; ++i) { |
| 219 | + limbs[i] = dist(rng); |
| 220 | + } |
| 221 | + auto random_value = FF(limbs[0], limbs[1], limbs[2], limbs[3]); |
| 222 | + input.init_memory_values[value_idx] = MemoryValue::from_tag_truncating(memory_tags[tag_idx], random_value); |
| 223 | + break; |
| 224 | + } |
| 225 | + case 6: { |
| 226 | + // Incr/Decr a random memory address |
| 227 | + std::uniform_int_distribution<size_t> addr_idx_dist(0, input.memory_addresses.size() - 1); |
| 228 | + size_t addr_idx = addr_idx_dist(rng); |
| 229 | + std::uniform_int_distribution<int> addr_change(-1000, 1000); |
| 230 | + int new_addr = static_cast<int>(input.memory_addresses[addr_idx]) + addr_change(rng); |
| 231 | + input.memory_addresses[addr_idx] = static_cast<uint32_t>(new_addr); |
| 232 | + break; |
| 233 | + } |
| 234 | + case 7: { |
| 235 | + // Incr/Decr space_ids |
| 236 | + std::uniform_int_distribution<int> context_dist(-4, 4); |
| 237 | + int new_val = static_cast<int>(input.space_ids) + context_dist(rng); |
| 238 | + input.space_ids = static_cast<uint8_t>(new_val); |
| 239 | + break; |
| 240 | + } |
| 241 | + default: |
| 242 | + break; |
| 243 | + } |
| 244 | + |
| 245 | + input.to_buffer(data); |
| 246 | + return sizeof(MemoryFuzzerInput); |
| 247 | +} |
| 248 | + |
| 249 | +extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) |
| 250 | +{ |
| 251 | + using bb::avm2::MemoryValue; |
| 252 | + |
| 253 | + if (size < sizeof(MemoryFuzzerInput)) { |
| 254 | + info("Input size too small"); |
| 255 | + return 0; |
| 256 | + } |
| 257 | + |
| 258 | + // Parse input |
| 259 | + const MemoryFuzzerInput input = MemoryFuzzerInput::from_buffer(data); |
| 260 | + |
| 261 | + // Set the libFuzzer extra counter from input |
| 262 | + // LibFuzzer will track increases in this value as coverage progress |
| 263 | + num_of_entries = input.num_of_entries_input; |
| 264 | + |
| 265 | + // Set up gadgets and event emitters |
| 266 | + DeduplicatingEventEmitter<RangeCheckEvent> range_check_emitter; |
| 267 | + EventEmitter<MemoryEvent> memory_emitter; |
| 268 | + RangeCheck range_check(range_check_emitter); |
| 269 | + |
| 270 | + uint32_t clk = 0; |
| 271 | + ExecutionIdManager execution_id_manager(clk); |
| 272 | + MemoryProvider mem_provider(range_check, execution_id_manager, memory_emitter); |
| 273 | + // Ensure at least 1 memory context exists |
| 274 | + size_t num_contexts = std::max(static_cast<size_t>(input.space_ids), 1UL); |
| 275 | + std::vector<std::unique_ptr<MemoryInterface>> memories; |
| 276 | + memories.reserve(num_contexts); |
| 277 | + |
| 278 | + for (size_t i = 0; i < num_contexts; ++i) { |
| 279 | + memories.push_back(mem_provider.make_memory(static_cast<uint8_t>(i))); |
| 280 | + } |
| 281 | + |
| 282 | + std::vector<MemoryValue> memory_contents = generate_memory_values(input); |
| 283 | + std::vector<MemoryAddress> memory_addresses = generate_memory_addresses(input); |
| 284 | + |
| 285 | + std::unordered_map<uint16_t, std::unordered_map<MemoryAddress, MemoryValue>> running_memory_states; |
| 286 | + |
| 287 | + for (size_t i = 0; i < num_of_entries; ++i) { |
| 288 | + // Pick a memory partition in round-robin fashion |
| 289 | + MemoryInterface* mem = memories[i % memories.size()].get(); |
| 290 | + // Determine if read or write |
| 291 | + bool is_write = ((input.read_write_encoding >> i) & 1) != 0; |
| 292 | + MemoryAddress addr = memory_addresses[i]; |
| 293 | + if (is_write) { |
| 294 | + mem->set(addr, memory_contents[i]); |
| 295 | + // Update running memory state |
| 296 | + running_memory_states[mem->get_space_id()][addr] = memory_contents[i]; |
| 297 | + } else { |
| 298 | + auto retrieved_val = mem->get(addr); |
| 299 | + // Verify against running memory state |
| 300 | + if (running_memory_states[mem->get_space_id()].contains(addr)) { |
| 301 | + auto expected_val = running_memory_states[mem->get_space_id()][addr]; |
| 302 | + assert(retrieved_val == expected_val); |
| 303 | + } else { |
| 304 | + // If address was never written to, assume default value is FF(0) |
| 305 | + assert(retrieved_val == MemoryValue::from_tag_truncating(MemoryTag::FF, FF(0))); |
| 306 | + } |
| 307 | + } |
| 308 | + execution_id_manager.increment_execution_id(); |
| 309 | + } |
| 310 | + |
| 311 | + TestTraceContainer trace; |
| 312 | + MemoryTraceBuilder memory_trace_builder; |
| 313 | + PrecomputedTraceBuilder precomputed_builder; |
| 314 | + precomputed_builder.process_misc(trace, execution_id_manager.get_execution_id()); |
| 315 | + |
| 316 | + memory_trace_builder.process(memory_emitter.dump_events(), trace); |
| 317 | + |
| 318 | + // Memory is not entirely standalone, we need to set a relation #[ACTIVE_ROW_NEEDS_PERM_SELECTOR] |
| 319 | + for (uint32_t i = 1; i <= num_of_entries; ++i) { |
| 320 | + trace.set(avm2::Column::memory_sel_register_op_0_, i, 1); |
| 321 | + } |
| 322 | + check_relation<memory_rel>(trace); |
| 323 | + |
| 324 | + // This makes it all realllllly slow |
| 325 | + // RangeCheckTraceBuilder range_check_builder; |
| 326 | + // precomputed_builder.process_tag_parameters(trace); |
| 327 | + // precomputed_builder.process_sel_range_16(trace); |
| 328 | + // precomputed_builder.process_misc(trace, 1 << 16); |
| 329 | + // range_check_builder.process(range_check_emitter.dump_events(), trace); |
| 330 | + |
| 331 | + // check_all_interactions<MemoryTraceBuilder>(trace); |
| 332 | + |
| 333 | + return 0; |
| 334 | +} |