18 changes: 9 additions & 9 deletions vrs/FileDetailsCache.cpp
@@ -49,11 +49,11 @@ struct DiskStreamId {
explicit DiskStreamId(StreamId id)
: typeId(static_cast<uint16_t>(id.getTypeId())), instanceId(id.getInstanceId()) {}

FileFormat::LittleEndian<uint16_t> typeId;
FileFormat::LittleEndian<uint16_t> instanceId;
uint16_t typeId;
uint16_t instanceId;

RecordableTypeId getTypeId() const {
return static_cast<RecordableTypeId>(typeId());
return static_cast<RecordableTypeId>(typeId);
}

uint16_t getInstanceId() const {
@@ -74,13 +74,13 @@ struct DiskRecordInfo {
streamId(record.streamId),
recordType(static_cast<uint8_t>(record.recordType)) {}

FileFormat::LittleEndian<double> timestamp;
FileFormat::LittleEndian<int64_t> recordOffset;
double timestamp{};
int64_t recordOffset{};
DiskStreamId streamId;
FileFormat::LittleEndian<uint8_t> recordType;
uint8_t recordType{};

Record::Type getRecordType() const {
return static_cast<Record::Type>(recordType());
return static_cast<Record::Type>(recordType);
}

StreamId getStreamId() const {
@@ -176,8 +176,8 @@ int readIndexData(
set<StreamId>& outStreamIds,
vector<IndexRecord::RecordInfo>& outIndex,
size_t indexSize) {
FileFormat::LittleEndian<uint32_t> recordableCount;
FileFormat::LittleEndian<uint32_t> diskIndexSize;
uint32_t recordableCount{};
uint32_t diskIndexSize{};
if (!XR_VERIFY(indexSize >= sizeof(recordableCount))) {
return FAILURE;
}
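For context on the structs in this file: DiskStreamId and DiskRecordInfo are the packed on-disk forms of stream and record metadata, and switching their members to plain integers keeps the byte layout while dropping the wrapper. Below is a minimal round-trip sketch, assuming StreamId's usual (RecordableTypeId, instanceId) constructor; RecordableTypeId::VRSIndex is borrowed from elsewhere in this diff purely as a sample value, and includes are omitted.

// Sketch only, not part of the change.
void diskStreamIdRoundTripSketch() {
  vrs::StreamId id{vrs::RecordableTypeId::VRSIndex, 1};  // assumed constructor
  DiskStreamId disk{id};                        // pack: enum -> uint16_t, instance id copied
  assert(disk.getTypeId() == id.getTypeId());   // unpack: uint16_t -> RecordableTypeId
  assert(disk.getInstanceId() == id.getInstanceId());
}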
9 changes: 2 additions & 7 deletions vrs/FileFormat.cpp
@@ -188,17 +188,12 @@ void RecordHeader::initDescriptionHeader(
this->timestamp = Record::kMaxTimestamp;
}

template <typename ENUM, class LITTLEENDIAN>
bool littleEndianIsValid_cast(LITTLEENDIAN& littleEndian) {
return enumIsValid_cast<ENUM>(littleEndian());
}

bool RecordHeader::isSanityCheckOk() const {
if (!XR_VERIFY(recordSize >= sizeof(RecordHeader)) ||
!XR_VERIFY(previousRecordSize == 0 || previousRecordSize >= sizeof(RecordHeader))) {
return false;
}
if (!XR_VERIFY(littleEndianIsValid_cast<Record::Type>(recordType))) {
if (!XR_VERIFY(enumIsValid_cast<Record::Type>(recordType))) {
return false;
}
uint32_t uncompressedPayload = uncompressedSize; // doesn't include header already
@@ -213,7 +208,7 @@ bool RecordHeader::isSanityCheckOk() const {
return false;
}
}
if (!XR_VERIFY(littleEndianIsValid_cast<CompressionType>(compressionType))) {
if (!XR_VERIFY(enumIsValid_cast<CompressionType>(compressionType))) {
return false;
}
}
78 changes: 26 additions & 52 deletions vrs/FileFormat.h
@@ -56,48 +56,23 @@ namespace vrs {
namespace FileFormat {
#pragma pack(push, 1)

/// \brief Placeholder layer for endianness support, if we ever need it.
///
/// All it currently does is enforce that we read & write native types through get & set methods.
template <class T>
class LittleEndian final {
public:
LittleEndian() = default;
explicit LittleEndian(T value) : value_{value} {}

inline T operator()() const {
return value_;
}
inline operator T() const {
return value_;
}

inline LittleEndian& operator=(T value) {
value_ = value;
return *this;
}

private:
T value_{};
};
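For reference, the wrapper deleted above only funneled reads through operator() (or the implicit conversion) and writes through operator=. A minimal sketch of the two access styles side by side, using illustrative variable names rather than real VRS fields:

// Sketch only: contrasting the old wrapped access with the new plain members.
vrs::FileFormat::LittleEndian<uint32_t> wrapped;  // old style: default value is 0
wrapped = 42;                                     // write through operator=
uint32_t readOld = wrapped();                     // read through operator()

uint32_t plain{};                                 // new style: value-initialized native member
plain = 42;                                       // direct write
uint32_t readNew = plain;                         // direct read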

/// Every file starts with this header, which may grow but not shrink!
struct FileHeader {
LittleEndian<uint32_t> magicHeader1; ///< magic value #1
LittleEndian<uint32_t> magicHeader2; ///< magic value #2
LittleEndian<uint64_t> creationId; ///< A timestamp, hopefully unique, to match files (future).
LittleEndian<uint32_t> fileHeaderSize; ///< This header size, in bytes.
LittleEndian<uint32_t> recordHeaderSize; ///< Record headers' size, in bytes (same for all).
LittleEndian<int64_t> indexRecordOffset; ///< Index record offset in the whole file.
LittleEndian<int64_t> descriptionRecordOffset; ///< Description record offset in the whole file.
LittleEndian<int64_t>
firstUserRecordOffset; ///< Offset of the first user record in the file. If 0, the first
///< record is just after the description record (original behavior)
LittleEndian<uint64_t> future2; ///< For future use
LittleEndian<uint64_t> future3; ///< For future use
LittleEndian<uint64_t> future4; ///< For future use
LittleEndian<uint32_t> magicHeader3; ///< magic value #3
LittleEndian<uint32_t> fileFormatVersion; ///< file format version.
uint32_t magicHeader1{}; ///< magic value #1
uint32_t magicHeader2{}; ///< magic value #2
uint64_t creationId{}; ///< A timestamp, hopefully unique, to match files (future).
uint32_t fileHeaderSize{}; ///< This header size, in bytes.
uint32_t recordHeaderSize{}; ///< Record headers' size, in bytes (same for all).
int64_t indexRecordOffset{}; ///< Index record offset in the whole file.
int64_t descriptionRecordOffset{}; ///< Description record offset in the whole file.
int64_t
firstUserRecordOffset{}; ///< Offset of the first user record in the file. If 0, the first
///< record is just after the description record (original behavior)
uint64_t future2{}; ///< For future use
uint64_t future3{}; ///< For future use
uint64_t future4{}; ///< For future use
uint32_t magicHeader3{}; ///< magic value #3
uint32_t fileFormatVersion{}; ///< file format version.

/// Initialize the structure's fixed values with default values for a regular VRS file.
void init();
@@ -144,16 +119,15 @@ struct RecordHeader {
uint32_t previousRecordSize,
uint32_t recordSize,
uint32_t uncompressedSize);
LittleEndian<uint32_t> recordSize; ///< byte count to the next record, header + data
LittleEndian<uint32_t> previousRecordSize; ///< byte count to the previous record, header + data
LittleEndian<int32_t> recordableTypeId; ///< record handler type id
LittleEndian<uint32_t> formatVersion; ///< data format version, as declared by the data producer
LittleEndian<double> timestamp; ///< record presentation time stamp
LittleEndian<uint16_t> recordableInstanceId; ///< record handle instance id
LittleEndian<uint8_t> recordType; ///< See Record::Type
LittleEndian<uint8_t> compressionType; ///< compression used, or 0 for none
LittleEndian<uint32_t>
uncompressedSize; ///< uncompressed payload size without header. 0 if not compressed.
uint32_t recordSize{}; ///< byte count to the next record, header + data
uint32_t previousRecordSize{}; ///< byte count to the previous record, header + data
int32_t recordableTypeId{}; ///< record handler type id
uint32_t formatVersion{}; ///< data format version, as declared by the data producer
double timestamp{}; ///< record presentation time stamp
uint16_t recordableInstanceId{}; ///< record handle instance id
uint8_t recordType{}; ///< See Record::Type
uint8_t compressionType{}; ///< compression used, or 0 for none
uint32_t uncompressedSize{}; ///< uncompressed payload size without header. 0 if not compressed.

/// Set the record's type.
/// @param type: Type of the record, as an enum.
@@ -164,7 +138,7 @@
/// Get the record type, as an enum.
/// @return The record type.
inline Record::Type getRecordType() const {
return static_cast<Record::Type>(recordType());
return static_cast<Record::Type>(recordType);
}

/// Set the recordable type id for this record.
@@ -188,7 +162,7 @@
/// Get the compression type used when writing the payload of this record.
/// @return Compression type.
inline CompressionType getCompressionType() const {
return static_cast<CompressionType>(compressionType());
return static_cast<CompressionType>(compressionType);
}

/// Set the compression type used when writing the payload of this record.
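With the brace-initialized plain members above, a default-constructed FileHeader starts out as all zeros until init() fills in its fixed values. A minimal usage sketch of the struct as declared (includes omitted; the raw-bytes remark is an inference from the #pragma pack(1) layout, not a description of VRS's actual I/O path):

// Sketch only.
void fileHeaderSketch() {
  vrs::FileFormat::FileHeader header;   // every field value-initialized to 0
  assert(header.magicHeader1 == 0 && header.fileFormatVersion == 0);
  header.init();                        // set the fixed values for a regular VRS file
  // Packed (#pragma pack(1)) and trivially copyable, so the header can in principle
  // be written or read as raw bytes, e.g. fwrite(&header, sizeof(header), 1, fp).
}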
40 changes: 22 additions & 18 deletions vrs/IndexRecord.cpp
@@ -388,22 +388,25 @@ int IndexRecord::Reader::readClassicIndexRecord(
index_.reserve(recordStructs.size());
int64_t fileOffset = firstUserRecordOffset;
for (auto record : recordStructs) {
double timestamp = record.timestamp;
Record::Type recordType = record.getRecordType();
uint32_t recordSize = record.recordSize;
StreamId streamId = record.getStreamId();
if (!isValid(record.getRecordType())) {
XR_LOGE(
"Unexpected index record entry: Stream Id: {} Type: {} Size: {} Timestamp: {}",
record.getStreamId().getNumericName(),
toString(record.getRecordType()),
record.recordSize,
record.timestamp);
streamId.getNumericName(),
toString(recordType),
recordSize,
timestamp);
return INDEX_RECORD_ERROR;
}
int64_t nextFileOffset = fileOffset + record.recordSize;
int64_t nextFileOffset = fileOffset + recordSize;
if (nextFileOffset > totalFileSize_) {
droppedRecordCount_ = static_cast<int32_t>(recordStructs.size() - index_.size());
break; // The file is too short, and this record goes beyond the end...
}
index_.emplace_back(
record.timestamp, fileOffset, record.getStreamId(), record.getRecordType());
index_.emplace_back(timestamp, fileOffset, streamId, recordType);
if (index_.size() > 1 && index_.back() < index_[index_.size() - 2]) {
sortErrorCount_++;
}
@@ -536,28 +539,29 @@ int IndexRecord::Reader::readSplitIndexRecord(
index_.reserve(recordStructs.size());
const uint32_t recordHeaderSize = fileHeader_.recordHeaderSize;
for (const DiskRecordInfo& record : recordStructs) {
double timestamp = record.timestamp;
StreamId streamId = record.getStreamId();
uint32_t recordSize = record.recordSize;
Record::Type recordType = record.getRecordType();
if (record.recordSize < recordHeaderSize || !isValid(recordType)) {
if (recordSize < recordHeaderSize || !isValid(recordType)) {
XR_LOGE(
"Unexpected index record entry: Stream Id: {} Type: {} Size: {} Timestamp: {}",
record.getStreamId().getNumericName(),
streamId.getNumericName(),
toString(recordType),
record.recordSize,
record.timestamp);
recordSize,
timestamp);
return INDEX_RECORD_ERROR;
}
int64_t followingRecordOffset = outUsedFileSize + record.recordSize;
int64_t followingRecordOffset = outUsedFileSize + recordSize;
if (droppedRecordCount_ > 0 || followingRecordOffset > totalFileSize_) {
droppedRecordCount_++;
} else {
double timestamp = record.timestamp;
StreamId streamId = record.getStreamId();
index_.emplace_back(timestamp, outUsedFileSize, streamId, recordType);
if (index_.size() > 1 && index_.back() < index_[index_.size() - 2]) {
sortErrorCount_++;
}
if (diskIndex_) {
diskIndex_->emplace_back(timestamp, record.recordSize, streamId, recordType);
diskIndex_->emplace_back(timestamp, recordSize, streamId, recordType);
}
streamIds_.insert(streamId);
outUsedFileSize = followingRecordOffset;
@@ -731,17 +735,17 @@ int IndexRecord::Reader::rebuildIndex(bool writeFixedIndex) {
if (recordableTypeId != RecordableTypeId::VRSIndex &&
recordableTypeId != RecordableTypeId::VRSDescription) {
// We read/skipped that record: it's "good", as far as we can tell. Add it to the index!
double timestamp = recordHeader->timestamp;
StreamId streamId{recordHeader->getStreamId()};
Record::Type recordType = recordHeader->getRecordType();
if (isValid(recordType)) {
streamIds_.insert(streamId);
index_.emplace_back(recordHeader->timestamp, absolutePosition, streamId, recordType);
index_.emplace_back(timestamp, absolutePosition, streamId, recordType);
if (index_.size() > 1 && index_.back() < index_[index_.size() - 2]) {
sortErrorCount_++;
}
if (diskIndex_) {
diskIndex_->emplace_back(
recordHeader->timestamp, recordHeader->recordSize, streamId, recordType);
diskIndex_->emplace_back(timestamp, recordSize, streamId, recordType);
}
} else {
// We're probably in the weeds already
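For readers skimming the readClassicIndexRecord hunk above: the loop derives each record's absolute offset by accumulating record sizes from firstUserRecordOffset, and bails out when an entry would point past the end of the file. A condensed sketch of just that bookkeeping, with the validity checks, sort-order counting, and error reporting dropped, and the final offset update assumed (the hunk is cut off before it):

// Condensed sketch, not the full readClassicIndexRecord logic.
int64_t fileOffset = firstUserRecordOffset;
for (const DiskRecordInfo& record : recordStructs) {
  int64_t nextFileOffset = fileOffset + record.recordSize;
  if (nextFileOffset > totalFileSize) {
    break;  // truncated file: this entry and the rest point past the end
  }
  index.emplace_back(record.timestamp, fileOffset, record.getStreamId(), record.getRecordType());
  fileOffset = nextFileOffset;  // assumed: continues just past the shown hunk
}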
14 changes: 7 additions & 7 deletions vrs/IndexRecord.h
@@ -55,8 +55,8 @@ struct DiskStreamId {
explicit DiskStreamId(StreamId streamId)
: typeId(static_cast<int32_t>(streamId.getTypeId())), instanceId(streamId.getInstanceId()) {}

FileFormat::LittleEndian<int32_t> typeId;
FileFormat::LittleEndian<uint16_t> instanceId;
int32_t typeId;
uint16_t instanceId;

inline RecordableTypeId getTypeId() const {
return FileFormat::readRecordableTypeId(typeId);
@@ -85,13 +85,13 @@ struct DiskRecordInfo {
recordType(static_cast<uint8_t>(record->getRecordType())),
streamId(streamId) {}

FileFormat::LittleEndian<double> timestamp;
FileFormat::LittleEndian<uint32_t> recordSize;
FileFormat::LittleEndian<uint8_t> recordType;
DiskStreamId streamId;
double timestamp{};
uint32_t recordSize{};
uint8_t recordType{};
DiskStreamId streamId{};

inline Record::Type getRecordType() const {
return static_cast<Record::Type>(recordType());
return static_cast<Record::Type>(recordType);
}

inline StreamId getStreamId() const {