Added properties to control the position of tags when writing files

This commit is contained in:
Martchus 2015-11-07 15:23:36 +01:00
parent 6563a0c854
commit e7bd2185d5
9 changed files with 874 additions and 610 deletions

View File

@ -703,430 +703,497 @@ void MatroskaContainer::internalMakeFile()
addNotification(NotificationType::Critical, "No EBML elements could be found.", context);
throw InvalidDataException();
}
// prepare rewriting the file
updateStatus("Preparing for rewriting Matroska/EBML file ...");
fileInfo().close(); // ensure the file is close before renaming it
string backupPath;
fstream &outputStream = fileInfo().stream();
BinaryWriter outputWriter(&outputStream);
fstream backupStream; // create a stream to open the backup/original file
// check whether a rewrite is required
try {
BackupHelper::createBackupFile(fileInfo().path(), backupPath, backupStream);
// set backup stream as associated input stream since we need the original elements to write the new file
setStream(backupStream);
// recreate original file, define buffer variables
outputStream.open(fileInfo().path(), ios_base::out | ios_base::binary | ios_base::trunc);
// define needed variables
uint64 elementSize; // the size of the current element
uint64 clusterSize; // the new size the current cluster
uint64 clusterReadOffset; // the read offset of the current cluster
uint64 clusterReadSize; // the original size of the current cluster
vector<uint64> clusterSizes; // the sizes of the cluster elements
vector<uint64>::const_iterator clusterSizesIterator;
uint64 readOffset = 0; // the current read offset to calculate positions
uint64 currentOffset = 0; // the current write offset to calculate positions
uint64 offset; // offset of the segment which is currently written, offset of "Cues"-element in segment
bool cuesPresent; // whether the "Cues"-element is present in the current segment
vector<tuple<uint64, uint64> > crc32Offsets; // holds the offsets of all CRC-32 elements and the length of the enclosing block
bool elementHasCrc32; // whether the current segment has a CRC-32 element
byte sizeLength; // size length used to make size denotations
char buff[8]; // buffer used to make size denotations
// calculate EBML header size
updateStatus("Writing EBML header ...");
elementSize = 2 * 7; // sub element ID sizes
for(auto headerValue : initializer_list<uint64>{m_version, m_readVersion, m_maxIdLength, m_maxSizeLength, m_doctypeVersion, m_doctypeReadVersion}) {
elementSize += sizeLength = EbmlElement::calculateUIntegerLength(headerValue);
elementSize += EbmlElement::calculateSizeDenotationLength(sizeLength);
}
elementSize += m_doctype.size();
elementSize += EbmlElement::calculateSizeDenotationLength(m_doctype.size());
// write EBML header
outputWriter.writeUInt32BE(EbmlIds::Header);
sizeLength = EbmlElement::makeSizeDenotation(elementSize, buff);
outputStream.write(buff, sizeLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::Version, m_version);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::ReadVersion, m_readVersion);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::MaxIdLength, m_maxIdLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::MaxSizeLength, m_maxSizeLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocType, m_doctype);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocTypeVersion, m_doctypeVersion);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocTypeReadVersion, m_doctypeReadVersion);
// write segments
EbmlElement *level1Element, *level2Element;
uint64 segmentInfoElementDataSize;
MatroskaSeekInfo seekInfo;
MatroskaCuePositionUpdater cuesUpdater;
// calculate size of tags
vector<MatroskaTagMaker> tagMaker;
uint64 tagElementsSize, tagsSize;
uint64 tagElementsSize = 0;
for(auto &tag : tags()) {
tag->invalidateNotifications();
try {
tagMaker.emplace_back(tag->prepareMaking());
if(tagMaker.back().requiredSize() > 3) {
// a tag of 3 bytes size is empty and can be skipped
tagElementsSize += tagMaker.back().requiredSize();
}
} catch(Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*tag);
}
uint64 tagsSize = tagElementsSize ? 4 + EbmlElement::calculateSizeDenotationLength(tagElementsSize) + tagElementsSize : 0;
// calculate size of attachments
vector<MatroskaAttachmentMaker> attachmentMaker;
uint64 attachedFileElementsSize, attachmentsSize;
unsigned int segmentIndex = 0;
unsigned int index;
uint64 attachedFileElementsSize = 0;
for(auto &attachment : m_attachments) {
if(!attachment->isIgnored()) {
attachment->invalidateNotifications();
try {
attachmentMaker.emplace_back(attachment->prepareMaking());
if(attachmentMaker.back().requiredSize() > 3) {
// an attachment of 3 bytes size is empty and can be skipped
attachedFileElementsSize += attachmentMaker.back().requiredSize();
}
} catch(Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*attachment);
}
}
uint64 attachmentsSize = attachedFileElementsSize ? 4 + EbmlElement::calculateSizeDenotationLength(attachedFileElementsSize) + attachedFileElementsSize : 0;
// check the number of segments to be written
unsigned int lastSegmentIndex = static_cast<unsigned int>(-1);
for(; level0Element; level0Element = level0Element->nextSibling()) {
level0Element->parse();
if(level0Element->id() == MatroskaIds::Segment) {
++lastSegmentIndex;
}
}
// prepare rewriting the file
updateStatus("Preparing for rewriting Matroska/EBML file ...");
fileInfo().close(); // ensure the file is close before renaming it
string backupPath;
fstream &outputStream = fileInfo().stream();
BinaryWriter outputWriter(&outputStream);
fstream backupStream; // create a stream to open the backup/original file
try {
for(; level0Element; level0Element = level0Element->nextSibling()) {
level0Element->parse();
switch(level0Element->id()) {
case EbmlIds::Header:
break; // header is already written; skip header here
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Segment:
// write "Segment" element
updateStatus("Prepare writing segment ...", 0.0);
// prepare writing tags
// ensure seek info contains no old entries
seekInfo.clear();
// calculate size of tags
tagElementsSize = 0;
for(auto &tag : tags()) {
tag->invalidateNotifications();
try {
tagMaker.emplace_back(tag->prepareMaking());
if(tagMaker.back().requiredSize() > 3) {
// a tag of 3 bytes size is empty and can be skipped
tagElementsSize += tagMaker.back().requiredSize();
}
} catch(Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*tag);
}
tagsSize = tagElementsSize ? 4 + EbmlElement::calculateSizeDenotationLength(tagElementsSize) + tagElementsSize : 0;
// calculate size of attachments
attachedFileElementsSize = 0;
for(auto &attachment : m_attachments) {
if(!attachment->isIgnored()) {
attachment->invalidateNotifications();
BackupHelper::createBackupFile(fileInfo().path(), backupPath, backupStream);
// set backup stream as associated input stream since we need the original elements to write the new file
setStream(backupStream);
// recreate original file, define buffer variables
outputStream.open(fileInfo().path(), ios_base::out | ios_base::binary | ios_base::trunc);
// define needed variables
uint64 elementSize; // the size of the current element
uint64 clusterSize; // the new size the current cluster
uint64 clusterReadOffset; // the read offset of the current cluster
uint64 clusterReadSize; // the original size of the current cluster
vector<uint64> clusterSizes; // the sizes of the cluster elements
vector<uint64>::const_iterator clusterSizesIterator;
uint64 readOffset = 0; // the current read offset to calculate positions
uint64 currentOffset = 0; // the current write offset to calculate positions
uint64 offset; // offset of the segment which is currently written, offset of "Cues"-element in segment
bool cuesPresent; // whether the "Cues"-element is present in the current segment
vector<tuple<uint64, uint64> > crc32Offsets; // holds the offsets of all CRC-32 elements and the length of the enclosing block
bool elementHasCrc32; // whether the current segment has a CRC-32 element
byte sizeLength; // size length used to make size denotations
char buff[8]; // buffer used to make size denotations
// calculate EBML header size
updateStatus("Writing EBML header ...");
elementSize = 2 * 7; // sub element ID sizes
for(auto headerValue : initializer_list<uint64>{m_version, m_readVersion, m_maxIdLength, m_maxSizeLength, m_doctypeVersion, m_doctypeReadVersion}) {
elementSize += sizeLength = EbmlElement::calculateUIntegerLength(headerValue);
elementSize += EbmlElement::calculateSizeDenotationLength(sizeLength);
}
elementSize += m_doctype.size();
elementSize += EbmlElement::calculateSizeDenotationLength(m_doctype.size());
// write EBML header
outputWriter.writeUInt32BE(EbmlIds::Header);
sizeLength = EbmlElement::makeSizeDenotation(elementSize, buff);
outputStream.write(buff, sizeLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::Version, m_version);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::ReadVersion, m_readVersion);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::MaxIdLength, m_maxIdLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::MaxSizeLength, m_maxSizeLength);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocType, m_doctype);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocTypeVersion, m_doctypeVersion);
EbmlElement::makeSimpleElement(outputStream, EbmlIds::DocTypeReadVersion, m_doctypeReadVersion);
// write segments
EbmlElement *level1Element, *level2Element;
uint64 segmentInfoElementDataSize;
MatroskaSeekInfo seekInfo;
MatroskaCuePositionUpdater cuesUpdater;
unsigned int segmentIndex = 0;
unsigned int index;
try {
for(level0Element = firstElement(); level0Element; level0Element = level0Element->nextSibling()) {
switch(level0Element->id()) {
case EbmlIds::Header:
break; // header is already written; skip header here
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Segment:
// write "Segment" element
updateStatus("Prepare writing segment ...", 0.0);
// prepare writing tags
// ensure seek info contains no old entries
seekInfo.clear();
// parse cues
cuesUpdater.invalidateNotifications();
if((level1Element = level0Element->childById(MatroskaIds::Cues))) {
cuesPresent = true;
try {
attachmentMaker.emplace_back(attachment->prepareMaking());
if(attachmentMaker.back().requiredSize() > 3) {
// an attachment of 3 bytes size is empty and can be skipped
attachedFileElementsSize += attachmentMaker.back().requiredSize();
}
cuesUpdater.parse(level1Element);
} catch(Failure &) {
// nothing to do because notifications will be added anyways
addNotifications(cuesUpdater);
throw;
}
addNotifications(*attachment);
}
}
attachmentsSize = attachedFileElementsSize ? 4 + EbmlElement::calculateSizeDenotationLength(attachedFileElementsSize) + attachedFileElementsSize : 0;
// parse cues
cuesUpdater.invalidateNotifications();
if((level1Element = level0Element->childById(MatroskaIds::Cues))) {
cuesPresent = true;
try {
cuesUpdater.parse(level1Element);
} catch(Failure &) {
addNotifications(cuesUpdater);
throw;
}
addNotifications(cuesUpdater);
} else {
cuesPresent = false;
}
// check whether the segment has a CRC-32 element
elementHasCrc32 = level0Element->firstChild() && level0Element->firstChild()->id() == EbmlIds::Crc32;
// calculate segment size
calculateSegmentSize:
// CRC-32 element is 6 byte long
elementSize = elementHasCrc32 ? 6 : 0;
// calculate size of "SeekHead"-element
elementSize += seekInfo.actualSize();
// pretend writing elements to find out the offsets and the total segment size
// pretend writing "SegmentInfo"-element
for(level1Element = level0Element->childById(MatroskaIds::SegmentInfo), index = 0; level1Element; level1Element = level1Element->siblingById(MatroskaIds::SegmentInfo), ++index) {
// update offset in "SeekHead"-element
if(seekInfo.push(index, MatroskaIds::SegmentInfo, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "SegmentInfo"-element
// -> size of "MuxingApp"- and "WritingApp"-element
segmentInfoElementDataSize = 2 * appInfoElementTotalSize;
// -> add size of "Title"-element
if(segmentIndex < m_titles.size()) {
const auto &title = m_titles[segmentIndex];
if(!title.empty()) {
segmentInfoElementDataSize += 2 + EbmlElement::calculateSizeDenotationLength(title.size()) + title.size();
cuesPresent = false;
}
// check whether the segment has a CRC-32 element
elementHasCrc32 = level0Element->firstChild() && level0Element->firstChild()->id() == EbmlIds::Crc32;
// calculate segment size
calculateSegmentSize:
// CRC-32 element is 6 byte long
elementSize = elementHasCrc32 ? 6 : 0;
// calculate size of "SeekHead"-element
elementSize += seekInfo.actualSize();
// pretend writing elements to find out the offsets and the total segment size
// pretend writing "SegmentInfo"-element
for(level1Element = level0Element->childById(MatroskaIds::SegmentInfo), index = 0; level1Element; level1Element = level1Element->siblingById(MatroskaIds::SegmentInfo), ++index) {
// update offset in "SeekHead"-element
if(seekInfo.push(index, MatroskaIds::SegmentInfo, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "SegmentInfo"-element
// -> size of "MuxingApp"- and "WritingApp"-element
segmentInfoElementDataSize = 2 * appInfoElementTotalSize;
// -> add size of "Title"-element
if(segmentIndex < m_titles.size()) {
const auto &title = m_titles[segmentIndex];
if(!title.empty()) {
segmentInfoElementDataSize += 2 + EbmlElement::calculateSizeDenotationLength(title.size()) + title.size();
}
}
// -> add size of other childs
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
level2Element->parse();
switch(level2Element->id()) {
case EbmlIds::Void: // skipped
case EbmlIds::Crc32: // skipped
case MatroskaIds::Title: // calculated separately
case MatroskaIds::MuxingApp: // calculated separately
case MatroskaIds::WrittingApp: // calculated separately
break;
default:
segmentInfoElementDataSize += level2Element->totalSize();
}
}
// -> calculate total size
elementSize += 4 + EbmlElement::calculateSizeDenotationLength(segmentInfoElementDataSize) + segmentInfoElementDataSize;
}
}
// pretend writing "Tracks"- and "Chapters"-element
for(const auto id : initializer_list<EbmlElement::identifierType>{MatroskaIds::Tracks, MatroskaIds::Chapters}) {
for(level1Element = level0Element->childById(id), index = 0; level1Element; level1Element = level1Element->siblingById(id), ++index) {
// update offset in "SeekHead"-element
if(seekInfo.push(index, id, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of element
elementSize += level1Element->totalSize();
}
}
// -> add size of other childs
}
// all "Tags"- and "Attachments"-elements are written in either the first or the last segment
// and either before "Cues"- and "Cluster"-elements or after these elements
// depending on the desired tag position (at the front/at the end)
if(fileInfo().tagPosition() == TagPosition::BeforeData && segmentIndex == 0) {
// pretend writing "Tags"-element
if(tagsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Tags, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Tags"-element
elementSize += tagsSize;
}
}
// pretend writing "Attachments"-element
if(attachmentsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Attachments, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Attachments"-element
elementSize += attachmentsSize;
}
}
}
// pretend writing "Cues"-element
if(cuesPresent) {
offset = elementSize; // save current offset
// update offset of "Cues"-element in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Cues, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Cues"-element
addCuesElementSize:
elementSize += cuesUpdater.totalSize();
}
}
// pretend writing "Cluster"-element
clusterSizes.clear();
for(level1Element = level0Element->childById(MatroskaIds::Cluster), index = 0; level1Element; level1Element = level1Element->siblingById(MatroskaIds::Cluster), ++index) {
// update offset of "Cluster"-element in "Cues"-element
//if(cuesPresent && cuesUpdater.updatePositions(currentOffset + level1Element->startOffset() - level0Element->dataOffset(), elementSize)) {
clusterReadOffset = level1Element->startOffset() - level0Element->dataOffset() + readOffset;
if(cuesPresent && cuesUpdater.updateOffsets(clusterReadOffset, currentOffset + elementSize)) {
elementSize = offset; // reset element size to previously saved offset of "Cues"-element
goto addCuesElementSize;
} else {
if(index == 0 && seekInfo.push(index, MatroskaIds::Cluster, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Cluster"-element
clusterSize = 0;
clusterReadSize = 0;
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
level2Element->parse();
if(cuesPresent && cuesUpdater.updateRelativeOffsets(clusterReadOffset, clusterReadSize, clusterSize)) {
elementSize = offset;
goto addCuesElementSize;
}
switch(level2Element->id()) {
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Position:
clusterSize += 1 + 1 + EbmlElement::calculateUIntegerLength(currentOffset + elementSize);
break;
default:
clusterSize += level2Element->totalSize();
}
clusterReadSize += level2Element->totalSize();
}
clusterSizes.push_back(clusterSize);
elementSize += 4 + EbmlElement::calculateSizeDenotationLength(clusterSize) + clusterSize;
}
}
}
if(fileInfo().tagPosition() == TagPosition::AfterData && segmentIndex == lastSegmentIndex) {
// pretend writing "Tags"-element
if(tagsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Tags, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Tags"-element
elementSize += tagsSize;
}
}
// pretend writing "Attachments"-element
if(attachmentsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Attachments, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Attachments"-element
elementSize += attachmentsSize;
}
}
}
// write "Segment"-element actually
updateStatus("Writing segment header ...");
outputWriter.writeUInt32BE(MatroskaIds::Segment);
sizeLength = EbmlElement::makeSizeDenotation(elementSize, buff);
outputStream.write(buff, sizeLength);
offset = outputStream.tellp(); // store segment data offset here
// write CRC-32 element ...
if(elementHasCrc32) {
// ... if the original element had a CRC-32 element
*buff = EbmlIds::Crc32;
*(buff + 1) = 0x84; // length denotation: 4 byte
// set the value after writing the element
crc32Offsets.emplace_back(outputStream.tellp(), elementSize);
outputStream.write(buff, 6);
}
// write "SeekHead"-element (except there is no seek information for the current segment)
seekInfo.invalidateNotifications();
seekInfo.make(outputStream);
addNotifications(seekInfo);
// write "SegmentInfo"-element
for(level1Element = level0Element->childById(MatroskaIds::SegmentInfo); level1Element; level1Element = level1Element->siblingById(MatroskaIds::SegmentInfo)) {
// -> write ID and size
outputWriter.writeUInt32BE(MatroskaIds::SegmentInfo);
sizeLength = EbmlElement::makeSizeDenotation(segmentInfoElementDataSize, buff);
outputStream.write(buff, sizeLength);
// -> write childs
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
level2Element->parse();
switch(level2Element->id()) {
case EbmlIds::Void: // skipped
case EbmlIds::Crc32: // skipped
case MatroskaIds::Title: // calculated separately
case MatroskaIds::MuxingApp: // calculated separately
case MatroskaIds::WrittingApp: // calculated separately
case MatroskaIds::Title: // written separately
case MatroskaIds::MuxingApp: // written separately
case MatroskaIds::WrittingApp: // written separately
break;
default:
segmentInfoElementDataSize += level2Element->totalSize();
level2Element->copyEntirely(outputStream);
}
}
// -> calculate total size
elementSize += 4 + EbmlElement::calculateSizeDenotationLength(segmentInfoElementDataSize) + segmentInfoElementDataSize;
}
}
// pretend writing "Tracks"- and "Chapters"-element
for(const auto id : initializer_list<EbmlElement::identifierType>{MatroskaIds::Tracks, MatroskaIds::Chapters}) {
for(level1Element = level0Element->childById(id), index = 0; level1Element; level1Element = level1Element->siblingById(id), ++index) {
// update offset in "SeekHead"-element
if(seekInfo.push(index, id, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of element
elementSize += level1Element->totalSize();
}
}
}
// pretend writing "Attachments"-element
if(attachmentsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Attachments, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Attachments"-element
elementSize += attachmentsSize;
}
}
// pretend writing "Tags"-element
if(tagsSize) {
// update offsets in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Tags, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Tags"-element
elementSize += tagsSize;
}
}
// pretend writing "Cues"-element
if(cuesPresent) {
offset = elementSize; // save current offset
// update offset of "Cues"-element in "SeekHead"-element
if(seekInfo.push(0, MatroskaIds::Cues, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Cues"-element
addCuesElementSize:
elementSize += cuesUpdater.totalSize();
}
}
// pretend writing "Cluster"-element
clusterSizes.clear();
for(level1Element = level0Element->childById(MatroskaIds::Cluster), index = 0; level1Element; level1Element = level1Element->siblingById(MatroskaIds::Cluster), ++index) {
// update offset of "Cluster"-element in "Cues"-element
//if(cuesPresent && cuesUpdater.updatePositions(currentOffset + level1Element->startOffset() - level0Element->dataOffset(), elementSize)) {
clusterReadOffset = level1Element->startOffset() - level0Element->dataOffset() + readOffset;
if(cuesPresent && cuesUpdater.updateOffsets(clusterReadOffset, currentOffset + elementSize)) {
elementSize = offset; // reset element size to previously saved offset of "Cues"-element
goto addCuesElementSize;
} else {
if(index == 0 && seekInfo.push(index, MatroskaIds::Cluster, currentOffset + elementSize)) {
goto calculateSegmentSize;
} else {
// add size of "Cluster"-element
clusterSize = 0;
clusterReadSize = 0;
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
level2Element->parse();
if(cuesPresent && cuesUpdater.updateRelativeOffsets(clusterReadOffset, clusterReadSize, clusterSize)) {
elementSize = offset;
goto addCuesElementSize;
}
switch(level2Element->id()) {
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Position:
clusterSize += 1 + 1 + EbmlElement::calculateUIntegerLength(currentOffset + elementSize);
break;
default:
clusterSize += level2Element->totalSize();
}
clusterReadSize += level2Element->totalSize();
// -> write "Title"-element
if(segmentIndex < m_titles.size()) {
const auto &title = m_titles[segmentIndex];
if(!title.empty()) {
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::Title, title);
}
clusterSizes.push_back(clusterSize);
elementSize += 4 + EbmlElement::calculateSizeDenotationLength(clusterSize) + clusterSize;
}
// -> write "MuxingApp"- and "WritingApp"-element
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::MuxingApp, appInfo, appInfoElementDataSize);
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::WrittingApp, appInfo, appInfoElementDataSize);
}
// write "Tracks"- and "Chapters"-element
for(const auto id : initializer_list<EbmlElement::identifierType>{MatroskaIds::Tracks, MatroskaIds::Chapters}) {
for(level1Element = level0Element->childById(id); level1Element; level1Element = level1Element->siblingById(id)) {
level1Element->copyEntirely(outputStream);
}
}
}
// write "Segment"-element actually
updateStatus("Writing segment header ...");
outputWriter.writeUInt32BE(MatroskaIds::Segment);
sizeLength = EbmlElement::makeSizeDenotation(elementSize, buff);
outputStream.write(buff, sizeLength);
offset = outputStream.tellp(); // store segment data offset here
// write CRC-32 element ...
if(elementHasCrc32) {
// ... if the original element had a CRC-32 element
*buff = EbmlIds::Crc32;
*(buff + 1) = 0x84; // length denotation: 4 byte
// set the value after writing the element
crc32Offsets.emplace_back(outputStream.tellp(), elementSize);
outputStream.write(buff, 6);
}
// write "SeekHead"-element (except there is no seek information for the current segment)
seekInfo.invalidateNotifications();
seekInfo.make(outputStream);
addNotifications(seekInfo);
// write "SegmentInfo"-element
for(level1Element = level0Element->childById(MatroskaIds::SegmentInfo); level1Element; level1Element = level1Element->siblingById(MatroskaIds::SegmentInfo)) {
// -> write ID and size
outputWriter.writeUInt32BE(MatroskaIds::SegmentInfo);
sizeLength = EbmlElement::makeSizeDenotation(segmentInfoElementDataSize, buff);
outputStream.write(buff, sizeLength);
// -> write childs
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
switch(level2Element->id()) {
case EbmlIds::Void: // skipped
case EbmlIds::Crc32: // skipped
case MatroskaIds::Title: // written separately
case MatroskaIds::MuxingApp: // written separately
case MatroskaIds::WrittingApp: // written separately
break;
default:
level2Element->copyEntirely(outputStream);
if(fileInfo().tagPosition() == TagPosition::BeforeData && segmentIndex == 0) {
// write "Tags"-element
if(tagsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Tags);
sizeLength = EbmlElement::makeSizeDenotation(tagElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : tagMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
// write "Attachments"-element
if(attachmentsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Attachments);
sizeLength = EbmlElement::makeSizeDenotation(attachedFileElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : attachmentMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
}
// -> write "Title"-element
if(segmentIndex < m_titles.size()) {
const auto &title = m_titles[segmentIndex];
if(!title.empty()) {
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::Title, title);
// write "Cues"-element
if(cuesPresent) {
try {
cuesUpdater.make(outputStream);
} catch(Failure &) {
addNotifications(cuesUpdater);
throw;
}
}
// -> write "MuxingApp"- and "WritingApp"-element
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::MuxingApp, appInfo, appInfoElementDataSize);
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::WrittingApp, appInfo, appInfoElementDataSize);
}
// write "Tracks"- and "Chapters"-element
for(const auto id : initializer_list<EbmlElement::identifierType>{MatroskaIds::Tracks, MatroskaIds::Chapters}) {
for(level1Element = level0Element->childById(id); level1Element; level1Element = level1Element->siblingById(id)) {
level1Element->copyEntirely(outputStream);
}
}
// write "Attachments"-element
if(attachmentsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Attachments);
sizeLength = EbmlElement::makeSizeDenotation(attachedFileElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : attachmentMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
// write "Tags"-element
if(tagsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Tags);
sizeLength = EbmlElement::makeSizeDenotation(tagElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : tagMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
// write "Cues"-element
if(cuesPresent) {
try {
cuesUpdater.make(outputStream);
} catch(Failure &) {
addNotifications(cuesUpdater);
throw;
}
}
// update status, check whether the operation has been aborted
if(isAborted()) {
throw OperationAbortedException();
} else {
addNotifications(cuesUpdater);
updateStatus("Writing segment data ...", static_cast<double>(static_cast<uint64>(outputStream.tellp()) - offset) / elementSize);
}
// write "Cluster"-element
for(level1Element = level0Element->childById(MatroskaIds::Cluster), clusterSizesIterator = clusterSizes.cbegin();
level1Element; level1Element = level1Element->siblingById(MatroskaIds::Cluster), ++clusterSizesIterator) {
// calculate position of cluster in segment
clusterSize = currentOffset + (static_cast<uint64>(outputStream.tellp()) - offset);
// write header; checking whether clusterSizesIterator is valid shouldn't be necessary
outputWriter.writeUInt32BE(MatroskaIds::Cluster);
sizeLength = EbmlElement::makeSizeDenotation(*clusterSizesIterator, buff);
outputStream.write(buff, sizeLength);
// write childs
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
switch(level2Element->id()) {
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Position:
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::Position, clusterSize);
break;
default:
level2Element->copyEntirely(outputStream);
}
}
// update percentage, check whether the operation has been aborted
// update status, check whether the operation has been aborted
if(isAborted()) {
throw OperationAbortedException();
} else {
updatePercentage(static_cast<double>(static_cast<uint64>(outputStream.tellp()) - offset) / elementSize);
addNotifications(cuesUpdater);
updateStatus("Writing segment data ...", static_cast<double>(static_cast<uint64>(outputStream.tellp()) - offset) / elementSize);
}
// write "Cluster"-element
for(level1Element = level0Element->childById(MatroskaIds::Cluster), clusterSizesIterator = clusterSizes.cbegin();
level1Element; level1Element = level1Element->siblingById(MatroskaIds::Cluster), ++clusterSizesIterator) {
// calculate position of cluster in segment
clusterSize = currentOffset + (static_cast<uint64>(outputStream.tellp()) - offset);
// write header; checking whether clusterSizesIterator is valid shouldn't be necessary
outputWriter.writeUInt32BE(MatroskaIds::Cluster);
sizeLength = EbmlElement::makeSizeDenotation(*clusterSizesIterator, buff);
outputStream.write(buff, sizeLength);
// write childs
for(level2Element = level1Element->firstChild(); level2Element; level2Element = level2Element->nextSibling()) {
switch(level2Element->id()) {
case EbmlIds::Void:
case EbmlIds::Crc32:
break;
case MatroskaIds::Position:
EbmlElement::makeSimpleElement(outputStream, MatroskaIds::Position, clusterSize);
break;
default:
level2Element->copyEntirely(outputStream);
}
}
// update percentage, check whether the operation has been aborted
if(isAborted()) {
throw OperationAbortedException();
} else {
updatePercentage(static_cast<double>(static_cast<uint64>(outputStream.tellp()) - offset) / elementSize);
}
}
if(fileInfo().tagPosition() == TagPosition::AfterData && segmentIndex == lastSegmentIndex) {
// write "Tags"-element
if(tagsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Tags);
sizeLength = EbmlElement::makeSizeDenotation(tagElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : tagMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
// write "Attachments"-element
if(attachmentsSize) {
outputWriter.writeUInt32BE(MatroskaIds::Attachments);
sizeLength = EbmlElement::makeSizeDenotation(attachedFileElementsSize, buff);
outputStream.write(buff, sizeLength);
for(auto &maker : attachmentMaker) {
maker.make(outputStream);
}
// no need to add notifications; this has been done when creating the make
}
}
++segmentIndex; // increase the current segment index
currentOffset += 4 + sizeLength + elementSize; // increase current write offset by the size of the segment which has just been written
readOffset = level0Element->totalSize(); // increase the read offset by the size of the segment read from the orignial file
break;
default:
// just copy any unknown top-level elements
addNotification(NotificationType::Warning, "The top-level element \"" + level0Element->idToString() + "\" of the original file is unknown and will just be copied.", context);
level0Element->copyEntirely(outputStream);
}
++segmentIndex; // increase the current segment index
currentOffset += 4 + sizeLength + elementSize; // increase current write offset by the size of the segment which has just been written
readOffset = level0Element->totalSize(); // increase the read offset by the size of the segment read from the orignial file
break;
default:
// just copy any unknown top-level elements
addNotification(NotificationType::Warning, "The top-level element \"" + level0Element->idToString() + "\" of the original file is unknown and will just be copied.", context);
level0Element->copyEntirely(outputStream);
}
}
} catch(Failure &) {
addNotifications(cuesUpdater);
addNotification(NotificationType::Critical, "Unable to parse content in top-level element at " + numberToString(level0Element->startOffset()) + " of original file.", context);
throw;
}
// reparse what is written so far
updateStatus("Reparsing output file ...");
outputStream.close(); // the outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
reset();
try {
parseHeader();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to reparse the header of the new file.", context);
throw;
}
// update CRC-32 checksums
if(!crc32Offsets.empty()) {
updateStatus("Updating CRC-32 checksums ...");
for(const auto &crc32Offset : crc32Offsets) {
outputStream.seekg(get<0>(crc32Offset) + 6);
outputStream.seekp(get<0>(crc32Offset) + 2);
writer().writeUInt32LE(reader().readCrc32(get<1>(crc32Offset) - 6));
} catch(Failure &) {
addNotifications(cuesUpdater);
addNotification(NotificationType::Critical, "Unable to parse content in top-level element at " + numberToString(level0Element->startOffset()) + " of original file.", context);
throw;
}
// reparse what is written so far
updateStatus("Reparsing output file ...");
outputStream.close(); // the outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
reset();
try {
parseHeader();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to reparse the header of the new file.", context);
throw;
}
// update CRC-32 checksums
if(!crc32Offsets.empty()) {
updateStatus("Updating CRC-32 checksums ...");
for(const auto &crc32Offset : crc32Offsets) {
outputStream.seekg(get<0>(crc32Offset) + 6);
outputStream.seekp(get<0>(crc32Offset) + 2);
writer().writeUInt32LE(reader().readCrc32(get<1>(crc32Offset) - 6));
}
}
updatePercentage(100.0);
// flush output stream
outputStream.flush();
// handle errors which occured after renaming/creating backup file
} catch(OperationAbortedException &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Information, "Rewriting the file to apply changed tag information has been aborted.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
throw;
} catch(Failure &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Critical, "Rewriting the file to apply changed tag information failed.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
throw;
} catch(ios_base::failure &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Critical, "An IO error occured when rewriting the file to apply changed tag information.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
throw;
}
updatePercentage(100.0);
// flush output stream
outputStream.flush();
} catch(OperationAbortedException &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Information, "Rewriting the file to apply changed tag information has been aborted.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
throw;
// handle errors which occured before renaming/creating backup file
} catch(Failure &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Critical, "Rewriting the file to apply changed tag information failed.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
addNotification(NotificationType::Critical, "Parsing the original file failed.", context);
throw;
} catch(ios_base::failure &) {
setStream(outputStream);
reset();
addNotification(NotificationType::Critical, "An IO error occured when rewriting the file to apply changed tag information.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
addNotification(NotificationType::Critical, "An IO error occured when parsing the original file.", context);
throw;
}
}

View File

@ -53,13 +53,12 @@ namespace Media {
/*!
* \class Media::MediaFileInfo
* \brief The MediaFileInfo class extends the BasicFileInfo class.
* \brief The MediaFileInfo class allows to read and write tag information providing
* a container/tag format independent interface.
*
* The MediaFileInfo class allows to read and edit meta information
* of MP3 files with ID3 tag and MP4 files with iTunes tag. It also
* provides some technical information about these types of files such
* as contained streams.
* A bunch of other file types (see Media::ContainerFormat) can be recognized.
* It also provides some technical information such as contained streams.
*
* For examples see "cli/mainfeatures.cpp" of the tageditor repository.
*/
/*!
@ -74,7 +73,11 @@ MediaFileInfo::MediaFileInfo() :
m_tagsParsingStatus(ParsingStatus::NotParsedYet),
m_chaptersParsingStatus(ParsingStatus::NotParsedYet),
m_attachmentsParsingStatus(ParsingStatus::NotParsedYet),
m_forceFullParse(MEDIAINFO_CPP_FORCE_FULL_PARSE)
m_forceFullParse(MEDIAINFO_CPP_FORCE_FULL_PARSE),
m_minPadding(0),
m_maxPadding(0),
m_tagPosition(TagPosition::BeforeData),
m_forceTagPosition(true)
{}
/*!
@ -92,7 +95,11 @@ MediaFileInfo::MediaFileInfo(const string &path) :
m_tagsParsingStatus(ParsingStatus::NotParsedYet),
m_chaptersParsingStatus(ParsingStatus::NotParsedYet),
m_attachmentsParsingStatus(ParsingStatus::NotParsedYet),
m_forceFullParse(MEDIAINFO_CPP_FORCE_FULL_PARSE)
m_forceFullParse(MEDIAINFO_CPP_FORCE_FULL_PARSE),
m_minPadding(0),
m_maxPadding(0),
m_tagPosition(TagPosition::BeforeData),
m_forceTagPosition(true)
{}
/*!
@ -181,7 +188,7 @@ startParsingSignature:
addNotifications(notifications);
break;
} case ContainerFormat::Ebml: {
unique_ptr<MatroskaContainer> container = make_unique<MatroskaContainer>(*this, m_containerOffset);
auto container = make_unique<MatroskaContainer>(*this, m_containerOffset);
NotificationList notifications;
try {
container->parseHeader();

View File

@ -40,6 +40,12 @@ enum class TagUsage
Never /**< tags of the type are never used; a possibly existing tag of the type is removed */
};
/*!
 * \brief The TagPosition enum specifies where the tag information is written in the
 *        output file when applying changes.
 * \sa MediaFileInfo::tagPosition(), MediaFileInfo::setTagPosition()
 */
enum class TagPosition
{
    BeforeData, /**< the tag information is written before the actual media data */
    AfterData /**< the tag information is written after the actual media data */
};
/*!
* \brief The ParsingStatus enum specifies whether a certain part of the file (tracks, tags, ...) has
* been parsed yet and if what the parsing result is.
@ -135,6 +141,14 @@ public:
// methods to get, set object behaviour
bool isForcingFullParse() const;
void setForceFullParse(bool forceFullParse);
size_t minPadding() const;
void setMinPadding(size_t minPadding);
size_t maxPadding() const;
void setMaxPadding(size_t maxPadding);
TagPosition tagPosition() const;
void setTagPosition(TagPosition tagPosition);
bool forceTagPosition() const;
void setForceTagPosition(bool forceTagPosition);
protected:
virtual void invalidated();
@ -164,6 +178,10 @@ private:
ParsingStatus m_attachmentsParsingStatus;
// fields specifying object behaviour
bool m_forceFullParse;
size_t m_minPadding;
size_t m_maxPadding;
TagPosition m_tagPosition;
bool m_forceTagPosition;
};
/*!
@ -362,6 +380,99 @@ inline void MediaFileInfo::setForceFullParse(bool forceFullParse)
m_forceFullParse = forceFullParse;
}
/*!
 * \brief Returns the minimum padding to be written before the data blocks when applying changes.
 *
 * Padding in front of the file allows adding additional fields afterwards without needing
 * to rewrite the entire file or to put tag information at the end of the file.
 *
 * \sa maxPadding()
 * \sa tagPosition()
 * \sa setMinPadding()
 */
inline size_t MediaFileInfo::minPadding() const
{
    return m_minPadding;
}
/*!
 * \brief Sets the minimum padding to be written before the data blocks when applying changes.
 * \remarks This value might be ignored if not supported by the container/tag format or the corresponding implementation.
 * \sa minPadding()
 * \sa maxPadding(), setMaxPadding()
 */
inline void MediaFileInfo::setMinPadding(size_t minPadding)
{
    m_minPadding = minPadding;
}
/*!
 * \brief Returns the maximum padding to be written before the data blocks when applying changes.
 *
 * Padding in front of the file allows adding additional fields afterwards without needing
 * to rewrite the entire file or to put tag information at the end of the file.
 *
 * \sa minPadding()
 * \sa tagPosition()
 * \sa setMaxPadding()
 */
inline size_t MediaFileInfo::maxPadding() const
{
    return m_maxPadding;
}
/*!
 * \brief Sets the maximum padding to be written before the data blocks when applying changes.
 * \remarks This value might be ignored if not supported by the container/tag format or the corresponding implementation.
 * \sa maxPadding()
 * \sa minPadding(), setMinPadding()
 */
inline void MediaFileInfo::setMaxPadding(size_t maxPadding)
{
    m_maxPadding = maxPadding;
}
/*!
 * \brief Returns the position (in the output file) where the tag information is written when applying changes.
 * \sa setTagPosition()
 * \sa forceTagPosition()
 */
inline TagPosition MediaFileInfo::tagPosition() const
{
    return m_tagPosition;
}
/*!
 * \brief Sets the position (in the output file) where the tag information is written when applying changes.
 *
 * \remarks
 * - If putting the tags at another position would prevent rewriting the entire file the specified position
 *   might not be used if forceTagPosition() is false.
 * - However if the specified position is not supported by the container/tag format or by the implementation
 *   for the format it is ignored (even if forceTagPosition() is true).
 * \sa tagPosition()
 */
inline void MediaFileInfo::setTagPosition(TagPosition tagPosition)
{
    m_tagPosition = tagPosition;
}
/*!
 * \brief Returns whether tagPosition() is forced.
 * \sa setForceTagPosition()
 * \sa tagPosition(), setTagPosition()
 */
inline bool MediaFileInfo::forceTagPosition() const
{
    return m_forceTagPosition;
}
/*!
 * \brief Sets whether tagPosition() is forced.
 * \sa forceTagPosition()
 * \sa tagPosition(), setTagPosition()
 */
inline void MediaFileInfo::setForceTagPosition(bool forceTagPosition)
{
    m_forceTagPosition = forceTagPosition;
}
}
#endif // MEDIAINFO_H

View File

@ -78,7 +78,7 @@ void Mp4Atom::internalParse()
}
m_id = reader().readUInt32BE();
m_idLength = 4;
if(dataSize() == 1) { // atom denotes 64-bit size
if(m_dataSize == 1) { // atom denotes 64-bit size
m_dataSize = reader().readUInt64BE();
m_sizeLength = 12; // 4 bytes indicate long size denotation + 8 bytes for actual size denotation
if(dataSize() < 16 && m_dataSize != 1) {
@ -116,24 +116,37 @@ void Mp4Atom::internalParse()
* \brief This function helps to write the atom size after writing an atom to a stream.
* \param stream Specifies the stream.
* \param startOffset Specifies the start offset of the atom.
* \param denote64BitSize Specifies whether the atom denotes its size with a 64-bit unsigned integer.
*
* This function seeks back to the start offset and writes the difference between the
* previous offset and the start offset as 32-bit unsigned integer to the \a stream.
* Then it seeks back to the previous offset.
*/
void Mp4Atom::seekBackAndWriteAtomSize(std::ostream &stream, const ostream::pos_type &startOffset, bool denote64BitSize)
void Mp4Atom::seekBackAndWriteAtomSize(std::ostream &stream, const ostream::pos_type &startOffset)
{
ostream::pos_type currentOffset = stream.tellp();
stream.seekp(startOffset);
BinaryWriter writer(&stream);
if(denote64BitSize) {
writer.writeUInt32BE(0);
stream.seekp(4, ios_base::cur);
writer.writeUInt64BE(currentOffset - startOffset);
} else {
writer.writeUInt32BE(currentOffset - startOffset);
}
writer.writeUInt32BE(currentOffset - startOffset);
stream.seekp(currentOffset);
}
/*!
 * \brief This function helps to write the atom size after writing an atom to a stream.
 * \param stream Specifies the stream.
 * \param startOffset Specifies the start offset of the atom.
 *
 * This function seeks back to the start offset and writes the difference between the
 * previous offset and the start offset as 64-bit unsigned integer to the \a stream.
 * Then it seeks back to the previous offset.
 */
void Mp4Atom::seekBackAndWriteAtomSize64(std::ostream &stream, const ostream::pos_type &startOffset)
{
    BinaryWriter writer(&stream);
    const ostream::pos_type endOffset = stream.tellp();
    // seek back to the beginning of the atom
    stream.seekp(startOffset);
    writer.writeUInt32BE(1); // a 32-bit size of 1 denotes that the actual size follows as 64-bit integer
    stream.seekp(4, ios_base::cur); // skip the 4-byte atom ID
    writer.writeUInt64BE(endOffset - startOffset);
    // continue at the previous write position
    stream.seekp(endOffset);
}

View File

@ -66,7 +66,8 @@ public:
bool isPadding() const;
uint64 firstChildOffset() const;
static void seekBackAndWriteAtomSize(std::ostream &stream, const std::ostream::pos_type &startOffset, bool denote64BitSize = false);
static void seekBackAndWriteAtomSize(std::ostream &stream, const std::ostream::pos_type &startOffset);
static void seekBackAndWriteAtomSize64(std::ostream &stream, const std::ostream::pos_type &startOffset);
protected:
Mp4Atom(containerType& container, uint64 startOffset, uint64 maxSize);

View File

@ -211,17 +211,17 @@ void Mp4Container::internalMakeFile()
setStream(backupStream);
// recreate original file
outputStream.open(fileInfo().path(), ios_base::out | ios_base::binary | ios_base::trunc);
// collect needed atoms
// collect needed atoms from the original file
Mp4Atom *ftypAtom, *pdinAtom, *moovAtom;
try {
ftypAtom = firstElement()->siblingById(Mp4AtomIds::FileType, true); // mandatory
if(!ftypAtom) { // throw error if missing
addNotification(NotificationType::Critical, "Mandatory ftyp atom not found.", context);
addNotification(NotificationType::Critical, "Mandatory \"ftyp\"-atom not found.", context);
}
pdinAtom = firstElement()->siblingById(Mp4AtomIds::ProgressiveDownloadInformation, true); // not mandatory
moovAtom = firstElement()->siblingById(Mp4AtomIds::Movie, true); // mandatory
if(!moovAtom) { // throw error if missing
addNotification(NotificationType::Critical, "Mandatory moov atom not found.", context);
addNotification(NotificationType::Critical, "Mandatory \"moov\"-atom not found.", context);
throw InvalidDataException();
}
} catch (Failure &) {
@ -231,169 +231,212 @@ void Mp4Container::internalMakeFile()
if(m_tags.size() > 1) {
addNotification(NotificationType::Warning, "There are multiple MP4-tags assigned. Only the first one will be attached to the file.", context);
}
// write all top-level atoms, header boxes be placed first
// write all top-level atoms
updateStatus("Writing header ...");
// write "ftype"-atom
ftypAtom->copyEntirely(outputStream);
// write "pdin"-atom ("progressive download information")
if(pdinAtom) {
pdinAtom->copyEntirely(outputStream);
}
ostream::pos_type newMoovOffset = outputStream.tellp();
Mp4Atom *udtaAtom = nullptr;
uint64 newUdtaOffset = 0u;
if(isAborted()) {
throw OperationAbortedException();
}
moovAtom->copyWithoutChilds(outputStream);
for(Mp4Atom *moovChildAtom = moovAtom->firstChild(); moovChildAtom; moovChildAtom = moovChildAtom->nextSibling()) { // write child atoms manually, because the child udta has to be altered/ignored
try {
moovChildAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse childs of moov atom of original file.", context);
throw InvalidDataException();
}
if(moovChildAtom->id() == Mp4AtomIds::UserData) {
// found a udta (user data) atom which childs hold tag infromation
if(!udtaAtom) {
udtaAtom = moovChildAtom;
// check if the udta atom needs to be written
bool writeUdtaAtom = !m_tags.empty(); // it has to be written only when a MP4 tag is assigned
if(!writeUdtaAtom) { // or when there is at least one child except the meta atom in the original file
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
if(udtaChildAtom->id() != Mp4AtomIds::Meta) {
writeUdtaAtom = true;
break;
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of udta atom of original file. These invalid/unknown atoms will be ignored.", context);
}
}
if(writeUdtaAtom) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp(); // save offset
udtaAtom->copyHeader(outputStream); // and write header
// write meta atom if there's a tag assigned
if(!m_tags.empty()) {
try {
m_tags.front()->make(outputStream);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
}
addNotifications(*m_tags.front());
}
// write rest of the child atoms of udta atom
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
if(udtaChildAtom->id() != Mp4AtomIds::Meta) { // skip meta atoms here of course
udtaChildAtom->copyEntirely(outputStream);
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of udta atom of original file. These will be ignored.", context);
}
// write correct size of udta atom
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
}
} else {
addNotification(NotificationType::Warning, "The source file has multiple udta atoms. Surplus atoms will be ignored.", context);
}
} else if(!writeChunkByChunk || moovChildAtom->id() != Mp4AtomIds::Track) {
// copy trak atoms only when not writing the data chunk-by-chunk
moovChildAtom->copyEntirely(outputStream);
}
}
// the original file has no udta atom but there is tag information to be written
if(!udtaAtom && !m_tags.empty()) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp();
// write udta atom
outputWriter.writeUInt32BE(0); // the size will be written later
outputWriter.writeUInt32BE(Mp4AtomIds::UserData);
// write tags
try {
m_tags.front()->make(outputStream);
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
outputStream.seekp(-8, ios_base::cur);
}
}
// write trak atoms for each currently assigned track, this is only required when writing data chunk-by-chunk
if(writeChunkByChunk) {
updateStatus("Writing meta information for the tracks ...");
for(auto &track : tracks()) {
if(isAborted()) {
throw OperationAbortedException();
}
track->setOutputStream(outputStream);
track->makeTrack();
}
}
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newMoovOffset);
// prepare for writing the actual data
vector<tuple<istream *, vector<uint64>, vector<uint64> > > trackInfos; // used when writing chunk-by-chunk
uint64 totalChunkCount; // used when writing chunk-by-chunk
uint64 totalChunkCount = 0; // used when writing chunk-by-chunk
vector<int64> origMdatOffsets; // used when simply copying mdat
vector<int64> newMdatOffsets; // used when simply copying mdat
// write other atoms
for(Mp4Atom *otherTopLevelAtom = firstElement(); otherTopLevelAtom; otherTopLevelAtom = otherTopLevelAtom->nextSibling()) {
if(isAborted()) {
throw OperationAbortedException();
}
try {
otherTopLevelAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse all top-level atoms of original file.", context);
throw InvalidDataException();
}
using namespace Mp4AtomIds;
switch(otherTopLevelAtom->id()) {
case FileType: case ProgressiveDownloadInformation: case Movie: case Free: case Skip:
break;
case MediaData:
auto trackCount = tracks().size();
for(byte pass = 0; pass != 2; ++pass) {
if(fileInfo().tagPosition() == (pass ? TagPosition::AfterData : TagPosition::BeforeData)) {
// write "moov"-atom (contains track and tag information)
ostream::pos_type newMoovOffset = outputStream.tellp();
Mp4Atom *udtaAtom = nullptr;
uint64 newUdtaOffset = 0u;
// -> write child atoms manually, because the child "udta" has to be altered/ignored
moovAtom->copyWithoutChilds(outputStream);
for(Mp4Atom *moovChildAtom = moovAtom->firstChild(); moovChildAtom; moovChildAtom = moovChildAtom->nextSibling()) {
try {
moovChildAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse childs of moov atom of original file.", context);
throw InvalidDataException();
}
if(moovChildAtom->id() == Mp4AtomIds::UserData) {
// found a "udta" (user data) atom which child "meta" holds tag information
if(!udtaAtom) {
udtaAtom = moovChildAtom;
// check whether the "udta"-atom needs to be written
// it has to be written only when an MP4 tag is assigned
bool writeUdtaAtom = !m_tags.empty();
// or when there is at least one child except the meta atom in the original file
if(!writeUdtaAtom) {
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
if(udtaChildAtom->id() != Mp4AtomIds::Meta) {
writeUdtaAtom = true;
break;
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of \"udta\"-atom atom of original file. These invalid/unknown atoms will be ignored.", context);
}
}
if(writeUdtaAtom) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp(); // save offset
udtaAtom->copyHeader(outputStream); // and write header
// write meta atom if there's a tag assigned
if(!m_tags.empty()) {
try {
m_tags.front()->make(outputStream);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
}
addNotifications(*m_tags.front());
}
// write rest of the child atoms of "udta"-atom
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
// skip "meta"-atoms here of course
if(udtaChildAtom->id() != Mp4AtomIds::Meta) {
udtaChildAtom->copyEntirely(outputStream);
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of \"udta\"-atom of original file. These will be ignored.", context);
}
// write correct size of udta atom
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
}
} else {
addNotification(NotificationType::Warning, "The source file has multiple \"udta\"-atoms. Surplus atoms will be ignored.", context);
}
} else if(!writeChunkByChunk || moovChildAtom->id() != Mp4AtomIds::Track) {
// copy "trak"-atoms only when not writing the data chunk-by-chunk
moovChildAtom->copyEntirely(outputStream);
}
}
// -> the original file has no udta atom but there is tag information to be written
if(!udtaAtom && !m_tags.empty()) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp();
// write udta atom
outputWriter.writeUInt32BE(0); // the size will be written later
outputWriter.writeUInt32BE(Mp4AtomIds::UserData);
// write tags
try {
m_tags.front()->make(outputStream);
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
}
}
// -> write trak atoms for each currently assigned track (this is only required when writing data chunk-by-chunk)
if(writeChunkByChunk) {
break; // write actual data separately when writing chunk by chunk
} else {
// store the mdat offsets when not writing chunk by chunk to be able to update the stco tables
origMdatOffsets.push_back(otherTopLevelAtom->startOffset());
newMdatOffsets.push_back(outputStream.tellp());
updateStatus("Writing meta information for the tracks ...");
for(auto &track : tracks()) {
track->setOutputStream(outputStream);
track->makeTrack();
}
}
default:
updateStatus("Writing " + otherTopLevelAtom->idToString() + " atom ...");
otherTopLevelAtom->forwardStatusUpdateCalls(this);
otherTopLevelAtom->copyEntirely(outputStream);
}
}
// when writing chunk by chunk the actual data needs to be written separately
if(writeChunkByChunk) {
// get the chunk offset and the chunk size table from the old file to be able to write single chunks later ...
updateStatus("Reading chunk offsets and sizes from the original file ...");
trackInfos.reserve(tracks().size());
totalChunkCount = 0;
for(auto &track : tracks()) {
if(&track->inputStream() == &outputStream) {
track->setInputStream(backupStream); // ensure the track reads from the original file
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newMoovOffset);
} else {
// write other atoms and "mdat"-atom (holds actual data)
for(Mp4Atom *otherTopLevelAtom = firstElement(); otherTopLevelAtom; otherTopLevelAtom = otherTopLevelAtom->nextSibling()) {
if(isAborted()) {
throw OperationAbortedException();
}
try {
otherTopLevelAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse all top-level atoms of original file.", context);
throw InvalidDataException();
}
using namespace Mp4AtomIds;
switch(otherTopLevelAtom->id()) {
case FileType: case ProgressiveDownloadInformation: case Movie: case Free: case Skip:
break;
case MediaData:
if(writeChunkByChunk) {
break; // write actual data separately when writing chunk by chunk
} else {
// store the mdat offsets when not writing chunk by chunk to be able to update the stco tables
origMdatOffsets.push_back(otherTopLevelAtom->startOffset());
newMdatOffsets.push_back(outputStream.tellp());
}
default:
updateStatus("Writing " + otherTopLevelAtom->idToString() + " atom ...");
otherTopLevelAtom->forwardStatusUpdateCalls(this);
otherTopLevelAtom->copyEntirely(outputStream);
}
}
trackInfos.emplace_back(&track->inputStream(), track->readChunkOffsets(), track->readChunkSizes());
const vector<uint64> &chunkOffsetTable = get<1>(trackInfos.back());
const vector<uint64> &chunkSizesTable = get<2>(trackInfos.back());
totalChunkCount += track->chunkCount();
if(track->chunkCount() != chunkOffsetTable.size() || track->chunkCount() != chunkSizesTable.size()) {
addNotification(NotificationType::Critical, "Chunks of track " + numberToString<uint64, string>(track->id()) + " could not be parsed correctly.", context);
// when writing chunk by chunk the actual data needs to be written separately
if(writeChunkByChunk) {
// get the chunk offset and the chunk size table from the old file to be able to write single chunks later ...
updateStatus("Reading chunk offsets and sizes from the original file ...");
trackInfos.reserve(trackCount);
for(auto &track : tracks()) {
if(isAborted()) {
throw OperationAbortedException();
}
// ensure the track reads from the original file
if(&track->inputStream() == &outputStream) {
track->setInputStream(backupStream);
}
trackInfos.emplace_back(&track->inputStream(), track->readChunkOffsets(), track->readChunkSizes());
const vector<uint64> &chunkOffsetTable = get<1>(trackInfos.back());
const vector<uint64> &chunkSizesTable = get<2>(trackInfos.back());
totalChunkCount += track->chunkCount();
if(track->chunkCount() != chunkOffsetTable.size() || track->chunkCount() != chunkSizesTable.size()) {
addNotification(NotificationType::Critical, "Chunks of track " + numberToString<uint64, string>(track->id()) + " could not be parsed correctly.", context);
}
}
// writing single chunks is needed when tracks have been added or removed
updateStatus("Writing chunks to mdat atom ...");
//outputStream.seekp(0, ios_base::end);
ostream::pos_type newMdatOffset = outputStream.tellp();
writer().writeUInt32BE(1); // denote 64 bit size
outputWriter.writeUInt32BE(Mp4AtomIds::MediaData);
outputWriter.writeUInt64BE(0); // write size of mdat atom later
CopyHelper<0x2000> copyHelper;
uint64 chunkIndex = 0;
uint64 totalChunksCopied = 0;
bool chunksCopied;
do {
if(isAborted()) {
throw OperationAbortedException();
}
chunksCopied = false;
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
//auto &track = tracks()[trackIndex];
auto &trackInfo = trackInfos[trackIndex];
istream &sourceStream = *get<0>(trackInfo);
vector<uint64> &chunkOffsetTable = get<1>(trackInfo);
const vector<uint64> &chunkSizesTable = get<2>(trackInfo);
if(chunkIndex < chunkOffsetTable.size() && chunkIndex < chunkSizesTable.size()) {
sourceStream.seekg(chunkOffsetTable[chunkIndex]);
//outputStream.seekp(0, ios_base::end);
chunkOffsetTable[chunkIndex] = outputStream.tellp();
copyHelper.copy(sourceStream, outputStream, chunkSizesTable[chunkIndex]);
//track->updateChunkOffset(chunkIndex, chunkOffset);
chunksCopied = true;
++totalChunksCopied;
}
}
++chunkIndex;
updatePercentage(static_cast<double>(totalChunksCopied) / totalChunkCount);
} while(chunksCopied);
//outputStream.seekp(0, ios_base::end);
Mp4Atom::seekBackAndWriteAtomSize64(outputStream, newMdatOffset);
}
}
}
if(isAborted()) {
throw OperationAbortedException();
}
// reparse what is written so far
// reparse new file
updateStatus("Reparsing output file ...");
outputStream.close(); // the outputStream needs to be reopened to be able to read again
outputStream.close(); // outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
m_headerParsed = false;
@ -406,54 +449,27 @@ void Mp4Container::internalMakeFile()
addNotification(NotificationType::Critical, "Unable to reparse the header of the new file.", context);
throw;
}
// update chunk offsets in the "stco"-atom of each track
if(trackCount != tracks().size()) {
stringstream error;
error << "Unable to update chunk offsets (\"stco\"-atom): Number of tracks in the output file (" << tracks().size()
<< ") differs from the number of tracks in the original file (" << trackCount << ").";
addNotification(NotificationType::Critical, error.str(), context);
throw Failure();
}
if(writeChunkByChunk) {
// checking parsed tracks
size_t trackCount = tracks().size();
if(trackCount != trackInfos.size()) {
if(trackCount > trackInfos.size()) {
trackCount = trackInfos.size();
updateStatus("Updating chunk offset table for each track ...");
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
auto &track = tracks()[trackIndex];
auto &chunkOffsetTable = get<1>(trackInfos[trackIndex]);
if(track->chunkCount() == chunkOffsetTable.size()) {
track->updateChunkOffsets(chunkOffsetTable);
} else {
addNotification(NotificationType::Critical, "Unable to update chunk offsets of track " + numberToString(trackIndex + 1) + ": Number of chunks in the output file differs from the number of chunks in the orignal file.", context);
throw Failure();
}
addNotification(NotificationType::Critical, "The track meta data could not be written correctly. Trying to write the chunk data anyways.", context);
}
// writing single chunks is needed when tracks have been added or removed
updateStatus("Writing chunks to mdat atom ...");
outputStream.seekp(0, ios_base::end);
ostream::pos_type newMdatOffset = outputStream.tellp();
writer().writeUInt32BE(0); // denote 64 bit size
writer().writeUInt32BE(Mp4AtomIds::MediaData);
writer().writeUInt64BE(0); // write size of mdat atom later
CopyHelper<0x2000> copyHelper;
uint64 chunkIndex = 0;
uint64 totalChunksCopied = 0;
bool chunksCopied;
do {
if(isAborted()) {
throw OperationAbortedException();
}
chunksCopied = false;
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
auto &track = tracks()[trackIndex];
auto &trackInfo = trackInfos[trackIndex];
istream &sourceStream = *get<0>(trackInfo);
const vector<uint64> &chunkOffsetTable = get<1>(trackInfo);
const vector<uint64> &chunkSizesTable = get<2>(trackInfo);
if(chunkIndex < chunkOffsetTable.size() && chunkIndex < chunkSizesTable.size()) {
sourceStream.seekg(chunkOffsetTable[chunkIndex]);
outputStream.seekp(0, ios_base::end);
uint64 chunkOffset = outputStream.tellp();
copyHelper.copy(sourceStream, outputStream, chunkSizesTable[chunkIndex]);
track->updateChunkOffset(chunkIndex, chunkOffset);
chunksCopied = true;
++totalChunksCopied;
}
}
++chunkIndex;
updatePercentage(static_cast<double>(totalChunksCopied) / totalChunkCount);
} while(chunksCopied);
outputStream.seekp(0, ios_base::end);
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newMdatOffset, true);
} else {
// correct mdat offsets in the stco atom of each track when we've just copied the mdat atom
updateOffsets(origMdatOffsets, newMdatOffsets);
}
updatePercentage(100.0);
@ -494,7 +510,7 @@ void Mp4Container::internalMakeFile()
* \param oldMdatOffsets Specifies a vector holding the old offsets of the "mdat"-atoms.
* \param newMdatOffsets Specifies a vector holding the new offsets of the "mdat"-atoms.
*
* Internally uses Mp4Track::updateOffsets(). Offsets stored in the "tfhd"-atom are also
* Uses internally Mp4Track::updateOffsets(). Offsets stored in the "tfhd"-atom are also
* updated (this is not tested yet since I don't have files using this atom).
*
* \throws Throws std::ios_base::failure when an IO error occurs.
@ -510,7 +526,7 @@ void Mp4Container::updateOffsets(const std::vector<int64> &oldMdatOffsets, const
addNotification(NotificationType::Critical, "No MP4 atoms could be found.", context);
throw InvalidDataException();
}
// update "base-data-offset-present" of tfhd atom (NOT tested properly)
// update "base-data-offset-present" of "tfhd"-atom (NOT tested properly)
try {
for(Mp4Atom *moofAtom = firstElement()->siblingById(Mp4AtomIds::MovieFragment, false);
moofAtom; moofAtom = moofAtom->siblingById(Mp4AtomIds::MovieFragment, false)) {
@ -574,14 +590,16 @@ void Mp4Container::updateOffsets(const std::vector<int64> &oldMdatOffsets, const
try {
track->parseHeader();
} catch(Failure &) {
addNotification(NotificationType::Warning, "The chunk offsets of track " + track->name() + " couldn't be updated because the track seems to be invalid. The newly written file seems to be damaged.", context);
addNotification(NotificationType::Warning, "The chunk offsets of track " + track->name() + " couldn't be updated because the track seems to be invalid..", context);
throw;
}
}
if(track->isHeaderValid()) {
try {
track->updateChunkOffsets(oldMdatOffsets, newMdatOffsets);
} catch(Failure &) {
addNotification(NotificationType::Warning, "The chunk offsets of track " + track->name() + " couldn't be updated. The altered file is damaged now, restore the backup!", context);
addNotification(NotificationType::Warning, "The chunk offsets of track " + track->name() + " couldn't be updated.", context);
throw;
}
}
}

View File

@ -290,6 +290,9 @@ uint32 mpeg4SamplingFrequencyTable[] = {
24000, 22050, 16000, 12000, 11025, 8000, 7350
};
/*!
* \brief Encapsulates all supported MPEG-4 channel configurations.
*/
namespace Mpeg4ChannelConfigs {
/*!

View File

@ -780,8 +780,8 @@ std::unique_ptr<Mpeg4VideoSpecificConfig> Mp4Track::parseVideoSpecificConfig(Sta
}
/*!
* \brief Updates the chunk offsets of the track. This is necessary when the mdat atom (which contains
* the actual chunk data) is moved.
* \brief Updates the chunk offsets of the track. This is necessary when the "mdat"-atom
* (which contains the actual chunk data) is moved.
* \param oldMdatOffsets Specifies a vector holding the old offsets of the "mdat"-atoms.
* \param newMdatOffsets Specifies a vector holding the new offsets of the "mdat"-atoms.
*
@ -849,10 +849,53 @@ void Mp4Track::updateChunkOffsets(const vector<int64> &oldMdatOffsets, const vec
}
}
/*!
 * \brief Updates the chunk offsets of the track. This is necessary when the "mdat"-atom
 *        (which contains the actual chunk data) is moved.
 * \param chunkOffsets Specifies the new chunk offset table; must contain exactly
 *                     chunkCount() entries, in chunk order.
 *
 * \throws Throws InvalidDataException when
 *          - there is no stream assigned.
 *          - the header has been considered as invalid when parsing the header information.
 *          - the size of \a chunkOffsets does not match chunkCount().
 *          - there is no atom holding these offsets.
 *          - the ID of the atom holding these offsets is not "stco" or "co64".
 */
void Mp4Track::updateChunkOffsets(const std::vector<uint64> &chunkOffsets)
{
    if(!isHeaderValid() || !m_ostream || !m_istream || !m_stcoAtom) {
        throw InvalidDataException();
    }
    if(chunkOffsets.size() != chunkCount()) {
        throw InvalidDataException();
    }
    // skip the version/flags field (4 byte) and the entry count field (4 byte) of the offset atom
    m_ostream->seekp(m_stcoAtom->dataOffset() + 8);
    switch(m_stcoAtom->id()) {
    case Mp4AtomIds::ChunkOffset:
        // "stco" stores 32-bit offsets
        for(auto offset : chunkOffsets) {
            m_writer.writeUInt32BE(offset);
        }
        break;
    case Mp4AtomIds::ChunkOffset64:
        // "co64" stores 64-bit offsets
        for(auto offset : chunkOffsets) {
            m_writer.writeUInt64BE(offset);
        }
        break; // bug fix: previously fell through to the default case and threw even though all offsets had been written
    default:
        throw InvalidDataException();
    }
}
/*!
* \brief Updates a particular chunk offset.
* \param chunkIndex Specifies the index of the chunk offset to be updated.
* \param offset Specifies the new chunk offset.
* \remarks This method seems to be obsolete.
* \throws Throws InvalidDataException when
* - there is no stream assigned.
* - the header has been considered as invalid when parsing the header information.
* - \a chunkIndex is not less than chunkCount().
* - there is no atom holding these offsets.
* - the ID of the atom holding these offsets is not "stco" or "co64".
*/
void Mp4Track::updateChunkOffset(uint32 chunkIndex, uint64 offset)
{
@ -1006,7 +1049,7 @@ void Mp4Track::makeMedia()
// write minf atom
makeMediaInfo();
// write size (of mdia atom)
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), mdiaStartOffset, false);
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), mdiaStartOffset);
}
/*!
@ -1060,7 +1103,7 @@ void Mp4Track::makeMediaInfo()
// write stbl atom
makeSampleTable();
// write size (of minf atom)
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), minfStartOffset, false);
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), minfStartOffset);
}
/*!
@ -1123,7 +1166,7 @@ void Mp4Track::makeSampleTable()
// write subs atom (sub-sample information)
// write size (of stbl atom)
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), stblStartOffset, false);
Mp4Atom::seekBackAndWriteAtomSize(outputStream(), stblStartOffset);
}
void Mp4Track::internalParseHeader()

View File

@ -156,6 +156,7 @@ public:
// methods to update chunk offsets
void updateChunkOffsets(const std::vector<int64> &oldMdatOffsets, const std::vector<int64> &newMdatOffsets);
void updateChunkOffsets(const std::vector<uint64> &chunkOffsets);
void updateChunkOffset(uint32 chunkIndex, uint64 offset);
protected: