make use of padding when applying changes to MP4 files

This commit is contained in:
Martchus 2015-12-21 00:04:56 +01:00
parent 84fd56f122
commit 3da8337c6c
7 changed files with 617 additions and 289 deletions

View File

@ -17,6 +17,8 @@
#include <c++utilities/conversion/stringconversion.h>
#include <c++utilities/misc/memory.h>
#include <unistd.h>
#include <functional>
#include <initializer_list>
#include <unordered_set>
@ -804,8 +806,9 @@ void MatroskaContainer::internalMakeFile()
ElementPosition currentCuesPos = ElementPosition::Keep;
// -> index of the last segment
unsigned int lastSegmentIndex = static_cast<unsigned int>(-1);
// -> holds new padding
uint64 newPadding;
// -> whether rewrite is required (always required when forced to rewrite)
bool rewriteRequired = fileInfo().isForcingRewrite();
// calculate EBML header size
@ -831,7 +834,7 @@ void MatroskaContainer::internalMakeFile()
// a tag of 3 bytes size is empty and can be skipped
tagElementsSize += tagMaker.back().requiredSize();
}
} catch(Failure &) {
} catch(const Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*tag);
@ -848,7 +851,7 @@ void MatroskaContainer::internalMakeFile()
// an attachment of 3 bytes size is empty and can be skipped
attachedFileElementsSize += attachmentMaker.back().requiredSize();
}
} catch(Failure &) {
} catch(const Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*attachment);
@ -902,11 +905,11 @@ void MatroskaContainer::internalMakeFile()
calculateSegmentData:
// define variables to store sizes, offsets and other information required to make a header and "Segment"-elements
// current "pretend" write offset
// -> current "pretend" write offset
uint64 currentOffset = ebmlHeaderSize;
// current read offset (used to calculate positions)
// -> current read offset (used to calculate positions)
uint64 readOffset = 0;
// index of current element during iteration
// -> index of current element during iteration
unsigned int index;
// if rewriting is required always use the preferred tag/cue position
@ -1330,10 +1333,9 @@ nonRewriteCalculations:
}
// setup stream(s) for writing
// -> update status
updateStatus("Preparing streams ...");
if(rewriteRequired) {
// update status
updateStatus("Preparing streams ...");
// move current file to temp dir and reopen it as backupStream, recreate original file
try {
// ensure the file is closed before moving
@ -1365,6 +1367,7 @@ nonRewriteCalculations:
}
}
// start actual writing
try {
// write EBML header
updateStatus("Writing EBML header ...");
@ -1436,7 +1439,7 @@ nonRewriteCalculations:
break;
default:
level2Element->copyBuffer(outputStream);
level2Element->releaseBuffer();
level2Element->discardBuffer();
//level2Element->copyEntirely(outputStream);
}
}
@ -1456,7 +1459,7 @@ nonRewriteCalculations:
for(const auto id : initializer_list<EbmlElement::identifierType>{MatroskaIds::Tracks, MatroskaIds::Chapters}) {
for(level1Element = level0Element->childById(id); level1Element; level1Element = level1Element->siblingById(id)) {
level1Element->copyBuffer(outputStream);
level1Element->releaseBuffer();
level1Element->discardBuffer();
//level1Element->copyEntirely(outputStream);
}
}
@ -1498,7 +1501,7 @@ nonRewriteCalculations:
// write padding / "Void"-element
if(segment.newPadding) {
// -> calculate length
// calculate length
uint64 voidLength;
if(segment.newPadding < 64) {
sizeLength = 1;
@ -1507,10 +1510,10 @@ nonRewriteCalculations:
sizeLength = 8;
BE::getBytes(static_cast<uint64>((voidLength = segment.newPadding - 9) | 0x100000000000000), buff);
}
// -> write header
// write header
outputWriter.writeByte(EbmlIds::Void);
outputStream.write(buff, sizeLength);
// -> write zeroes
// write zeroes
for(; voidLength; --voidLength) {
outputStream.put(0);
}
@ -1613,6 +1616,8 @@ nonRewriteCalculations:
outputStream.close(); // the outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
} else {
// TODO: truncate file
}
reset();
try {
@ -1638,7 +1643,7 @@ nonRewriteCalculations:
outputStream.flush();
// handle errors (which might have occurred after renaming/creating backup file)
} catch(OperationAbortedException &) {
} catch(const OperationAbortedException &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
@ -1654,7 +1659,7 @@ nonRewriteCalculations:
addNotification(NotificationType::Information, "Applying new tag information has been aborted.", context);
}
throw;
} catch(Failure &) {
} catch(const Failure &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
@ -1667,10 +1672,10 @@ nonRewriteCalculations:
addNotification(NotificationType::Critical, ex.what(), context);
}
} else {
addNotification(NotificationType::Information, "Applying new tag information failed.", context);
addNotification(NotificationType::Critical, "Applying new tag information failed.", context);
}
throw;
} catch(ios_base::failure &) {
} catch(const ios_base::failure &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
@ -1683,7 +1688,7 @@ nonRewriteCalculations:
addNotification(NotificationType::Critical, ex.what(), context);
}
} else {
addNotification(NotificationType::Information, "An IO error occured when applying tag information.", context);
addNotification(NotificationType::Critical, "An IO error occured when applying tag information.", context);
}
throw;
}

View File

@ -242,7 +242,7 @@ void MatroskaCuePositionUpdater::make(ostream &stream)
case MatroskaIds::CueTime:
// write "CueTime"-element
cuePointChild->copyBuffer(stream);
cuePointChild->releaseBuffer();
cuePointChild->discardBuffer();
//cuePointChild->copyEntirely(stream);
break;
case MatroskaIds::CueTrackPositions:
@ -258,7 +258,7 @@ void MatroskaCuePositionUpdater::make(ostream &stream)
case MatroskaIds::CueBlockNumber:
// write unchanged children of "CueTrackPositions"-element
cueTrackPositionsChild->copyBuffer(stream);
cueTrackPositionsChild->releaseBuffer();
cueTrackPositionsChild->discardBuffer();
//cueTrackPositionsChild->copyEntirely(stream);
break;
case MatroskaIds::CueRelativePosition:
@ -289,7 +289,7 @@ void MatroskaCuePositionUpdater::make(ostream &stream)
case MatroskaIds::CueRefNumber:
// write unchanged children of "CueReference"-element
cueReferenceChild->copyBuffer(stream);
cueReferenceChild->releaseBuffer();
cueReferenceChild->discardBuffer();
cueReferenceChild->copyEntirely(stream);
break;
case MatroskaIds::CueRefCluster:

View File

@ -150,6 +150,21 @@ void Mp4Atom::seekBackAndWriteAtomSize64(std::ostream &stream, const ostream::po
stream.seekp(currentOffset);
}
/*!
 * \brief Writes an MP4 atom header to the specified \a stream.
 *
 * The compact 32-bit size form is used where possible; otherwise the
 * size field is set to 1 and the actual size follows as a 64-bit value.
 */
void Mp4Atom::makeHeader(uint64 size, uint32 id, BinaryWriter &writer)
{
    // sizes of 0xFFFFFFFF and above require the extended (64-bit) representation
    const bool needs64BitSize = size >= 0xFFFFFFFF;
    writer.writeUInt32BE(needs64BitSize ? 1 : static_cast<uint32>(size));
    writer.writeUInt32BE(id);
    if(needs64BitSize) {
        writer.writeUInt64BE(size);
    }
}
/*!
* \brief Returns an indication whether the atom is a parent element.
*

View File

@ -68,6 +68,8 @@ public:
static void seekBackAndWriteAtomSize(std::ostream &stream, const std::ostream::pos_type &startOffset);
static void seekBackAndWriteAtomSize64(std::ostream &stream, const std::ostream::pos_type &startOffset);
static void addHeaderSize(uint64 &dataSize);
static void makeHeader(uint64 size, uint32 id, IoUtilities::BinaryWriter &writer);
protected:
Mp4Atom(containerType& container, uint64 startOffset, uint64 maxSize);
@ -87,6 +89,14 @@ inline std::string Mp4Atom::idToString() const
return ConversionUtilities::interpretIntegerAsString<identifierType>(id());
}
/*!
 * \brief Adds the header size to the specified \a data size.
 *
 * Accounts for the extended (64-bit) atom header when the resulting
 * total size would not fit the compact 32-bit size field.
 */
inline void Mp4Atom::addHeaderSize(uint64 &dataSize)
{
    if(dataSize < 0xFFFFFFF7) {
        dataSize += 8;  // compact header: 32-bit size + 32-bit id
    } else {
        dataSize += 16; // extended header: additional 64-bit size field
    }
}
}
#endif // MP4ATOM_H

View File

@ -10,6 +10,8 @@
#include <c++utilities/io/copy.h>
#include <c++utilities/misc/memory.h>
#include <unistd.h>
#include <tuple>
using namespace std;
@ -191,318 +193,605 @@ void Mp4Container::internalParseTracks()
void Mp4Container::internalMakeFile()
{
// set initial status
invalidateStatus();
static const string context("making MP4 container");
updateStatus("Calculating atom sizes and padding ...");
// basic validation of original file
if(!isHeaderParsed()) {
addNotification(NotificationType::Critical, "The header has not been parsed yet.", context);
throw InvalidDataException();
}
// define variables needed to parse atoms of original file
//Mp4Atom *level0Atom = firstElement();
if(!firstElement()) {
addNotification(NotificationType::Critical, "No MP4 atoms could be found.", context);
throw InvalidDataException();
}
// prepare rewriting the file
bool writeChunkByChunk = m_tracksAltered;
updateStatus("Preparing for rewriting MP4 file ...");
fileInfo().close(); // ensure the file is close before renaming it
string backupPath;
fstream &outputStream = fileInfo().stream();
BinaryWriter outputWriter(&outputStream);
fstream backupStream; // create a stream to open the backup/original file
// define variables needed to manage file layout
// -> whether media data is written chunk by chunk (need to write chunk by chunk if tracks have been altered)
const bool writeChunkByChunk = m_tracksAltered;
// -> whether rewrite is required (always required when forced to rewrite or when tracks have been altered)
bool rewriteRequired = fileInfo().isForcingRewrite() || writeChunkByChunk;
// -> use the preferred tag position by default (might be changed later if not forced)
ElementPosition newTagPos = fileInfo().tagPosition();
// -> current tag position (determined later)
ElementPosition currentTagPos;
// -> holds new padding
uint64 newPadding;
// -> holds current offset
uint64 currentOffset;
// -> holds track information, used when writing chunk-by-chunk
vector<tuple<istream *, vector<uint64>, vector<uint64> > > trackInfos;
// -> holds offsets of media data atoms in original file, used when simply copying mdat
vector<int64> origMediaDataOffsets;
// -> holds offsets of media data atoms in new file, used when simply copying mdat
vector<int64> newMediaDataOffsets;
// -> new size of movie atom and user data atom
uint64 movieAtomSize, userDataAtomSize;
// -> track count of original file
const auto trackCount = this->trackCount();
// find relevant atoms in original file
Mp4Atom *fileTypeAtom, *progressiveDownloadInfoAtom, *movieAtom, *firstMediaDataAtom, *userDataAtom, *metaAtom;
Mp4Atom *level0Atom, *level1Atom, *level2Atom, *movieNextSibling;
try {
BackupHelper::createBackupFile(fileInfo().path(), backupPath, backupStream);
// set backup stream as associated input stream since we still need the original data/atoms to write the new file
setStream(backupStream);
// recreate original file
outputStream.open(fileInfo().path(), ios_base::out | ios_base::binary | ios_base::trunc);
// collect needed atoms from the original file
Mp4Atom *ftypAtom, *pdinAtom, *moovAtom;
try {
ftypAtom = firstElement()->siblingById(Mp4AtomIds::FileType, true); // mandatory
if(!ftypAtom) { // throw error if missing
addNotification(NotificationType::Critical, "Mandatory \"ftyp\"-atom not found.", context);
}
pdinAtom = firstElement()->siblingById(Mp4AtomIds::ProgressiveDownloadInformation, true); // not mandatory
moovAtom = firstElement()->siblingById(Mp4AtomIds::Movie, true); // mandatory
if(!moovAtom) { // throw error if missing
addNotification(NotificationType::Critical, "Mandatory \"moov\"-atom not found.", context);
throw InvalidDataException();
}
} catch (Failure &) {
addNotification(NotificationType::Critical, "Unable to parse the atom strucutre of the source file.", context);
// file type atom (mandatory)
if((fileTypeAtom = firstElement()->siblingById(Mp4AtomIds::FileType, true))) {
// buffer atom
fileTypeAtom->makeBuffer();
} else {
// throw error if missing
addNotification(NotificationType::Critical, "Mandatory \"ftyp\"-atom not found.", context);
throw InvalidDataException();
}
if(m_tags.size() > 1) {
addNotification(NotificationType::Warning, "There are multiple MP4-tags assigned. Only the first one will be attached to the file.", context);
// progressive download information atom (not mandatory)
if((progressiveDownloadInfoAtom = firstElement()->siblingById(Mp4AtomIds::ProgressiveDownloadInformation, true))) {
// buffer atom
progressiveDownloadInfoAtom->makeBuffer();
}
// write all top-level atoms
updateStatus("Writing header ...");
// write "ftype"-atom
ftypAtom->copyEntirely(outputStream);
// write "pdin"-atom ("progressive download information")
if(pdinAtom) {
pdinAtom->copyEntirely(outputStream);
// movie atom (mandatory) and next sibling
if(!(movieAtom = firstElement()->siblingById(Mp4AtomIds::Movie, true))) {
// throw error if missing
addNotification(NotificationType::Critical, "Mandatory \"moov\"-atom not in the source file found.", context);
throw InvalidDataException();
}
// prepare for writing the actual data
vector<tuple<istream *, vector<uint64>, vector<uint64> > > trackInfos; // used when writing chunk-by-chunk
uint64 totalChunkCount = 0; // used when writing chunk-by-chunk
vector<int64> origMdatOffsets; // used when simply copying mdat
vector<int64> newMdatOffsets; // used when simply copying mdat
auto trackCount = tracks().size();
for(byte pass = 0; pass != 2; ++pass) {
if(fileInfo().tagPosition() == (pass ? ElementPosition::AfterData : ElementPosition::BeforeData)) {
// write "moov"-atom (contains track and tag information)
ostream::pos_type newMoovOffset = outputStream.tellp();
Mp4Atom *udtaAtom = nullptr;
uint64 newUdtaOffset = 0u;
// -> write child atoms manually, because the child "udta" has to be altered/ignored
moovAtom->copyWithoutChilds(outputStream);
for(Mp4Atom *moovChildAtom = moovAtom->firstChild(); moovChildAtom; moovChildAtom = moovChildAtom->nextSibling()) {
for(movieNextSibling = movieAtom->nextSibling(); movieNextSibling; movieNextSibling = movieNextSibling->nextSibling()) {
movieNextSibling->parse();
switch(movieNextSibling->id()) {
case Mp4AtomIds::FileType: case Mp4AtomIds::ProgressiveDownloadInformation:
case Mp4AtomIds::Movie: case Mp4AtomIds::Free: case Mp4AtomIds::Skip:
continue;
}
break;
}
// media data atom (mandatory?)
// -> determine current tag position
// -> since tags are nested in the movie atom, its position is relevant here
if((firstMediaDataAtom = firstElement()->siblingById(Mp4AtomIds::MediaData, true))) {
currentTagPos = firstMediaDataAtom->startOffset() < movieAtom->startOffset()
? ElementPosition::AfterData : ElementPosition::BeforeData;
if(newTagPos == ElementPosition::Keep) {
newTagPos = currentTagPos;
}
} else {
currentTagPos = ElementPosition::Keep;
}
// user data atom, meta atom, next sibling of meta atom
if((userDataAtom = movieAtom->childById(Mp4AtomIds::UserData))) {
metaAtom = userDataAtom->childById(Mp4AtomIds::Meta);
}
} catch (const Failure &) {
// can't ignore parsing errors here
addNotification(NotificationType::Critical, "Unable to parse the overall atom structure of the source file.", context);
throw InvalidDataException();
}
if(isAborted()) {
throw OperationAbortedException();
}
// calculate sizes
// -> size of tags
vector<Mp4TagMaker> tagMaker;
uint64 tagsSize = 0;
tagMaker.reserve(m_tags.size());
for(auto &tag : m_tags) {
try {
tagMaker.emplace_back(tag->prepareMaking());
tagsSize += tagMaker.back().requiredSize();
} catch(const Failure &) {
// nothing to do because notifications will be added anyways
}
addNotifications(*tag);
}
// -> size of movie atom (contains track and tag information)
movieAtomSize = userDataAtomSize = 0;
try {
// add size of children
for(level0Atom = movieAtom; level0Atom; level0Atom = level0Atom->siblingById(Mp4AtomIds::Movie)) {
for(level1Atom = level0Atom->firstChild(); level1Atom; level1Atom = level1Atom->nextSibling()) {
level1Atom->parse();
switch(level1Atom->id()) {
case Mp4AtomIds::UserData:
try {
moovChildAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse childs of moov atom of original file.", context);
throw InvalidDataException();
}
if(moovChildAtom->id() == Mp4AtomIds::UserData) {
// found a "udta" (user data) atom which child "meta" holds tag information
if(!udtaAtom) {
udtaAtom = moovChildAtom;
// check whether the "udta"-atom needs to be written
// it has to be written only when an MP4 tag is assigned
bool writeUdtaAtom = !m_tags.empty();
// or when there is at least one child except the meta atom in the original file
if(!writeUdtaAtom) {
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
if(udtaChildAtom->id() != Mp4AtomIds::Meta) {
writeUdtaAtom = true;
break;
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of \"udta\"-atom atom of original file. These invalid/unknown atoms will be ignored.", context);
}
for(level2Atom = level1Atom->firstChild(); level2Atom; level2Atom = level2Atom->nextSibling()) {
level2Atom->parse();
switch(level2Atom->id()) {
case Mp4AtomIds::Meta:
// ignore meta data here; it is added separately
break;
default:
// add size of unknown children of the user data atom
userDataAtomSize += level2Atom->totalSize();
level2Atom->makeBuffer();
}
if(writeUdtaAtom) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp(); // save offset
udtaAtom->copyHeader(outputStream); // and write header
// write meta atom if there's a tag assigned
if(!m_tags.empty()) {
try {
m_tags.front()->make(outputStream);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
}
addNotifications(*m_tags.front());
}
// write rest of the child atoms of "udta"-atom
try {
for(Mp4Atom *udtaChildAtom = udtaAtom->firstChild(); udtaChildAtom; udtaChildAtom = udtaChildAtom->nextSibling()) {
udtaChildAtom->parse();
// skip "meta"-atoms here of course
if(udtaChildAtom->id() != Mp4AtomIds::Meta) {
udtaChildAtom->copyEntirely(outputStream);
}
}
} catch(Failure &) {
addNotification(NotificationType::Warning,
"Unable to parse childs of \"udta\"-atom of original file. These will be ignored.", context);
}
// write correct size of udta atom
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
}
} else {
addNotification(NotificationType::Warning, "The source file has multiple \"udta\"-atoms. Surplus atoms will be ignored.", context);
}
} else if(!writeChunkByChunk || moovChildAtom->id() != Mp4AtomIds::Track) {
// copy "trak"-atoms only when not writing the data chunk-by-chunk
moovChildAtom->copyEntirely(outputStream);
} catch(const Failure &) {
// invalid children might be ignored as not mandatory
addNotification(NotificationType::Critical, "Unable to parse the children of \"udta\"-atom of the source file; ignoring them.", context);
}
}
// -> the original file has no udta atom but there is tag information to be written
if(!udtaAtom && !m_tags.empty()) {
updateStatus("Writing tag information ...");
newUdtaOffset = outputStream.tellp();
// write udta atom
outputWriter.writeUInt32BE(0); // the size will be written later
outputWriter.writeUInt32BE(Mp4AtomIds::UserData);
// write tags
try {
m_tags.front()->make(outputStream);
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newUdtaOffset);
} catch(Failure &) {
addNotification(NotificationType::Warning, "Unable to write meta atom (of assigned mp4 tag).", context);
break;
case Mp4AtomIds::Track:
// add size of track atoms only if not writing chunk-by-chunk (otherwise sizes are added separately)
if(!writeChunkByChunk) {
movieAtomSize += level1Atom->totalSize();
level1Atom->makeBuffer();
}
break;
default:
// add size of unknown children of the movie atom
movieAtomSize += level1Atom->totalSize();
level1Atom->makeBuffer();
}
// -> write trak atoms for each currently assigned track (this is only required when writing data chunk-by-chunk)
}
}
// add size of meta data
if(userDataAtomSize += tagsSize) {
Mp4Atom::addHeaderSize(userDataAtomSize);
movieAtomSize += userDataAtomSize;
}
// add size of track atoms when writing chunk-by-chunk
if(writeChunkByChunk) {
// note: Mp4Track API has to be changed when Mp4Track::makeTrack() gets a real implementation.
for(const auto &track : tracks()) {
movieAtomSize += track->requiredSize();
}
}
// add header size
Mp4Atom::addHeaderSize(movieAtomSize);
} catch(const Failure &) {
// can't ignore parsing errors here
addNotification(NotificationType::Critical, "Unable to parse the children of \"moov\"-atom of the source file.", context);
throw InvalidDataException();
}
if(isAborted()) {
throw OperationAbortedException();
}
// calculate padding if no rewrite is required; otherwise use the preferred padding
calculatePadding:
if(rewriteRequired) {
newPadding = (fileInfo().preferredPadding() && fileInfo().preferredPadding() < 8 ? 8 : fileInfo().preferredPadding());
} else {
// file type atom
currentOffset = fileTypeAtom->totalSize();
// progressive download information atom
if(progressiveDownloadInfoAtom) {
currentOffset += progressiveDownloadInfoAtom->totalSize();
}
// if writing tags before data: movie atom (contains tag)
switch(newTagPos) {
case ElementPosition::BeforeData:
case ElementPosition::Keep:
currentOffset += movieAtomSize;
break;
default:
;
}
// check whether there is sufficient space before the next atom
if(!(rewriteRequired = movieNextSibling && currentOffset > movieNextSibling->startOffset())) {
// there is sufficient space
// -> check whether the padding matches specifications
newPadding = movieNextSibling->startOffset() - currentOffset;
rewriteRequired = (newPadding > 0 && newPadding < 8) || newPadding < fileInfo().minPadding() || newPadding > fileInfo().maxPadding();
}
if(rewriteRequired) {
// can't put the tags before media data
if(!fileInfo().forceTagPosition() || newTagPos == ElementPosition::Keep) {
// writing tag before media data is not forced -> just put the tags at the end
newTagPos = ElementPosition::AfterData;
rewriteRequired = false;
} else {
// writing tag before media data is forced -> rewrite the file
}
// in any case: recalculate padding
goto calculatePadding;
} else {
// tags can be put before the media data
// -> ensure newTagPos is not ElementPosition::Keep
if(newTagPos == ElementPosition::Keep) {
newTagPos = ElementPosition::BeforeData;
}
}
}
if(isAborted()) {
throw OperationAbortedException();
}
// define variables needed to handle output stream and backup stream (required when rewriting the file)
string backupPath;
fstream &outputStream = fileInfo().stream();
fstream backupStream; // create a stream to open the backup/original file for the case rewriting the file is required
BinaryWriter outputWriter(&outputStream);
// setup stream(s) for writing
// -> update status
updateStatus("Preparing streams ...");
if(rewriteRequired) {
// move current file to temp dir and reopen it as backupStream, recreate original file
try {
// ensure the file is closed before moving
fileInfo().close();
BackupHelper::createBackupFile(fileInfo().path(), backupPath, backupStream);
// set backup stream as associated input stream since we need the original elements to write the new file
setStream(backupStream);
// recreate original file, define buffer variables
outputStream.open(fileInfo().path(), ios_base::out | ios_base::binary | ios_base::trunc);
} catch(const ios_base::failure &) {
addNotification(NotificationType::Critical, "Creation of temporary file (to rewrite the original file) failed.", context);
throw;
}
} else { // !rewriteRequired
// reopen original file to ensure it is opened for writing
try {
fileInfo().close();
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
} catch(const ios_base::failure &) {
addNotification(NotificationType::Critical, "Opening the file with write permissions failed.", context);
throw;
}
}
// start actual writing
try {
// write header
updateStatus("Writing header and tags ...");
// -> make file type atom
fileTypeAtom->copyBuffer(outputStream);
fileTypeAtom->discardBuffer();
// -> make progressive download info atom
if(progressiveDownloadInfoAtom) {
progressiveDownloadInfoAtom->copyBuffer(outputStream);
progressiveDownloadInfoAtom->discardBuffer();
}
// write movie atom / padding and media data
for(byte pass = 0; pass != 2; ++pass) {
if(newTagPos == (pass ? ElementPosition::AfterData : ElementPosition::BeforeData)) {
// write movie atom
// -> write movie atom header
Mp4Atom::makeHeader(movieAtomSize, Mp4AtomIds::Movie, outputWriter);
// -> write track atoms (only if writing chunk-by-chunk; otherwise track atoms are written with other children)
if(writeChunkByChunk) {
updateStatus("Writing meta information for the tracks ...");
// note: Mp4Track API has to be changed when Mp4Track::makeTrack() gets a real implementation.
for(auto &track : tracks()) {
track->setOutputStream(outputStream);
track->makeTrack();
}
}
Mp4Atom::seekBackAndWriteAtomSize(outputStream, newMoovOffset);
} else {
// write other atoms and "mdat"-atom (holds actual data)
for(Mp4Atom *otherTopLevelAtom = firstElement(); otherTopLevelAtom; otherTopLevelAtom = otherTopLevelAtom->nextSibling()) {
if(isAborted()) {
throw OperationAbortedException();
}
try {
otherTopLevelAtom->parse();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to parse all top-level atoms of original file.", context);
throw InvalidDataException();
}
using namespace Mp4AtomIds;
switch(otherTopLevelAtom->id()) {
case FileType: case ProgressiveDownloadInformation: case Movie: case Free: case Skip:
break;
case MediaData:
if(writeChunkByChunk) {
break; // write actual data separately when writing chunk by chunk
} else {
// store the mdat offsets when not writing chunk by chunk to be able to update the stco tables
origMdatOffsets.push_back(otherTopLevelAtom->startOffset());
newMdatOffsets.push_back(outputStream.tellp());
// -> write other movie atom children
for(level0Atom = movieAtom; level0Atom; level0Atom = level0Atom->siblingById(Mp4AtomIds::Movie)) {
for(level1Atom = level0Atom->firstChild(); level1Atom; level1Atom = level1Atom->nextSibling()) {
switch(level1Atom->id()) {
case Mp4AtomIds::UserData:
break;
case Mp4AtomIds::Track:
// write buffered data
if(!writeChunkByChunk) {
level1Atom->copyBuffer(outputStream);
level1Atom->discardBuffer();
}
break;
default:
// write buffered data
level1Atom->copyBuffer(outputStream);
level1Atom->discardBuffer();
}
default:
updateStatus("Writing " + otherTopLevelAtom->idToString() + " atom ...");
otherTopLevelAtom->forwardStatusUpdateCalls(this);
otherTopLevelAtom->copyEntirely(outputStream);
}
}
// when writing chunk by chunk the actual data needs to be written separately
if(writeChunkByChunk) {
// get the chunk offset and the chunk size table from the old file to be able to write single chunks later ...
updateStatus("Reading chunk offsets and sizes from the original file ...");
trackInfos.reserve(trackCount);
for(auto &track : tracks()) {
if(isAborted()) {
throw OperationAbortedException();
}
// ensure the track reads from the original file
if(&track->inputStream() == &outputStream) {
track->setInputStream(backupStream);
}
trackInfos.emplace_back(&track->inputStream(), track->readChunkOffsets(), track->readChunkSizes());
const vector<uint64> &chunkOffsetTable = get<1>(trackInfos.back());
const vector<uint64> &chunkSizesTable = get<2>(trackInfos.back());
totalChunkCount += track->chunkCount();
if(track->chunkCount() != chunkOffsetTable.size() || track->chunkCount() != chunkSizesTable.size()) {
addNotification(NotificationType::Critical, "Chunks of track " + numberToString<uint64, string>(track->id()) + " could not be parsed correctly.", context);
}
}
// writing single chunks is needed when tracks have been added or removed
updateStatus("Writing chunks to mdat atom ...");
//outputStream.seekp(0, ios_base::end);
ostream::pos_type newMdatOffset = outputStream.tellp();
writer().writeUInt32BE(1); // denote 64 bit size
outputWriter.writeUInt32BE(Mp4AtomIds::MediaData);
outputWriter.writeUInt64BE(0); // write size of mdat atom later
CopyHelper<0x2000> copyHelper;
uint64 chunkIndex = 0;
uint64 totalChunksCopied = 0;
bool chunksCopied;
do {
if(isAborted()) {
throw OperationAbortedException();
}
chunksCopied = false;
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
//auto &track = tracks()[trackIndex];
auto &trackInfo = trackInfos[trackIndex];
istream &sourceStream = *get<0>(trackInfo);
vector<uint64> &chunkOffsetTable = get<1>(trackInfo);
const vector<uint64> &chunkSizesTable = get<2>(trackInfo);
if(chunkIndex < chunkOffsetTable.size() && chunkIndex < chunkSizesTable.size()) {
sourceStream.seekg(chunkOffsetTable[chunkIndex]);
//outputStream.seekp(0, ios_base::end);
chunkOffsetTable[chunkIndex] = outputStream.tellp();
copyHelper.copy(sourceStream, outputStream, chunkSizesTable[chunkIndex]);
//track->updateChunkOffset(chunkIndex, chunkOffset);
chunksCopied = true;
++totalChunksCopied;
// -> write user data atom
if(userDataAtomSize) {
// write user data atom header
Mp4Atom::makeHeader(userDataAtomSize, Mp4AtomIds::UserData, outputWriter);
// write other children of user data atom
for(level0Atom = movieAtom; level0Atom; level0Atom = level0Atom->siblingById(Mp4AtomIds::Movie)) {
for(level1Atom = level0Atom->childById(Mp4AtomIds::UserData); level1Atom; level1Atom = level1Atom->siblingById(Mp4AtomIds::UserData)) {
for(level2Atom = level1Atom->firstChild(); level2Atom; level2Atom = level2Atom->nextSibling()) {
switch(level2Atom->id()) {
case Mp4AtomIds::Meta:
break;
default:
// write buffered data
level2Atom->copyBuffer(outputStream);
level2Atom->discardBuffer();
}
}
}
++chunkIndex;
updatePercentage(static_cast<double>(totalChunksCopied) / totalChunkCount);
} while(chunksCopied);
//outputStream.seekp(0, ios_base::end);
Mp4Atom::seekBackAndWriteAtomSize64(outputStream, newMdatOffset);
}
// write meta atom
for(auto &maker : tagMaker) {
maker.make(outputStream);
}
}
} else {
// write padding
if(newPadding) {
// write free atom header
if(newPadding < 0xFFFFFFFF) {
outputWriter.writeUInt32BE(newPadding);
outputWriter.writeUInt32BE(Mp4AtomIds::Free);
newPadding -= 8;
} else {
outputWriter.writeUInt32BE(1);
outputWriter.writeUInt32BE(Mp4AtomIds::Free);
outputWriter.writeUInt64BE(newPadding);
newPadding -= 16;
}
// write zeroes
for(; newPadding; --newPadding) {
outputStream.put(0);
}
}
// write media data
if(rewriteRequired) {
for(level0Atom = firstElement(); level0Atom; level0Atom = level0Atom->nextSibling()) {
level0Atom->parse();
switch(level0Atom->id()) {
case Mp4AtomIds::FileType: case Mp4AtomIds::ProgressiveDownloadInformation:
case Mp4AtomIds::Movie: case Mp4AtomIds::Free: case Mp4AtomIds::Skip:
break;
case Mp4AtomIds::MediaData:
if(writeChunkByChunk) {
// write actual data separately when writing chunk-by-chunk
break;
} else {
// store media data offsets when not writing chunk-by-chunk to be able to update chunk offset table
origMediaDataOffsets.push_back(level0Atom->startOffset());
newMediaDataOffsets.push_back(outputStream.tellp());
}
default:
// update status
updateStatus("Writing atom: " + level0Atom->idToString());
// copy atom entirely and forward status update calls
level0Atom->forwardStatusUpdateCalls(this);
level0Atom->copyEntirely(outputStream);
}
}
// when writing chunk-by-chunk write media data now
if(writeChunkByChunk) {
// read chunk offset and chunk size table from the old file which are required to get chunks
updateStatus("Reading chunk offsets and sizes from the original file ...");
trackInfos.reserve(trackCount);
uint64 totalChunkCount = 0;
uint64 totalMediaDataSize = 0;
for(auto &track : tracks()) {
if(isAborted()) {
throw OperationAbortedException();
}
// ensure the track reads from the original file
if(&track->inputStream() == &outputStream) {
track->setInputStream(backupStream);
}
// emplace information
trackInfos.emplace_back(&track->inputStream(), track->readChunkOffsets(), track->readChunkSizes());
// check whether the chunks could be parsed correctly
const vector<uint64> &chunkOffsetTable = get<1>(trackInfos.back());
const vector<uint64> &chunkSizesTable = get<2>(trackInfos.back());
if(track->chunkCount() != chunkOffsetTable.size() || track->chunkCount() != chunkSizesTable.size()) {
addNotification(NotificationType::Critical, "Chunks of track " + numberToString<uint64, string>(track->id()) + " could not be parsed correctly.", context);
}
// increase total chunk count and size
totalChunkCount += track->chunkCount();
totalMediaDataSize = accumulate(chunkSizesTable.cbegin(), chunkSizesTable.cend(), totalMediaDataSize);
}
// write media data chunk-by-chunk
// -> write header of media data atom
Mp4Atom::addHeaderSize(totalMediaDataSize);
Mp4Atom::makeHeader(totalMediaDataSize, Mp4AtomIds::MediaData, outputWriter);
// -> copy chunks
CopyHelper<0x2000> copyHelper;
uint64 chunkIndexWithinTrack = 0, totalChunksCopied = 0;
bool anyChunksCopied;
do {
if(isAborted()) {
throw OperationAbortedException();
}
// copy a chunk from each track
anyChunksCopied = false;
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
// get source stream and tables for current track
auto &trackInfo = trackInfos[trackIndex];
istream &sourceStream = *get<0>(trackInfo);
vector<uint64> &chunkOffsetTable = get<1>(trackInfo);
const vector<uint64> &chunkSizesTable = get<2>(trackInfo);
// still chunks to be copied (of this track)?
if(chunkIndexWithinTrack < chunkOffsetTable.size() && chunkIndexWithinTrack < chunkSizesTable.size()) {
// copy chunk, update entry in chunk offset table
sourceStream.seekg(chunkOffsetTable[chunkIndexWithinTrack]);
chunkOffsetTable[chunkIndexWithinTrack] = outputStream.tellp();
copyHelper.copy(sourceStream, outputStream, chunkSizesTable[chunkIndexWithinTrack]);
// update counter / status
anyChunksCopied = true;
++totalChunksCopied;
}
}
// incrase chunk index within track, update progress percentage
if(++chunkIndexWithinTrack % 10) {
updatePercentage(static_cast<double>(totalChunksCopied) / totalChunkCount);
}
} while(anyChunksCopied);
}
} else {
outputStream.seekp(movieNextSibling->totalSize(), ios_base::cur);
}
}
}
// reparse new file
// reparse what is written so far
updateStatus("Reparsing output file ...");
outputStream.close(); // outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
m_headerParsed = false;
m_tracks.clear();
m_tracksParsed = false;
m_tagsParsed = false;
if(rewriteRequired) {
outputStream.close(); // the outputStream needs to be reopened to be able to read again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
setStream(outputStream);
} else {
// ensure invalid bytes at the end are truncated (if the modified file is smaller then the original file)
const auto newSize = static_cast<uint64>(outputStream.tellp());
if(newSize < fileInfo().size()) {
// close stream before truncating
outputStream.close();
// truncate file
if(truncate(fileInfo().path().c_str(), newSize) == 0) {
fileInfo().reportSizeChanged(newSize);
} else {
addNotification(NotificationType::Critical, "Unable to truncate the file.", context);
}
// reopen the stream again
outputStream.open(fileInfo().path(), ios_base::in | ios_base::out | ios_base::binary);
}
}
reset();
try {
parseTracks();
} catch(Failure &) {
addNotification(NotificationType::Critical, "Unable to reparse the header of the new file.", context);
throw;
}
// update chunk offsets in the "stco"-atom of each track
if(trackCount != tracks().size()) {
stringstream error;
error << "Unable to update chunk offsets (\"stco\"-atom): Number of tracks in the output file (" << tracks().size()
<< ") differs from the number of tracks in the original file (" << trackCount << ").";
addNotification(NotificationType::Critical, error.str(), context);
throw Failure();
}
if(writeChunkByChunk) {
updateStatus("Updating chunk offset table for each track ...");
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
auto &track = tracks()[trackIndex];
auto &chunkOffsetTable = get<1>(trackInfos[trackIndex]);
if(track->chunkCount() == chunkOffsetTable.size()) {
track->updateChunkOffsets(chunkOffsetTable);
} else {
addNotification(NotificationType::Critical, "Unable to update chunk offsets of track " + numberToString(trackIndex + 1) + ": Number of chunks in the output file differs from the number of chunks in the orignal file.", context);
throw Failure();
if(rewriteRequired) {
// check whether track count of new file equals track count of old file
if(trackCount != tracks().size()) {
stringstream error;
error << "Unable to update chunk offsets (\"stco\"-atom): Number of tracks in the output file (" << tracks().size()
<< ") differs from the number of tracks in the original file (" << trackCount << ").";
addNotification(NotificationType::Critical, error.str(), context);
throw Failure();
}
// update chunk offset table
if(writeChunkByChunk) {
updateStatus("Updating chunk offset table for each track ...");
for(size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
const auto &track = tracks()[trackIndex];
const auto &chunkOffsetTable = get<1>(trackInfos[trackIndex]);
if(track->chunkCount() == chunkOffsetTable.size()) {
track->updateChunkOffsets(chunkOffsetTable);
} else {
addNotification(NotificationType::Critical, "Unable to update chunk offsets of track " + numberToString(trackIndex + 1) + ": Number of chunks in the output file differs from the number of chunks in the orignal file.", context);
throw Failure();
}
}
} else {
updateOffsets(origMediaDataOffsets, newMediaDataOffsets);
}
}
updatePercentage(100.0);
// flush output stream
outputStream.flush();
// handle errors (which might have been occured after renaming/creating backup file)
} catch(const OperationAbortedException &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
setStream(outputStream);
addNotification(NotificationType::Information, "Rewriting the file to apply changed tag information has been aborted.", context);
try {
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
addNotification(NotificationType::Information, "The original file has been restored.", context);
} catch(const ios_base::failure &ex) {
addNotification(NotificationType::Critical, ex.what(), context);
}
} else {
updateOffsets(origMdatOffsets, newMdatOffsets);
addNotification(NotificationType::Information, "Applying new tag information has been aborted.", context);
}
updatePercentage(100.0);
// dispose no longer used resources and flush output file stream
outputStream.flush();
} catch(OperationAbortedException &) {
setStream(outputStream);
m_tracksParsed = false;
m_headerParsed = false;
m_firstElement.reset();
m_tracks.clear();
addNotification(NotificationType::Information, "Rewriting the file to apply changed tag information has been aborted.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
throw;
} catch(Failure &) {
setStream(outputStream);
m_tracksParsed = false;
m_headerParsed = false;
m_firstElement.reset();
m_tracks.clear();
addNotification(NotificationType::Critical, "Rewriting the file to apply changed tag information failed.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
} catch(const Failure &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
setStream(outputStream);
addNotification(NotificationType::Critical, "Rewriting the file to apply changed tag information failed.", context);
try {
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
addNotification(NotificationType::Information, "The original file has been restored.", context);
} catch(const ios_base::failure &ex) {
addNotification(NotificationType::Critical, ex.what(), context);
}
} else {
addNotification(NotificationType::Critical, "Applying new tag information failed.", context);
}
throw;
} catch(ios_base::failure &) {
setStream(outputStream);
m_tracksParsed = false;
m_headerParsed = false;
m_firstElement.reset();
m_tracks.clear();
addNotification(NotificationType::Critical, "An IO error occured when rewriting the file to apply changed tag information.", context);
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
} catch(const ios_base::failure &) {
reset();
if(&stream() != &outputStream) {
// a temp/backup file has been created -> restore original file
setStream(outputStream);
addNotification(NotificationType::Critical, "An IO error occured when rewriting the file to apply changed tag information.", context);
try {
BackupHelper::restoreOriginalFileFromBackupFile(fileInfo().path(), backupPath, outputStream, backupStream);
addNotification(NotificationType::Information, "The original file has been restored.", context);
} catch(const ios_base::failure &ex) {
addNotification(NotificationType::Critical, ex.what(), context);
}
} else {
addNotification(NotificationType::Critical, "An IO error occured when applying tag information.", context);
}
throw;
}
// TODO: reduce code duplication
}
/*!

View File

@ -945,6 +945,14 @@ void Mp4Track::makeTrack()
trakAtom().copyEntirely(outputStream());
}
/*!
 * \brief Returns the number of bytes written when calling makeTrack().
 *
 * \remarks The track is written by copying the "trak"-atom entirely, hence
 *          the required size equals the total size of that atom.
 */
uint64 Mp4Track::requiredSize() const
{
    const auto trakAtomSize = m_trakAtom->totalSize();
    return trakAtomSize;
}
/*!
* \brief Makes the track header (tkhd atom) for the track. The data is written to the assigned output stream
* at the current position.

View File

@ -149,6 +149,7 @@ public:
// methods to make the track header
void makeTrack();
uint64 requiredSize() const;
void makeTrackHeader();
void makeMedia();
void makeMediaInfo();