liveMedia/QuickTimeFileSink.cpp

00001 /**********
00002 This library is free software; you can redistribute it and/or modify it under
00003 the terms of the GNU Lesser General Public License as published by the
00004 Free Software Foundation; either version 2.1 of the License, or (at your
00005 option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
00006 
00007 This library is distributed in the hope that it will be useful, but WITHOUT
00008 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00009 FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
00010 more details.
00011 
00012 You should have received a copy of the GNU Lesser General Public License
00013 along with this library; if not, write to the Free Software Foundation, Inc.,
00014 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
00015 **********/
00016 // "liveMedia"
00017 // Copyright (c) 1996-2014 Live Networks, Inc.  All rights reserved.
00018 // A sink that generates a QuickTime file from a composite media session
00019 // Implementation
00020 
00021 #include "QuickTimeFileSink.hh"
00022 #include "QuickTimeGenericRTPSource.hh"
00023 #include "GroupsockHelper.hh"
00024 #include "InputFile.hh"
00025 #include "OutputFile.hh"
00026 #include "H263plusVideoRTPSource.hh" // for the special header
00027 #include "MPEG4GenericRTPSource.hh" // for "samplingFrequencyFromAudioSpecificConfig()"
00028 #include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
00029 #include "Base64.hh"
00030 
00031 #include <ctype.h>
00032 
00033 #define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )
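// Note: fourChar() packs four characters into a 32-bit value with the first character in the
// most-significant byte (the big-endian 'FourCC' form), so that, e.g.,
// addWord(fourChar('v','i','d','e')) writes the bytes 'v','i','d','e' in that order.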
00034 
00035 #define H264_IDR_FRAME 0x65  // NAL unit header: forbidden_zero_bit == 0, nal_ref_idc == 3, nal_unit_type == 5 (IDR slice)
00036 
00038 // Structures used to represent the I/O state of each input 'subsession':
00039 
00040 class ChunkDescriptor {
00041 public:
00042   ChunkDescriptor(int64_t offsetInFile, unsigned size,
00043                   unsigned frameSize, unsigned frameDuration,
00044                   struct timeval presentationTime);
00045 
00046   ChunkDescriptor* extendChunk(int64_t newOffsetInFile, unsigned newSize,
00047                                unsigned newFrameSize,
00048                                unsigned newFrameDuration,
00049                                struct timeval newPresentationTime);
00050       // this may end up allocating a new chunk instead
00051 public:
00052   ChunkDescriptor* fNextChunk;
00053   int64_t fOffsetInFile;
00054   unsigned fNumFrames;
00055   unsigned fFrameSize;
00056   unsigned fFrameDuration;
00057   struct timeval fPresentationTime; // of the start of the data
00058 };
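// (Each ChunkDescriptor describes a run of contiguous frames in the output file that share the
// same frame size and duration.  "useFrame1()" below either extends the tail chunk or starts a
// new one, and the resulting per-track chunk list is walked later, e.g. in "setFinalQTstate()",
// when the 'moov' metadata is generated.)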
00059 
00060 class SubsessionBuffer {
00061 public:
00062   SubsessionBuffer(unsigned bufferSize)
00063     : fBufferSize(bufferSize) {
00064     reset();
00065     fData = new unsigned char[bufferSize];
00066   }
00067   virtual ~SubsessionBuffer() { delete[] fData; }
00068   void reset() { fBytesInUse = 0; }
00069   void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }
00070 
00071   unsigned char* dataStart() { return &fData[0]; }
00072   unsigned char* dataEnd() { return &fData[fBytesInUse]; }
00073   unsigned bytesInUse() const { return fBytesInUse; }
00074   unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }
00075 
00076   void setPresentationTime(struct timeval const& presentationTime) {
00077     fPresentationTime = presentationTime;
00078   }
00079   struct timeval const& presentationTime() const {return fPresentationTime;}
00080 
00081 private:
00082   unsigned fBufferSize;
00083   struct timeval fPresentationTime;
00084   unsigned char* fData;
00085   unsigned fBytesInUse;
00086 };
00087 
00088 class SyncFrame {
00089 public:
00090   SyncFrame(unsigned frameNum);
00091 
00092 public:
00093   class SyncFrame *nextSyncFrame;
00094   unsigned sfFrameNum;  
00095 };
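// (A SyncFrame records the sample number at which an H.264 IDR frame begins; the list is built
// in "useFrame()" below, and is presumably what drives the track's sync-sample table when the
// metadata atoms are written.)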
00096 
00097 // A 64-bit counter, used below:
00098 class Count64 {
00099 public:
00100   Count64()
00101     : hi(0), lo(0) {
00102   }
00103 
00104   void operator+=(unsigned arg);
00105 
00106   u_int32_t hi, lo;
00107 };
00108 
00109 class SubsessionIOState {
00110 public:
00111   SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
00112   virtual ~SubsessionIOState();
00113 
00114   Boolean setQTstate();
00115   void setFinalQTstate();
00116 
00117   void afterGettingFrame(unsigned packetDataSize,
00118                          struct timeval presentationTime);
00119   void onSourceClosure();
00120 
00121   Boolean syncOK(struct timeval presentationTime);
00122       // returns true iff data is usable despite a sync check
00123 
00124   static void setHintTrack(SubsessionIOState* hintedTrack,
00125                            SubsessionIOState* hintTrack);
00126   Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
00127   Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }
00128 
00129   UsageEnvironment& envir() const { return fOurSink.envir(); }
00130 
00131 public:
00132   static unsigned fCurrentTrackNumber;
00133   unsigned fTrackID;
00134   SubsessionIOState* fHintTrackForUs; SubsessionIOState* fTrackHintedByUs;
00135 
00136   SubsessionBuffer *fBuffer, *fPrevBuffer;
00137   QuickTimeFileSink& fOurSink;
00138   MediaSubsession& fOurSubsession;
00139 
00140   unsigned short fLastPacketRTPSeqNum;
00141   Boolean fOurSourceIsActive;
00142 
00143   Boolean fHaveBeenSynced; // used in synchronizing with other streams
00144   struct timeval fSyncTime;
00145 
00146   Boolean fQTEnableTrack;
00147   unsigned fQTcomponentSubtype;
00148   char const* fQTcomponentName;
00149   typedef unsigned (QuickTimeFileSink::*atomCreationFunc)();
00150   atomCreationFunc fQTMediaInformationAtomCreator;
00151   atomCreationFunc fQTMediaDataAtomCreator;
00152   char const* fQTAudioDataType;
00153   unsigned short fQTSoundSampleVersion;
00154   unsigned fQTTimeScale;
00155   unsigned fQTTimeUnitsPerSample;
00156   unsigned fQTBytesPerFrame;
00157   unsigned fQTSamplesPerFrame;
00158   // These next fields are derived from the ones above,
00159   // plus the information from each chunk:
00160   unsigned fQTTotNumSamples;
00161   unsigned fQTDurationM; // in media time units
00162   unsigned fQTDurationT; // in track time units
00163   int64_t fTKHD_durationPosn;
00164       // position of the duration in the output 'tkhd' atom
00165   unsigned fQTInitialOffsetDuration;
00166       // if there's a pause at the beginning
00167 
00168   ChunkDescriptor *fHeadChunk, *fTailChunk;
00169   unsigned fNumChunks;
00170   SyncFrame *fHeadSyncFrame, *fTailSyncFrame;
00171 
00172   // Counters to be used in the hint track's 'udta'/'hinf' atom:
00173   struct hinf {
00174     Count64 trpy;
00175     Count64 nump;
00176     Count64 tpyl;
00177     // Is 'maxr' needed? Computing this would be a PITA. #####
00178     Count64 dmed;
00179     Count64 dimm;
00180     // 'drep' is always 0
00181     // 'tmin' and 'tmax' are always 0
00182     unsigned pmax;
00183     unsigned dmax;
00184   } fHINF;
00185 
00186 private:
00187   void useFrame(SubsessionBuffer& buffer);
00188   void useFrameForHinting(unsigned frameSize,
00189                           struct timeval presentationTime,
00190                           unsigned startSampleNumber);
00191 
00192   // used by the above two routines:
00193   unsigned useFrame1(unsigned sourceDataSize,
00194                      struct timeval presentationTime,
00195                      unsigned frameDuration, int64_t destFileOffset);
00196       // returns the number of samples in this data
00197 
00198 private:
00199   // A structure used for temporarily storing frame state:
00200   struct {
00201     unsigned frameSize;
00202     struct timeval presentationTime;
00203     int64_t destFileOffset; // used for non-hint tracks only
00204 
00205     // The remaining fields are used for hint tracks only:
00206     unsigned startSampleNumber;
00207     unsigned short seqNum;
00208     unsigned rtpHeader;
00209     unsigned char numSpecialHeaders; // used when our RTP source has special headers
00210     unsigned specialHeaderBytesLength; // ditto
00211     unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto
00212     unsigned packetSizes[256];
00213   } fPrevFrameState;
00214 };
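// (One SubsessionIOState is created per subsession for its media track; if hint tracks were
// requested, a second SubsessionIOState is created for the same subsession and the two are
// linked symmetrically via "setHintTrack()", so that "fHintTrackForUs" and "fTrackHintedByUs"
// identify the pairing in both directions.)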
00215 
00216 
00218 
00219 QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env,
00220                                      MediaSession& inputSession,
00221                                      char const* outputFileName,
00222                                      unsigned bufferSize,
00223                                      unsigned short movieWidth,
00224                                      unsigned short movieHeight,
00225                                      unsigned movieFPS,
00226                                      Boolean packetLossCompensate,
00227                                      Boolean syncStreams,
00228                                      Boolean generateHintTracks,
00229                                      Boolean generateMP4Format)
00230   : Medium(env), fInputSession(inputSession),
00231     fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
00232     fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format),
00233     fAreCurrentlyBeingPlayed(False),
00234     fLargestRTPtimestampFrequency(0),
00235     fNumSubsessions(0), fNumSyncedSubsessions(0),
00236     fHaveCompletedOutputFile(False),
00237     fMovieWidth(movieWidth), fMovieHeight(movieHeight),
00238     fMovieFPS(movieFPS), fMaxTrackDurationM(0) {
00239   fOutFid = OpenOutputFile(env, outputFileName);
00240   if (fOutFid == NULL) return;
00241 
00242   fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0;
00243   fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0);
00244 
00245   // Set up I/O state for each input subsession:
00246   MediaSubsessionIterator iter(fInputSession);
00247   MediaSubsession* subsession;
00248   while ((subsession = iter.next()) != NULL) {
00249     // Ignore subsessions without a data source:
00250     FramedSource* subsessionSource = subsession->readSource();
00251     if (subsessionSource == NULL) continue;
00252 
00253     // If the subsession's SDP description specified screen dimension
00254     // or frame rate parameters, then use these.  (Note that this must
00255     // be done before the call to "setQTstate()" below.)
00256     if (subsession->videoWidth() != 0) {
00257       fMovieWidth = subsession->videoWidth();
00258     }
00259     if (subsession->videoHeight() != 0) {
00260       fMovieHeight = subsession->videoHeight();
00261     }
00262     if (subsession->videoFPS() != 0) {
00263       fMovieFPS = subsession->videoFPS();
00264     }
00265 
00266     SubsessionIOState* ioState
00267       = new SubsessionIOState(*this, *subsession);
00268     if (ioState == NULL || !ioState->setQTstate()) {
00269       // We're not able to output a QuickTime track for this subsession
00270       delete ioState; ioState = NULL;
00271       continue;
00272     }
00273     subsession->miscPtr = (void*)ioState;
00274 
00275     if (generateHintTracks) {
00276       // Also create a hint track for this track:
00277       SubsessionIOState* hintTrack
00278         = new SubsessionIOState(*this, *subsession);
00279       SubsessionIOState::setHintTrack(ioState, hintTrack);
00280       if (!hintTrack->setQTstate()) {
00281         delete hintTrack;
00282         SubsessionIOState::setHintTrack(ioState, NULL);
00283       }
00284     }
00285 
00286     // Also set a 'BYE' handler for this subsession's RTCP instance:
00287     if (subsession->rtcpInstance() != NULL) {
00288       subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
00289     }
00290 
00291     unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency();
00292     if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) {
00293       fLargestRTPtimestampFrequency = rtpTimestampFrequency;
00294     }
00295 
00296     ++fNumSubsessions;
00297   }
00298 
00299   // Use the current time as the file's creation and modification
00300   // time.  Use Apple's time format: seconds since January 1, 1904
00301 
00302   gettimeofday(&fStartTime, NULL);
00303   fAppleCreationTime = fStartTime.tv_sec - 0x83dac000;
00304 
00305   // Begin by writing a "mdat" atom at the start of the file.
00306   // (Later, when we've finished copying data to the file, we'll come
00307   // back and fill in its size.)
00308   fMDATposition = TellFile64(fOutFid);
00309   addAtomHeader64("mdat");
00310   // Remember where the 8-byte extended-size field begins (just after the 4-byte size
00310   // marker and the 4-byte 'mdat' name), so that "completeOutputFile()" can fill it in later:
00311   fMDATposition += 8;
00312 }
00313 
00314 QuickTimeFileSink::~QuickTimeFileSink() {
00315   completeOutputFile();
00316 
00317   // Then, stop streaming and delete each active "SubsessionIOState":
00318   MediaSubsessionIterator iter(fInputSession);
00319   MediaSubsession* subsession;
00320   while ((subsession = iter.next()) != NULL) {
00321     if (subsession->readSource() != NULL) subsession->readSource()->stopGettingFrames();
00322 
00323     SubsessionIOState* ioState
00324       = (SubsessionIOState*)(subsession->miscPtr);
00325     if (ioState == NULL) continue;
00326 
00327     delete ioState->fHintTrackForUs; // if any
00328     delete ioState;
00329   }
00330 
00331   // Finally, close our output file:
00332   CloseOutputFile(fOutFid);
00333 }
00334 
00335 QuickTimeFileSink*
00336 QuickTimeFileSink::createNew(UsageEnvironment& env,
00337                              MediaSession& inputSession,
00338                              char const* outputFileName,
00339                              unsigned bufferSize,
00340                              unsigned short movieWidth,
00341                              unsigned short movieHeight,
00342                              unsigned movieFPS,
00343                              Boolean packetLossCompensate,
00344                              Boolean syncStreams,
00345                              Boolean generateHintTracks,
00346                              Boolean generateMP4Format) {
00347   QuickTimeFileSink* newSink = 
00348     new QuickTimeFileSink(env, inputSession, outputFileName, bufferSize, movieWidth, movieHeight, movieFPS,
00349                           packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format);
00350   if (newSink == NULL || newSink->fOutFid == NULL) {
00351     Medium::close(newSink);
00352     return NULL;
00353   }
00354 
00355   return newSink;
00356 }
00357 
00358 Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc,
00359                                         void* afterClientData) {
00360   // Make sure we're not already being played:
00361   if (fAreCurrentlyBeingPlayed) {
00362     envir().setResultMsg("This sink has already been played");
00363     return False;
00364   }
00365 
00366   fAreCurrentlyBeingPlayed = True;
00367   fAfterFunc = afterFunc;
00368   fAfterClientData = afterClientData;
00369 
00370   return continuePlaying();
00371 }
00372 
00373 Boolean QuickTimeFileSink::continuePlaying() {
00374   // Run through each of our input session's 'subsessions',
00375   // asking for a frame from each one:
00376   Boolean haveActiveSubsessions = False;
00377   MediaSubsessionIterator iter(fInputSession);
00378   MediaSubsession* subsession;
00379   while ((subsession = iter.next()) != NULL) {
00380     FramedSource* subsessionSource = subsession->readSource();
00381     if (subsessionSource == NULL) continue;
00382 
00383     if (subsessionSource->isCurrentlyAwaitingData()) continue;
00384 
00385     SubsessionIOState* ioState
00386       = (SubsessionIOState*)(subsession->miscPtr);
00387     if (ioState == NULL) continue;
00388 
00389     haveActiveSubsessions = True;
00390     unsigned char* toPtr = ioState->fBuffer->dataEnd();
00391     unsigned toSize = ioState->fBuffer->bytesAvailable();
00392     subsessionSource->getNextFrame(toPtr, toSize,
00393                                    afterGettingFrame, ioState,
00394                                    onSourceClosure, ioState);
00395   }
00396   if (!haveActiveSubsessions) {
00397     envir().setResultMsg("No subsessions are currently active");
00398     return False;
00399   }
00400 
00401   return True;
00402 }
00403 
00404 void QuickTimeFileSink
00405 ::afterGettingFrame(void* clientData, unsigned packetDataSize,
00406                     unsigned numTruncatedBytes,
00407                     struct timeval presentationTime,
00408                     unsigned /*durationInMicroseconds*/) {
00409   SubsessionIOState* ioState = (SubsessionIOState*)clientData;
00410   if (!ioState->syncOK(presentationTime)) {
00411     // Ignore this data:
00412     ioState->fOurSink.continuePlaying();
00413     return;
00414   }
00415   if (numTruncatedBytes > 0) {
00416     ioState->envir() << "QuickTimeFileSink::afterGettingFrame(): The input frame data was too large for our buffer.  "
00417                      << numTruncatedBytes
00418                      << " bytes of trailing data were dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
00419   }
00420   ioState->afterGettingFrame(packetDataSize, presentationTime);
00421 }
00422 
00423 void QuickTimeFileSink::onSourceClosure(void* clientData) {
00424   SubsessionIOState* ioState = (SubsessionIOState*)clientData;
00425   ioState->onSourceClosure();
00426 }
00427 
00428 void QuickTimeFileSink::onSourceClosure1() {
00429   // Check whether *all* of the subsession sources have closed.
00430   // If not, do nothing for now:
00431   MediaSubsessionIterator iter(fInputSession);
00432   MediaSubsession* subsession;
00433   while ((subsession = iter.next()) != NULL) {
00434     SubsessionIOState* ioState
00435       = (SubsessionIOState*)(subsession->miscPtr);
00436     if (ioState == NULL) continue;
00437 
00438     if (ioState->fOurSourceIsActive) return; // this source hasn't closed
00439   }
00440 
00441   completeOutputFile();
00442 
00443   // Call our specified 'after' function:
00444   if (fAfterFunc != NULL) {
00445     (*fAfterFunc)(fAfterClientData);
00446   }
00447 }
00448 
00449 void QuickTimeFileSink::onRTCPBye(void* clientData) {
00450   SubsessionIOState* ioState = (SubsessionIOState*)clientData;
00451 
00452   struct timeval timeNow;
00453   gettimeofday(&timeNow, NULL);
00454   unsigned secsDiff
00455     = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;
00456 
00457   MediaSubsession& subsession = ioState->fOurSubsession;
00458   ioState->envir() << "Received RTCP \"BYE\" on \""
00459                    << subsession.mediumName()
00460                    << "/" << subsession.codecName()
00461                    << "\" subsession (after "
00462                    << secsDiff << " seconds)\n";
00463 
00464   // Handle the reception of an RTCP "BYE" as if the source had closed:
00465   ioState->onSourceClosure();
00466 }
00467 
00468 static Boolean timevalGE(struct timeval const& tv1,
00469                          struct timeval const& tv2) {
00470   return (unsigned)tv1.tv_sec > (unsigned)tv2.tv_sec
00471     || (tv1.tv_sec == tv2.tv_sec
00472         && (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec);
00473 }
00474 
00475 void QuickTimeFileSink::completeOutputFile() {
00476   if (fHaveCompletedOutputFile || fOutFid == NULL) return;
00477 
00478   // Begin by filling in the initial "mdat" atom with the current
00479   // file size:
00480   int64_t curFileSize = TellFile64(fOutFid);
00481   setWord64(fMDATposition, (u_int64_t)curFileSize);
00482 
00483   // Then, note the time of the first received data:
00484   MediaSubsessionIterator iter(fInputSession);
00485   MediaSubsession* subsession;
00486   while ((subsession = iter.next()) != NULL) {
00487     SubsessionIOState* ioState
00488       = (SubsessionIOState*)(subsession->miscPtr);
00489     if (ioState == NULL) continue;
00490 
00491     ChunkDescriptor* const headChunk = ioState->fHeadChunk;
00492     if (headChunk != NULL
00493         && timevalGE(fFirstDataTime, headChunk->fPresentationTime)) {
00494       fFirstDataTime = headChunk->fPresentationTime;
00495     }
00496   }
00497 
00498   // Then, update the QuickTime-specific state for each active track:
00499   iter.reset();
00500   while ((subsession = iter.next()) != NULL) {
00501     SubsessionIOState* ioState
00502       = (SubsessionIOState*)(subsession->miscPtr);
00503     if (ioState == NULL) continue;
00504 
00505     ioState->setFinalQTstate();
00506     // Do the same for a hint track (if any):
00507     if (ioState->hasHintTrack()) {
00508       ioState->fHintTrackForUs->setFinalQTstate();
00509     }
00510   }
00511 
00512   if (fGenerateMP4Format) {
00513     // Begin with a "ftyp" atom:
00514     addAtom_ftyp();
00515   }
00516 
00517   // Then, add a "moov" atom for the file metadata:
00518   addAtom_moov();
00519 
00520   // We're done:
00521   fHaveCompletedOutputFile = True;
00522 }
00523 
00524 
00526 
00527 unsigned SubsessionIOState::fCurrentTrackNumber = 0;
00528 
00529 SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink,
00530                                      MediaSubsession& subsession)
00531   : fHintTrackForUs(NULL), fTrackHintedByUs(NULL),
00532     fOurSink(sink), fOurSubsession(subsession),
00533     fLastPacketRTPSeqNum(0), fHaveBeenSynced(False), fQTTotNumSamples(0), 
00534     fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0),
00535     fHeadSyncFrame(NULL), fTailSyncFrame(NULL) {
00536   fTrackID = ++fCurrentTrackNumber;
00537 
00538   fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
00539   fPrevBuffer = sink.fPacketLossCompensate
00540     ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;
00541 
00542   FramedSource* subsessionSource = subsession.readSource();
00543   fOurSourceIsActive = subsessionSource != NULL;
00544 
00545   fPrevFrameState.presentationTime.tv_sec = 0;
00546   fPrevFrameState.presentationTime.tv_usec = 0;
00547   fPrevFrameState.seqNum = 0;
00548 }
00549 
00550 SubsessionIOState::~SubsessionIOState() {
00551   delete fBuffer; delete fPrevBuffer;
00552 
00553   // Delete the list of chunk descriptors:
00554   ChunkDescriptor* chunk = fHeadChunk;
00555   while (chunk != NULL) {
00556     ChunkDescriptor* next = chunk->fNextChunk;
00557     delete chunk;
00558     chunk = next;
00559   }
00560 
00561   // Delete the list of sync frames:
00562   SyncFrame* syncFrame = fHeadSyncFrame;
00563   while (syncFrame != NULL) {
00564     SyncFrame* next = syncFrame->nextSyncFrame;
00565     delete syncFrame;
00566     syncFrame = next;
00567   }
00568 }
00569 
00570 Boolean SubsessionIOState::setQTstate() {
00571   char const* noCodecWarning1 = "Warning: We don't implement a QuickTime ";
00572   char const* noCodecWarning2 = " Media Data Type for the \"";
00573   char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead.  A separate, codec-specific editing pass will be needed before this track can be played.\n";
00574 
00575   do {
00576     fQTEnableTrack = True; // enable this track in the movie by default
00577     fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default
00578     fQTTimeUnitsPerSample = 1; // by default
00579     fQTBytesPerFrame = 0;
00580         // by default - indicates that the whole packet data is a frame
00581     fQTSamplesPerFrame = 1; // by default
00582 
00583     // Make sure our subsession's medium is one that we know how to
00584     // represent in a QuickTime file:
00585     if (isHintTrack()) {
00586       // Hint tracks are treated specially
00587       fQTEnableTrack = False; // hint tracks are marked as inactive
00588       fQTcomponentSubtype = fourChar('h','i','n','t');
00589       fQTcomponentName = "hint media handler";
00590       fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd;
00591       fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp;
00592     } else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) {
00593       fQTcomponentSubtype = fourChar('s','o','u','n');
00594       fQTcomponentName = "Apple Sound Media Handler";
00595       fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd;
00596       fQTMediaDataAtomCreator
00597         = &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default
00598       fQTSoundSampleVersion = 0; // by default
00599 
00600       // Make sure that our subsession's codec is one that we can handle:
00601       if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
00602           strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
00603         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
00604       } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
00605         fQTAudioDataType = "ulaw";
00606         fQTBytesPerFrame = 1;
00607       } else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) {
00608         fQTAudioDataType = "agsm";
00609         fQTBytesPerFrame = 33;
00610         fQTSamplesPerFrame = 160;
00611       } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
00612         fQTAudioDataType = "alaw";
00613         fQTBytesPerFrame = 1;
00614       } else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) {
00615         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp;
00616         fQTSamplesPerFrame = 160;
00617       } else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 ||
00618                  strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) {
00619         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a;
00620         fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample'
00621         // The time scale (frequency) comes from the 'config' information.
00622         // It might be different from the RTP timestamp frequency (e.g., aacPlus).
00623         unsigned frequencyFromConfig
00624           = samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config());
00625         if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig;
00626       } else {
00627         envir() << noCodecWarning1 << "Audio" << noCodecWarning2
00628                 << fOurSubsession.codecName() << noCodecWarning3;
00629         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
00630         fQTEnableTrack = False; // disable this track in the movie
00631       }
00632     } else if (strcmp(fOurSubsession.mediumName(), "video") == 0) {
00633       fQTcomponentSubtype = fourChar('v','i','d','e');
00634       fQTcomponentName = "Apple Video Media Handler";
00635       fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd;
00636 
00637       // Make sure that our subsession's codec is one that we can handle:
00638       if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
00639           strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
00640         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
00641       } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
00642                  strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
00643         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263;
00644         fQTTimeScale = 600;
00645         fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
00646       } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
00647         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_avc1;
00648         fQTTimeScale = 600;
00649         fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
00650       } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
00651         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v;
00652         fQTTimeScale = 600;
00653         fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
00654       } else {
00655         envir() << noCodecWarning1 << "Video" << noCodecWarning2
00656                 << fOurSubsession.codecName() << noCodecWarning3;
00657         fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
00658         fQTEnableTrack = False; // disable this track in the movie
00659       }
00660     } else {
00661       envir() << "Warning: We don't implement a QuickTime Media Handler for media type \""
00662               << fOurSubsession.mediumName() << "\"";
00663       break;
00664     }
00665 
00666 #ifdef QT_SUPPORT_PARTIALLY_ONLY
00667     envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \""
00668             << fOurSubsession.mediumName() << "/" << fOurSubsession.codecName()
00669             << "\" track, so we'll disable this track in the movie.  A separate, codec-specific editing pass will be needed before this track can be played\n";
00670     fQTEnableTrack = False; // disable this track in the movie
00671 #endif
00672 
00673     return True;
00674   } while (0);
00675 
00676   envir() << ", so a track for the \"" << fOurSubsession.mediumName()
00677           << "/" << fOurSubsession.codecName()
00678           << "\" subsession will not be included in the output QuickTime file\n";
00679   return False;
00680 }
00681 
00682 void SubsessionIOState::setFinalQTstate() {
00683   // Compute derived parameters, by running through the list of chunks:
00684   fQTDurationT = 0;
00685 
00686   ChunkDescriptor* chunk = fHeadChunk;
00687   while (chunk != NULL) {
00688     unsigned const numFrames = chunk->fNumFrames;
00689     unsigned const dur = numFrames*chunk->fFrameDuration;
00690     fQTDurationT += dur;
00691 
00692     chunk = chunk->fNextChunk;
00693   }
00694 
00695   // Convert this duration from track to movie time scale:
00696   double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale;
00697   fQTDurationM = (unsigned)(fQTDurationT*scaleFactor);
00698 
00699   if (fQTDurationM > fOurSink.fMaxTrackDurationM) {
00700     fOurSink.fMaxTrackDurationM = fQTDurationM;
00701   }
00702 }
00703 
00704 void SubsessionIOState::afterGettingFrame(unsigned packetDataSize,
00705                                           struct timeval presentationTime) {
00706   // Begin by checking whether there was a gap in the RTP stream.
00707   // If so, try to compensate for this (if desired):
00708   unsigned short rtpSeqNum
00709     = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
00710   if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
00711     short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
00712     for (short i = 1; i < seqNumGap; ++i) {
00713       // Insert a copy of the previous frame, to compensate for the loss:
00714       useFrame(*fPrevBuffer);
00715     }
00716   }
00717   fLastPacketRTPSeqNum = rtpSeqNum;
00718 
00719   // Now, continue working with the frame that we just got
00720   if (fBuffer->bytesInUse() == 0) {
00721     fBuffer->setPresentationTime(presentationTime);
00722   }
00723   fBuffer->addBytes(packetDataSize);
00724 
00725   // If our RTP source is a "QuickTimeGenericRTPSource", then
00726   // use its 'qtState' to set some parameters that we need:
00727   if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia){
00728     QuickTimeGenericRTPSource* rtpSource
00729       = (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource();
00730     QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
00731     fQTTimeScale = qtState.timescale;
00732     if (qtState.width != 0) {
00733       fOurSink.fMovieWidth = qtState.width;
00734     }
00735     if (qtState.height != 0) {
00736       fOurSink.fMovieHeight = qtState.height;
00737     }
00738 
00739     // Also, if the media type in the "sdAtom" is one that we recognize
00740     // as having special parameters, then set them here:
00741     if (qtState.sdAtomSize >= 8) {
00742       char const* atom = qtState.sdAtom;
00743       unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]);
00744       switch (mediaType) {
00745       case fourChar('a','g','s','m'): {
00746         fQTBytesPerFrame = 33;
00747         fQTSamplesPerFrame = 160;
00748         break;
00749       }
00750       case fourChar('Q','c','l','p'): {
00751         fQTBytesPerFrame = 35;
00752         fQTSamplesPerFrame = 160;
00753         break;
00754       }
00755       case fourChar('H','c','l','p'): {
00756         fQTBytesPerFrame = 17;
00757         fQTSamplesPerFrame = 160;
00758         break;
00759       }
00760       case fourChar('h','2','6','3'): {
00761         fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
00762         break;
00763       }
00764       }
00765     }
00766   } else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) {
00767     // For QCELP data, make a note of the frame size (even though it's the
00768     // same as the packet data size), because it varies depending on the
00769     // 'rate' of the stream, and this size gets used later when setting up
00770     // the 'Qclp' QuickTime atom:
00771     fQTBytesPerFrame = packetDataSize;
00772   }
00773 
00774   useFrame(*fBuffer);
00775   if (fOurSink.fPacketLossCompensate) {
00776     // Save this frame, in case we need it for recovery:
00777     SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
00778     fPrevBuffer = fBuffer;
00779     fBuffer = tmp;
00780   }
00781   fBuffer->reset(); // for the next input
00782 
00783   // Now, try getting more frames:
00784   fOurSink.continuePlaying();
00785 }
00786 
00787 void SubsessionIOState::useFrame(SubsessionBuffer& buffer) {
00788   unsigned char* const frameSource = buffer.dataStart();
00789   unsigned const frameSize = buffer.bytesInUse();
00790   struct timeval const& presentationTime = buffer.presentationTime();
00791   int64_t const destFileOffset = TellFile64(fOurSink.fOutFid);
00792   unsigned sampleNumberOfFrameStart = fQTTotNumSamples + 1;
00793   Boolean avcHack = fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1;
00794 
00795   // If we're not syncing streams, or this subsession is not video, then
00796   // just give this frame a fixed duration:
00797   if (!fOurSink.fSyncStreams
00798       || fQTcomponentSubtype != fourChar('v','i','d','e')) {
00799     unsigned const frameDuration = fQTTimeUnitsPerSample*fQTSamplesPerFrame;
00800     unsigned frameSizeToUse = frameSize;
00801     if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
00802 
00803     fQTTotNumSamples += useFrame1(frameSizeToUse, presentationTime, frameDuration, destFileOffset);
00804   } else {
00805     // For synced video streams, we use the difference between successive
00806     // frames' presentation times as the 'frame duration'.  So, record
00807     // information about the *previous* frame:
00808     struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
00809     if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
00810       // There has been a previous frame.
00811       double duration = (presentationTime.tv_sec - ppt.tv_sec)
00812         + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
00813       if (duration < 0.0) duration = 0.0;
00814       unsigned frameDuration
00815         = (unsigned)((2*duration*fQTTimeScale+1)/2); // round
00816       unsigned frameSizeToUse = fPrevFrameState.frameSize;
00817       if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix
00818 
00819       unsigned numSamples
00820         = useFrame1(frameSizeToUse, ppt, frameDuration, fPrevFrameState.destFileOffset);
00821       fQTTotNumSamples += numSamples;
00822       sampleNumberOfFrameStart = fQTTotNumSamples + 1;
00823     }
00824 
00825     if (avcHack && (*frameSource == H264_IDR_FRAME)) {
00826       SyncFrame* newSyncFrame = new SyncFrame(fQTTotNumSamples + 1);
00827       if (fTailSyncFrame == NULL) {
00828         fHeadSyncFrame = newSyncFrame;
00829       } else {
00830         fTailSyncFrame->nextSyncFrame = newSyncFrame;
00831       }
00832       fTailSyncFrame = newSyncFrame;
00833     }
00834 
00835     // Remember the current frame for next time:
00836     fPrevFrameState.frameSize = frameSize;
00837     fPrevFrameState.presentationTime = presentationTime;
00838     fPrevFrameState.destFileOffset = destFileOffset;
00839   }
00840 
00841   if (avcHack) fOurSink.addWord(frameSize);
00842 
00843   // Write the data into the file:
00844   fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);
00845 
00846   // If we have a hint track, then write to it also:
00847   if (hasHintTrack()) {
00848     // Because presentation times are used for RTP packet timestamps,
00849     // we don't start writing to the hint track until we've been synced:
00850     if (!fHaveBeenSynced) {
00851       fHaveBeenSynced
00852         = fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP();
00853     }
00854     if (fHaveBeenSynced) {
00855       fHintTrackForUs->useFrameForHinting(frameSize, presentationTime,
00856                                           sampleNumberOfFrameStart);
00857     }
00858   }
00859 }
00860 
00861 void SubsessionIOState::useFrameForHinting(unsigned frameSize,
00862                                            struct timeval presentationTime,
00863                                            unsigned startSampleNumber) {
00864   // At this point, we have a single, combined frame - not individual packets.
00865   // For the hint track, we need to split the frame back up into separate packets.
00866   // However, for some RTP sources we also need to reuse the special
00867   // header bytes that were at the start of each of the RTP packets.
00868   Boolean hack263 = strcmp(fOurSubsession.codecName(), "H263-1998") == 0;
00869   Boolean hackm4a_generic = strcmp(fOurSubsession.mediumName(), "audio") == 0
00870     && strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0;
00871   Boolean hackm4a_latm = strcmp(fOurSubsession.mediumName(), "audio") == 0
00872     && strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0;
00873   Boolean hackm4a = hackm4a_generic || hackm4a_latm;
00874   Boolean haveSpecialHeaders = (hack263 || hackm4a_generic);
00875 
00876   // If there has been a previous frame, then output a 'hint sample' for it.
00877   // (We use the current frame's presentation time to compute the previous
00878   // hint sample's duration.)
00879   RTPSource* const rs = fOurSubsession.rtpSource(); // abbrev
00880   struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
00881   if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
00882     double duration = (presentationTime.tv_sec - ppt.tv_sec)
00883       + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
00884     if (duration < 0.0) duration = 0.0;
00885     unsigned msDuration = (unsigned)(duration*1000); // milliseconds
00886     if (msDuration > fHINF.dmax) fHINF.dmax = msDuration;
00887     unsigned hintSampleDuration
00888       = (unsigned)((2*duration*fQTTimeScale+1)/2); // round
00889     if (hackm4a) {
00890       // Because multiple AAC frames can appear in an RTP packet, the presentation
00891       // times of the second and subsequent frames will not be accurate.
00892       // So, use the known "hintSampleDuration" instead:
00893       hintSampleDuration = fTrackHintedByUs->fQTTimeUnitsPerSample;
00894 
00895       // Also, if the 'time scale' was different from the RTP timestamp frequency,
00896       // (as can happen with aacPlus), then we need to scale "hintSampleDuration"
00897       // accordingly:
00898       if (fTrackHintedByUs->fQTTimeScale != fOurSubsession.rtpTimestampFrequency()) {
00899         unsigned const scalingFactor
00900           = fOurSubsession.rtpTimestampFrequency()/fTrackHintedByUs->fQTTimeScale;
00901         hintSampleDuration *= scalingFactor;
00902       }
00903     }
00904 
00905     int64_t const hintSampleDestFileOffset = TellFile64(fOurSink.fOutFid);
00906 
00907     unsigned const maxPacketSize = 1450;
00908     unsigned short numPTEntries
00909       = (fPrevFrameState.frameSize + (maxPacketSize-1))/maxPacketSize; // normal case
00910     unsigned char* immediateDataPtr = NULL;
00911     unsigned immediateDataBytesRemaining = 0;
00912     if (haveSpecialHeaders) { // special case
00913       numPTEntries = fPrevFrameState.numSpecialHeaders;
00914       immediateDataPtr = fPrevFrameState.specialHeaderBytes;
00915       immediateDataBytesRemaining
00916         = fPrevFrameState.specialHeaderBytesLength;
00917     }
00918     unsigned hintSampleSize
00919       = fOurSink.addHalfWord(numPTEntries);// Entry count
00920     hintSampleSize += fOurSink.addHalfWord(0x0000); // Reserved
00921 
00922     unsigned offsetWithinSample = 0;
00923     for (unsigned i = 0; i < numPTEntries; ++i) {
00924       // Output a Packet Table entry (representing a single RTP packet):
00925       unsigned short numDTEntries = 1;
00926       unsigned short seqNum = fPrevFrameState.seqNum++;
00927           // Note: This assumes that the input stream had no packet loss #####
00928       unsigned rtpHeader = fPrevFrameState.rtpHeader;
00929       if (i+1 < numPTEntries) {
00930         // This is not the last RTP packet, so clear the marker bit:
00931         rtpHeader &=~ (1<<23);
00932       }
00933       unsigned dataFrameSize = (i+1 < numPTEntries)
00934         ? maxPacketSize : fPrevFrameState.frameSize - i*maxPacketSize; // normal case
00935       unsigned sampleNumber = fPrevFrameState.startSampleNumber;
00936 
00937       unsigned char immediateDataLen = 0;
00938       if (haveSpecialHeaders) { // special case
00939         ++numDTEntries; // to include a Data Table entry for the special hdr
00940         if (immediateDataBytesRemaining > 0) {
00941           if (hack263) {
00942             immediateDataLen = *immediateDataPtr++;
00943             --immediateDataBytesRemaining;
00944             if (immediateDataLen > immediateDataBytesRemaining) {
00945               // shouldn't happen (length byte was bad)
00946               immediateDataLen = immediateDataBytesRemaining;
00947             }
00948           } else {
00949             immediateDataLen = fPrevFrameState.specialHeaderBytesLength;
00950           }
00951         }
00952         dataFrameSize = fPrevFrameState.packetSizes[i] - immediateDataLen;
00953 
00954         if (hack263) {
00955           Boolean PbitSet
00956             = immediateDataLen >= 1 && (immediateDataPtr[0]&0x4) != 0;
00957           if (PbitSet) {
00958             offsetWithinSample += 2; // to omit the two leading 0 bytes
00959           }
00960         }
00961       }
00962 
00963       // Output the Packet Table:
00964       hintSampleSize += fOurSink.addWord(0); // Relative transmission time
00965       hintSampleSize += fOurSink.addWord(rtpHeader|seqNum);
00966           // RTP header info + RTP sequence number
00967       hintSampleSize += fOurSink.addHalfWord(0x0000); // Flags
00968       hintSampleSize += fOurSink.addHalfWord(numDTEntries); // Entry count
00969       unsigned totalPacketSize = 0;
00970 
00971       // Output the Data Table:
00972       if (haveSpecialHeaders) {
00973         //   use the "Immediate Data" format (1):
00974         hintSampleSize += fOurSink.addByte(1); // Source
00975         unsigned char len = immediateDataLen > 14 ? 14 : immediateDataLen;
00976         hintSampleSize += fOurSink.addByte(len); // Length
00977         totalPacketSize += len; fHINF.dimm += len;
00978         unsigned char j;
00979         for (j = 0; j < len; ++j) {
00980           hintSampleSize += fOurSink.addByte(immediateDataPtr[j]); // Data
00981         }
00982         for (j = len; j < 14; ++j) {
00983           hintSampleSize += fOurSink.addByte(0); // Data (padding)
00984         }
00985 
00986         immediateDataPtr += immediateDataLen;
00987         immediateDataBytesRemaining -= immediateDataLen;
00988       }
00989       //   use the "Sample Data" format (2):
00990       hintSampleSize += fOurSink.addByte(2); // Source
00991       hintSampleSize += fOurSink.addByte(0); // Track ref index
00992       hintSampleSize += fOurSink.addHalfWord(dataFrameSize); // Length
00993       totalPacketSize += dataFrameSize; fHINF.dmed += dataFrameSize;
00994       hintSampleSize += fOurSink.addWord(sampleNumber); // Sample number
00995       hintSampleSize += fOurSink.addWord(offsetWithinSample); // Offset
00996       // Get "bytes|samples per compression block" from the hinted track:
00997       unsigned short const bytesPerCompressionBlock
00998         = fTrackHintedByUs->fQTBytesPerFrame;
00999       unsigned short const samplesPerCompressionBlock
01000         = fTrackHintedByUs->fQTSamplesPerFrame;
01001       hintSampleSize += fOurSink.addHalfWord(bytesPerCompressionBlock);
01002       hintSampleSize += fOurSink.addHalfWord(samplesPerCompressionBlock);
01003 
01004       offsetWithinSample += dataFrameSize;// for the next iteration (if any)
01005 
01006       // Tally statistics for this packet:
01007       fHINF.nump += 1;
01008       fHINF.tpyl += totalPacketSize;
01009       totalPacketSize += 12; // add in the size of the RTP header
01010       fHINF.trpy += totalPacketSize;
01011       if (totalPacketSize > fHINF.pmax) fHINF.pmax = totalPacketSize;
01012     }
01013 
01014     // Make note of this completed hint sample frame:
01015     fQTTotNumSamples += useFrame1(hintSampleSize, ppt, hintSampleDuration,
01016                                   hintSampleDestFileOffset);
01017   }
01018 
01019   // Remember this frame for next time:
01020   fPrevFrameState.frameSize = frameSize;
01021   fPrevFrameState.presentationTime = presentationTime;
01022   fPrevFrameState.startSampleNumber = startSampleNumber;
01023   fPrevFrameState.rtpHeader
01024     = rs->curPacketMarkerBit()<<23
01025     | (rs->rtpPayloadFormat()&0x7F)<<16;
01026   if (hack263) {
01027     H263plusVideoRTPSource* rs_263 = (H263plusVideoRTPSource*)rs;
01028     fPrevFrameState.numSpecialHeaders = rs_263->fNumSpecialHeaders;
01029     fPrevFrameState.specialHeaderBytesLength = rs_263->fSpecialHeaderBytesLength;
01030     unsigned i;
01031     for (i = 0; i < rs_263->fSpecialHeaderBytesLength; ++i) {
01032       fPrevFrameState.specialHeaderBytes[i] = rs_263->fSpecialHeaderBytes[i];
01033     }
01034     for (i = 0; i < rs_263->fNumSpecialHeaders; ++i) {
01035       fPrevFrameState.packetSizes[i] = rs_263->fPacketSizes[i];
01036     }
01037   } else if (hackm4a_generic) {
01038     // Synthesize a special header, so that this frame can be in its own RTP packet.
01039     unsigned const sizeLength = fOurSubsession.attrVal_unsigned("sizelength");
01040     unsigned const indexLength = fOurSubsession.attrVal_unsigned("indexlength");
01041     if (sizeLength + indexLength != 16) {
01042       envir() << "Warning: unexpected 'sizeLength' " << sizeLength
01043               << " and 'indexLength' " << indexLength
01044               << " seen when creating hint track\n";
01045     }
01046     fPrevFrameState.numSpecialHeaders = 1;
01047     fPrevFrameState.specialHeaderBytesLength = 4;
01048     fPrevFrameState.specialHeaderBytes[0] = 0; // AU_headers_length (high byte)
01049     fPrevFrameState.specialHeaderBytes[1] = 16; // AU_headers_length (low byte)
01050     fPrevFrameState.specialHeaderBytes[2] = ((frameSize<<indexLength)&0xFF00)>>8;
01051     fPrevFrameState.specialHeaderBytes[3] = (frameSize<<indexLength);
01052     fPrevFrameState.packetSizes[0]
01053       = fPrevFrameState.specialHeaderBytesLength + frameSize;
01054   }
01055 }
01056 
01057 unsigned SubsessionIOState::useFrame1(unsigned sourceDataSize,
01058                                       struct timeval presentationTime,
01059                                       unsigned frameDuration,
01060                                       int64_t destFileOffset) {
01061   // Figure out the actual frame size for this data:
01062   unsigned frameSize = fQTBytesPerFrame;
01063   if (frameSize == 0) {
01064     // The entire packet data is assumed to be a frame:
01065     frameSize = sourceDataSize;
01066   }
01067   unsigned const numFrames = sourceDataSize/frameSize;
01068   unsigned const numSamples = numFrames*fQTSamplesPerFrame;
01069 
01070   // Record the information about which 'chunk' this data belongs to:
01071   ChunkDescriptor* newTailChunk;
01072   if (fTailChunk == NULL) {
01073     newTailChunk = fHeadChunk
01074       = new ChunkDescriptor(destFileOffset, sourceDataSize,
01075                             frameSize, frameDuration, presentationTime);
01076   } else {
01077     newTailChunk = fTailChunk->extendChunk(destFileOffset, sourceDataSize,
01078                                            frameSize, frameDuration,
01079                                            presentationTime);
01080   }
01081   if (newTailChunk != fTailChunk) {
01082     // This data created a new chunk, rather than extending the old one
01083     ++fNumChunks;
01084     fTailChunk = newTailChunk;
01085   }
01086 
01087   return numSamples;
01088 }
01089 
01090 void SubsessionIOState::onSourceClosure() {
01091   fOurSourceIsActive = False;
01092   fOurSink.onSourceClosure1();
01093 }
01094 
01095 Boolean SubsessionIOState::syncOK(struct timeval presentationTime) {
01096   QuickTimeFileSink& s = fOurSink; // abbreviation
01097   if (!s.fSyncStreams) return True; // we don't care
01098 
01099   if (s.fNumSyncedSubsessions < s.fNumSubsessions) {
01100     // Not all subsessions have yet been synced.  Check whether ours was
01101     // one of the unsynced ones, and, if so, whether it is now synced:
01102     if (!fHaveBeenSynced) {
01103       // We weren't synchronized before
01104       if (fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
01105         // H264 ?
01106         if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1) {
01107           // special case: audio + H264 video: wait until audio is in sync
01108           if ((s.fNumSubsessions == 2) && (s.fNumSyncedSubsessions < (s.fNumSubsessions - 1))) return False;
01109 
01110           // if audio is in sync, wait for the next IDR frame to start
01111           unsigned char* const frameSource = fBuffer->dataStart();
01112           if (*frameSource != H264_IDR_FRAME) return False;
01113         }
01114         // But now we are
01115         fHaveBeenSynced = True;
01116         fSyncTime = presentationTime;
01117         ++s.fNumSyncedSubsessions;
01118 
01119         if (timevalGE(fSyncTime, s.fNewestSyncTime)) {
01120           s.fNewestSyncTime = fSyncTime;
01121         }
01122       }
01123     }
01124   }
01125 
01126   // Check again whether all subsessions have been synced:
01127   if (s.fNumSyncedSubsessions < s.fNumSubsessions) return False;
01128 
01129   // Allow this data if it is more recent than the newest sync time:
01130   return timevalGE(presentationTime, s.fNewestSyncTime);
01131 }
01132 
01133 void SubsessionIOState::setHintTrack(SubsessionIOState* hintedTrack,
01134                                      SubsessionIOState* hintTrack) {
01135   if (hintedTrack != NULL) hintedTrack->fHintTrackForUs = hintTrack;
01136   if (hintTrack != NULL) hintTrack->fTrackHintedByUs = hintedTrack;
01137 }
01138 
01139 SyncFrame::SyncFrame(unsigned frameNum)
01140   : nextSyncFrame(NULL), sfFrameNum(frameNum) {
01141 }  
01142 
01143 void Count64::operator+=(unsigned arg) {
01144   unsigned newLo = lo + arg;
01145   if (newLo < lo) { // lo has overflowed
01146     ++hi;
01147   }
01148   lo = newLo;
01149 }
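// (For example, with lo == 0xFFFFFFFF and arg == 2, "newLo" wraps around to 1, which is less
// than the old "lo", so "hi" is incremented and the 64-bit total stays correct.)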
01150 
01151 ChunkDescriptor
01152 ::ChunkDescriptor(int64_t offsetInFile, unsigned size,
01153                   unsigned frameSize, unsigned frameDuration,
01154                   struct timeval presentationTime)
01155   : fNextChunk(NULL), fOffsetInFile(offsetInFile),
01156     fNumFrames(size/frameSize),
01157     fFrameSize(frameSize), fFrameDuration(frameDuration),
01158     fPresentationTime(presentationTime) {
01159 }
01160 
01161 ChunkDescriptor* ChunkDescriptor
01162 ::extendChunk(int64_t newOffsetInFile, unsigned newSize,
01163               unsigned newFrameSize, unsigned newFrameDuration,
01164               struct timeval newPresentationTime) {
01165   // First, check whether the new space is just at the end of this
01166   // existing chunk:
01167   if (newOffsetInFile == fOffsetInFile + fNumFrames*fFrameSize) {
01168     // We can extend this existing chunk, provided that the frame size
01169     // and frame duration have not changed:
01170     if (newFrameSize == fFrameSize && newFrameDuration == fFrameDuration) {
01171       fNumFrames += newSize/fFrameSize;
01172       return this;
01173     }
01174   }
01175 
01176   // We'll allocate a new ChunkDescriptor, and link it to the end of us:
01177   ChunkDescriptor* newDescriptor
01178     = new ChunkDescriptor(newOffsetInFile, newSize,
01179                           newFrameSize, newFrameDuration,
01180                           newPresentationTime);
01181 
01182   fNextChunk = newDescriptor;
01183 
01184   return newDescriptor;
01185 }
01186 
01187 
01189 
01190 unsigned QuickTimeFileSink::addWord64(u_int64_t word) {
01191   addByte((unsigned char)(word>>56)); addByte((unsigned char)(word>>48));
01192   addByte((unsigned char)(word>>40)); addByte((unsigned char)(word>>32));
01193   addByte((unsigned char)(word>>24)); addByte((unsigned char)(word>>16));
01194   addByte((unsigned char)(word>>8)); addByte((unsigned char)(word));
01195 
01196   return 8;
01197 }
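// (This helper, like addWord() and addHalfWord() below, emits multi-byte values
// most-significant byte first, i.e. the big-endian byte order used within QuickTime/MP4 atoms.)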
01198 
01199 unsigned QuickTimeFileSink::addWord(unsigned word) {
01200   addByte(word>>24); addByte(word>>16);
01201   addByte(word>>8); addByte(word);
01202 
01203   return 4;
01204 }
01205 
01206 unsigned QuickTimeFileSink::addHalfWord(unsigned short halfWord) {
01207   addByte((unsigned char)(halfWord>>8)); addByte((unsigned char)halfWord);
01208 
01209   return 2;
01210 }
01211 
01212 unsigned QuickTimeFileSink::addZeroWords(unsigned numWords) {
01213   for (unsigned i = 0; i < numWords; ++i) {
01214     addWord(0);
01215   }
01216 
01217   return numWords*4;
01218 }
01219 
01220 unsigned QuickTimeFileSink::add4ByteString(char const* str) {
01221   addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3]);
01222 
01223   return 4;
01224 }
01225 
01226 unsigned QuickTimeFileSink::addArbitraryString(char const* str,
01227                                                Boolean oneByteLength) {
01228   unsigned size = 0;
01229   if (oneByteLength) {
01230     // Begin with a byte containing the string length:
01231     unsigned strLength = strlen(str);
01232     if (strLength >= 256) {
01233       envir() << "QuickTimeFileSink::addArbitraryString(\""
01234               << str << "\") saw string longer than we know how to handle ("
01235               << strLength << "\n";
01236     }
01237     size += addByte((unsigned char)strLength);
01238   }
01239 
01240   while (*str != '\0') {
01241     size += addByte(*str++);
01242   }
01243 
01244   return size;
01245 }
01246 
01247 unsigned QuickTimeFileSink::addAtomHeader(char const* atomName) {
01248   // Output a placeholder for the 4-byte size:
01249   addWord(0);
01250 
01251   // Output the 4-byte atom name:
01252   add4ByteString(atomName);
01253 
01254   return 8;
01255 }
01256 
01257 unsigned QuickTimeFileSink::addAtomHeader64(char const* atomName) {
01258   // Output 64Bit size marker
01259   addWord(1);
01260 
01261   // Output the 4-byte atom name:
01262   add4ByteString(atomName);
01263 
01264   addWord64(0);
01265 
01266   return 16;
01267 }
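// (A 4-byte size field of 1 is the QuickTime/MP4 convention meaning that the real size is in the
// 8-byte extended-size field following the atom name; that field is written as 0 here and
// patched afterwards, e.g. via "setWord64()" for the 'mdat' atom.)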
01268 
01269 void QuickTimeFileSink::setWord(int64_t filePosn, unsigned size) {
01270   do {
01271     if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
01272     addWord(size);
01273     if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
01274 
01275     return;
01276   } while (0);
01277 
01278   // One of the SeekFile64()s failed, probably because the output is not a seekable file
01279   envir() << "QuickTimeFileSink::setWord(): SeekFile64 failed (err "
01280           << envir().getErrno() << ")\n";
01281 }
01282 
01283 void QuickTimeFileSink::setWord64(int64_t filePosn, u_int64_t size) {
01284   do {
01285     if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
01286     addWord64(size);
01287     if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
01288 
01289     return;
01290   } while (0);
01291 
01292   // One of the SeekFile64()s failed, probably because the output is not a seekable file
01293   envir() << "QuickTimeFileSink::setWord64(): SeekFile64 failed (err "
01294           << envir().getErrno() << ")\n";
01295 }
01296 
01297 // Methods for writing particular atoms.  Note the following macros:
01298 
01299 #define addAtom(name) \
01300     unsigned QuickTimeFileSink::addAtom_##name() { \
01301     int64_t initFilePosn = TellFile64(fOutFid); \
01302     unsigned size = addAtomHeader("" #name "")
01303 
01304 #define addAtomEnd \
01305   setWord(initFilePosn, size); \
01306   return size; \
01307 }
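      // Each addAtom(name) expansion remembers the file position of the atom's header,
      // writes that header with a zero-size placeholder, and accumulates the atom's
      // byte count in "size"; addAtomEnd then patches the true size (via setWord())
      // and returns it, so enclosing atoms can add it to their own sizes.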
01308 
01309 addAtom(ftyp);
01310   size += add4ByteString("mp42");
01311   size += addWord(0x00000000);
01312   size += add4ByteString("mp42");
01313   size += add4ByteString("isom");
01314 addAtomEnd;
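      // i.e., major brand "mp42", minor version 0, compatible brands "mp42" and "isom".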
01315 
01316 addAtom(moov);
01317   size += addAtom_mvhd();
01318 
01319   if (fGenerateMP4Format) {
01320     size += addAtom_iods();
01321   }
01322 
01323   // Add a 'trak' atom for each subsession:
01324   // (For some unknown reason, QuickTime Player (5.0 at least)
01325   //  doesn't display the movie correctly unless the audio track
01326   //  (if present) appears before the video track.  So ensure this here.)
01327   MediaSubsessionIterator iter(fInputSession);
01328   MediaSubsession* subsession;
01329   while ((subsession = iter.next()) != NULL) {
01330     fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
01331     if (fCurrentIOState == NULL) continue;
01332     if (strcmp(subsession->mediumName(), "audio") != 0) continue;
01333 
01334     size += addAtom_trak();
01335 
01336     if (fCurrentIOState->hasHintTrack()) {
01337       // This track has a hint track; output it also:
01338       fCurrentIOState = fCurrentIOState->fHintTrackForUs;
01339       size += addAtom_trak();
01340     }
01341   }
01342   iter.reset();
01343   while ((subsession = iter.next()) != NULL) {
01344     fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
01345     if (fCurrentIOState == NULL) continue;
01346     if (strcmp(subsession->mediumName(), "audio") == 0) continue;
01347 
01348     size += addAtom_trak();
01349 
01350     if (fCurrentIOState->hasHintTrack()) {
01351       // This track has a hint track; output it also:
01352       fCurrentIOState = fCurrentIOState->fHintTrackForUs;
01353       size += addAtom_trak();
01354     }
01355   }
01356 addAtomEnd;
01357 
01358 addAtom(mvhd);
01359   size += addWord(0x00000000); // Version + Flags
01360   size += addWord(fAppleCreationTime); // Creation time
01361   size += addWord(fAppleCreationTime); // Modification time
01362 
01363   // For the "Time scale" field, use the largest RTP timestamp frequency
01364   // that we saw in any of the subsessions.
01365   size += addWord(movieTimeScale()); // Time scale
01366 
01367   unsigned const duration = fMaxTrackDurationM;
01368   fMVHD_durationPosn = TellFile64(fOutFid);
01369   size += addWord(duration); // Duration
01370 
01371   size += addWord(0x00010000); // Preferred rate
01372   size += addWord(0x01000000); // Preferred volume + Reserved[0]
01373   size += addZeroWords(2); // Reserved[1-2]
01374   size += addWord(0x00010000); // matrix top left corner
01375   size += addZeroWords(3); // matrix
01376   size += addWord(0x00010000); // matrix center
01377   size += addZeroWords(3); // matrix
01378   size += addWord(0x40000000); // matrix bottom right corner
01379   size += addZeroWords(6); // various time fields
01380   size += addWord(SubsessionIOState::fCurrentTrackNumber+1);// Next track ID
01381 addAtomEnd;
01382 
01383 addAtom(iods);
01384   size += addWord(0x00000000); // Version + Flags
01385   size += addWord(0x10808080);
01386   size += addWord(0x07004FFF);
01387   size += addWord(0xFF0FFFFF);
01388 addAtomEnd;
01389 
01390 addAtom(trak);
01391   size += addAtom_tkhd();
01392 
01393   // If we're synchronizing the media streams (or are a hint track),
01394   // add an edit list that helps do this:
01395   if (fCurrentIOState->fHeadChunk != NULL
01396       && (fSyncStreams || fCurrentIOState->isHintTrack())) {
01397     size += addAtom_edts();
01398   }
01399 
01400   // If we're generating a hint track, add a 'tref' atom:
01401   if (fCurrentIOState->isHintTrack()) size += addAtom_tref();
01402 
01403   size += addAtom_mdia();
01404 
01405   // If we're generating a hint track, add a 'udta' atom:
01406   if (fCurrentIOState->isHintTrack()) size += addAtom_udta();
01407 addAtomEnd;
01408 
01409 addAtom(tkhd);
01410   if (fCurrentIOState->fQTEnableTrack) {
01411     size += addWord(0x0000000F); // Version +  Flags
01412   } else {
01413     // Disable this track in the movie:
01414     size += addWord(0x00000000); // Version +  Flags
01415   }
01416   size += addWord(fAppleCreationTime); // Creation time
01417   size += addWord(fAppleCreationTime); // Modification time
01418   size += addWord(fCurrentIOState->fTrackID); // Track ID
01419   size += addWord(0x00000000); // Reserved
01420 
01421   unsigned const duration = fCurrentIOState->fQTDurationM; // movie units
01422   fCurrentIOState->fTKHD_durationPosn = TellFile64(fOutFid);
01423   size += addWord(duration); // Duration
01424   size += addZeroWords(3); // Reserved+Layer+Alternate grp
01425   size += addWord(0x01000000); // Volume + Reserved
01426   size += addWord(0x00010000); // matrix top left corner
01427   size += addZeroWords(3); // matrix
01428   size += addWord(0x00010000); // matrix center
01429   size += addZeroWords(3); // matrix
01430   size += addWord(0x40000000); // matrix bottom right corner
01431   if (strcmp(fCurrentIOState->fOurSubsession.mediumName(), "video") == 0) {
01432     size += addWord(fMovieWidth<<16); // Track width
01433     size += addWord(fMovieHeight<<16); // Track height
01434   } else {
01435     size += addZeroWords(2); // not video: leave width and height fields zero
01436   }
01437 addAtomEnd;
01438 
01439 addAtom(edts);
01440   size += addAtom_elst();
01441 addAtomEnd;
01442 
01443 #define addEdit1(duration,trackPosition) do { \
01444       unsigned trackDuration \
01445         = (unsigned) ((2*(duration)*movieTimeScale()+1)/2); \
01446             /* in movie time units */ \
01447       size += addWord(trackDuration); /* Track duration */ \
01448       totalDurationOfEdits += trackDuration; \
01449       size += addWord(trackPosition); /* Media time */ \
01450       size += addWord(0x00010000); /* Media rate (1x) */ \
01451       ++numEdits; \
01452 } while (0)
01453 #define addEdit(duration) addEdit1((duration),editTrackPosition)
01454 #define addEmptyEdit(duration) addEdit1((duration),(~0))
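      // Each edit-list entry written by these macros is a (track duration, media time,
      // media rate) triple, with the duration converted to movie time units and the
      // rate fixed at 1.0 (0x00010000).  addEmptyEdit() uses a media time of ~0 (-1),
      // which marks an 'empty' edit: nothing is played for that duration.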
01455 
01456 addAtom(elst);
01457   size += addWord(0x00000000); // Version + Flags
01458 
01459   // Add a dummy "Number of entries" field
01460   // (and remember its position).  We'll fill this field in later:
01461   int64_t numEntriesPosition = TellFile64(fOutFid);
01462   size += addWord(0); // dummy for "Number of entries"
01463   unsigned numEdits = 0;
01464   unsigned totalDurationOfEdits = 0; // in movie time units
01465 
01466   // Run through our chunks, looking at their presentation times.
01467   // From these, figure out the edits that need to be made to keep
01468   // the track media data in sync with the presentation times.
01469 
01470   double const syncThreshold = 0.1; // 100 ms
01471     // don't allow the track to get out of sync by more than this
01472 
01473   struct timeval editStartTime = fFirstDataTime;
01474   unsigned editTrackPosition = 0;
01475   unsigned currentTrackPosition = 0;
01476   double trackDurationOfEdit = 0.0;
01477   unsigned chunkDuration = 0;
01478 
01479   ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
01480   while (chunk != NULL) {
01481     struct timeval const& chunkStartTime = chunk->fPresentationTime;
01482     double movieDurationOfEdit
01483       = (chunkStartTime.tv_sec - editStartTime.tv_sec)
01484       + (chunkStartTime.tv_usec - editStartTime.tv_usec)/1000000.0;
01485     trackDurationOfEdit = (currentTrackPosition-editTrackPosition)
01486       / (double)(fCurrentIOState->fQTTimeScale);
01487 
01488     double outOfSync = movieDurationOfEdit - trackDurationOfEdit;
01489 
01490     if (outOfSync > syncThreshold) {
01491       // The track's data is too short, so end this edit, add a new
01492       // 'empty' edit after it, and start a new edit
01493       // (at the current track posn.):
01494       if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
01495       addEmptyEdit(outOfSync);
01496 
01497       editStartTime = chunkStartTime;
01498       editTrackPosition = currentTrackPosition;
01499     } else if (outOfSync < -syncThreshold) {
01500       // The track's data is too long, so end this edit, and start
01501       // a new edit (pointing at the current track posn.):
01502       if (movieDurationOfEdit > 0.0) addEdit(movieDurationOfEdit);
01503 
01504       editStartTime = chunkStartTime;
01505       editTrackPosition = currentTrackPosition;
01506     }
01507 
01508     // Note the duration of this chunk:
01509     unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
01510     chunkDuration = chunk->fNumFrames*chunk->fFrameDuration/numChannels;
01511     currentTrackPosition += chunkDuration;
01512 
01513     chunk = chunk->fNextChunk;
01514   }
01515 
01516   // Write out the final edit
01517   trackDurationOfEdit
01518       += (double)chunkDuration/fCurrentIOState->fQTTimeScale;
01519   if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
01520 
01521   // Now go back and fill in the "Number of entries" field:
01522   setWord(numEntriesPosition, numEdits);
01523 
01524   // Also, if the sum of all of the edit durations exceeds the
01525   // track duration that we already computed (from sample durations),
01526   // then reset the track duration to this new value:
01527   if (totalDurationOfEdits > fCurrentIOState->fQTDurationM) {
01528     fCurrentIOState->fQTDurationM = totalDurationOfEdits;
01529     setWord(fCurrentIOState->fTKHD_durationPosn, totalDurationOfEdits);
01530 
01531     // Also, check whether the overall movie duration needs to change:
01532     if (totalDurationOfEdits > fMaxTrackDurationM) {
01533       fMaxTrackDurationM = totalDurationOfEdits;
01534       setWord(fMVHD_durationPosn, totalDurationOfEdits);
01535     }
01536 
01537     // Also, convert to track time scale:
01538     double scaleFactor
01539       = fCurrentIOState->fQTTimeScale/(double)movieTimeScale();
01540     fCurrentIOState->fQTDurationT
01541       = (unsigned)(totalDurationOfEdits*scaleFactor);
01542   }
01543 addAtomEnd;
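      // Illustrative example (not taken from any particular recording): if a chunk's
      // presentation time is 0.25 s later than the track data written so far accounts
      // for (i.e., outOfSync exceeds the 0.1 s threshold), the current edit is closed
      // and a 0.25 s empty edit is inserted, keeping this track aligned with the others.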
01544 
01545 addAtom(tref);
01546   size += addAtom_hint();
01547 addAtomEnd;
01548 
01549 addAtom(hint);
01550   SubsessionIOState* hintedTrack = fCurrentIOState->fTrackHintedByUs;
01551     // Assert: hintedTrack != NULL
01552   size += addWord(hintedTrack->fTrackID);
01553 addAtomEnd;
01554 
01555 addAtom(mdia);
01556   size += addAtom_mdhd();
01557   size += addAtom_hdlr();
01558   size += addAtom_minf();
01559 addAtomEnd;
01560 
01561 addAtom(mdhd);
01562   size += addWord(0x00000000); // Version + Flags
01563   size += addWord(fAppleCreationTime); // Creation time
01564   size += addWord(fAppleCreationTime); // Modification time
01565 
01566   unsigned const timeScale = fCurrentIOState->fQTTimeScale;
01567   size += addWord(timeScale); // Time scale
01568 
01569   unsigned const duration = fCurrentIOState->fQTDurationT; // track units
01570   size += addWord(duration); // Duration
01571 
01572   size += addWord(0x00000000); // Language+Quality
01573 addAtomEnd;
01574 
01575 addAtom(hdlr);
01576   size += addWord(0x00000000); // Version + Flags
01577   size += add4ByteString("mhlr"); // Component type
01578   size += addWord(fCurrentIOState->fQTcomponentSubtype);
01579     // Component subtype
01580   size += add4ByteString("appl"); // Component manufacturer
01581   size += addWord(0x00000000); // Component flags
01582   size += addWord(0x00000000); // Component flags mask
01583   size += addArbitraryString(fCurrentIOState->fQTcomponentName);
01584     // Component name
01585 addAtomEnd;
01586 
01587 addAtom(minf);
01588   SubsessionIOState::atomCreationFunc mediaInformationAtomCreator
01589     = fCurrentIOState->fQTMediaInformationAtomCreator;
01590   size += (this->*mediaInformationAtomCreator)();
01591   size += addAtom_hdlr2();
01592   size += addAtom_dinf();
01593   size += addAtom_stbl();
01594 addAtomEnd;
01595 
01596 addAtom(smhd);
01597   size += addZeroWords(2); // Version+Flags+Balance+Reserved
01598 addAtomEnd;
01599 
01600 addAtom(vmhd);
01601   size += addWord(0x00000001); // Version + Flags
01602   size += addWord(0x00408000); // Graphics mode + Opcolor[red]
01603   size += addWord(0x80008000); // Opcolor[green] + Opcolor[blue]
01604 addAtomEnd;
01605 
01606 addAtom(gmhd);
01607   size += addAtom_gmin();
01608 addAtomEnd;
01609 
01610 addAtom(gmin);
01611   size += addWord(0x00000000); // Version + Flags
01612   // The following fields probably aren't used for hint tracks, so just
01613   // use values that I've seen in other files:
01614   size += addWord(0x00408000); // Graphics mode + Opcolor (1st 2 bytes)
01615   size += addWord(0x80008000); // Opcolor (last 4 bytes)
01616   size += addWord(0x00000000); // Balance + Reserved
01617 addAtomEnd;
01618 
01619 unsigned QuickTimeFileSink::addAtom_hdlr2() {
01620   int64_t initFilePosn = TellFile64(fOutFid);
01621   unsigned size = addAtomHeader("hdlr");
01622   size += addWord(0x00000000); // Version + Flags
01623   size += add4ByteString("dhlr"); // Component type
01624   size += add4ByteString("alis"); // Component subtype
01625   size += add4ByteString("appl"); // Component manufacturer
01626   size += addZeroWords(2); // Component flags+Component flags mask
01627   size += addArbitraryString("Apple Alias Data Handler"); // Component name
01628 addAtomEnd;
01629 
01630 addAtom(dinf);
01631   size += addAtom_dref();
01632 addAtomEnd;
01633 
01634 addAtom(dref);
01635   size += addWord(0x00000000); // Version + Flags
01636   size += addWord(0x00000001); // Number of entries
01637   size += addAtom_alis();
01638 addAtomEnd;
01639 
01640 addAtom(alis);
01641   size += addWord(0x00000001); // Version + Flags
01642 addAtomEnd;
01643 
01644 addAtom(stbl);
01645   size += addAtom_stsd();
01646   size += addAtom_stts();
01647   if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
01648     size += addAtom_stss(); // only for video streams
01649   }
01650   size += addAtom_stsc();
01651   size += addAtom_stsz();
01652   size += addAtom_co64();
01653 addAtomEnd;
01654 
01655 addAtom(stsd);
01656   size += addWord(0x00000000); // Version+Flags
01657   size += addWord(0x00000001); // Number of entries
01658   SubsessionIOState::atomCreationFunc mediaDataAtomCreator
01659     = fCurrentIOState->fQTMediaDataAtomCreator;
01660   size += (this->*mediaDataAtomCreator)();
01661 addAtomEnd;
01662 
01663 unsigned QuickTimeFileSink::addAtom_genericMedia() {
01664   int64_t initFilePosn = TellFile64(fOutFid);
01665 
01666   // Our source is assumed to be a "QuickTimeGenericRTPSource"
01667   // Use its "sdAtom" state for our contents:
01668   QuickTimeGenericRTPSource* rtpSource = (QuickTimeGenericRTPSource*)
01669     fCurrentIOState->fOurSubsession.rtpSource();
01670   QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
01671   char const* from = qtState.sdAtom;
01672   unsigned size = qtState.sdAtomSize;
01673   for (unsigned i = 0; i < size; ++i) addByte(from[i]);
01674 addAtomEnd;
01675 
01676 unsigned QuickTimeFileSink::addAtom_soundMediaGeneral() {
01677   int64_t initFilePosn = TellFile64(fOutFid);
01678   unsigned size = addAtomHeader(fCurrentIOState->fQTAudioDataType);
01679 
01680 // General sample description fields:
01681   size += addWord(0x00000000); // Reserved
01682   size += addWord(0x00000001); // Reserved+Data reference index
01683 // Sound sample description fields:
01684   unsigned short const version = fCurrentIOState->fQTSoundSampleVersion;
01685   size += addWord(version<<16); // Version+Revision level
01686   size += addWord(0x00000000); // Vendor
01687   unsigned short numChannels
01688     = (unsigned short)(fCurrentIOState->fOurSubsession.numChannels());
01689   size += addHalfWord(numChannels); // Number of channels
01690   size += addHalfWord(0x0010); // Sample size
01691   //  size += addWord(0x00000000); // Compression ID+Packet size
01692   size += addWord(0xfffe0000); // Compression ID+Packet size #####
01693 
01694   unsigned const sampleRateFixedPoint = fCurrentIOState->fQTTimeScale << 16;
01695   size += addWord(sampleRateFixedPoint); // Sample rate
01696 addAtomEnd;
01697 
01698 unsigned QuickTimeFileSink::addAtom_Qclp() {
01699   // The beginning of this atom looks just like a general Sound Media atom,
01700   // except with a version field of 1:
01701   int64_t initFilePosn = TellFile64(fOutFid);
01702   fCurrentIOState->fQTAudioDataType = "Qclp";
01703   fCurrentIOState->fQTSoundSampleVersion = 1;
01704   unsigned size = addAtom_soundMediaGeneral();
01705 
01706   // Next, add the four fields that are particular to version 1:
01707   // (Later, parameterize these #####)
01708   size += addWord(0x000000a0); // samples per packet
01709   size += addWord(0x00000000); // ???
01710   size += addWord(0x00000000); // ???
01711   size += addWord(0x00000002); // bytes per sample (uncompressed)
01712 
01713   // Other special fields are in a 'wave' atom that follows:
01714   size += addAtom_wave();
01715 addAtomEnd;
01716 
01717 addAtom(wave);
01718   size += addAtom_frma();
01719   if (strcmp(fCurrentIOState->fQTAudioDataType, "Qclp") == 0) {
01720     size += addWord(0x00000014); // ???
01721     size += add4ByteString("Qclp"); // ???
01722     if (fCurrentIOState->fQTBytesPerFrame == 35) {
01723       size += addAtom_Fclp(); // full-rate QCELP
01724     } else {
01725       size += addAtom_Hclp(); // half-rate QCELP
01726     } // what about other QCELP 'rates'??? #####
01727     size += addWord(0x00000008); // ???
01728     size += addWord(0x00000000); // ???
01729     size += addWord(0x00000000); // ???
01730     size += addWord(0x00000008); // ???
01731   } else if (strcmp(fCurrentIOState->fQTAudioDataType, "mp4a") == 0) {
01732     size += addWord(0x0000000c); // ???
01733     size += add4ByteString("mp4a"); // ???
01734     size += addWord(0x00000000); // ???
01735     size += addAtom_esds(); // ESDescriptor
01736     size += addWord(0x00000008); // ???
01737     size += addWord(0x00000000); // ???
01738   }
01739 addAtomEnd;
01740 
01741 addAtom(frma);
01742   size += add4ByteString(fCurrentIOState->fQTAudioDataType); // ???
01743 addAtomEnd;
01744 
01745 addAtom(Fclp);
01746  size += addWord(0x00000000); // ???
01747 addAtomEnd;
01748 
01749 addAtom(Hclp);
01750  size += addWord(0x00000000); // ???
01751 addAtomEnd;
01752 
01753 unsigned QuickTimeFileSink::addAtom_mp4a() {
01754   unsigned size = 0;
01755   // The beginning of this atom looks just like a general Sound Media atom,
01756   // except with a version field of 1:
01757   int64_t initFilePosn = TellFile64(fOutFid);
01758   fCurrentIOState->fQTAudioDataType = "mp4a";
01759 
01760   if (fGenerateMP4Format) {
01761     fCurrentIOState->fQTSoundSampleVersion = 0;
01762     size = addAtom_soundMediaGeneral();
01763     size += addAtom_esds();
01764   } else {
01765     fCurrentIOState->fQTSoundSampleVersion = 1;
01766     size = addAtom_soundMediaGeneral();
01767 
01768     // Next, add the four fields that are particular to version 1:
01769     // (Later, parameterize these #####)
01770     size += addWord(fCurrentIOState->fQTTimeUnitsPerSample);
01771     size += addWord(0x00000001); // ???
01772     size += addWord(0x00000001); // ???
01773     size += addWord(0x00000002); // bytes per sample (uncompressed)
01774 
01775     // Other special fields are in a 'wave' atom that follows:
01776     size += addAtom_wave();
01777   }
01778 addAtomEnd;
01779 
01780 addAtom(esds);
01781   //#####
01782   MediaSubsession& subsession = fCurrentIOState->fOurSubsession;
01783   if (strcmp(subsession.mediumName(), "audio") == 0) {
01784     // MPEG-4 audio
01785     size += addWord(0x00000000); // ???
01786     size += addWord(0x03808080); // ???
01787     size += addWord(0x2a000000); // ???
01788     size += addWord(0x04808080); // ???
01789     size += addWord(0x1c401500); // ???
01790     size += addWord(0x18000000); // ???
01791     size += addWord(0x6d600000); // ???
01792     size += addWord(0x6d600580); // ???
01793     size += addByte(0x80); size += addByte(0x80); // ???
01794   } else if (strcmp(subsession.mediumName(), "video") == 0) {
01795     // MPEG-4 video
01796     size += addWord(0x00000000); // ???
01797     size += addWord(0x03330000); // ???
01798     size += addWord(0x1f042b20); // ???
01799     size += addWord(0x1104fd46); // ???
01800     size += addWord(0x000d4e10); // ???
01801     size += addWord(0x000d4e10); // ???
01802     size += addByte(0x05); // ???
01803   }
01804 
01805   // Add the source's 'config' information:
01806   unsigned configSize;
01807   unsigned char* config
01808     = parseGeneralConfigStr(subsession.fmtp_config(), configSize);
01809   size += addByte(configSize);
01810   for (unsigned i = 0; i < configSize; ++i) {
01811     size += addByte(config[i]);
01812   }
01813   delete[] config;
01814 
01815   if (strcmp(subsession.mediumName(), "audio") == 0) {
01816     // MPEG-4 audio
01817     size += addWord(0x06808080); // ???
01818     size += addHalfWord(0x0102); // ???
01819   } else {
01820     // MPEG-4 video
01821     size += addHalfWord(0x0601); // ???
01822     size += addByte(0x02); // ???
01823   }
01824   //#####
01825 addAtomEnd;
01826 
01827 addAtom(srcq);
01828   //#####
01829   size += addWord(0x00000040); // ???
01830   //#####
01831 addAtomEnd;
01832 
01833 addAtom(h263);
01834 // General sample description fields:
01835   size += addWord(0x00000000); // Reserved
01836   size += addWord(0x00000001); // Reserved+Data reference index
01837 // Video sample description fields:
01838   size += addWord(0x00020001); // Version+Revision level
01839   size += add4ByteString("appl"); // Vendor
01840   size += addWord(0x00000000); // Temporal quality
01841   size += addWord(0x000002fc); // Spatial quality
01842   unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
01843   size += addWord(widthAndHeight); // Width+height
01844   size += addWord(0x00480000); // Horizontal resolution
01845   size += addWord(0x00480000); // Vertical resolution
01846   size += addWord(0x00000000); // Data size
01847   size += addWord(0x00010548); // Frame count+Compressor name (start)
01848     // "H.263"
01849   size += addWord(0x2e323633); // Compressor name (continued)
01850   size += addZeroWords(6); // Compressor name (continued - zero)
01851   size += addWord(0x00000018); // Compressor name (final)+Depth
01852   size += addHalfWord(0xffff); // Color table id
01853 addAtomEnd;
01854 
01855 addAtom(avc1);
01856 // General sample description fields:
01857   size += addWord(0x00000000); // Reserved
01858   size += addWord(0x00000001); // Reserved+Data reference index
01859 // Video sample description fields:
01860   size += addWord(0x00000000); // Version+Revision level
01861   size += add4ByteString("appl"); // Vendor
01862   size += addWord(0x00000000); // Temporal quality
01863   size += addWord(0x00000000); // Spatial quality
01864   unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
01865   size += addWord(widthAndHeight); // Width+height
01866   size += addWord(0x00480000); // Horizontal resolution
01867   size += addWord(0x00480000); // Vertical resolution
01868   size += addWord(0x00000000); // Data size
01869   size += addWord(0x00010548); // Frame count+Compressor name (start)
01870     // "H.264"
01871   size += addWord(0x2e323634); // Compressor name (continued)
01872   size += addZeroWords(6); // Compressor name (continued - zero)
01873   size += addWord(0x00000018); // Compressor name (final)+Depth
01874   size += addHalfWord(0xffff); // Color table id
01875   size += addAtom_avcC();
01876 addAtomEnd;
01877 
01878 addAtom(avcC);
01879 // Begin by Base-64 decoding the "sprop" parameter sets strings:
01880   char* psets = strDup(fCurrentIOState->fOurSubsession.fmtp_spropparametersets());
01881   if (psets == NULL) return 0;
01882 
01883   size_t comma_pos = strcspn(psets, ",");
01884   psets[comma_pos] = '\0';
01885   char const* sps_b64 = psets;
01886   char const* pps_b64 = &psets[comma_pos+1];
01887   unsigned sps_count;
01888   unsigned char* sps_data = base64Decode(sps_b64, sps_count, false);
01889   unsigned pps_count;
01890   unsigned char* pps_data = base64Decode(pps_b64, pps_count, false);
01891 
01892 // Then add the decoded data:
01893   size += addByte(0x01); // configuration version
01894   size += addByte(sps_data[1]); // profile
01895   size += addByte(sps_data[2]); // profile compat
01896   size += addByte(sps_data[3]); // level
01897   size += addByte(0xff); /* 6 reserved bits (all 1s) | lengthSizeMinusOne == 0b11, i.e. 4-byte NALU lengths */
01898   size += addByte(0xe0 | (sps_count > 0 ? 1 : 0) ); // 3 reserved bits (all 1s) | number of SPSs
01899   if (sps_count > 0) {
01900     size += addHalfWord(sps_count);
01901     for (unsigned i = 0; i < sps_count; i++) {
01902       size += addByte(sps_data[i]);
01903     }
01904   }
01905   size += addByte(pps_count > 0 ? 1 : 0);
01906   if (pps_count > 0) {
01907     size += addHalfWord(pps_count);
01908     for (unsigned i = 0; i < pps_count; i++) {
01909       size += addByte(pps_data[i]);
01910     }
01911   }
01912 
01913 // Finally, delete the data that we allocated:
01914   delete[] pps_data; delete[] sps_data;
01915   delete[] psets;
01916 addAtomEnd;
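      // The 'avcC' atom built above follows the standard AVCDecoderConfigurationRecord
      // layout: configuration version, profile/compatibility/level bytes copied from
      // the SPS, the NALU length-field size, then one SPS and one PPS NAL unit (each
      // preceded by a 16-bit length), all recovered from the SDP
      // "sprop-parameter-sets" attribute.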
01917 
01918 addAtom(mp4v);
01919 // General sample description fields:
01920   size += addWord(0x00000000); // Reserved
01921   size += addWord(0x00000001); // Reserved+Data reference index
01922 // Video sample description fields:
01923   size += addWord(0x00020001); // Version+Revision level
01924   size += add4ByteString("appl"); // Vendor
01925   size += addWord(0x00000200); // Temporal quality
01926   size += addWord(0x00000400); // Spatial quality
01927   unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
01928   size += addWord(widthAndHeight); // Width+height
01929   size += addWord(0x00480000); // Horizontal resolution
01930   size += addWord(0x00480000); // Vertical resolution
01931   size += addWord(0x00000000); // Data size
01932   size += addWord(0x00010c4d); // Frame count+Compressor name (start)
01933     // "MPEG-4 Video"
01934   size += addWord(0x5045472d); // Compressor name (continued)
01935   size += addWord(0x34205669); // Compressor name (continued)
01936   size += addWord(0x64656f00); // Compressor name (continued)
01937   size += addZeroWords(4); // Compressor name (continued - zero)
01938   size += addWord(0x00000018); // Compressor name (final)+Depth
01939   size += addHalfWord(0xffff); // Color table id
01940   size += addAtom_esds(); // ESDescriptor
01941   size += addWord(0x00000000); // ???
01942 addAtomEnd;
01943 
01944 unsigned QuickTimeFileSink::addAtom_rtp() {
01945   int64_t initFilePosn = TellFile64(fOutFid);
01946   unsigned size = addAtomHeader("rtp ");
01947 
01948   size += addWord(0x00000000); // Reserved (1st 4 bytes)
01949   size += addWord(0x00000001); // Reserved (last 2 bytes) + Data ref index
01950   size += addWord(0x00010001); // Hint track version + Last compat htv
01951   size += addWord(1450); // Max packet size
01952 
01953   size += addAtom_tims();
01954 addAtomEnd;
01955 
01956 addAtom(tims);
01957   size += addWord(fCurrentIOState->fOurSubsession.rtpTimestampFrequency());
01958 addAtomEnd;
01959 
01960 addAtom(stts); // Time-to-Sample
01961   size += addWord(0x00000000); // Version+flags
01962 
01963   // First, add a dummy "Number of entries" field
01964   // (and remember its position).  We'll fill this field in later:
01965   int64_t numEntriesPosition = TellFile64(fOutFid);
01966   size += addWord(0); // dummy for "Number of entries"
01967 
01968   // Then, run through the chunk descriptors, and enter the entries
01969   // in this (compressed) Time-to-Sample table:
01970   unsigned numEntries = 0, numSamplesSoFar = 0;
01971   unsigned prevSampleDuration = 0;
01972   unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
01973   ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
01974   while (chunk != NULL) {
01975     unsigned const sampleDuration = chunk->fFrameDuration/samplesPerFrame;
01976     if (sampleDuration != prevSampleDuration) {
01977       // This chunk will start a new table entry,
01978       // so write out the old one (if any):
01979       if (chunk != fCurrentIOState->fHeadChunk) {
01980         ++numEntries;
01981         size += addWord(numSamplesSoFar); // Sample count
01982         size += addWord(prevSampleDuration); // Sample duration
01983         numSamplesSoFar = 0;
01984       }
01985     }
01986 
01987     unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
01988     numSamplesSoFar += numSamples;
01989     prevSampleDuration = sampleDuration;
01990     chunk = chunk->fNextChunk;
01991   }
01992 
01993   // Then, write out the last entry:
01994   ++numEntries;
01995   size += addWord(numSamplesSoFar); // Sample count
01996   size += addWord(prevSampleDuration); // Sample duration
01997 
01998   // Now go back and fill in the "Number of entries" field:
01999   setWord(numEntriesPosition, numEntries);
02000 addAtomEnd;
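      // The Time-to-Sample table is run-length compressed: consecutive chunks whose
      // per-sample duration is unchanged are folded into a single
      // (sample count, sample duration) entry, and the entry count is back-patched.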
02001 
02002 addAtom(stss); // Sync-Sample
02003   size += addWord(0x00000000); // Version+flags
02004 
02005   // First, add a dummy "Number of entries" field
02006   // (and remember its position).  We'll fill this field in later:
02007   int64_t numEntriesPosition = TellFile64(fOutFid);
02008   size += addWord(0); // dummy for "Number of entries"
02009 
02010   unsigned numEntries = 0, numSamplesSoFar = 0;
02011   if (fCurrentIOState->fHeadSyncFrame != NULL) {
02012     SyncFrame* currentSyncFrame = fCurrentIOState->fHeadSyncFrame;
02013     while(currentSyncFrame != NULL) {
02014       ++numEntries;
02015       size += addWord(currentSyncFrame->sfFrameNum);
02016       currentSyncFrame = currentSyncFrame->nextSyncFrame;
02017     }
02018   } else {
02019     // Then, run through the chunk descriptors, counting up the total number of samples:
02020     unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
02021     ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
02022     while (chunk != NULL) {
02023       unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
02024       numSamplesSoFar += numSamples;
02025       chunk = chunk->fNextChunk;
02026     }
02027   
02028     // Then, write out the sample numbers that we deem correspond to 'sync samples':
02029     unsigned i;
02030     for (i = 0; i < numSamplesSoFar; i += 12) {
02031       // For an explanation of the constant "12", see http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
02032       // (Perhaps we should really try to keep track of which 'samples' ('frames' for video) really are 'key frames'?)
02033       size += addWord(i+1);
02034       ++numEntries;
02035     }
02036   
02037     // Then, write out the last entry (if we haven't already done so):
02038     if (i != (numSamplesSoFar - 1)) {
02039       size += addWord(numSamplesSoFar);
02040       ++numEntries;
02041     }
02042   }
02043 
02044   // Now go back and fill in the "Number of entries" field:
02045   setWord(numEntriesPosition, numEntries);
02046 addAtomEnd;
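      // Sync-sample selection: if explicit sync frames were recorded while streaming
      // (presumably key frames, e.g. H.264 IDR frames), their frame numbers are listed
      // directly; otherwise every 12th sample is marked as a rough approximation, as
      // discussed at the URL cited above.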
02047 
02048 addAtom(stsc); // Sample-to-Chunk
02049   size += addWord(0x00000000); // Version+flags
02050 
02051   // First, add a dummy "Number of entries" field
02052   // (and remember its position).  We'll fill this field in later:
02053   int64_t numEntriesPosition = TellFile64(fOutFid);
02054   size += addWord(0); // dummy for "Number of entries"
02055 
02056   // Then, run through the chunk descriptors, and enter the entries
02057   // in this (compressed) Sample-to-Chunk table:
02058   unsigned numEntries = 0, chunkNumber = 0;
02059   unsigned prevSamplesPerChunk = ~0;
02060   unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
02061   ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
02062   while (chunk != NULL) {
02063     ++chunkNumber;
02064     unsigned const samplesPerChunk = chunk->fNumFrames*samplesPerFrame;
02065     if (samplesPerChunk != prevSamplesPerChunk) {
02066       // This chunk will be a new table entry:
02067       ++numEntries;
02068       size += addWord(chunkNumber); // Chunk number
02069       size += addWord(samplesPerChunk); // Samples per chunk
02070       size += addWord(0x00000001); // Sample description ID
02071 
02072       prevSamplesPerChunk = samplesPerChunk;
02073     }
02074     chunk = chunk->fNextChunk;
02075   }
02076 
02077   // Now go back and fill in the "Number of entries" field:
02078   setWord(numEntriesPosition, numEntries);
02079 addAtomEnd;
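      // The Sample-to-Chunk table is likewise compressed: a new (first chunk,
      // samples per chunk, sample description ID) entry is emitted only when the
      // number of samples per chunk changes; chunk numbers are 1-based.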
02080 
02081 addAtom(stsz); // Sample Size
02082   size += addWord(0x00000000); // Version+flags
02083 
02084   // Begin by checking whether our chunks all have the same
02085   // 'bytes-per-sample'.  This determines whether this atom's table
02086   // has just a single entry, or multiple entries.
02087   Boolean haveSingleEntryTable = True;
02088   double firstBPS = 0.0;
02089   ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
02090   while (chunk != NULL) {
02091     double bps
02092       = (double)(chunk->fFrameSize)/(fCurrentIOState->fQTSamplesPerFrame);
02093     if (bps < 1.0) {
02094       // I don't think a multiple-entry table would make sense in
02095       // this case, so assume a single entry table ??? #####
02096       break;
02097     }
02098 
02099     if (firstBPS == 0.0) {
02100       firstBPS = bps;
02101     } else if (bps != firstBPS) {
02102       haveSingleEntryTable = False;
02103       break;
02104     }
02105 
02106     chunk = chunk->fNextChunk;
02107   }
02108 
02109   unsigned sampleSize;
02110   if (haveSingleEntryTable) {
02111     if (fCurrentIOState->isHintTrack()
02112         && fCurrentIOState->fHeadChunk != NULL) {
02113       sampleSize = fCurrentIOState->fHeadChunk->fFrameSize
02114                       / fCurrentIOState->fQTSamplesPerFrame;
02115     } else {
02116       // The following doesn't seem right, but seems to do the right thing:
02117       sampleSize = fCurrentIOState->fQTTimeUnitsPerSample; //???
02118     }
02119   } else {
02120     sampleSize = 0; // indicates a multiple-entry table
02121   }
02122   size += addWord(sampleSize); // Sample size
02123   unsigned const totNumSamples = fCurrentIOState->fQTTotNumSamples;
02124   size += addWord(totNumSamples); // Number of entries
02125 
02126   if (!haveSingleEntryTable) {
02127     // Multiple-entry table:
02128     // Run through the chunk descriptors, entering the sample sizes:
02129     ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
02130     while (chunk != NULL) {
02131       unsigned numSamples
02132         = chunk->fNumFrames*(fCurrentIOState->fQTSamplesPerFrame);
02133       unsigned sampleSize
02134         = chunk->fFrameSize/(fCurrentIOState->fQTSamplesPerFrame);
02135       for (unsigned i = 0; i < numSamples; ++i) {
02136         size += addWord(sampleSize);
02137       }
02138 
02139       chunk = chunk->fNextChunk;
02140     }
02141   }
02142 addAtomEnd;
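      // A non-zero "sampleSize" above means that every sample has that size and no
      // per-sample table is needed; a value of 0 means the per-sample size table
      // written by the loop applies instead.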
02143 
02144 addAtom(co64); // Chunk Offset
02145   size += addWord(0x00000000); // Version+flags
02146   size += addWord(fCurrentIOState->fNumChunks); // Number of entries
02147 
02148   // Run through the chunk descriptors, entering the file offsets:
02149   ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
02150   while (chunk != NULL) {
02151     size += addWord64(chunk->fOffsetInFile);
02152 
02153     chunk = chunk->fNextChunk;
02154   }
02155 addAtomEnd;
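      // 'co64' is the 64-bit variant of the chunk-offset atom ('stco'): each entry is
      // the absolute byte offset, within this file, at which a chunk of this track's
      // media data begins, taken from the chunk descriptors built during recording.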
02156 
02157 addAtom(udta);
02158   size += addAtom_name();
02159   size += addAtom_hnti();
02160   size += addAtom_hinf();
02161 addAtomEnd;
02162 
02163 addAtom(name);
02164   char description[100];
02165   sprintf(description, "Hinted %s track",
02166           fCurrentIOState->fOurSubsession.mediumName());
02167   size += addArbitraryString(description, False); // name of object
02168 addAtomEnd;
02169 
02170 addAtom(hnti);
02171   size += addAtom_sdp();
02172 addAtomEnd;
02173 
02174 unsigned QuickTimeFileSink::addAtom_sdp() {
02175   int64_t initFilePosn = TellFile64(fOutFid);
02176   unsigned size = addAtomHeader("sdp ");
02177 
02178   // Add this subsession's SDP lines:
02179   char const* sdpLines = fCurrentIOState->fOurSubsession.savedSDPLines();
02180   // We need to change any "a=control:trackID=" values to be this
02181   // track's actual track id:
02182   char* newSDPLines = new char[strlen(sdpLines)+100/*overkill*/];
02183   char const* searchStr = "a=control:trackid=";
02184   Boolean foundSearchString = False;
02185   char const *p1, *p2, *p3;
02186   for (p1 = sdpLines; *p1 != '\0'; ++p1) {
02187     for (p2 = p1,p3 = searchStr; tolower(*p2) == *p3; ++p2,++p3) {}
02188     if (*p3 == '\0') {
02189       // We found the end of the search string, at p2.
02190       int beforeTrackNumPosn = p2-sdpLines;
02191       // Look for the subsequent track number, and skip over it:
02192       int trackNumLength;
02193       if (sscanf(p2, " %*d%n", &trackNumLength) < 0) break;
02194       int afterTrackNumPosn = beforeTrackNumPosn + trackNumLength;
02195 
02196       // Replace the old track number with the correct one:
02197       int i;
02198       for (i = 0; i < beforeTrackNumPosn; ++i) newSDPLines[i] = sdpLines[i];
02199       sprintf(&newSDPLines[i], "%d", fCurrentIOState->fTrackID);
02200       i = afterTrackNumPosn;
02201       int j = i + strlen(&newSDPLines[i]);
02202       while (1) {
02203         if ((newSDPLines[j] = sdpLines[i]) == '\0') break;
02204         ++i; ++j;
02205       }
02206 
02207       foundSearchString = True;
02208       break;
02209     }
02210   }
02211 
02212   if (!foundSearchString) {
02213     // Because we didn't find a "a=control:trackID=<trackId>" line,
02214     // add one of our own:
02215     sprintf(newSDPLines, "%s%s%d\r\n",
02216             sdpLines, searchStr, fCurrentIOState->fTrackID);
02217   }
02218 
02219   size += addArbitraryString(newSDPLines, False);
02220   delete[] newSDPLines;
02221 addAtomEnd;
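      // The hint track's 'sdp ' atom stores the subsession's saved SDP description,
      // with any "a=control:trackID=<n>" value rewritten (case-insensitively) to this
      // track's actual track ID; if no such line was present, one is appended.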
02222 
02223 addAtom(hinf);
02224   size += addAtom_totl();
02225   size += addAtom_npck();
02226   size += addAtom_tpay();
02227   size += addAtom_trpy();
02228   size += addAtom_nump();
02229   size += addAtom_tpyl();
02230   // Is 'maxr' required? #####
02231   size += addAtom_dmed();
02232   size += addAtom_dimm();
02233   size += addAtom_drep();
02234   size += addAtom_tmin();
02235   size += addAtom_tmax();
02236   size += addAtom_pmax();
02237   size += addAtom_dmax();
02238   size += addAtom_payt();
02239 addAtomEnd;
02240 
02241 addAtom(totl);
02242  size += addWord(fCurrentIOState->fHINF.trpy.lo);
02243 addAtomEnd;
02244 
02245 addAtom(npck);
02246  size += addWord(fCurrentIOState->fHINF.nump.lo);
02247 addAtomEnd;
02248 
02249 addAtom(tpay);
02250  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
02251 addAtomEnd;
02252 
02253 addAtom(trpy);
02254  size += addWord(fCurrentIOState->fHINF.trpy.hi);
02255  size += addWord(fCurrentIOState->fHINF.trpy.lo);
02256 addAtomEnd;
02257 
02258 addAtom(nump);
02259  size += addWord(fCurrentIOState->fHINF.nump.hi);
02260  size += addWord(fCurrentIOState->fHINF.nump.lo);
02261 addAtomEnd;
02262 
02263 addAtom(tpyl);
02264  size += addWord(fCurrentIOState->fHINF.tpyl.hi);
02265  size += addWord(fCurrentIOState->fHINF.tpyl.lo);
02266 addAtomEnd;
02267 
02268 addAtom(dmed);
02269  size += addWord(fCurrentIOState->fHINF.dmed.hi);
02270  size += addWord(fCurrentIOState->fHINF.dmed.lo);
02271 addAtomEnd;
02272 
02273 addAtom(dimm);
02274  size += addWord(fCurrentIOState->fHINF.dimm.hi);
02275  size += addWord(fCurrentIOState->fHINF.dimm.lo);
02276 addAtomEnd;
02277 
02278 addAtom(drep);
02279  size += addWord(0);
02280  size += addWord(0);
02281 addAtomEnd;
02282 
02283 addAtom(tmin);
02284  size += addWord(0);
02285 addAtomEnd;
02286 
02287 addAtom(tmax);
02288  size += addWord(0);
02289 addAtomEnd;
02290 
02291 addAtom(pmax);
02292  size += addWord(fCurrentIOState->fHINF.pmax);
02293 addAtomEnd;
02294 
02295 addAtom(dmax);
02296  size += addWord(fCurrentIOState->fHINF.dmax);
02297 addAtomEnd;
02298 
02299 addAtom(payt);
02300   MediaSubsession& ourSubsession = fCurrentIOState->fOurSubsession;
02301   RTPSource* rtpSource = ourSubsession.rtpSource();
02302   size += addWord(rtpSource->rtpPayloadFormat());
02303 
02304   // Also, add a 'rtpmap' string: <mime-subtype>/<rtp-frequency>
02305   unsigned rtpmapStringLength = strlen(ourSubsession.codecName()) + 20;
02306   char* rtpmapString = new char[rtpmapStringLength];
02307   sprintf(rtpmapString, "%s/%d",
02308           ourSubsession.codecName(), rtpSource->timestampFrequency());
02309   size += addArbitraryString(rtpmapString);
02310   delete[] rtpmapString;
02311 addAtomEnd;
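      // The 'payt' atom records the RTP payload format number, followed by a string of
      // the form "<codec name>/<timestamp frequency>" (the 'rtpmap' information).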
02312 
02313 // A dummy atom (with name "????"):
02314 unsigned QuickTimeFileSink::addAtom_dummy() {
02315     int64_t initFilePosn = TellFile64(fOutFid);
02316     unsigned size = addAtomHeader("????");
02317 addAtomEnd;
