testProgs/testWAVAudioStreamer.cpp

Go to the documentation of this file.
00001 /**********
00002 This library is free software; you can redistribute it and/or modify it under
00003 the terms of the GNU Lesser General Public License as published by the
00004 Free Software Foundation; either version 2.1 of the License, or (at your
00005 option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
00006 
00007 This library is distributed in the hope that it will be useful, but WITHOUT
00008 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00009 FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
00010 more details.
00011 
00012 You should have received a copy of the GNU Lesser General Public License
00013 along with this library; if not, write to the Free Software Foundation, Inc.,
00014 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
00015 **********/
00016 // Copyright (c) 1996-2014, Live Networks, Inc.  All rights reserved
00017 // A test program that streams a WAV audio file via RTP/RTCP
00018 // main program
00019 
00020 #include "liveMedia.hh"
00021 #include "GroupsockHelper.hh"
00022 
00023 #include "BasicUsageEnvironment.hh"
00024 
00025 // To convert 16-bit samples to 8-bit u-law ("u" is the Greek letter "mu")
00026 // encoding, before streaming, uncomment the following line:
00027 //#define CONVERT_TO_ULAW 1
00028 
// Global usage environment, created in main() and used by every routine
// below for console output ("*env << ...") and task scheduling.
UsageEnvironment* env;

void play(); // forward declaration; defined below, after main()
00032 
00033 int main(int argc, char** argv) {
00034   // Begin by setting up our usage environment:
00035   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
00036   env = BasicUsageEnvironment::createNew(*scheduler);
00037 
00038   play();
00039 
00040   env->taskScheduler().doEventLoop(); // does not return
00041   return 0; // only to prevent compiler warnings
00042 }
00043 
// Name of the WAV file to stream (read from the current working directory).
char const* inputFileName = "test.wav";

void afterPlaying(void* clientData); // forward

// A structure to hold the state of the current session.
// It is used in the "afterPlaying()" function to clean up the session.
struct sessionState_t {
  FramedSource* source;        // the audio source (possibly wrapped in a byte-order / u-law filter)
  RTPSink* sink;               // RTP sink that packetizes and transmits the audio
  RTCPInstance* rtcpInstance;  // RTCP instance paired with "sink"
  Groupsock* rtpGroupsock;     // multicast groupsock carrying RTP packets
  Groupsock* rtcpGroupsock;    // multicast groupsock carrying RTCP packets
  RTSPServer* rtspServer;      // RTSP server used to announce/describe the stream
} sessionState;
00058 
00059 void play() {
00060   // Open the file as a 'WAV' file:
00061   WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(*env, inputFileName);
00062   if (wavSource == NULL) {
00063     *env << "Unable to open file \"" << inputFileName
00064          << "\" as a WAV audio file source: "
00065          << env->getResultMsg() << "\n";
00066     exit(1);
00067   }
00068 
00069   // Get attributes of the audio source:
00070   unsigned char audioFormat = wavSource->getAudioFormat();
00071   unsigned char const bitsPerSample = wavSource->bitsPerSample();
00072   // We handle only 4,8,16,20,24 bits-per-sample audio:
00073   if (bitsPerSample%4 != 0 || bitsPerSample < 4 || bitsPerSample > 24 || bitsPerSample == 12) {
00074     *env << "The input file contains " << bitsPerSample << " bit-per-sample audio, which we don't handle\n";
00075     exit(1);
00076   }
00077   unsigned const samplingFrequency = wavSource->samplingFrequency();
00078   unsigned char const numChannels = wavSource->numChannels();
00079   unsigned bitsPerSecond = samplingFrequency*bitsPerSample*numChannels;
00080   *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
00081   *env << bitsPerSample << " bits-per-sample, ";
00082   *env << numChannels << " channels => ";
00083   *env << bitsPerSecond << " bits-per-second\n";
00084 
00085   char const* mimeType;
00086   unsigned char payloadFormatCode = 96; // by default, unless a static RTP payload type can be used
00087 
00088   // Add in any filter necessary to transform the data prior to streaming.
00089   // (This is where any audio compression would get added.)
00090   sessionState.source = wavSource; // by default
00091   if (audioFormat == WA_PCM) {
00092     if (bitsPerSample == 16) {
00093       // Note that samples in the WAV audio file are in little-endian order.
00094 #ifdef CONVERT_TO_ULAW
00095       // Add a filter that converts from raw 16-bit PCM audio (in little-endian order) to 8-bit u-law audio:
00096       sessionState.source = uLawFromPCMAudioSource::createNew(*env, wavSource, 1/*little-endian*/);
00097       if (sessionState.source == NULL) {
00098         *env << "Unable to create a u-law filter from the PCM audio source: " << env->getResultMsg() << "\n";
00099         exit(1);
00100       }
00101       bitsPerSecond /= 2;
00102       *env << "Converting to 8-bit u-law audio for streaming => " << bitsPerSecond << " bits-per-second\n";
00103       mimeType = "PCMU";
00104       if (samplingFrequency == 8000 && numChannels == 1) {
00105         payloadFormatCode = 0; // a static RTP payload type
00106       }
00107 #else
00108       // Add a filter that converts from little-endian to network (big-endian) order: 
00109       sessionState.source = EndianSwap16::createNew(*env, wavSource);
00110       if (sessionState.source == NULL) {
00111         *env << "Unable to create a little->bit-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
00112         exit(1);
00113       }
00114       *env << "Converting to network byte order for streaming\n";
00115       mimeType = "L16";
00116       if (samplingFrequency == 44100 && numChannels == 2) {
00117         payloadFormatCode = 10; // a static RTP payload type
00118       } else if (samplingFrequency == 44100 && numChannels == 1) {
00119         payloadFormatCode = 11; // a static RTP payload type
00120       }
00121 #endif
00122     } else if (bitsPerSample == 20 || bitsPerSample == 24) {
00123       // Add a filter that converts from little-endian to network (big-endian) order: 
00124       sessionState.source = EndianSwap24::createNew(*env, wavSource);
00125       if (sessionState.source == NULL) {
00126         *env << "Unable to create a little->bit-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
00127         exit(1);
00128       }
00129       *env << "Converting to network byte order for streaming\n";
00130       mimeType = bitsPerSample == 20 ? "L20" : "L24";
00131     } else { // bitsPerSample == 8 (we assume that bitsPerSample == 4 is only for WA_IMA_ADPCM)
00132       // Don't do any transformation; send the 8-bit PCM data 'as is':
00133       mimeType = "L8";
00134     }
00135   } else if (audioFormat == WA_PCMU) {
00136     mimeType = "PCMU";
00137     if (samplingFrequency == 8000 && numChannels == 1) {
00138       payloadFormatCode = 0; // a static RTP payload type                                                                          
00139     }
00140   } else if (audioFormat == WA_PCMA) {
00141     mimeType = "PCMA";
00142     if (samplingFrequency == 8000 && numChannels == 1) {
00143       payloadFormatCode = 8; // a static RTP payload type                                                                          
00144     } 
00145   } else if (audioFormat == WA_IMA_ADPCM) {
00146     mimeType = "DVI4";
00147     // Use a static payload type, if one is defined:                                                                               
00148     if (numChannels == 1) {
00149       if (samplingFrequency == 8000) {
00150         payloadFormatCode = 5; // a static RTP payload type                                                                        
00151       } else if (samplingFrequency == 16000) {
00152         payloadFormatCode = 6; // a static RTP payload type                                                                        
00153       } else if (samplingFrequency == 11025) {
00154         payloadFormatCode = 16; // a static RTP payload type                                                                       
00155       } else if (samplingFrequency == 22050) {
00156         payloadFormatCode = 17; // a static RTP payload type                                                                       
00157       }
00158     }
00159   } else { //unknown format                                                                                                        
00160     *env << "Unknown audio format code \"" << audioFormat << "\" in WAV file header\n";
00161     exit(1);
00162   }
00163 
00164   // Create 'groupsocks' for RTP and RTCP:
00165   struct in_addr destinationAddress;
00166   destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
00167   // Note: This is a multicast address.  If you wish instead to stream
00168   // using unicast, then you should use the "testOnDemandRTSPServer" demo application,
00169   // or the "LIVE555 Media Server" - not this application - as a model.
00170 
00171   const unsigned short rtpPortNum = 2222;
00172   const unsigned short rtcpPortNum = rtpPortNum+1;
00173   const unsigned char ttl = 255;
00174 
00175   const Port rtpPort(rtpPortNum);
00176   const Port rtcpPort(rtcpPortNum);
00177 
00178   sessionState.rtpGroupsock
00179     = new Groupsock(*env, destinationAddress, rtpPort, ttl);
00180   sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
00181   sessionState.rtcpGroupsock
00182     = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
00183   sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source
00184 
00185   // Create an appropriate audio RTP sink (using "SimpleRTPSink") from the RTP 'groupsock':
00186   sessionState.sink
00187     = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
00188                                payloadFormatCode, samplingFrequency,
00189                                "audio", mimeType, numChannels);
00190 
00191   // Create (and start) a 'RTCP instance' for this RTP sink:
00192   const unsigned estimatedSessionBandwidth = (bitsPerSecond + 500)/1000; // in kbps; for RTCP b/w share
00193   const unsigned maxCNAMElen = 100;
00194   unsigned char CNAME[maxCNAMElen+1];
00195   gethostname((char*)CNAME, maxCNAMElen);
00196   CNAME[maxCNAMElen] = '\0'; // just in case
00197   sessionState.rtcpInstance
00198     = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
00199                               estimatedSessionBandwidth, CNAME,
00200                               sessionState.sink, NULL /* we're a server */,
00201                               True /* we're a SSM source*/);
00202   // Note: This starts RTCP running automatically
00203 
00204   // Create and start a RTSP server to serve this stream:
00205   sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
00206   if (sessionState.rtspServer == NULL) {
00207     *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
00208     exit(1);
00209   }
00210   ServerMediaSession* sms
00211     = ServerMediaSession::createNew(*env, "testStream", inputFileName,
00212            "Session streamed by \"testWAVAudiotreamer\"", True/*SSM*/);
00213   sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
00214   sessionState.rtspServer->addServerMediaSession(sms);
00215 
00216   char* url = sessionState.rtspServer->rtspURL(sms);
00217   *env << "Play this stream using the URL \"" << url << "\"\n";
00218   delete[] url;
00219 
00220   // Finally, start the streaming:
00221   *env << "Beginning streaming...\n";
00222   sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
00223 }
00224 
00225 
// Completion handler passed to "startPlaying()"; invoked once the entire
// WAV file has been streamed.  Tears down every object recorded in
// "sessionState" and then terminates the process.
// NOTE: the teardown order below is deliberate (server, then RTCP, then
// sink, then the sockets/source) — do not reorder.
void afterPlaying(void* /*clientData*/) {
  *env << "...done streaming\n";

  // End by closing the media:
  Medium::close(sessionState.rtspServer);
  Medium::close(sessionState.rtcpInstance); // stops RTCP (sends a BYE) before the sink goes away
  Medium::close(sessionState.sink);
  delete sessionState.rtpGroupsock;
  Medium::close(sessionState.source); // also closes the underlying WAV file source
  delete sessionState.rtcpGroupsock;

  // We're done:
  exit(0);
}

Generated on Tue Mar 25 14:35:37 2014 for live by  doxygen 1.5.2