http://blog.csdn.net/xipiaoyouzi/article/details/65632572
1、DynamicRTSPServer.cpp
[cpp] view plain copy ServerMediaSession* DynamicRTSPServer ::lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession) { // First, check whether the specified "streamName" exists as a local file: // Next, check whether we already have a "ServerMediaSession" for this file: ServerMediaSession* sms = RTSPServer::lookupServerMediaSession(streamName); Boolean smsExists = sms != NULL; FILE* fid = fopen(streamName, "rb"); Boolean fileExists = fid != NULL; if(strcmp(streamName,"live") == 0) { if (smsExists) { // "sms" was created for a file that no longer exists. Remove it: removeServerMediaSession(sms); sms = NULL; } if (sms == NULL) { sms = createNewSMS(envir(), streamName, fid); addServerMediaSession(sms); } return sms; } // Handle the four possibilities for "fileExists" and "smsExists": if (!fileExists) { if (smsExists) { // "sms" was created for a file that no longer exists. Remove it: removeServerMediaSession(sms); sms = NULL; } return NULL; } else { if (smsExists && isFirstLookupInSession) { // Remove the existing "ServerMediaSession" and create a new one, in case the underlying // file has changed in some way: removeServerMediaSession(sms); sms = NULL; } if (sms == NULL) { sms = createNewSMS(envir(), streamName, fid); addServerMediaSession(sms); } fclose(fid); return sms; } } [cpp] view plain copy static ServerMediaSession* createNewSMS(UsageEnvironment& env, char const* fileName, FILE* /*fid*/) { ServerMediaSession* sms = NULL; Boolean const reuseSource = False; if (strcmp(fileName, "live") == 0) { NEW_SMS("live"); //env << "live detected. 
addsubsession:\n"; //sms->addSubsession(LiveADTSAudioServerMediaSubsession::createNew(env, fileName, reuseSource)); OutPacketBuffer::maxSize = 300000; sms->addSubsession(LiveVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); return sms; } // Use the file name extension to determine the type of "ServerMediaSession": char const* extension = strrchr(fileName, '.'); if (extension == NULL) return NULL; if (strcmp(extension, ".aac") == 0) { // Assumed to be an AAC Audio (ADTS format) file: NEW_SMS("AAC Audio"); sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".amr") == 0) { // Assumed to be an AMR Audio file: NEW_SMS("AMR Audio"); sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".ac3") == 0) { // Assumed to be an AC-3 Audio file: NEW_SMS("AC-3 Audio"); sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".m4e") == 0) { // Assumed to be a MPEG-4 Video Elementary Stream file: NEW_SMS("MPEG-4 Video"); sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".264") == 0) { // Assumed to be a H.264 Video Elementary Stream file: NEW_SMS("H.264 Video"); OutPacketBuffer::maxSize = 300000; // allow for some possibly large H.264 frames sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".265") == 0) { // Assumed to be a H.265 Video Elementary Stream file: NEW_SMS("H.265 Video"); OutPacketBuffer::maxSize = 300000; // allow for some possibly large H.265 frames sms->addSubsession(H265VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".mp3") == 0) { // Assumed to be a MPEG-1 or 2 Audio file: NEW_SMS("MPEG-1 or 2 Audio"); // To stream using 'ADUs' rather 
than raw MP3 frames, uncomment the following: //#define STREAM_USING_ADUS 1 // To also reorder ADUs before streaming, uncomment the following: //#define INTERLEAVE_ADUS 1 // (For more information about ADUs and interleaving, // see <http://www.live555.com/rtp-mp3/>) Boolean useADUs = False; Interleaving* interleaving = NULL; #ifdef STREAM_USING_ADUS useADUs = True; #ifdef INTERLEAVE_ADUS unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own... unsigned const interleaveCycleSize = (sizeof interleaveCycle)/(sizeof (unsigned char)); interleaving = new Interleaving(interleaveCycleSize, interleaveCycle); #endif #endif sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving)); } else if (strcmp(extension, ".mpg") == 0) { // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file: NEW_SMS("MPEG-1 or 2 Program Stream"); MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource); sms->addSubsession(demux->newVideoServerMediaSubsession()); sms->addSubsession(demux->newAudioServerMediaSubsession()); } else if (strcmp(extension, ".vob") == 0) { // Assumed to be a VOB (MPEG-2 Program Stream, with AC-3 audio) file: NEW_SMS("VOB (MPEG-2 video with AC-3 audio)"); MPEG1or2FileServerDemux* demux = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource); sms->addSubsession(demux->newVideoServerMediaSubsession()); sms->addSubsession(demux->newAC3AudioServerMediaSubsession()); } else if (strcmp(extension, ".ts") == 0) { // Assumed to be a MPEG Transport Stream file: // Use an index file name that's the same as the TS file name, except with ".tsx": unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0" char* indexFileName = new char[indexFileNameLen]; sprintf(indexFileName, "%sx", fileName); NEW_SMS("MPEG Transport Stream"); sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource)); 
delete[] indexFileName; } else if (strcmp(extension, ".wav") == 0) { // Assumed to be a WAV Audio file: NEW_SMS("WAV Audio Stream"); // To convert 16-bit PCM data to 8-bit u-law, prior to streaming, // change the following to True: Boolean convertToULaw = False; sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw)); } else if (strcmp(extension, ".dv") == 0) { // Assumed to be a DV Video file // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000). OutPacketBuffer::maxSize = 300000; NEW_SMS("DV Video"); sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource)); } else if (strcmp(extension, ".mkv") == 0 || strcmp(extension, ".webm") == 0) { // Assumed to be a Matroska file (note that WebM ('.webm') files are also Matroska files) OutPacketBuffer::maxSize = 100000; // allow for some possibly large VP8 or VP9 frames NEW_SMS("Matroska video+audio+(optional)subtitles"); // Create a Matroska file server demultiplexor for the specified file. // (We enter the event loop to wait for this to complete.) MatroskaDemuxCreationState creationState; creationState.watchVariable = 0; MatroskaFileServerDemux::createNew(env, fileName, onMatroskaDemuxCreation, &creationState); env.taskScheduler().doEventLoop(&creationState.watchVariable); ServerMediaSubsession* smss; while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) { sms->addSubsession(smss); } } else if (strcmp(extension, ".ogg") == 0 || strcmp(extension, ".ogv") == 0 || strcmp(extension, ".opus") == 0) { // Assumed to be an Ogg file NEW_SMS("Ogg video and/or audio"); // Create a Ogg file server demultiplexor for the specified file. // (We enter the event loop to wait for this to complete.) 
OggDemuxCreationState creationState; creationState.watchVariable = 0; OggFileServerDemux::createNew(env, fileName, onOggDemuxCreation, &creationState); env.taskScheduler().doEventLoop(&creationState.watchVariable); ServerMediaSubsession* smss; while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) { sms->addSubsession(smss); } } return sms; }
2、添加LiveADTSAudioServerMediaSubsession（Add the LiveADTSAudioServerMediaSubsession class）
[cpp] view plain copy #ifndef _H264_STREAM_FILE_SOURCE_HH #define _H264_STREAM_FILE_SOURCE_HH #ifndef _FRAMED_FILE_SOURCE_HH #include "FramedFileSource.hh" #endif #include<pthread.h> #include "semaphore.h" #define H264_BUF_SIZE 150000 #define H264_BUF_COUNT 10 typedef void (*CB_FUN)(void); extern void h264_buf_init(); extern void h264_buf_destroy(); extern Boolean h264_buf_full(); extern Boolean h264_buf_empty(); extern int h264_buf_put(unsigned char* buf,int len); extern unsigned char* h264_buf_get(int* len); extern sem_t h264_f; extern sem_t h264_e; extern int b264IsInit; extern CB_FUN startfun; extern CB_FUN endfun; class LiveH264StreamSource: public FramedSource { public: static LiveH264StreamSource* createNew(UsageEnvironment& env, char const* fileName, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // "preferredFrameSize" == 0 means 'no preference' // "playTimePerFrame" is in microseconds /* static LiveH264StreamSource* createNew(UsageEnvironment& env, unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0); // an alternative version of "createNew()" that's used if you already have // an open file. 
*/ u_int64_t fileSize() const { return fFileSize; } // 0 means zero-length, unbounded, or unknown void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0); // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF void seekToByteRelative(int64_t offset, u_int64_t numBytesToStream = 0); void seekToEnd(); // to force EOF handling on the next read protected: LiveH264StreamSource(UsageEnvironment& env, unsigned preferredFrameSize, unsigned playTimePerFrame); // called only by createNew() virtual ~LiveH264StreamSource(); static void fileReadableHandler(LiveH264StreamSource* source, int mask); void doReadFromFile(); private: // redefined virtual functions: virtual void doGetNextFrame(); virtual void doStopGettingFrames(); protected: u_int64_t fFileSize; private: unsigned fPreferredFrameSize; unsigned fPlayTimePerFrame; Boolean fFidIsSeekable; unsigned fLastPlayTime; Boolean fHaveStartedReading; Boolean fLimitNumBytesToStream; u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True }; #endif [cpp] view plain copy #include "LiveADTSAudioServerMediaSubsession.hh" #include "LiveADTSAudioSource.hh" #include "MPEG4GenericRTPSink.hh" LiveADTSAudioServerMediaSubsession* LiveADTSAudioServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) { return new LiveADTSAudioServerMediaSubsession(env, fileName, reuseFirstSource); } LiveADTSAudioServerMediaSubsession ::LiveADTSAudioServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) : FileServerMediaSubsession(env, fileName, reuseFirstSource) { } LiveADTSAudioServerMediaSubsession ::~LiveADTSAudioServerMediaSubsession() { } FramedSource* LiveADTSAudioServerMediaSubsession ::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) { estBitrate = 96; // kbps, estimate return LiveADTSAudioSource::createNew(envir()); } RTPSink* 
LiveADTSAudioServerMediaSubsession ::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) { LiveADTSAudioSource* adtsSource = (LiveADTSAudioSource*)inputSource; return MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, adtsSource->samplingFrequency(), "audio", "AAC-hbr", adtsSource->configStr(), adtsSource->numChannels()); } 3、添加LiveADTSAudioSource类
[cpp] view plain copy
// LiveADTSAudioSource.hh -- live AAC (ADTS) source fed from a shared ring buffer.
#ifndef _LiveADTSAudioSource_HH
#define _LiveADTSAudioSource_HH
#ifndef _FRAMED_FILE_SOURCE_HH
#include "FramedFileSource.hh"
#endif
#include<pthread.h>
#include "semaphore.h"
// Ring-buffer geometry: AAC_BUF_COUNT slots of AAC_BUF_SIZE bytes each.
#define AAC_BUF_SIZE 10000
#define AAC_BUF_COUNT 20
// Producer/consumer ring buffer shared with the audio-capture thread:
extern void aac_buf_init();
extern Boolean aac_buf_full();
extern Boolean aac_buf_empty();
extern int aac_buf_put(unsigned char* buf,int len);
extern unsigned char* aac_buf_get();
extern void aac_buf_destroy();
extern sem_t aac_f; // filled-slot count (initialized to 0 below)
extern sem_t aac_e; // empty-slot count (initialized to AAC_BUF_COUNT below)
extern int bIsInit;
class LiveADTSAudioSource: public FramedSource {
public:
  static LiveADTSAudioSource* createNew(UsageEnvironment& env);
  unsigned samplingFrequency() const { return fSamplingFrequency; }
  unsigned numChannels() const { return fNumChannels; }
  char const* configStr() const { return fConfigStr; }
  // returns the 'AudioSpecificConfig' for this stream (in ASCII form)
private:
  LiveADTSAudioSource(UsageEnvironment& env, u_int8_t profile,
                      u_int8_t samplingFrequencyIndex,
                      u_int8_t channelConfiguration);
  // called only by createNew()
  virtual ~LiveADTSAudioSource();
private:
  // redefined virtual functions:
  virtual void doGetNextFrame();
private:
  unsigned fSamplingFrequency;
  unsigned fNumChannels;
  unsigned fuSecsPerFrame;
  char fConfigStr[5]; // 4 ASCII hex digits + NUL -- presumably the
                      // AudioSpecificConfig in hex; confirm in the .cpp
};
#endif
[cpp] view plain copy
// LiveADTSAudioSource.cpp -- globals and ring-buffer helpers.
#include "LiveADTSAudioSource.hh"
#include "InputFile.hh"
#include <GroupsockHelper.hh>
// ADTSAudioFileSource //
// Sampling frequencies indexed by the 4-bit ADTS sampling_frequency_index:
static unsigned const samplingFrequencyTable[16] = {
  96000, 88200, 64000, 48000,
  44100, 32000, 24000, 22050,
  16000, 12000, 11025, 8000,
  7350, 0, 0, 0
};
// Ring-buffer storage and indices.  "aac_buf_tail" is the next slot written
// (see aac_buf_put below); "aac_buf_head" is presumably the next slot read by
// aac_buf_get, whose body is not visible here -- TODO confirm.
unsigned char aac_framebuf[AAC_BUF_COUNT][AAC_BUF_SIZE];
int aac_frame_len[AAC_BUF_COUNT];
int aac_buf_head;
int aac_buf_tail;
int aac_buf_size;
void aac_buf_init();
void aac_buf_destroy();
Boolean aac_buf_full();
Boolean aac_buf_empty();
int aac_buf_put(unsigned char* buf,int len);
unsigned char* aac_buf_get();
sem_t aac_f; // filled slots, starts at 0
sem_t aac_e; // empty slots, starts at AAC_BUF_COUNT
sem_t aac_m; // initialized to 1; taken around buffer updates (mutex-like)
int bIsInit = 0;
// Initialize the ring buffer and its semaphores.
// NOTE(review): "bIsInit" is tested here but never set to 1 anywhere in the
// visible code, so a second call would re-initialize the semaphores and
// indices -- confirm it is set elsewhere (cf. "b264IsInit" for H.264).
void aac_buf_init() {
  if(bIsInit == 0) {
    sem_init(&aac_f,0,0);
    sem_init(&aac_e,0,AAC_BUF_COUNT);
    sem_init(&aac_m,0,1);
    aac_buf_head = 0;
    aac_buf_tail = 0;
    aac_buf_size = 0;
  }
}
// True when every slot is occupied.
Boolean aac_buf_full() {
  if(aac_buf_size == AAC_BUF_COUNT)
    return True;
  return False;
}
// True when no slot is occupied.
Boolean aac_buf_empty() {
  if(aac_buf_size == 0)
    return True;
  return False;
}
// Copy one AAC frame of "len" bytes into the tail slot; blocks on "aac_e"
// until a slot is free, then takes "aac_m" before touching the buffer.
int aac_buf_put(unsigned char* buf,int len) {
  sem_wait(&aac_e);
  sem_wait(&aac_m);
  bzero(aac_framebuf[aac_buf_tail],AAC_BUF_SIZE);
  memcpy(aac_framebuf[aac_buf_tail],buf,len);
  aac_frame_len[aac_buf_tail] = len;
  aac_buf_tail = (aac_buf_tail + 1)
  // (source truncated here -- the remainder of aac_buf_put is not visible)