[SCM] liblivemedia packaging branch, master, updated. debian/2008.07.25-2-12-g51e2a07
xtophe-guest at users.alioth.debian.org
xtophe-guest at users.alioth.debian.org
Sat Dec 5 14:51:31 UTC 2009
The following commit has been merged in the master branch:
commit 0e4f1c0d8e48a835e7d26ab1f3f77e0d06a2574b
Author: Christophe Mutricy <xtophe at videolan.org>
Date: Fri Dec 4 18:30:31 2009 +0000
Imported Upstream version 2009.11.27
diff --git a/BasicUsageEnvironment/BasicTaskScheduler.cpp b/BasicUsageEnvironment/BasicTaskScheduler.cpp
index 6a7e37b..5a163c0 100644
--- a/BasicUsageEnvironment/BasicTaskScheduler.cpp
+++ b/BasicUsageEnvironment/BasicTaskScheduler.cpp
@@ -75,7 +75,7 @@ void BasicTaskScheduler::SingleStep(unsigned maxDelayTime) {
// For some unknown reason, select() in Windoze sometimes fails with WSAEINVAL if
// it was called with no entries set in "readSet". If this happens, ignore it:
if (err == WSAEINVAL && readSet.fd_count == 0) {
- err = 0;
+ err = EINTR;
// To stop this from happening again, create a dummy readable socket:
int dummySocketNum = socket(AF_INET, SOCK_DGRAM, 0);
FD_SET((unsigned)dummySocketNum, &fReadSet);
diff --git a/BasicUsageEnvironment/DelayQueue.cpp b/BasicUsageEnvironment/DelayQueue.cpp
index 64c6c25..e04c058 100644
--- a/BasicUsageEnvironment/DelayQueue.cpp
+++ b/BasicUsageEnvironment/DelayQueue.cpp
@@ -33,7 +33,7 @@ int Timeval::operator>=(const Timeval& arg2) const {
void Timeval::operator+=(const DelayInterval& arg2) {
secs() += arg2.seconds(); usecs() += arg2.useconds();
- if (usecs() >= MILLION) {
+ if (useconds() >= MILLION) {
usecs() -= MILLION;
++secs();
}
@@ -41,23 +41,24 @@ void Timeval::operator+=(const DelayInterval& arg2) {
void Timeval::operator-=(const DelayInterval& arg2) {
secs() -= arg2.seconds(); usecs() -= arg2.useconds();
- if (usecs() < 0) {
+ if ((int)useconds() < 0) {
usecs() += MILLION;
--secs();
}
- if (secs() < 0)
+ if ((int)seconds() < 0)
secs() = usecs() = 0;
+
}
DelayInterval operator-(const Timeval& arg1, const Timeval& arg2) {
time_base_seconds secs = arg1.seconds() - arg2.seconds();
time_base_seconds usecs = arg1.useconds() - arg2.useconds();
- if (usecs < 0) {
+ if ((int)usecs < 0) {
usecs += MILLION;
--secs;
}
- if (secs < 0)
+ if ((int)secs < 0)
return DELAY_ZERO;
else
return DelayInterval(secs, usecs);
@@ -193,6 +194,11 @@ DelayQueueEntry* DelayQueue::findEntryByToken(long tokenToFind) {
void DelayQueue::synchronize() {
// First, figure out how much time has elapsed since the last sync:
EventTime timeNow = TimeNow();
+ if (timeNow < fLastSyncTime) {
+ // The system clock has apparently gone back in time; reset our sync time and return:
+ fLastSyncTime = timeNow;
+ return;
+ }
DelayInterval timeSinceLastSync = timeNow - fLastSyncTime;
fLastSyncTime = timeNow;
@@ -217,8 +223,4 @@ EventTime TimeNow() {
return EventTime(tvNow.tv_sec, tvNow.tv_usec);
}
-DelayInterval TimeRemainingUntil(const EventTime& futureEvent) {
- return futureEvent - TimeNow();
-}
-
const EventTime THE_END_OF_TIME(INT_MAX);
diff --git a/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh b/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh
index 8056534..099c2e1 100644
--- a/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh
+++ b/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh
@@ -4,7 +4,7 @@
#ifndef _BASICUSAGEENVIRONMENT_VERSION_HH
#define _BASICUSAGEENVIRONMENT_VERSION_HH
-#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2009.07.09"
-#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1247097600
+#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2009.11.27"
+#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1259280000
#endif
diff --git a/BasicUsageEnvironment/include/DelayQueue.hh b/BasicUsageEnvironment/include/DelayQueue.hh
index 6dd7160..2227bd5 100644
--- a/BasicUsageEnvironment/include/DelayQueue.hh
+++ b/BasicUsageEnvironment/include/DelayQueue.hh
@@ -127,9 +127,6 @@ public:
EventTime TimeNow();
-DelayInterval TimeRemainingUntil(EventTime const& futureEvent);
-// Returns DELAY_ZERO if "futureEvent" has already occurred.
-
extern EventTime const THE_END_OF_TIME;
diff --git a/Makefile.tail b/Makefile.tail
index 6a8f549..6838011 100644
--- a/Makefile.tail
+++ b/Makefile.tail
@@ -1,40 +1,20 @@
##### End of variables to change
LIVEMEDIA_DIR = liveMedia
-LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(LIB_SUFFIX)
GROUPSOCK_DIR = groupsock
-GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(LIB_SUFFIX)
USAGE_ENVIRONMENT_DIR = UsageEnvironment
-USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(LIB_SUFFIX)
BASIC_USAGE_ENVIRONMENT_DIR = BasicUsageEnvironment
-BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(LIB_SUFFIX)
TESTPROGS_DIR = testProgs
-TESTPROGS_APP = $(TESTPROGS_DIR)/testMP3Streamer$(EXE)
MEDIA_SERVER_DIR = mediaServer
-MEDIA_SERVER_APP = $(MEDIA_SERVER_DIR)/mediaServer$(EXE)
-ALL = $(LIVEMEDIA_LIB) \
- $(GROUPSOCK_LIB) \
- $(USAGE_ENVIRONMENT_LIB) \
- $(BASIC_USAGE_ENVIRONMENT_LIB) \
- $(TESTPROGS_APP) \
- $(MEDIA_SERVER_APP)
-all: $(ALL)
-
-
-$(LIVEMEDIA_LIB):
+all:
cd $(LIVEMEDIA_DIR) ; $(MAKE)
-$(GROUPSOCK_LIB):
cd $(GROUPSOCK_DIR) ; $(MAKE)
-$(USAGE_ENVIRONMENT_LIB):
cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE)
-$(BASIC_USAGE_ENVIRONMENT_LIB):
cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE)
-$(TESTPROGS_APP): $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB)
cd $(TESTPROGS_DIR) ; $(MAKE)
-$(MEDIA_SERVER_APP): $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB)
cd $(MEDIA_SERVER_DIR) ; $(MAKE)
clean:
diff --git a/UsageEnvironment/include/UsageEnvironment_version.hh b/UsageEnvironment/include/UsageEnvironment_version.hh
index 5912045..c2ca4b2 100644
--- a/UsageEnvironment/include/UsageEnvironment_version.hh
+++ b/UsageEnvironment/include/UsageEnvironment_version.hh
@@ -4,7 +4,7 @@
#ifndef _USAGEENVIRONMENT_VERSION_HH
#define _USAGEENVIRONMENT_VERSION_HH
-#define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2009.07.09"
-#define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1247097600
+#define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2009.11.27"
+#define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1259280000
#endif
diff --git a/config.armeb-uclibc b/config.armeb-uclibc
index 0fa5eca..922647a 100644
--- a/config.armeb-uclibc
+++ b/config.armeb-uclibc
@@ -1,5 +1,5 @@
CROSS_COMPILE= armeb-linux-uclibc-
-COMPILE_OPTS = $(INCLUDES) -I. -Os -DSOCKLEN_T=socklen_t -DNO_STRSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+COMPILE_OPTS = $(INCLUDES) -I. -Os -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
@@ -11,8 +11,8 @@ OBJ = o
LINK = $(CROSS_COMPILE)gcc -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = $(CROSS_COMPILE)ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.armlinux b/config.armlinux
index 62d44ee..3e604f7 100644
--- a/config.armlinux
+++ b/config.armlinux
@@ -1,5 +1,5 @@
CROSS_COMPILE= arm-elf-
-COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_STRSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)gcc
C_FLAGS = $(COMPILE_OPTS)
@@ -10,8 +10,8 @@ OBJ = o
LINK = $(CROSS_COMPILE)gcc -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = $(CROSS_COMPILE)ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS = $(LINK_OPTS)
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.avr32-linux b/config.avr32-linux
new file mode 100644
index 0000000..14e3a51
--- /dev/null
+++ b/config.avr32-linux
@@ -0,0 +1,18 @@
+CROSS_COMPILE= avr32-linux-uclibc-
+COMPILE_OPTS = -Os $(INCLUDES) -msoft-float -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -fuse-cxa-atexit -DBSD=1
+OBJ = o
+LINK = $(CROSS_COMPILE)c++ -o
+LINK_OPTS =
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.bfin-linux-uclibc b/config.bfin-linux-uclibc
index 5dad85a..3342b7a 100644
--- a/config.bfin-linux-uclibc
+++ b/config.bfin-linux-uclibc
@@ -10,8 +10,8 @@ OBJ = o
LINK = $(CROSS_COMPILER)g++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = $(CROSS_COMPILER)ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic -m elf32bfinfd
+LIBRARY_LINK = $(CROSS_COMPILER)ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.bfin-uclinux b/config.bfin-uclinux
index c36135c..157572e 100644
--- a/config.bfin-uclinux
+++ b/config.bfin-uclinux
@@ -10,8 +10,8 @@ OBJ = o
LINK = $(CROSS_COMPILER)g++ -Wl,-elf2flt -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = $(CROSS_COMPILER)ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = $(CROSS_COMPILER)ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.bsplinux b/config.bsplinux
index e812d95..e1879c0 100644
--- a/config.bsplinux
+++ b/config.bsplinux
@@ -1,5 +1,5 @@
CROSS_COMPILE=
-COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_STRSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
C = c
C_COMPILER = $(CROSS_COMPILE)ecc
C_FLAGS = $(COMPILE_OPTS)
diff --git a/config.freebsd b/config.freebsd
index 78d042b..e71a7df 100644
--- a/config.freebsd
+++ b/config.freebsd
@@ -9,8 +9,8 @@ OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.linux b/config.linux
index e111780..5c7bc8f 100644
--- a/config.linux
+++ b/config.linux
@@ -9,8 +9,8 @@ OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.linux-gdb b/config.linux-gdb
index 4c64030..87387e5 100644
--- a/config.linux-gdb
+++ b/config.linux-gdb
@@ -9,8 +9,8 @@ OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.macosx b/config.macosx
index 417562d..4c719a8 100644
--- a/config.macosx
+++ b/config.macosx
@@ -9,8 +9,8 @@ OBJ = o
LINK = c++ -o
LINK_OPTS = -L.
CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = ld -o
-LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
diff --git a/config.uClinux b/config.uClinux
index e52241e..1e0696b 100644
--- a/config.uClinux
+++ b/config.uClinux
@@ -1,19 +1,20 @@
-COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
-C = c
-C_COMPILER = $(CC)
-CFLAGS += $(COMPILE_OPTS)
+CROSS_COMPILE= arc-linux-uclibc-
+COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+CFLAGS += $(COMPILE_OPTS)
C_FLAGS = $(CFLAGS)
-CPP = cpp
-CPLUSPLUS_COMPILER = $(CXX)
-CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
CPLUSPLUS_FLAGS += $(CPPFLAGS) -fexceptions
-OBJ = o
-LINK = $(CC) -o
-LINK_OPTS = -L. $(LDFLAGS)
-CONSOLE_LINK_OPTS = $(LINK_OPTS)
-LIBRARY_LINK = $(LD) -o
-LIBRARY_LINK_OPTS = -L. -r -Bstatic
-LIB_SUFFIX = a
+OBJ = o
+LINK = $(CROSS_COMPILE)g++ -o
+LINK_OPTS = -L. $(LDFLAGS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION = $(CXXLIBS)
LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
EXE =
diff --git a/groupsock/Groupsock.cpp b/groupsock/Groupsock.cpp
index 8b3b9e9..c4bff3a 100644
--- a/groupsock/Groupsock.cpp
+++ b/groupsock/Groupsock.cpp
@@ -22,16 +22,8 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
//##### Eventually fix the following #include; we shouldn't know about tunnels
#include "TunnelEncaps.hh"
-#ifndef NO_STRSTREAM
-#if (defined(__WIN32__) || defined(_WIN32)) && !defined(__MINGW32__)
-#include <strstrea.h>
-#else
-#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
-#include <strstream>
-#else
-#include <strstream.h>
-#endif
-#endif
+#ifndef NO_SSTREAM
+#include <sstream>
#endif
#include <stdio.h>
diff --git a/groupsock/GroupsockHelper.cpp b/groupsock/GroupsockHelper.cpp
index 09e127d..9faacee 100644
--- a/groupsock/GroupsockHelper.cpp
+++ b/groupsock/GroupsockHelper.cpp
@@ -728,7 +728,7 @@ char const* timestampString() {
int gettimeofday(struct timeval* tp, int* /*tz*/) {
#if defined(_WIN32_WCE)
/* FILETIME of Jan 1 1970 00:00:00. */
- static const unsigned __int64 epoch = 116444736000000000L;
+ static const unsigned __int64 epoch = 116444736000000000LL;
FILETIME file_time;
SYSTEMTIME system_time;
diff --git a/groupsock/NetInterface.cpp b/groupsock/NetInterface.cpp
index 9e25fcd..ee732ac 100644
--- a/groupsock/NetInterface.cpp
+++ b/groupsock/NetInterface.cpp
@@ -21,16 +21,8 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#include "NetInterface.hh"
#include "GroupsockHelper.hh"
-#ifndef NO_STRSTREAM
-#if (defined(__WIN32__) || defined(_WIN32)) && !defined(__MINGW32__)
-#include <strstrea.h>
-#else
-#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
-#include <strstream>
-#else
-#include <strstream.h>
-#endif
-#endif
+#ifndef NO_SSTREAM
+#include <sstream>
#endif
////////// NetInterface //////////
diff --git a/groupsock/include/groupsock_version.hh b/groupsock/include/groupsock_version.hh
index b5b74b6..fa27fc6 100644
--- a/groupsock/include/groupsock_version.hh
+++ b/groupsock/include/groupsock_version.hh
@@ -4,7 +4,7 @@
#ifndef _GROUPSOCK_VERSION_HH
#define _GROUPSOCK_VERSION_HH
-#define GROUPSOCK_LIBRARY_VERSION_STRING "2009.07.09"
-#define GROUPSOCK_LIBRARY_VERSION_INT 1247097600
+#define GROUPSOCK_LIBRARY_VERSION_STRING "2009.11.27"
+#define GROUPSOCK_LIBRARY_VERSION_INT 1259280000
#endif
diff --git a/liveMedia/#H264VideoRTPSink.cpp# b/liveMedia/#H264VideoRTPSink.cpp#
deleted file mode 100644
index 5662fcc..0000000
--- a/liveMedia/#H264VideoRTPSink.cpp#
+++ /dev/null
@@ -1,239 +0,0 @@
-sffmtp/**********
-This library is free software; you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation; either version 2.1 of the License, or (at your
-option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
-
-This library is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
-more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this library; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-**********/
-// "liveMedia"
-// Copyright (c) 1996-2009 Live Networks, Inc. All rights reserved.
-// RTP sink for H.264 video (RFC 3984)
-// Implementation
-
-#include "H264VideoRTPSink.hh"
-#include "H264VideoStreamFramer.hh"
-
-////////// H264VideoRTPSink implementation //////////
-
-H264VideoRTPSink
-::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
- unsigned char rtpPayloadFormat,
- unsigned profile_level_id,
- char const* sprop_parameter_sets_str)
- : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "H264"),
- fOurFragmenter(NULL) {
- // Set up the "a=fmtp:" SDP line for this stream:
- char const* fmtpFmt =
- "a=fmtp:%d packetization-mode=1"
- ";profile-level-id=%06X"
- ";sprop-parameter-sets=%s\r\n";
- unsigned fmtpFmtSize = strlen(fmtpFmt)
- + 3 /* max char len */
- + 8 /* max unsigned len in hex */
- + strlen(sprop_parameter_sets_str);
- char* fmtp = new char[fmtpFmtSize];
- sprintf(fmtp, fmtpFmt,
- rtpPayloadFormat,
- profile_level_id,
- sprop_parameter_sets_str);
- fFmtpSDPLine = strDup(fmtp);
- delete[] fmtp;
-}
-
-H264VideoRTPSink::~H264VideoRTPSink() {
- delete[] fFmtpSDPLine;
- Medium::close(fOurFragmenter);
- fSource = NULL;
-}
-
-H264VideoRTPSink*
-H264VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
- unsigned char rtpPayloadFormat,
- unsigned profile_level_id,
- char const* sprop_parameter_sets_str) {
- return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat,
- profile_level_id, sprop_parameter_sets_str);
-}
-
-Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
- // Our source must be an appropriate framer:
- return source.isH264VideoStreamFramer();
-}
-
-Boolean H264VideoRTPSink::continuePlaying() {
- // First, check whether we have a 'fragmenter' class set up yet.
- // If not, create it now:
- if (fOurFragmenter == NULL) {
- fOurFragmenter = new H264FUAFragmenter(envir(), fSource, OutPacketBuffer::maxSize,
- ourMaxPacketSize() - 12/*RTP hdr size*/);
- fSource = fOurFragmenter;
- }
-
- // Then call the parent class's implementation:
- return MultiFramedRTPSink::continuePlaying();
-}
-
-void H264VideoRTPSink::stopPlaying() {
- // First, call the parent class's implementation, to stop our fragmenter object
- // (and its source):
- MultiFramedRTPSink::stopPlaying();
-
- // Then, close our 'fragmenter' object:
- Medium::close(fOurFragmenter); fOurFragmenter = NULL;
- fSource = NULL;
-}
-
-void H264VideoRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
- unsigned char* /*frameStart*/,
- unsigned /*numBytesInFrame*/,
- struct timeval frameTimestamp,
- unsigned /*numRemainingBytes*/) {
- // Set the RTP 'M' (marker) bit iff
- // 1/ The most recently delivered fragment was the end of
- // (or the only fragment of) an NAL unit, and
- // 2/ This NAL unit was the last NAL unit of an 'access unit' (i.e. video frame).
- if (fOurFragmenter != NULL) {
- H264VideoStreamFramer* framerSource
- = (H264VideoStreamFramer*)(fOurFragmenter->inputSource());
- // This relies on our fragmenter's source being a "MPEG4VideoStreamFramer".
- if (fOurFragmenter->lastFragmentCompletedNALUnit()
- && framerSource != NULL && framerSource->currentNALUnitEndsAccessUnit()) {
- setMarkerBit();
- }
- }
-
- setTimestamp(frameTimestamp);
-}
-
-Boolean H264VideoRTPSink
-::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
- unsigned /*numBytesInFrame*/) const {
- return False;
-}
-
-char const* H264VideoRTPSink::auxSDPLine() {
- return fFmtpSDPLine;
-}
-
-
-////////// H264FUAFragmenter implementation //////////
-
-H264FUAFragmenter::H264FUAFragmenter(UsageEnvironment& env,
- FramedSource* inputSource,
- unsigned inputBufferMax,
- unsigned maxOutputPacketSize)
- : FramedFilter(env, inputSource),
- fInputBufferSize(inputBufferMax+1), fMaxOutputPacketSize(maxOutputPacketSize),
- fNumValidDataBytes(1), fCurDataOffset(1), fSaveNumTruncatedBytes(0),
- fLastFragmentCompletedNALUnit(True) {
- fInputBuffer = new unsigned char[fInputBufferSize];
-}
-
-H264FUAFragmenter::~H264FUAFragmenter() {
- delete[] fInputBuffer;
- fInputSource = NULL; // so that the subsequent ~FramedFilter doesn't delete it
-}
-
-void H264FUAFragmenter::doGetNextFrame() {
- if (fNumValidDataBytes == 1) {
- // We have no NAL unit data currently in the buffer. Read a new one:
- fInputSource->getNextFrame(&fInputBuffer[1], fInputBufferSize - 1,
- afterGettingFrame, this,
- FramedSource::handleClosure, this);
- } else {
- // We have NAL unit data in the buffer. There are three cases to consider:
- // 1. There is a new NAL unit in the buffer, and it's small enough to deliver
- // to the RTP sink (as is).
- // 2. There is a new NAL unit in the buffer, but it's too large to deliver to
- // the RTP sink in its entirety. Deliver the first fragment of this data,
- // as a FU-A packet, with one extra preceding header byte.
- // 3. There is a NAL unit in the buffer, and we've already delivered some
- // fragment(s) of this. Deliver the next fragment of this data,
- // as a FU-A packet, with two extra preceding header bytes.
-
- if (fMaxSize < fMaxOutputPacketSize) { // shouldn't happen
- envir() << "H264FUAFragmenter::doGetNextFrame(): fMaxSize ("
- << fMaxSize << ") is smaller than expected\n";
- } else {
- fMaxSize = fMaxOutputPacketSize;
- }
-
- fLastFragmentCompletedNALUnit = True; // by default
- if (fCurDataOffset == 1) { // case 1 or 2
- if (fNumValidDataBytes - 1 <= fMaxSize) { // case 1
- memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1);
- fFrameSize = fNumValidDataBytes - 1;
- fCurDataOffset = fNumValidDataBytes;
- } else { // case 2
- // We need to send the NAL unit data as FU-A packets. Deliver the first
- // packet now. Note that we add FU indicator and FU header bytes to the front
- // of the packet (reusing the existing NAL header byte for the FU header).
- fInputBuffer[0] = (fInputBuffer[1] & 0xE0) | 28; // FU indicator
- fInputBuffer[1] = 0x80 | (fInputBuffer[1] & 0x1F); // FU header (with S bit)
- memmove(fTo, fInputBuffer, fMaxSize);
- fFrameSize = fMaxSize;
- fCurDataOffset += fMaxSize - 1;
- fLastFragmentCompletedNALUnit = False;
- }
- } else { // case 3
- // We are sending this NAL unit data as FU-A packets. We've already sent the
- // first packet (fragment). Now, send the next fragment. Note that we add
- // FU indicator and FU header bytes to the front. (We reuse these bytes that
- // we already sent for the first fragment, but clear the S bit, and add the E
- // bit if this is the last fragment.)
- fInputBuffer[fCurDataOffset-2] = fInputBuffer[0]; // FU indicator
- fInputBuffer[fCurDataOffset-1] = fInputBuffer[1]&~0x80; // FU header (no S bit)
- unsigned numBytesToSend = 2 + fNumValidDataBytes - fCurDataOffset;
- if (numBytesToSend > fMaxSize) {
- // We can't send all of the remaining data this time:
- numBytesToSend = fMaxSize;
- fLastFragmentCompletedNALUnit = False;
- } else {
- // This is the last fragment:
- fInputBuffer[fCurDataOffset-1] |= 0x40; // set the E bit in the FU header
- fNumTruncatedBytes = fSaveNumTruncatedBytes;
- }
- memmove(fTo, &fInputBuffer[fCurDataOffset-2], numBytesToSend);
- fFrameSize = numBytesToSend;
- fCurDataOffset += numBytesToSend - 2;
- }
-
- if (fCurDataOffset >= fNumValidDataBytes) {
- // We're done with this data. Reset the pointers for receiving new data:
- fNumValidDataBytes = fCurDataOffset = 1;
- }
-
- // Complete delivery to the client:
- FramedSource::afterGetting(this);
- }
-}
-
-void H264FUAFragmenter::afterGettingFrame(void* clientData, unsigned frameSize,
- unsigned numTruncatedBytes,
- struct timeval presentationTime,
- unsigned durationInMicroseconds) {
- H264FUAFragmenter* fragmenter = (H264FUAFragmenter*)clientData;
- fragmenter->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime,
- durationInMicroseconds);
-}
-
-void H264FUAFragmenter::afterGettingFrame1(unsigned frameSize,
- unsigned numTruncatedBytes,
- struct timeval presentationTime,
- unsigned durationInMicroseconds) {
- fNumValidDataBytes += frameSize;
- fSaveNumTruncatedBytes = numTruncatedBytes;
- fPresentationTime = presentationTime;
- fDurationInMicroseconds = durationInMicroseconds;
-
- // Deliver data to the client:
- doGetNextFrame();
-}
diff --git a/liveMedia/#MultiFramedRTPSink.cpp# b/liveMedia/#MultiFramedRTPSink.cpp#
deleted file mode 100644
index 78e48f3..0000000
--- a/liveMedia/#MultiFramedRTPSink.cpp#
+++ /dev/null
@@ -1,419 +0,0 @@
-frame**********
-This library is free software; you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation; either version 2.1 of the License, or (at your
-option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
-
-This library is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
-more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this library; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-**********/
-// "liveMedia"
-// Copyright (c) 1996-2009 Live Networks, Inc. All rights reserved.
-// RTP sink for a common kind of payload format: Those which pack multiple,
-// complete codec frames (as many as possible) into each RTP packet.
-// Implementation
-
-#include "MultiFramedRTPSink.hh"
-#include "GroupsockHelper.hh"
-
-////////// MultiFramedRTPSink //////////
-
-void MultiFramedRTPSink::setPacketSizes(unsigned preferredPacketSize,
- unsigned maxPacketSize) {
- if (preferredPacketSize > maxPacketSize || preferredPacketSize == 0) return;
- // sanity check
-
- delete fOutBuf;
- fOutBuf = new OutPacketBuffer(preferredPacketSize, maxPacketSize);
- fOurMaxPacketSize = maxPacketSize; // save value, in case subclasses need it
-}
-
-MultiFramedRTPSink::MultiFramedRTPSink(UsageEnvironment& env,
- Groupsock* rtpGS,
- unsigned char rtpPayloadType,
- unsigned rtpTimestampFrequency,
- char const* rtpPayloadFormatName,
- unsigned numChannels)
- : RTPSink(env, rtpGS, rtpPayloadType, rtpTimestampFrequency,
- rtpPayloadFormatName, numChannels),
- fOutBuf(NULL), fCurFragmentationOffset(0), fPreviousFrameEndedFragmentation(False) {
- setPacketSizes(1000, 1448);
- // Default max packet size (1500, minus allowance for IP, UDP, UMTP headers)
- // (Also, make it a multiple of 4 bytes, just in case that matters.)
-}
-
-MultiFramedRTPSink::~MultiFramedRTPSink() {
- delete fOutBuf;
-}
-
-void MultiFramedRTPSink
-::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
- unsigned char* /*frameStart*/,
- unsigned /*numBytesInFrame*/,
- struct timeval frameTimestamp,
- unsigned /*numRemainingBytes*/) {
- // default implementation: If this is the first frame in the packet,
- // use its timestamp for the RTP timestamp:
- if (isFirstFrameInPacket()) {
- setTimestamp(frameTimestamp);
- }
-}
-
-Boolean MultiFramedRTPSink::allowFragmentationAfterStart() const {
- return False; // by default
-}
-
-Boolean MultiFramedRTPSink::allowOtherFramesAfterLastFragment() const {
- return False; // by default
-}
-
-Boolean MultiFramedRTPSink
-::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
- unsigned /*numBytesInFrame*/) const {
- return True; // by default
-}
-
-unsigned MultiFramedRTPSink::specialHeaderSize() const {
- // default implementation: Assume no special header:
- return 0;
-}
-
-unsigned MultiFramedRTPSink::frameSpecificHeaderSize() const {
- // default implementation: Assume no frame-specific header:
- return 0;
-}
-
-unsigned MultiFramedRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const {
- // default implementation: Just call numOverflowBytes()
- return fOutBuf->numOverflowBytes(newFrameSize);
-}
-
-void MultiFramedRTPSink::setMarkerBit() {
- unsigned rtpHdr = fOutBuf->extractWord(0);
- rtpHdr |= 0x00800000;
- fOutBuf->insertWord(rtpHdr, 0);
-}
-
-void MultiFramedRTPSink::setTimestamp(struct timeval timestamp) {
- // First, convert the timestamp to a 32-bit RTP timestamp:
- fCurrentTimestamp = convertToRTPTimestamp(timestamp);
-
- // Then, insert it into the RTP packet:
- fOutBuf->insertWord(fCurrentTimestamp, fTimestampPosition);
-}
-
-void MultiFramedRTPSink::setSpecialHeaderWord(unsigned word,
- unsigned wordPosition) {
- fOutBuf->insertWord(word, fSpecialHeaderPosition + 4*wordPosition);
-}
-
-void MultiFramedRTPSink::setSpecialHeaderBytes(unsigned char const* bytes,
- unsigned numBytes,
- unsigned bytePosition) {
- fOutBuf->insert(bytes, numBytes, fSpecialHeaderPosition + bytePosition);
-}
-
-void MultiFramedRTPSink::setFrameSpecificHeaderWord(unsigned word,
- unsigned wordPosition) {
- fOutBuf->insertWord(word, fCurFrameSpecificHeaderPosition + 4*wordPosition);
-}
-
-void MultiFramedRTPSink::setFrameSpecificHeaderBytes(unsigned char const* bytes,
- unsigned numBytes,
- unsigned bytePosition) {
- fOutBuf->insert(bytes, numBytes, fCurFrameSpecificHeaderPosition + bytePosition);
-}
-
-void MultiFramedRTPSink::setFramePadding(unsigned numPaddingBytes) {
- if (numPaddingBytes > 0) {
- // Add the padding bytes (with the last one being the padding size):
- unsigned char paddingBuffer[255]; //max padding
- memset(paddingBuffer, 0, numPaddingBytes);
- paddingBuffer[numPaddingBytes-1] = numPaddingBytes;
- fOutBuf->enqueue(paddingBuffer, numPaddingBytes);
-
- // Set the RTP padding bit:
- unsigned rtpHdr = fOutBuf->extractWord(0);
- rtpHdr |= 0x20000000;
- fOutBuf->insertWord(rtpHdr, 0);
- }
-}
-
-Boolean MultiFramedRTPSink::continuePlaying() {
- // Send the first packet.
- // (This will also schedule any future sends.)
- buildAndSendPacket(True);
- return True;
-}
-
-void MultiFramedRTPSink::stopPlaying() {
- fOutBuf->resetPacketStart();
- fOutBuf->resetOffset();
- fOutBuf->resetOverflowData();
-
- // Then call the default "stopPlaying()" function:
- MediaSink::stopPlaying();
-}
-
-void MultiFramedRTPSink::buildAndSendPacket(Boolean isFirstPacket) {
- fIsFirstPacket = isFirstPacket;
-
- // Set up the RTP header:
- unsigned rtpHdr = 0x80000000; // RTP version 2
- rtpHdr |= (fRTPPayloadType<<16);
- rtpHdr |= fSeqNo; // sequence number
- fOutBuf->enqueueWord(rtpHdr);
-
- // Note where the RTP timestamp will go.
- // (We can't fill this in until we start packing payload frames.)
- fTimestampPosition = fOutBuf->curPacketSize();
- fOutBuf->skipBytes(4); // leave a hole for the timestamp
-
- fOutBuf->enqueueWord(SSRC());
-
- // Allow for a special, payload-format-specific header following the
- // RTP header:
- fSpecialHeaderPosition = fOutBuf->curPacketSize();
- fSpecialHeaderSize = specialHeaderSize();
- fOutBuf->skipBytes(fSpecialHeaderSize);
-
- // Begin packing as many (complete) frames into the packet as we can:
- fTotalFrameSpecificHeaderSizes = 0;
- fNoFramesLeft = False;
- fNumFramesUsedSoFar = 0;
- packFrame();
-}
-
-void MultiFramedRTPSink::packFrame() {
- // Get the next frame.
-
- // First, see if we have an overflow frame that was too big for the last pkt
- if (fOutBuf->haveOverflowData()) {
- // Use this frame before reading a new one from the source
- unsigned frameSize = fOutBuf->overflowDataSize();
- struct timeval presentationTime = fOutBuf->overflowPresentationTime();
- unsigned durationInMicroseconds = fOutBuf->overflowDurationInMicroseconds();
- fOutBuf->useOverflowData();
-
- afterGettingFrame1(frameSize, 0, presentationTime, durationInMicroseconds);
- } else {
- // Normal case: we need to read a new frame from the source
- if (fSource == NULL) return;
-
- fCurFrameSpecificHeaderPosition = fOutBuf->curPacketSize();
- fCurFrameSpecificHeaderSize = frameSpecificHeaderSize();
- fOutBuf->skipBytes(fCurFrameSpecificHeaderSize);
- fTotalFrameSpecificHeaderSizes += fCurFrameSpecificHeaderSize;
-
- fSource->getNextFrame(fOutBuf->curPtr(), fOutBuf->totalBytesAvailable(),
- afterGettingFrame, this, ourHandleClosure, this);
- }
-}
-
-void MultiFramedRTPSink
-::afterGettingFrame(void* clientData, unsigned numBytesRead,
- unsigned numTruncatedBytes,
- struct timeval presentationTime,
- unsigned durationInMicroseconds) {
- MultiFramedRTPSink* sink = (MultiFramedRTPSink*)clientData;
- sink->afterGettingFrame1(numBytesRead, numTruncatedBytes,
- presentationTime, durationInMicroseconds);
-}
-
-void MultiFramedRTPSink
-::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
- struct timeval presentationTime,
- unsigned durationInMicroseconds) {
- if (fIsFirstPacket) {
- // Record the fact that we're starting to play now:
- gettimeofday(&fNextSendTime, NULL);
- }
-
- if (numTruncatedBytes > 0) {
- unsigned const bufferSize = fOutBuf->totalBytesAvailable();
- unsigned newMaxSize = frameSize + numTruncatedBytes;
- envir() << "MultiFramedRTPSink::afterGettingFrame1(): The input frame data was too large for our buffer size ("
- << bufferSize << "). "
- << numTruncatedBytes << " bytes of trailing data was dropped! Correct this by increasing \"OutPacketBuffer::maxSize\" to at least "
- << newMaxSize << ", *before* creating this 'RTPSink'. (Current value is "
- << OutPacketBuffer::maxSize << ".)\n";
- }
- unsigned curFragmentationOffset = fCurFragmentationOffset;
- unsigned numFrameBytesToUse = frameSize;
- unsigned overflowBytes = 0;
-
- // If we have already packed one or more frames into this packet,
- // check whether this new frame is eligible to be packed after them.
- // (This is independent of whether the packet has enough room for this
- // new frame; that check comes later.)
- if (fNumFramesUsedSoFar > 0) {
- if ((fPreviousFrameEndedFragmentation
- && !allowOtherFramesAfterLastFragment())
- || !frameCanAppearAfterPacketStart(fOutBuf->curPtr(), frameSize)) {
- // Save away this frame for next time:
- numFrameBytesToUse = 0;
- fOutBuf->setOverflowData(fOutBuf->curPacketSize(), frameSize,
- presentationTime, durationInMicroseconds);
- }
- }
- fPreviousFrameEndedFragmentation = False;
-
- if (numFrameBytesToUse > 0) {
- // Check whether this frame overflows the packet
- if (fOutBuf->wouldOverflow(frameSize)) {
- // Don't use this frame now; instead, save it as overflow data, and
- // send it in the next packet instead. However, if the frame is too
- // big to fit in a packet by itself, then we need to fragment it (and
- // use some of it in this packet, if the payload format permits this.)
- if (isTooBigForAPacket(frameSize)
- && (fNumFramesUsedSoFar == 0 || allowFragmentationAfterStart())) {
- // We need to fragment this frame, and use some of it now:
- overflowBytes = computeOverflowForNewFrame(frameSize);
- numFrameBytesToUse -= overflowBytes;
- fCurFragmentationOffset += numFrameBytesToUse;
- } else {
- // We don't use any of this frame now:
- overflowBytes = frameSize;
- numFrameBytesToUse = 0;
- }
- fOutBuf->setOverflowData(fOutBuf->curPacketSize() + numFrameBytesToUse,
- overflowBytes, presentationTime,
- durationInMicroseconds);
- } else if (fCurFragmentationOffset > 0) {
- // This is the last fragment of a frame that was fragmented over
- // more than one packet. Do any special handling for this case:
- fCurFragmentationOffset = 0;
- fPreviousFrameEndedFragmentation = True;
- }
- }
-
- if (numFrameBytesToUse == 0) {
- // Send our packet now, because we have filled it up:
- sendPacketIfNecessary();
- } else {
- // Use this frame in our outgoing packet:
- unsigned char* frameStart = fOutBuf->curPtr();
- fOutBuf->increment(numFrameBytesToUse);
- // do this now, in case "doSpecialFrameHandling()" calls "setFramePadding()" to append padding bytes
-
- // Here's where any payload format specific processing gets done:
- doSpecialFrameHandling(curFragmentationOffset, frameStart,
- numFrameBytesToUse, presentationTime,
- overflowBytes);
-
- ++fNumFramesUsedSoFar;
-
- // Update the time at which the next packet should be sent, based
- // on the duration of the frame that we just packed into it.
- // However, if this frame has overflow data remaining, then don't
- // count its duration yet.
- if (overflowBytes == 0) {
- fNextSendTime.tv_usec += durationInMicroseconds;
- fNextSendTime.tv_sec += fNextSendTime.tv_usec/1000000;
- fNextSendTime.tv_usec %= 1000000;
- }
-
- // Send our packet now if (i) it's already at our preferred size, or
- // (ii) (heuristic) another frame of the same size as the one we just
- // read would overflow the packet, or
- // (iii) it contains the last fragment of a fragmented frame, and we
- // don't allow anything else to follow this or
- // (iv) one frame per packet is allowed:
- if (fOutBuf->isPreferredSize()
- || fOutBuf->wouldOverflow(numFrameBytesToUse)
- || (fPreviousFrameEndedFragmentation &&
- !allowOtherFramesAfterLastFragment())
- || !frameCanAppearAfterPacketStart(fOutBuf->curPtr() - frameSize,
- frameSize) ) {
- // The packet is ready to be sent now
- sendPacketIfNecessary();
- } else {
- // There's room for more frames; try getting another:
- packFrame();
- }
- }
-}
-
-static unsigned const rtpHeaderSize = 12;
-
-Boolean MultiFramedRTPSink::isTooBigForAPacket(unsigned numBytes) const {
- // Check whether a 'numBytes'-byte frame - together with a RTP header and
- // (possible) special headers - would be too big for an output packet:
- // (Later allow for RTP extension header!) #####
- numBytes += rtpHeaderSize + specialHeaderSize() + frameSpecificHeaderSize();
- return fOutBuf->isTooBigForAPacket(numBytes);
-}
-
-void MultiFramedRTPSink::sendPacketIfNecessary() {
- if (fNumFramesUsedSoFar > 0) {
- // Send the packet:
-#ifdef TEST_LOSS
- if ((our_random()%10) != 0) // simulate 10% packet loss #####
-#endif
- fRTPInterface.sendPacket(fOutBuf->packet(), fOutBuf->curPacketSize());
- ++fPacketCount;
- fTotalOctetCount += fOutBuf->curPacketSize();
- fOctetCount += fOutBuf->curPacketSize()
- - rtpHeaderSize - fSpecialHeaderSize - fTotalFrameSpecificHeaderSizes;
-
- ++fSeqNo; // for next time
- }
-
- if (fOutBuf->haveOverflowData()
- && fOutBuf->totalBytesAvailable() > fOutBuf->totalBufferSize()/2) {
- // Efficiency hack: Reset the packet start pointer to just in front of
- // the overflow data (allowing for the RTP header and special headers),
- // so that we probably don't have to "memmove()" the overflow data
- // into place when building the next packet:
- unsigned newPacketStart = fOutBuf->curPacketSize()
- - (rtpHeaderSize + fSpecialHeaderSize + frameSpecificHeaderSize());
- fOutBuf->adjustPacketStart(newPacketStart);
- } else {
- // Normal case: Reset the packet start pointer back to the start:
- fOutBuf->resetPacketStart();
- }
- fOutBuf->resetOffset();
- fNumFramesUsedSoFar = 0;
-
- if (fNoFramesLeft) {
- // We're done:
- onSourceClosure(this);
- } else {
- // We have more frames left to send. Figure out when the next frame
- // is due to start playing, then make sure that we wait this long before
- // sending the next packet.
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- int uSecondsToGo;
- if (fNextSendTime.tv_sec < timeNow.tv_sec
- || (fNextSendTime.tv_sec == timeNow.tv_sec && fNextSendTime.tv_usec < timeNow.tv_usec)) {
- uSecondsToGo = 0; // prevents integer underflow if too far behind
- } else {
- uSecondsToGo = (fNextSendTime.tv_sec - timeNow.tv_sec)*1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec);
- }
-
- // Delay this amount of time:
- nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo,
- (TaskFunc*)sendNext, this);
- }
-}
-
-// The following is called after each delay between packet sends:
-void MultiFramedRTPSink::sendNext(void* firstArg) {
- MultiFramedRTPSink* sink = (MultiFramedRTPSink*)firstArg;
- sink->buildAndSendPacket(False);
-}
-
-void MultiFramedRTPSink::ourHandleClosure(void* clientData) {
- MultiFramedRTPSink* sink = (MultiFramedRTPSink*)clientData;
- // There are no frames left, but we may have a partially built packet
- // to send
- sink->fNoFramesLeft = True;
- sink->sendPacketIfNecessary();
-}
diff --git a/liveMedia/DVVideoRTPSink.cpp b/liveMedia/DVVideoRTPSink.cpp
index e354d8e..4300e6d 100644
--- a/liveMedia/DVVideoRTPSink.cpp
+++ b/liveMedia/DVVideoRTPSink.cpp
@@ -51,7 +51,7 @@ void DVVideoRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
unsigned numRemainingBytes) {
if (fragmentationOffset == 0) {
// This packet contains the first (or only) fragment of the frame. Read its header to figure out our profile:
-
+ // TO COMPLETE #####@@@@@
}
@@ -65,8 +65,6 @@ void DVVideoRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
setTimestamp(frameTimestamp);
}
-#define DV_DIF_BLOCK_SIZE 80
-
unsigned DVVideoRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const {
unsigned initialOverflow = MultiFramedRTPSink::computeOverflowForNewFrame(newFrameSize);
diff --git a/liveMedia/DVVideoStreamFramer.cpp b/liveMedia/DVVideoStreamFramer.cpp
index d7e0d35..ffdff7e 100644
--- a/liveMedia/DVVideoStreamFramer.cpp
+++ b/liveMedia/DVVideoStreamFramer.cpp
@@ -24,7 +24,7 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
DVVideoStreamFramer::DVVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource)
: FramedFilter(env, inputSource),
- fProfileName(NULL) {
+ fProfileName(NULL), fInitialBlockPresent(False) {
}
DVVideoStreamFramer::~DVVideoStreamFramer() {
@@ -38,6 +38,11 @@ DVVideoStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource)
char const* DVVideoStreamFramer::profileName() const {
#if 0
if (fProfileName == NULL) {
+read into fSavedInitialBlock
+retry
+
+ need 6 blocks (480 bytes) to parse format?
+
unsigned char* fSavedFrame;
unsigned fSavedFrameSize;
char fSavedFrameFlag;
@@ -54,4 +59,11 @@ Boolean DVVideoStreamFramer::isDVVideoStreamFramer() const {
void DVVideoStreamFramer::doGetNextFrame() {
// COMPLETE THIS #####@@@@@
+#if 0
+ if we have a saved initial block, use it
+ (don't allow fMaxSize to be < 80)
+ then read rest of data
+ numInitialBytesUsed
+check packet buffer max size
+#endif
}
diff --git a/liveMedia/DarwinInjector.cpp b/liveMedia/DarwinInjector.cpp
index 208ecbf..b35cdf3 100644
--- a/liveMedia/DarwinInjector.cpp
+++ b/liveMedia/DarwinInjector.cpp
@@ -109,7 +109,8 @@ Boolean DarwinInjector
char const* remoteUserName,
char const* remotePassword,
char const* sessionAuthor,
- char const* sessionCopyright) {
+ char const* sessionCopyright,
+ int timeout) {
char* sdp = NULL;
char* url = NULL;
Boolean success = False; // until we learn otherwise
@@ -186,9 +187,9 @@ Boolean DarwinInjector
Boolean announceSuccess;
if (remoteUserName[0] != '\0' || remotePassword[0] != '\0') {
announceSuccess
- = fRTSPClient->announceWithPassword(url, sdp, remoteUserName, remotePassword);
+ = fRTSPClient->announceWithPassword(url, sdp, remoteUserName, remotePassword, timeout);
} else {
- announceSuccess = fRTSPClient->announceSDPDescription(url, sdp);
+ announceSuccess = fRTSPClient->announceSDPDescription(url, sdp, NULL, timeout);
}
if (!announceSuccess) break;
diff --git a/liveMedia/InputFile.cpp b/liveMedia/InputFile.cpp
index b60d4a9..2c6b20f 100644
--- a/liveMedia/InputFile.cpp
+++ b/liveMedia/InputFile.cpp
@@ -35,7 +35,7 @@ FILE* OpenInputFile(UsageEnvironment& env, char const* fileName) {
// Check for a special case file name: "stdin"
if (strcmp(fileName, "stdin") == 0) {
fid = stdin;
-#if defined(__WIN32__) || defined(_WIN32)
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
_setmode(_fileno(stdin), _O_BINARY); // convert to binary mode
#endif
} else {
diff --git a/liveMedia/MPEG1or2Demux.cpp b/liveMedia/MPEG1or2Demux.cpp
index 6f778f1..30818dc 100644
--- a/liveMedia/MPEG1or2Demux.cpp
+++ b/liveMedia/MPEG1or2Demux.cpp
@@ -538,7 +538,7 @@ unsigned char MPEGProgramStreamParser::parsePESPacket() {
unsigned char stream_id = get1Byte();
#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
unsigned char streamNum = stream_id;
- char* streamTypeStr;
+ char const* streamTypeStr;
if ((stream_id&0xE0) == 0xC0) {
streamTypeStr = "audio";
streamNum = stream_id&~0xE0;
diff --git a/liveMedia/MediaSession.cpp b/liveMedia/MediaSession.cpp
index f1f79f3..913bd50 100644
--- a/liveMedia/MediaSession.cpp
+++ b/liveMedia/MediaSession.cpp
@@ -211,6 +211,7 @@ Boolean MediaSession::initializeWithSDP(char const* sdpDescription) {
// Check for various special SDP lines that we understand:
if (subsession->parseSDPLine_c(sdpLine)) continue;
+ if (subsession->parseSDPLine_b(sdpLine)) continue;
if (subsession->parseSDPAttribute_rtpmap(sdpLine)) continue;
if (subsession->parseSDPAttribute_control(sdpLine)) continue;
if (subsession->parseSDPAttribute_range(sdpLine)) continue;
@@ -462,7 +463,8 @@ char* MediaSession::lookupPayloadFormat(unsigned char rtpPayloadType,
unsigned MediaSession::guessRTPTimestampFrequency(char const* mediumName,
char const* codecName) {
// By default, we assume that audio sessions use a frequency of 8000,
- // and that video sessions use a frequency of 90000.
+ // video sessions use a frequency of 90000,
+ // and text sessions use a frequency of 1000.
// Begin by checking for known exceptions to this rule
// (where the frequency is known unambiguously (e.g., not like "DVI4"))
if (strcmp(codecName, "L16") == 0) return 44100;
@@ -472,6 +474,7 @@ unsigned MediaSession::guessRTPTimestampFrequency(char const* mediumName,
// Now, guess default values:
if (strcmp(mediumName, "video") == 0) return 90000;
+ else if (strcmp(mediumName, "text") == 0) return 1000;
return 8000; // for "audio", and any other medium
}
@@ -540,7 +543,7 @@ MediaSubsession::MediaSubsession(MediaSession& parent)
fClientPortNum(0), fRTPPayloadFormat(0xFF),
fSavedSDPLines(NULL), fMediumName(NULL), fCodecName(NULL), fProtocolName(NULL),
fRTPTimestampFrequency(0), fControlPath(NULL),
- fSourceFilterAddr(parent.sourceFilterAddr()),
+ fSourceFilterAddr(parent.sourceFilterAddr()), fBandwidth(0),
fAuxiliarydatasizelength(0), fConstantduration(0), fConstantsize(0),
fCRC(0), fCtsdeltalength(0), fDe_interleavebuffersize(0), fDtsdeltalength(0),
fIndexdeltalength(0), fIndexlength(0), fInterleaving(0), fMaxdisplacement(0),
@@ -690,6 +693,13 @@ Boolean MediaSubsession::initiate(int useSpecialRTPoffset) {
if (!success) break; // a fatal error occurred trying to create the RTP and RTCP sockets; we can't continue
}
+ // Try to use a big receive buffer for RTP - at least 0.1 second of
+ // specified bandwidth and at least 50 KB
+ unsigned rtpBufSize = fBandwidth * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
+ if (rtpBufSize < 50 * 1024)
+ rtpBufSize = 50 * 1024;
+ increaseReceiveBufferTo(env(), fRTPSocket->socketNum(), rtpBufSize);
+
// ASSERT: fRTPSocket != NULL && fRTCPSocket != NULL
if (isSSM()) {
// Special case for RTCP SSM: Send RTCP packets back to the source via unicast:
@@ -855,6 +865,7 @@ Boolean MediaSubsession::initiate(int useSpecialRTPoffset) {
|| strcmp(fCodecName, "G726-32") == 0 // G.726, 32 kbps
|| strcmp(fCodecName, "G726-40") == 0 // G.726, 40 kbps
|| strcmp(fCodecName, "SPEEX") == 0 // SPEEX audio
+ || strcmp(fCodecName, "T140") == 0 // T.140 text (RFC 4103)
) {
createSimpleRTPSource = True;
useSpecialRTPoffset = 0;
@@ -887,7 +898,10 @@ Boolean MediaSubsession::initiate(int useSpecialRTPoffset) {
// Finally, create our RTCP instance. (It starts running automatically)
if (fRTPSource != NULL) {
- unsigned totSessionBandwidth = 500; // HACK - later get from SDP#####
+ // If bandwidth is specified, use it and add 5% for RTCP overhead.
+ // Otherwise make a guess at 500 kbps.
+ unsigned totSessionBandwidth
+ = fBandwidth ? fBandwidth + fBandwidth / 20 : 500;
fRTCPInstance = RTCPInstance::createNew(env(), fRTCPSocket,
totSessionBandwidth,
(unsigned char const*)
@@ -1023,6 +1037,12 @@ Boolean MediaSubsession::parseSDPLine_c(char const* sdpLine) {
return False;
}
+Boolean MediaSubsession::parseSDPLine_b(char const* sdpLine) {
+ // Check for "b=<bwtype>:<bandwidth>" line
+ // RTP applications are expected to use bwtype="AS"
+ return sscanf(sdpLine, "b=AS:%u", &fBandwidth) == 1;
+}
+
Boolean MediaSubsession::parseSDPAttribute_rtpmap(char const* sdpLine) {
// Check for a "a=rtpmap:<fmt> <codec>/<freq>" line:
// (Also check without the "/<freq>"; RealNetworks omits this)
diff --git a/liveMedia/OnDemandServerMediaSubsession.cpp b/liveMedia/OnDemandServerMediaSubsession.cpp
index 235e60a..8425114 100644
--- a/liveMedia/OnDemandServerMediaSubsession.cpp
+++ b/liveMedia/OnDemandServerMediaSubsession.cpp
@@ -77,7 +77,7 @@ OnDemandServerMediaSubsession::sdpLines() {
// subsession (as a unicast stream). To do so, we first create
// dummy (unused) source and "RTPSink" objects,
// whose parameters we use for the SDP lines:
- unsigned estBitrate; // unused
+ unsigned estBitrate;
FramedSource* inputSource = createNewStreamSource(0, estBitrate);
if (inputSource == NULL) return NULL; // file not found
@@ -88,7 +88,7 @@ OnDemandServerMediaSubsession::sdpLines() {
RTPSink* dummyRTPSink
= createNewRTPSink(&dummyGroupsock, rtpPayloadType, inputSource);
- setSDPLinesFromRTPSink(dummyRTPSink, inputSource);
+ setSDPLinesFromRTPSink(dummyRTPSink, inputSource, estBitrate);
Medium::close(dummyRTPSink);
closeStreamSource(inputSource);
}
@@ -228,6 +228,14 @@ void OnDemandServerMediaSubsession
if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations();
if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations();
+ if (rtpGroupsock != NULL) {
+ // Try to use a big send buffer for RTP - at least 0.1 second of
+ // specified bandwidth and at least 50 KB
+ unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
+ if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
+ increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize);
+ }
+
// Set up the state of the stream. The stream will get started later:
streamToken = fLastStreamToken
= new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink,
@@ -347,7 +355,8 @@ void OnDemandServerMediaSubsession::closeStreamSource(FramedSource *inputSource)
}
void OnDemandServerMediaSubsession
-::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource) {
+::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource,
+ unsigned estBitrate) {
if (rtpSink == NULL) return;
char const* mediaType = rtpSink->sdpMediaType();
@@ -362,6 +371,7 @@ void OnDemandServerMediaSubsession
char const* const sdpFmt =
"m=%s %u RTP/AVP %d\r\n"
"c=IN IP4 %s\r\n"
+ "b=AS:%u\r\n"
"%s"
"%s"
"%s"
@@ -369,6 +379,7 @@ void OnDemandServerMediaSubsession
unsigned sdpFmtSize = strlen(sdpFmt)
+ strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ strlen(ipAddressStr)
+ + 20 /* max int len */
+ strlen(rtpmapLine)
+ strlen(rangeLine)
+ strlen(auxSDPLine)
@@ -379,6 +390,7 @@ void OnDemandServerMediaSubsession
fPortNumForSDP, // m= <port>
rtpPayloadType, // m= <fmt list>
ipAddressStr, // c= address
+ estBitrate, // b=AS:<bandwidth>
rtpmapLine, // a=rtpmap:... (if present)
rangeLine, // a=range:... (if present)
auxSDPLine, // optional extra SDP line
diff --git a/liveMedia/OutputFile.cpp b/liveMedia/OutputFile.cpp
index 63ddbdf..615c7f7 100644
--- a/liveMedia/OutputFile.cpp
+++ b/liveMedia/OutputFile.cpp
@@ -35,12 +35,12 @@ FILE* OpenOutputFile(UsageEnvironment& env, char const* fileName) {
// Check for special case 'file names': "stdout" and "stderr"
if (strcmp(fileName, "stdout") == 0) {
fid = stdout;
-#if defined(__WIN32__) || defined(_WIN32)
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
_setmode(_fileno(stdout), _O_BINARY); // convert to binary mode
#endif
} else if (strcmp(fileName, "stderr") == 0) {
fid = stderr;
-#if defined(__WIN32__) || defined(_WIN32)
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
_setmode(_fileno(stderr), _O_BINARY); // convert to binary mode
#endif
} else {
diff --git a/liveMedia/PassiveServerMediaSubsession.cpp b/liveMedia/PassiveServerMediaSubsession.cpp
index d27d56f..1d6789c 100644
--- a/liveMedia/PassiveServerMediaSubsession.cpp
+++ b/liveMedia/PassiveServerMediaSubsession.cpp
@@ -47,6 +47,8 @@ PassiveServerMediaSubsession::sdpLines() {
unsigned char ttl = gs.ttl();
unsigned char rtpPayloadType = fRTPSink.rtpPayloadType();
char const* mediaType = fRTPSink.sdpMediaType();
+ unsigned estBitrate
+ = fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW();
char* rtpmapLine = fRTPSink.rtpmapLine();
char const* rangeLine = rangeSDPLine();
char const* auxSDPLine = fRTPSink.auxSDPLine();
@@ -57,6 +59,7 @@ PassiveServerMediaSubsession::sdpLines() {
char const* const sdpFmt =
"m=%s %d RTP/AVP %d\r\n"
"c=IN IP4 %s/%d\r\n"
+ "b=AS:%u\r\n"
"%s"
"%s"
"%s"
@@ -64,6 +67,7 @@ PassiveServerMediaSubsession::sdpLines() {
unsigned sdpFmtSize = strlen(sdpFmt)
+ strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ strlen(ipAddressStr) + 3 /* max char len */
+ + 20 /* max int len */
+ strlen(rtpmapLine)
+ strlen(rangeLine)
+ strlen(auxSDPLine)
@@ -75,6 +79,7 @@ PassiveServerMediaSubsession::sdpLines() {
rtpPayloadType, // m= <fmt list>
ipAddressStr, // c= <connection address>
ttl, // c= TTL
+ estBitrate, // b=AS:<bandwidth>
rtpmapLine, // a=rtpmap:... (if present)
rangeLine, // a=range:... (if present)
auxSDPLine, // optional extra SDP line
@@ -134,6 +139,13 @@ void PassiveServerMediaSubsession::startStream(unsigned /*clientSessionId*/,
// in existence after "deleteStream()" is called.
rtpSeqNum = fRTPSink.currentSeqNo();
rtpTimestamp = fRTPSink.presetNextTimestamp();
+
+ // Try to use a big send buffer for RTP - at least 0.1 second of
+ // specified bandwidth and at least 50 KB
+ unsigned streamBitrate = fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW(); // in kbps
+ unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
+ if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
+ increaseSendBufferTo(envir(), fRTPSink.groupsockBeingUsed().socketNum(), rtpBufSize);
}
PassiveServerMediaSubsession::~PassiveServerMediaSubsession() {
diff --git a/liveMedia/QuickTimeFileSink.cpp b/liveMedia/QuickTimeFileSink.cpp
index ef00f17..08316c7 100644
--- a/liveMedia/QuickTimeFileSink.cpp
+++ b/liveMedia/QuickTimeFileSink.cpp
@@ -1552,6 +1552,9 @@ addAtomEnd;
addAtom(stbl);
size += addAtom_stsd();
size += addAtom_stts();
+ if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
+ size += addAtom_stss(); // only for video streams
+ }
size += addAtom_stsc();
size += addAtom_stsz();
size += addAtom_stco();
@@ -1901,6 +1904,43 @@ addAtom(stts); // Time-to-Sample
setWord(numEntriesPosition, numEntries);
addAtomEnd;
+addAtom(stss); // Sync-Sample
+ size += addWord(0x00000000); // Version+flags
+
+ // First, add a dummy "Number of entries" field
+ // (and remember its position). We'll fill this field in later:
+ unsigned numEntriesPosition = ftell(fOutFid);
+ size += addWord(0); // dummy for "Number of entries"
+
+ // Then, run through the chunk descriptors, counting up the total number of samples:
+ unsigned numEntries = 0, numSamplesSoFar = 0;
+ unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
+ numSamplesSoFar += numSamples;
+ chunk = chunk->fNextChunk;
+ }
+
+ // Then, write out the sample numbers that we deem correspond to 'sync samples':
+ unsigned i;
+ for (i = 0; i < numSamplesSoFar; i += 12) {
+ // For an explanation of the constant "12", see http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
+ // (Perhaps we should really try to keep track of which 'samples' ('frames' for video) really are 'key frames'?)
+ size += addWord(i+1);
+ ++numEntries;
+ }
+
+ // Then, write out the last entry (if we haven't already done so):
+ if (i != (numSamplesSoFar - 1)) {
+ size += addWord(numSamplesSoFar);
+ ++numEntries;
+ }
+
+ // Now go back and fill in the "Number of entries" field:
+ setWord(numEntriesPosition, numEntries);
+addAtomEnd;
+
addAtom(stsc); // Sample-to-Chunk
size += addWord(0x00000000); // Version+flags
diff --git a/liveMedia/RTSPClient.cpp b/liveMedia/RTSPClient.cpp
index d96a7a2..a1a21b0 100644
--- a/liveMedia/RTSPClient.cpp
+++ b/liveMedia/RTSPClient.cpp
@@ -1707,7 +1707,7 @@ Boolean RTSPClient::teardownMediaSession(MediaSession& session) {
// Get the response from the server:
unsigned bytesRead; unsigned responseCode;
char* firstLine; char* nextLineStart;
- if (!getResponse("TEARDOWN", bytesRead, responseCode, firstLine, nextLineStart)) break;
+ getResponse("TEARDOWN", bytesRead, responseCode, firstLine, nextLineStart); // ignore the response; from our POV, we're done
// Run through each subsession, deleting its "sessionId":
MediaSubsessionIterator iter(session);
@@ -1777,7 +1777,7 @@ Boolean RTSPClient::teardownMediaSubsession(MediaSubsession& subsession) {
// Get the response from the server:
unsigned bytesRead; unsigned responseCode;
char* firstLine; char* nextLineStart;
- if (!getResponse("TEARDOWN", bytesRead, responseCode, firstLine, nextLineStart)) break;
+ getResponse("TEARDOWN", bytesRead, responseCode, firstLine, nextLineStart); // ignore the response; from our POV, we're done
}
delete[] (char*)subsession.sessionId;
diff --git a/liveMedia/RTSPServer.cpp b/liveMedia/RTSPServer.cpp
index 31f7b26..3e2481e 100644
--- a/liveMedia/RTSPServer.cpp
+++ b/liveMedia/RTSPServer.cpp
@@ -431,7 +431,8 @@ static char const* dateHeader() {
}
static char const* allowedCommandNames
- = "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER";
+// = "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER";
+= "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, SET_PARAMETER"; // TEMP HACK to stop VLC from using "GET_PARAMETER" as a client 'keep-alive' indicator; we don't need this, and it currently causes problems for RTP-over-TCP streams.
void RTSPServer::RTSPClientSession::handleCmd_bad(char const* /*cseq*/) {
// Don't do anything with "cseq", because it might be nonsense
diff --git a/liveMedia/include/DVVideoStreamFramer.hh b/liveMedia/include/DVVideoStreamFramer.hh
index c9f408e..9e3ba7e 100644
--- a/liveMedia/include/DVVideoStreamFramer.hh
+++ b/liveMedia/include/DVVideoStreamFramer.hh
@@ -25,6 +25,8 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#include "FramedFilter.hh"
#endif
+#define DV_DIF_BLOCK_SIZE 80
+
class DVVideoStreamFramer: public FramedFilter {
public:
static DVVideoStreamFramer*
@@ -45,6 +47,8 @@ private:
private:
char const* fProfileName;
+ unsigned char fSavedInitialBlock[DV_DIF_BLOCK_SIZE];
+ Boolean fInitialBlockPresent;
};
#endif
diff --git a/liveMedia/include/DarwinInjector.hh b/liveMedia/include/DarwinInjector.hh
index f3ee4bd..8ecbc4c 100644
--- a/liveMedia/include/DarwinInjector.hh
+++ b/liveMedia/include/DarwinInjector.hh
@@ -73,7 +73,8 @@ public:
char const* remoteUserName = "",
char const* remotePassword = "",
char const* sessionAuthor = "",
- char const* sessionCopyright = "");
+ char const* sessionCopyright = "",
+ int timeout = -1);
private: // redefined virtual functions
virtual Boolean isDarwinInjector() const;
diff --git a/liveMedia/include/H264VideoRTPSink.hh b/liveMedia/include/H264VideoRTPSink.hh
index 98d344b..34bbb72 100644
--- a/liveMedia/include/H264VideoRTPSink.hh
+++ b/liveMedia/include/H264VideoRTPSink.hh
@@ -47,6 +47,9 @@ protected:
virtual ~H264VideoRTPSink();
+protected: // redefined virtual functions:
+ virtual char const* auxSDPLine();
+
private: // redefined virtual functions:
virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
virtual Boolean continuePlaying();
@@ -58,7 +61,6 @@ private: // redefined virtual functions:
unsigned numRemainingBytes);
virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
unsigned numBytesInFrame) const;
- virtual char const* auxSDPLine();
protected:
H264FUAFragmenter* fOurFragmenter;
diff --git a/liveMedia/include/MediaSession.hh b/liveMedia/include/MediaSession.hh
index 5b9ad06..63e90d5 100644
--- a/liveMedia/include/MediaSession.hh
+++ b/liveMedia/include/MediaSession.hh
@@ -249,6 +249,7 @@ protected:
void setNext(MediaSubsession* next) { fNext = next; }
Boolean parseSDPLine_c(char const* sdpLine);
+ Boolean parseSDPLine_b(char const* sdpLine);
Boolean parseSDPAttribute_rtpmap(char const* sdpLine);
Boolean parseSDPAttribute_control(char const* sdpLine);
Boolean parseSDPAttribute_range(char const* sdpLine);
@@ -274,6 +275,7 @@ protected:
unsigned fRTPTimestampFrequency;
char* fControlPath; // holds optional a=control: string
struct in_addr fSourceFilterAddr; // used for SSM
+ unsigned fBandwidth; // in kilobits-per-second, from b= line
// Parameters set by "a=fmtp:" SDP lines:
unsigned fAuxiliarydatasizelength, fConstantduration, fConstantsize;
diff --git a/liveMedia/include/OnDemandServerMediaSubsession.hh b/liveMedia/include/OnDemandServerMediaSubsession.hh
index 95d2266..70de0c7 100644
--- a/liveMedia/include/OnDemandServerMediaSubsession.hh
+++ b/liveMedia/include/OnDemandServerMediaSubsession.hh
@@ -76,7 +76,8 @@ protected: // new virtual functions, defined by all subclasses
FramedSource* inputSource) = 0;
private:
- void setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource);
+ void setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource,
+ unsigned estBitrate);
// used to implement "sdpLines()"
private:
diff --git a/liveMedia/include/QuickTimeFileSink.hh b/liveMedia/include/QuickTimeFileSink.hh
index de86941..1fa5040 100644
--- a/liveMedia/include/QuickTimeFileSink.hh
+++ b/liveMedia/include/QuickTimeFileSink.hh
@@ -148,6 +148,7 @@ private:
_atom(rtp);
_atom(tims);
_atom(stts);
+ _atom(stss);
_atom(stsc);
_atom(stsz);
_atom(stco);
diff --git a/liveMedia/include/RTCP.hh b/liveMedia/include/RTCP.hh
index 80fd790..16b39c2 100644
--- a/liveMedia/include/RTCP.hh
+++ b/liveMedia/include/RTCP.hh
@@ -54,6 +54,7 @@ public:
RTCPInstance*& resultInstance);
unsigned numMembers() const;
+ unsigned totSessionBW() const { return fTotSessionBW; }
void setByeHandler(TaskFunc* handlerTask, void* clientData,
Boolean handleActiveParticipantsOnly = True);
diff --git a/liveMedia/include/RTSPCommon.hh b/liveMedia/include/RTSPCommon.hh
index 0898d65..d1b065c 100644
--- a/liveMedia/include/RTSPCommon.hh
+++ b/liveMedia/include/RTSPCommon.hh
@@ -32,7 +32,7 @@ along with this library; if not, write to the Free Software Foundation, Inc.,
#define _strncasecmp strncasecmp
#endif
-#define RTSP_PARAM_STRING_MAX 100
+#define RTSP_PARAM_STRING_MAX 200
Boolean parseRTSPRequestString(char const *reqStr, unsigned reqStrSize,
char *resultCmdName,
diff --git a/liveMedia/include/liveMedia_version.hh b/liveMedia/include/liveMedia_version.hh
index 57514a8..6ec9662 100644
--- a/liveMedia/include/liveMedia_version.hh
+++ b/liveMedia/include/liveMedia_version.hh
@@ -4,7 +4,7 @@
#ifndef _LIVEMEDIA_VERSION_HH
#define _LIVEMEDIA_VERSION_HH
-#define LIVEMEDIA_LIBRARY_VERSION_STRING "2009.07.09"
-#define LIVEMEDIA_LIBRARY_VERSION_INT 1247097600
+#define LIVEMEDIA_LIBRARY_VERSION_STRING "2009.11.27"
+#define LIVEMEDIA_LIBRARY_VERSION_INT 1259280000
#endif
diff --git a/mediaServer/Makefile.tail b/mediaServer/Makefile.tail
index 51b5b66..606b8ae 100644
--- a/mediaServer/Makefile.tail
+++ b/mediaServer/Makefile.tail
@@ -25,7 +25,7 @@ LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(LIB_SUFFIX)
GROUPSOCK_DIR = ../groupsock
GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(LIB_SUFFIX)
LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
- $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB)
+ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)
live555MediaServer$(EXE): $(MEDIA_SERVER_OBJS) $(LOCAL_LIBS)
diff --git a/testProgs/#playCommon.cpp# b/testProgs/#playCommon.cpp#
deleted file mode 100644
index 4b053b7..0000000
--- a/testProgs/#playCommon.cpp#
+++ /dev/null
@@ -1,1306 +0,0 @@
-/**********
-This library is free software; you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation; either version 2.1 of the License, or (at your
-option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
-
-This library is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
-more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this library; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-**********/
-// Copyright (c) 1996-2009, Live Networks, Inc. All rights reserved
-// A common framework, used for the "openRTSP" and "playSIP" applications
-// Implementation
-
-#include "playCommon.hh"
-#include "BasicUsageEnvironment.hh"
-#include "GroupsockHelper.hh"
-#ifdef SUPPORT_REAL_RTSP
-#include "../RealRTSP/include/RealRTSP.hh"
-#endif
-
-#if defined(__WIN32__) || defined(_WIN32)
-#define snprintf _snprintf
-#else
-#include <signal.h>
-#define USE_SIGNALS 1
-#endif
-
-// Forward function definitions:
-void setupStreams();
-void startPlayingStreams();
-void tearDownStreams();
-void closeMediaSinks();
-void subsessionAfterPlaying(void* clientData);
-void subsessionByeHandler(void* clientData);
-void sessionAfterPlaying(void* clientData = NULL);
-void sessionTimerHandler(void* clientData);
-void shutdown(int exitCode = 1);
-void signalHandlerShutdown(int sig);
-void checkForPacketArrival(void* clientData);
-void checkInterPacketGaps(void* clientData);
-void beginQOSMeasurement();
-
-char const* progName;
-UsageEnvironment* env;
-Medium* ourClient = NULL;
-MediaSession* session = NULL;
-TaskToken sessionTimerTask = NULL;
-TaskToken arrivalCheckTimerTask = NULL;
-TaskToken interPacketGapCheckTimerTask = NULL;
-TaskToken qosMeasurementTimerTask = NULL;
-Boolean createReceivers = True;
-Boolean outputQuickTimeFile = False;
-Boolean generateMP4Format = False;
-QuickTimeFileSink* qtOut = NULL;
-Boolean outputAVIFile = False;
-AVIFileSink* aviOut = NULL;
-Boolean audioOnly = False;
-Boolean videoOnly = False;
-char const* singleMedium = NULL;
-int verbosityLevel = 1; // by default, print verbose output
-double duration = 0;
-double durationSlop = -1.0; // extra seconds to play at the end
-double initialSeekTime = 0.0f;
-double scale = 1.0f;
-unsigned interPacketGapMaxTime = 0;
-unsigned totNumPacketsReceived = ~0; // used if checking inter-packet gaps
-Boolean playContinuously = False;
-int simpleRTPoffsetArg = -1;
-Boolean sendOptionsRequest = True;
-Boolean sendOptionsRequestOnly = False;
-Boolean oneFilePerFrame = False;
-Boolean notifyOnPacketArrival = False;
-Boolean streamUsingTCP = False;
-portNumBits tunnelOverHTTPPortNum = 0;
-char* username = NULL;
-char* password = NULL;
-char* proxyServerName = NULL;
-unsigned short proxyServerPortNum = 0;
-unsigned char desiredAudioRTPPayloadFormat = 0;
-char* mimeSubtype = NULL;
-unsigned short movieWidth = 240; // default
-Boolean movieWidthOptionSet = False;
-unsigned short movieHeight = 180; // default
-Boolean movieHeightOptionSet = False;
-unsigned movieFPS = 15; // default
-Boolean movieFPSOptionSet = False;
-char const* fileNamePrefix = "";
-unsigned fileSinkBufferSize = 20000;
-unsigned socketInputBufferSize = 0;
-Boolean packetLossCompensate = False;
-Boolean syncStreams = False;
-Boolean generateHintTracks = False;
-unsigned qosMeasurementIntervalMS = 0; // 0 means: Don't output QOS data
-unsigned statusCode = 0;
-
-struct timeval startTime;
-
-void usage() {
- *env << "Usage: " << progName
- << " [-p <startPortNum>] [-r|-q|-4|-i] [-a|-v] [-V] [-d <duration>] [-D <max-inter-packet-gap-time> [-c] [-S <offset>] [-n] [-O]"
- << (controlConnectionUsesTCP ? " [-t|-T <http-port>]" : "")
- << " [-u <username> <password>"
- << (allowProxyServers ? " [<proxy-server> [<proxy-server-port>]]" : "")
- << "]" << (supportCodecSelection ? " [-A <audio-codec-rtp-payload-format-code>|-M <mime-subtype-name>]" : "")
- << " [-s <initial-seek-time>] [-z <scale>]"
- << " [-w <width> -h <height>] [-f <frames-per-second>] [-y] [-H] [-Q [<measurement-interval>]] [-F <filename-prefix>] [-b <file-sink-buffer-size>] [-B <input-socket-buffer-size>] [-I <input-interface-ip-address>] [-m] <url> (or " << progName << " -o [-V] <url>)\n";
- //##### Add "-R <dest-rtsp-url>" #####
- shutdown();
-}
-
-int main(int argc, char** argv) {
- // Begin by setting up our usage environment:
- TaskScheduler* scheduler = BasicTaskScheduler::createNew();
- env = BasicUsageEnvironment::createNew(*scheduler);
-
- progName = argv[0];
-
- gettimeofday(&startTime, NULL);
-
-#ifdef USE_SIGNALS
- // Allow ourselves to be shut down gracefully by a SIGHUP or a SIGUSR1:
- signal(SIGHUP, signalHandlerShutdown);
- signal(SIGUSR1, signalHandlerShutdown);
-#endif
-
- unsigned short desiredPortNum = 0;
-
- // unfortunately we can't use getopt() here, as Windoze doesn't have it
- while (argc > 2) {
- char* const opt = argv[1];
- if (opt[0] != '-') usage();
- switch (opt[1]) {
-
- case 'p': { // specify start port number
- int portArg;
- if (sscanf(argv[2], "%d", &portArg) != 1) {
- usage();
- }
- if (portArg <= 0 || portArg >= 65536 || portArg&1) {
- *env << "bad port number: " << portArg
- << " (must be even, and in the range (0,65536))\n";
- usage();
- }
- desiredPortNum = (unsigned short)portArg;
- ++argv; --argc;
- break;
- }
-
- case 'r': { // do not receive data (instead, just 'play' the stream(s))
- createReceivers = False;
- break;
- }
-
- case 'q': { // output a QuickTime file (to stdout)
- outputQuickTimeFile = True;
- break;
- }
-
- case '4': { // output a 'mp4'-format file (to stdout)
- outputQuickTimeFile = True;
- generateMP4Format = True;
- break;
- }
-
- case 'i': { // output an AVI file (to stdout)
- outputAVIFile = True;
- break;
- }
-
- case 'I': { // specify input interface...
- NetAddressList addresses(argv[2]);
- if (addresses.numAddresses() == 0) {
- *env << "Failed to find network address for \"" << argv[2] << "\"";
- break;
- }
- ReceivingInterfaceAddr = *(unsigned*)(addresses.firstAddress()->data());
- ++argv; --argc;
- break;
- }
-
- case 'a': { // receive/record an audio stream only
- audioOnly = True;
- singleMedium = "audio";
- break;
- }
-
- case 'v': { // receive/record a video stream only
- videoOnly = True;
- singleMedium = "video";
- break;
- }
-
- case 'V': { // disable verbose output
- verbosityLevel = 0;
- break;
- }
-
- case 'd': { // specify duration, or how much to delay after end time
- float arg;
- if (sscanf(argv[2], "%g", &arg) != 1) {
- usage();
- }
- if (argv[2][0] == '-') { // not "arg<0", in case argv[2] was "-0"
- // a 'negative' argument was specified; use this for "durationSlop":
- duration = 0; // use whatever's in the SDP
- durationSlop = -arg;
- } else {
- duration = arg;
- durationSlop = 0;
- }
- ++argv; --argc;
- break;
- }
-
- case 'D': { // specify maximum number of seconds to wait for packets:
- if (sscanf(argv[2], "%u", &interPacketGapMaxTime) != 1) {
- usage();
- }
- ++argv; --argc;
- break;
- }
-
- case 'c': { // play continuously
- playContinuously = True;
- break;
- }
-
- case 'S': { // specify an offset to use with "SimpleRTPSource"s
- if (sscanf(argv[2], "%d", &simpleRTPoffsetArg) != 1) {
- usage();
- }
- if (simpleRTPoffsetArg < 0) {
- *env << "offset argument to \"-S\" must be >= 0\n";
- usage();
- }
- ++argv; --argc;
- break;
- }
-
- case 'O': { // Don't send an "OPTIONS" request before "DESCRIBE"
- sendOptionsRequest = False;
- break;
- }
-
- case 'o': { // Send only the "OPTIONS" request to the server
- sendOptionsRequestOnly = True;
- break;
- }
-
- case 'm': { // output multiple files - one for each frame
- oneFilePerFrame = True;
- break;
- }
-
- case 'n': { // notify the user when the first data packet arrives
- notifyOnPacketArrival = True;
- break;
- }
-
- case 't': {
- // stream RTP and RTCP over the TCP 'control' connection
- if (controlConnectionUsesTCP) {
- streamUsingTCP = True;
- } else {
- usage();
- }
- break;
- }
-
- case 'T': {
- // stream RTP and RTCP over a HTTP connection
- if (controlConnectionUsesTCP) {
- if (argc > 3 && argv[2][0] != '-') {
- // The next argument is the HTTP server port number:
- if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
- && tunnelOverHTTPPortNum > 0) {
- ++argv; --argc;
- break;
- }
- }
- }
-
- // If we get here, the option was specified incorrectly:
- usage();
- break;
- }
-
- case 'u': { // specify a username and password
- username = argv[2];
- password = argv[3];
- argv+=2; argc-=2;
- if (allowProxyServers && argc > 3 && argv[2][0] != '-') {
- // The next argument is the name of a proxy server:
- proxyServerName = argv[2];
- ++argv; --argc;
-
- if (argc > 3 && argv[2][0] != '-') {
- // The next argument is the proxy server port number:
- if (sscanf(argv[2], "%hu", &proxyServerPortNum) != 1) {
- usage();
- }
- ++argv; --argc;
- }
- }
- break;
- }
-
- case 'A': { // specify a desired audio RTP payload format
- unsigned formatArg;
- if (sscanf(argv[2], "%u", &formatArg) != 1
- || formatArg >= 96) {
- usage();
- }
- desiredAudioRTPPayloadFormat = (unsigned char)formatArg;
- ++argv; --argc;
- break;
- }
-
- case 'M': { // specify a MIME subtype for a dynamic RTP payload type
- mimeSubtype = argv[2];
- if (desiredAudioRTPPayloadFormat==0) desiredAudioRTPPayloadFormat =96;
- ++argv; --argc;
- break;
- }
-
- case 'w': { // specify a width (pixels) for an output QuickTime or AVI movie
- if (sscanf(argv[2], "%hu", &movieWidth) != 1) {
- usage();
- }
- movieWidthOptionSet = True;
- ++argv; --argc;
- break;
- }
-
- case 'h': { // specify a height (pixels) for an output QuickTime or AVI movie
- if (sscanf(argv[2], "%hu", &movieHeight) != 1) {
- usage();
- }
- movieHeightOptionSet = True;
- ++argv; --argc;
- break;
- }
-
- case 'f': { // specify a frame rate (per second) for an output QT or AVI movie
- if (sscanf(argv[2], "%u", &movieFPS) != 1) {
- usage();
- }
- movieFPSOptionSet = True;
- ++argv; --argc;
- break;
- }
-
- case 'F': { // specify a prefix for the audio and video output files
- fileNamePrefix = argv[2];
- ++argv; --argc;
- break;
- }
-
- case 'b': { // specify the size of buffers for "FileSink"s
- if (sscanf(argv[2], "%u", &fileSinkBufferSize) != 1) {
- usage();
- }
- ++argv; --argc;
- break;
- }
-
- case 'B': { // specify the size of input socket buffers
- if (sscanf(argv[2], "%u", &socketInputBufferSize) != 1) {
- usage();
- }
- ++argv; --argc;
- break;
- }
-
- // Note: The following option is deprecated, and may someday be removed:
- case 'l': { // try to compensate for packet loss by repeating frames
- packetLossCompensate = True;
- break;
- }
-
-grep syncstre case 'y': { // synchronize audio and video streams
- syncStreams = True;
- break;
- }
-
- case 'H': { // generate hint tracks (as well as the regular data tracks)
- generateHintTracks = True;
- break;
- }
-
- case 'Q': { // output QOS measurements
- qosMeasurementIntervalMS = 1000; // default: 1 second
-
- if (argc > 3 && argv[2][0] != '-') {
- // The next argument is the measurement interval,
- // in multiples of 100 ms
- if (sscanf(argv[2], "%u", &qosMeasurementIntervalMS) != 1) {
- usage();
- }
- qosMeasurementIntervalMS *= 100;
- ++argv; --argc;
- }
- break;
- }
-
- case 's': { // specify initial seek time (trick play)
- double arg;
- if (sscanf(argv[2], "%lg", &arg) != 1 || arg < 0) {
- usage();
- }
- initialSeekTime = arg;
- ++argv; --argc;
- break;
- }
-
- case 'z': { // scale (trick play)
- float arg;
- if (sscanf(argv[2], "%g", &arg) != 1 || arg == 0.0f) {
- usage();
- }
- scale = arg;
- ++argv; --argc;
- break;
- }
-
- default: {
- usage();
- break;
- }
- }
-
- ++argv; --argc;
- }
- if (argc != 2) usage();
- if (outputQuickTimeFile && outputAVIFile) {
- *env << "The -i and -q (or -4) flags cannot both be used!\n";
- usage();
- }
- Boolean outputCompositeFile = outputQuickTimeFile || outputAVIFile;
- if (!createReceivers && outputCompositeFile) {
- *env << "The -r and -q (or -4 or -i) flags cannot both be used!\n";
- usage();
- }
- if (outputCompositeFile && !movieWidthOptionSet) {
- *env << "Warning: The -q, -4 or -i option was used, but not -w. Assuming a video width of "
- << movieWidth << " pixels\n";
- }
- if (outputCompositeFile && !movieHeightOptionSet) {
- *env << "Warning: The -q, -4 or -i option was used, but not -h. Assuming a video height of "
- << movieHeight << " pixels\n";
- }
- if (outputCompositeFile && !movieFPSOptionSet) {
- *env << "Warning: The -q, -4 or -i option was used, but not -f. Assuming a video frame rate of "
- << movieFPS << " frames-per-second\n";
- }
- if (audioOnly && videoOnly) {
- *env << "The -a and -v flags cannot both be used!\n";
- usage();
- }
- if (sendOptionsRequestOnly && !sendOptionsRequest) {
- *env << "The -o and -O flags cannot both be used!\n";
- usage();
- }
- if (tunnelOverHTTPPortNum > 0) {
- if (streamUsingTCP) {
- *env << "The -t and -T flags cannot both be used!\n";
- usage();
- } else {
- streamUsingTCP = True;
- }
- }
- if (!createReceivers && notifyOnPacketArrival) {
- *env << "Warning: Because we're not receiving stream data, the -n flag has no effect\n";
- }
- if (durationSlop < 0) {
- // This parameter wasn't set, so use a default value.
- // If we're measuring QOS stats, then don't add any slop, to avoid
- // having 'empty' measurement intervals at the end.
- durationSlop = qosMeasurementIntervalMS > 0 ? 0.0 : 5.0;
- }
-
- char* url = argv[1];
-
- // Create our client object:
- ourClient = createClient(*env, verbosityLevel, progName);
- if (ourClient == NULL) {
- *env << "Failed to create " << clientProtocolName
- << " client: " << env->getResultMsg() << "\n";
- shutdown();
- }
-
- if (sendOptionsRequest) {
- // Begin by sending an "OPTIONS" command:
- char* optionsResponse
- = getOptionsResponse(ourClient, url, username, password);
- if (sendOptionsRequestOnly) {
- if (optionsResponse == NULL) {
- *env << clientProtocolName << " \"OPTIONS\" request failed: "
- << env->getResultMsg() << "\n";
- } else {
- *env << clientProtocolName << " \"OPTIONS\" request returned: "
- << optionsResponse << "\n";
- }
- shutdown();
- }
- delete[] optionsResponse;
- }
-
- // Open the URL, to get a SDP description:
- char* sdpDescription
- = getSDPDescriptionFromURL(ourClient, url, username, password,
- proxyServerName, proxyServerPortNum,
- desiredPortNum);
- if (sdpDescription == NULL) {
- *env << "Failed to get a SDP description from URL \"" << url
- << "\": " << env->getResultMsg() << "\n";
- shutdown();
- }
-
- *env << "Opened URL \"" << url
- << "\", returning a SDP description:\n" << sdpDescription << "\n";
-
- // Create a media session object from this SDP description:
- session = MediaSession::createNew(*env, sdpDescription);
- delete[] sdpDescription;
- if (session == NULL) {
- *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
- shutdown();
- } else if (!session->hasSubsessions()) {
- *env << "This session has no media subsessions (i.e., \"m=\" lines)\n";
- shutdown();
- }
-
- // Then, setup the "RTPSource"s for the session:
- MediaSubsessionIterator iter(*session);
- MediaSubsession *subsession;
- Boolean madeProgress = False;
- char const* singleMediumToTest = singleMedium;
- while ((subsession = iter.next()) != NULL) {
- // If we've asked to receive only a single medium, then check this now:
- if (singleMediumToTest != NULL) {
- if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) {
- *env << "Ignoring \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession, because we've asked to receive a single " << singleMedium
- << " session only\n";
- continue;
- } else {
- // Receive this subsession only
- singleMediumToTest = "xxxxx";
- // this hack ensures that we get only 1 subsession of this type
- }
- }
-
- if (desiredPortNum != 0) {
- subsession->setClientPortNum(desiredPortNum);
- desiredPortNum += 2;
- }
-
- if (createReceivers) {
- if (!subsession->initiate(simpleRTPoffsetArg)) {
- *env << "Unable to create receiver for \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession: " << env->getResultMsg() << "\n";
- } else {
- *env << "Created receiver for \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession (client ports " << subsession->clientPortNum()
- << "-" << subsession->clientPortNum()+1 << ")\n";
- madeProgress = True;
-
- if (subsession->rtpSource() != NULL) {
- // Because we're saving the incoming data, rather than playing
- // it in real time, allow an especially large time threshold
- // (1 second) for reordering misordered incoming packets:
- unsigned const thresh = 1000000; // 1 second
- subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);
-
- if (socketInputBufferSize > 0) {
- // Set the RTP source's input buffer size as specified:
- int socketNum
- = subsession->rtpSource()->RTPgs()->socketNum();
- unsigned curBufferSize
- = getReceiveBufferSize(*env, socketNum);
- unsigned newBufferSize
- = setReceiveBufferTo(*env, socketNum, socketInputBufferSize);
- *env << "Changed socket receive buffer size for the \""
- << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession from "
- << curBufferSize << " to "
- << newBufferSize << " bytes\n";
- }
- }
- }
- } else {
- if (subsession->clientPortNum() == 0) {
- *env << "No client port was specified for the \""
- << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession. (Try adding the \"-p <portNum>\" option.)\n";
- } else {
- madeProgress = True;
- }
- }
- }
- if (!madeProgress) shutdown();
-
- // Perform additional 'setup' on each subsession, before playing them:
- setupStreams();
-
- // Create output files:
- if (createReceivers) {
- if (outputQuickTimeFile) {
- // Create a "QuickTimeFileSink", to write to 'stdout':
- qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
- fileSinkBufferSize,
- movieWidth, movieHeight,
- movieFPS,
- packetLossCompensate,
- syncStreams,
- generateHintTracks,
- generateMP4Format);
- if (qtOut == NULL) {
- *env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
- shutdown();
- }
-
- qtOut->startPlaying(sessionAfterPlaying, NULL);
- } else if (outputAVIFile) {
- // Create an "AVIFileSink", to write to 'stdout':
- aviOut = AVIFileSink::createNew(*env, *session, "stdout",
- fileSinkBufferSize,
- movieWidth, movieHeight,
- movieFPS,
- packetLossCompensate);
- if (aviOut == NULL) {
- *env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
- shutdown();
- }
-
- aviOut->startPlaying(sessionAfterPlaying, NULL);
-#ifdef SUPPORT_REAL_RTSP
- } else if (session->isRealNetworksRDT) {
- // For RealNetworks' sessions, we create a single output file,
- // named "output.rm".
- char outFileName[1000];
- if (singleMedium == NULL) {
- snprintf(outFileName, sizeof outFileName, "%soutput.rm", fileNamePrefix);
- } else {
- // output to 'stdout' as normal, even though we actually output all media
- sprintf(outFileName, "stdout");
- }
- FileSink* fileSink = FileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
-
- // The output file needs to begin with a special 'RMFF' header,
- // in order for it to be usable. Write this header first:
- unsigned headerSize;
- unsigned char* headerData = RealGenerateRMFFHeader(session, headerSize);
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- fileSink->addData(headerData, headerSize, timeNow);
- delete[] headerData;
-
- // Start playing the output file from the first subsession.
- // (Hack: Because all subsessions' data is actually multiplexed on the
- // single RTSP TCP connection, playing from one subsession is sufficient.)
- iter.reset();
- madeProgress = False;
- while ((subsession = iter.next()) != NULL) {
- if (subsession->readSource() == NULL) continue; // was not initiated
-
- fileSink->startPlaying(*(subsession->readSource()),
- subsessionAfterPlaying, subsession);
- madeProgress = True;
- break; // play from one subsession only
- }
- if (!madeProgress) shutdown();
-#endif
- } else {
- // Create and start "FileSink"s for each subsession:
- madeProgress = False;
- iter.reset();
- while ((subsession = iter.next()) != NULL) {
- if (subsession->readSource() == NULL) continue; // was not initiated
-
- // Create an output file for each desired stream:
- char outFileName[1000];
- if (singleMedium == NULL) {
- // Output file name is
- // "<filename-prefix><medium_name>-<codec_name>-<counter>"
- static unsigned streamCounter = 0;
- snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
- fileNamePrefix, subsession->mediumName(),
- subsession->codecName(), ++streamCounter);
- } else {
- sprintf(outFileName, "stdout");
- }
- FileSink* fileSink;
- if (strcmp(subsession->mediumName(), "audio") == 0 &&
- (strcmp(subsession->codecName(), "AMR") == 0 ||
- strcmp(subsession->codecName(), "AMR-WB") == 0)) {
- // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
- fileSink = AMRAudioFileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
- } else if (strcmp(subsession->mediumName(), "video") == 0 &&
- (strcmp(subsession->codecName(), "H264") == 0)) {
- // For H.264 video stream, we use a special sink that insert start_codes:
- fileSink = H264VideoFileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
- } else {
- // Normal case:
- fileSink = FileSink::createNew(*env, outFileName,
- fileSinkBufferSize, oneFilePerFrame);
- }
- subsession->sink = fileSink;
- if (subsession->sink == NULL) {
- *env << "Failed to create FileSink for \"" << outFileName
- << "\": " << env->getResultMsg() << "\n";
- } else {
- if (singleMedium == NULL) {
- *env << "Created output file: \"" << outFileName << "\"\n";
- } else {
- *env << "Outputting data from the \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession to 'stdout'\n";
- }
-
- if (strcmp(subsession->mediumName(), "video") == 0 &&
- strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
- subsession->fmtp_config() != NULL) {
- // For MPEG-4 video RTP streams, the 'config' information
- // from the SDP description contains useful VOL etc. headers.
- // Insert this data at the front of the output file:
- unsigned configLen;
- unsigned char* configData
- = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- fileSink->addData(configData, configLen, timeNow);
- delete[] configData;
- }
-
- subsession->sink->startPlaying(*(subsession->readSource()),
- subsessionAfterPlaying,
- subsession);
-
- // Also set a handler to be called if a RTCP "BYE" arrives
- // for this subsession:
- if (subsession->rtcpInstance() != NULL) {
- subsession->rtcpInstance()->setByeHandler(subsessionByeHandler,
- subsession);
- }
-
- madeProgress = True;
- }
- }
- if (!madeProgress) shutdown();
- }
- }
-
- // Finally, start playing each subsession, to start the data flow:
-
- startPlayingStreams();
-
- env->taskScheduler().doEventLoop(); // does not return
-
- return 0; // only to prevent compiler warning
-}
-
-
-void setupStreams() {
- MediaSubsessionIterator iter(*session);
- MediaSubsession *subsession;
- Boolean madeProgress = False;
-
- while ((subsession = iter.next()) != NULL) {
- if (subsession->clientPortNum() == 0) continue; // port # was not set
-
- if (!clientSetupSubsession(ourClient, subsession, streamUsingTCP)) {
- *env << "Failed to setup \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession: " << env->getResultMsg() << "\n";
- } else {
- *env << "Setup \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession (client ports " << subsession->clientPortNum()
- << "-" << subsession->clientPortNum()+1 << ")\n";
- madeProgress = True;
- }
- }
- if (!madeProgress) shutdown();
-}
-
-void startPlayingStreams() {
- if (duration == 0) {
- if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
- else if (scale < 0) duration = initialSeekTime;
- }
- if (duration < 0) duration = 0.0;
-
- if (!clientStartPlayingSession(ourClient, session)) {
- *env << "Failed to start playing session: " << env->getResultMsg() << "\n";
- shutdown();
- } else {
- *env << "Started playing session\n";
- }
-
- if (qosMeasurementIntervalMS > 0) {
- // Begin periodic QOS measurements:
- beginQOSMeasurement();
- }
-
- // Figure out how long to delay (if at all) before shutting down, or
- // repeating the playing
- Boolean timerIsBeingUsed = False;
- double secondsToDelay = duration;
- if (duration > 0) {
- double const maxDelayTime
- = (double)( ((unsigned)0x7FFFFFFF)/1000000.0 );
- if (duration > maxDelayTime) {
- *env << "Warning: specified end time " << duration
- << " exceeds maximum " << maxDelayTime
- << "; will not do a delayed shutdown\n";
- } else {
- timerIsBeingUsed = True;
- double absScale = scale > 0 ? scale : -scale; // ASSERT: scale != 0
- secondsToDelay = duration/absScale + durationSlop;
-
- int uSecsToDelay = (int)(secondsToDelay*1000000.0);
- sessionTimerTask = env->taskScheduler().scheduleDelayedTask(
- uSecsToDelay, (TaskFunc*)sessionTimerHandler, (void*)NULL);
- }
- }
-
- char const* actionString
- = createReceivers? "Receiving streamed data":"Data is being streamed";
- if (timerIsBeingUsed) {
- *env << actionString
- << " (for up to " << secondsToDelay
- << " seconds)...\n";
- } else {
-#ifdef USE_SIGNALS
- pid_t ourPid = getpid();
- *env << actionString
- << " (signal with \"kill -HUP " << (int)ourPid
- << "\" or \"kill -USR1 " << (int)ourPid
- << "\" to terminate)...\n";
-#else
- *env << actionString << "...\n";
-#endif
- }
-
- // Watch for incoming packets (if desired):
- checkForPacketArrival(NULL);
- checkInterPacketGaps(NULL);
-}
-
-void tearDownStreams() {
- if (session == NULL) return;
-
- clientTearDownSession(ourClient, session);
-}
-
-void closeMediaSinks() {
- Medium::close(qtOut);
- Medium::close(aviOut);
-
- if (session == NULL) return;
- MediaSubsessionIterator iter(*session);
- MediaSubsession* subsession;
- while ((subsession = iter.next()) != NULL) {
- Medium::close(subsession->sink);
- subsession->sink = NULL;
- }
-}
-
-void subsessionAfterPlaying(void* clientData) {
- // Begin by closing this media subsession's stream:
- MediaSubsession* subsession = (MediaSubsession*)clientData;
- Medium::close(subsession->sink);
- subsession->sink = NULL;
-
- // Next, check whether *all* subsessions' streams have now been closed:
- MediaSession& session = subsession->parentSession();
- MediaSubsessionIterator iter(session);
- while ((subsession = iter.next()) != NULL) {
- if (subsession->sink != NULL) return; // this subsession is still active
- }
-
- // All subsessions' streams have now been closed
- sessionAfterPlaying();
-}
-
-void subsessionByeHandler(void* clientData) {
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- unsigned secsDiff = timeNow.tv_sec - startTime.tv_sec;
-
- MediaSubsession* subsession = (MediaSubsession*)clientData;
- *env << "Received RTCP \"BYE\" on \"" << subsession->mediumName()
- << "/" << subsession->codecName()
- << "\" subsession (after " << secsDiff
- << " seconds)\n";
-
- // Act now as if the subsession had closed:
- subsessionAfterPlaying(subsession);
-}
-
-void sessionAfterPlaying(void* /*clientData*/) {
- if (!playContinuously) {
- shutdown(0);
- } else {
- // We've been asked to play the stream(s) over again:
- startPlayingStreams();
- }
-}
-
-void sessionTimerHandler(void* /*clientData*/) {
- sessionTimerTask = NULL;
-
- sessionAfterPlaying();
-}
-
-class qosMeasurementRecord {
-public:
- qosMeasurementRecord(struct timeval const& startTime, RTPSource* src)
- : fSource(src), fNext(NULL),
- kbits_per_second_min(1e20), kbits_per_second_max(0),
- kBytesTotal(0.0),
- packet_loss_fraction_min(1.0), packet_loss_fraction_max(0.0),
- totNumPacketsReceived(0), totNumPacketsExpected(0) {
- measurementEndTime = measurementStartTime = startTime;
-
-#ifdef SUPPORT_REAL_RTSP
- if (session->isRealNetworksRDT) { // hack for RealMedia sessions (RDT, not RTP)
- RealRDTSource* rdt = (RealRDTSource*)src;
- kBytesTotal = rdt->totNumKBytesReceived();
- totNumPacketsReceived = rdt->totNumPacketsReceived();
- totNumPacketsExpected = totNumPacketsReceived; // because we use TCP
- return;
- }
-#endif
- RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB());
- // Assume that there's only one SSRC source (usually the case):
- RTPReceptionStats* stats = statsIter.next(True);
- if (stats != NULL) {
- kBytesTotal = stats->totNumKBytesReceived();
- totNumPacketsReceived = stats->totNumPacketsReceived();
- totNumPacketsExpected = stats->totNumPacketsExpected();
- }
- }
- virtual ~qosMeasurementRecord() { delete fNext; }
-
- void periodicQOSMeasurement(struct timeval const& timeNow);
-
-public:
- RTPSource* fSource;
- qosMeasurementRecord* fNext;
-
-public:
- struct timeval measurementStartTime, measurementEndTime;
- double kbits_per_second_min, kbits_per_second_max;
- double kBytesTotal;
- double packet_loss_fraction_min, packet_loss_fraction_max;
- unsigned totNumPacketsReceived, totNumPacketsExpected;
-};
-
-static qosMeasurementRecord* qosRecordHead = NULL;
-
-static void periodicQOSMeasurement(void* clientData); // forward
-
-static unsigned nextQOSMeasurementUSecs;
-
-static void scheduleNextQOSMeasurement() {
- nextQOSMeasurementUSecs += qosMeasurementIntervalMS*1000;
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- unsigned timeNowUSecs = timeNow.tv_sec*1000000 + timeNow.tv_usec;
- unsigned usecsToDelay = nextQOSMeasurementUSecs - timeNowUSecs;
- // Note: This works even when nextQOSMeasurementUSecs wraps around
-
- qosMeasurementTimerTask = env->taskScheduler().scheduleDelayedTask(
- usecsToDelay, (TaskFunc*)periodicQOSMeasurement, (void*)NULL);
-}
-
-static void periodicQOSMeasurement(void* /*clientData*/) {
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
-
- for (qosMeasurementRecord* qosRecord = qosRecordHead;
- qosRecord != NULL; qosRecord = qosRecord->fNext) {
- qosRecord->periodicQOSMeasurement(timeNow);
- }
-
- // Do this again later:
- scheduleNextQOSMeasurement();
-}
-
-void qosMeasurementRecord
-::periodicQOSMeasurement(struct timeval const& timeNow) {
- unsigned secsDiff = timeNow.tv_sec - measurementEndTime.tv_sec;
- int usecsDiff = timeNow.tv_usec - measurementEndTime.tv_usec;
- double timeDiff = secsDiff + usecsDiff/1000000.0;
- measurementEndTime = timeNow;
-
-#ifdef SUPPORT_REAL_RTSP
- if (session->isRealNetworksRDT) { // hack for RealMedia sessions (RDT, not RTP)
- RealRDTSource* rdt = (RealRDTSource*)fSource;
- double kBytesTotalNow = rdt->totNumKBytesReceived();
- double kBytesDeltaNow = kBytesTotalNow - kBytesTotal;
- kBytesTotal = kBytesTotalNow;
-
- double kbpsNow = timeDiff == 0.0 ? 0.0 : 8*kBytesDeltaNow/timeDiff;
- if (kbpsNow < 0.0) kbpsNow = 0.0; // in case of roundoff error
- if (kbpsNow < kbits_per_second_min) kbits_per_second_min = kbpsNow;
- if (kbpsNow > kbits_per_second_max) kbits_per_second_max = kbpsNow;
-
- totNumPacketsReceived = rdt->totNumPacketsReceived();
- totNumPacketsExpected = totNumPacketsReceived; // because we use TCP
- packet_loss_fraction_min = packet_loss_fraction_max = 0.0; // ditto
- return;
- }
-#endif
- RTPReceptionStatsDB::Iterator statsIter(fSource->receptionStatsDB());
- // Assume that there's only one SSRC source (usually the case):
- RTPReceptionStats* stats = statsIter.next(True);
- if (stats != NULL) {
- double kBytesTotalNow = stats->totNumKBytesReceived();
- double kBytesDeltaNow = kBytesTotalNow - kBytesTotal;
- kBytesTotal = kBytesTotalNow;
-
- double kbpsNow = timeDiff == 0.0 ? 0.0 : 8*kBytesDeltaNow/timeDiff;
- if (kbpsNow < 0.0) kbpsNow = 0.0; // in case of roundoff error
- if (kbpsNow < kbits_per_second_min) kbits_per_second_min = kbpsNow;
- if (kbpsNow > kbits_per_second_max) kbits_per_second_max = kbpsNow;
-
- unsigned totReceivedNow = stats->totNumPacketsReceived();
- unsigned totExpectedNow = stats->totNumPacketsExpected();
- unsigned deltaReceivedNow = totReceivedNow - totNumPacketsReceived;
- unsigned deltaExpectedNow = totExpectedNow - totNumPacketsExpected;
- totNumPacketsReceived = totReceivedNow;
- totNumPacketsExpected = totExpectedNow;
-
- double lossFractionNow = deltaExpectedNow == 0 ? 0.0
- : 1.0 - deltaReceivedNow/(double)deltaExpectedNow;
- //if (lossFractionNow < 0.0) lossFractionNow = 0.0; //reordering can cause
- if (lossFractionNow < packet_loss_fraction_min) {
- packet_loss_fraction_min = lossFractionNow;
- }
- if (lossFractionNow > packet_loss_fraction_max) {
- packet_loss_fraction_max = lossFractionNow;
- }
- }
-}
-
-void beginQOSMeasurement() {
- // Set up a measurement record for each active subsession:
- struct timeval startTime;
- gettimeofday(&startTime, NULL);
- nextQOSMeasurementUSecs = startTime.tv_sec*1000000 + startTime.tv_usec;
- qosMeasurementRecord* qosRecordTail = NULL;
- MediaSubsessionIterator iter(*session);
- MediaSubsession* subsession;
- while ((subsession = iter.next()) != NULL) {
- RTPSource* src = subsession->rtpSource();
-#ifdef SUPPORT_REAL_RTSP
- if (session->isRealNetworksRDT) src = (RTPSource*)(subsession->readSource()); // hack
-#endif
- if (src == NULL) continue;
-
- qosMeasurementRecord* qosRecord
- = new qosMeasurementRecord(startTime, src);
- if (qosRecordHead == NULL) qosRecordHead = qosRecord;
- if (qosRecordTail != NULL) qosRecordTail->fNext = qosRecord;
- qosRecordTail = qosRecord;
- }
-
- // Then schedule the first of the periodic measurements:
- scheduleNextQOSMeasurement();
-}
-
-void printQOSData(int exitCode) {
- if (exitCode != 0 && statusCode == 0) statusCode = 2;
- *env << "begin_QOS_statistics\n";
- *env << "server_availability\t" << (statusCode == 1 ? 0 : 100) << "\n";
- *env << "stream_availability\t" << (statusCode == 0 ? 100 : 0) << "\n";
-
- // Print out stats for each active subsession:
- qosMeasurementRecord* curQOSRecord = qosRecordHead;
- if (session != NULL) {
- MediaSubsessionIterator iter(*session);
- MediaSubsession* subsession;
- while ((subsession = iter.next()) != NULL) {
- RTPSource* src = subsession->rtpSource();
-#ifdef SUPPORT_REAL_RTSP
- if (session->isRealNetworksRDT) src = (RTPSource*)(subsession->readSource()); // hack
-#endif
- if (src == NULL) continue;
-
- *env << "subsession\t" << subsession->mediumName()
- << "/" << subsession->codecName() << "\n";
-
- unsigned numPacketsReceived = 0, numPacketsExpected = 0;
-
- if (curQOSRecord != NULL) {
- numPacketsReceived = curQOSRecord->totNumPacketsReceived;
- numPacketsExpected = curQOSRecord->totNumPacketsExpected;
- }
- *env << "num_packets_received\t" << numPacketsReceived << "\n";
- *env << "num_packets_lost\t" << numPacketsExpected - numPacketsReceived << "\n";
-
- if (curQOSRecord != NULL) {
- unsigned secsDiff = curQOSRecord->measurementEndTime.tv_sec
- - curQOSRecord->measurementStartTime.tv_sec;
- int usecsDiff = curQOSRecord->measurementEndTime.tv_usec
- - curQOSRecord->measurementStartTime.tv_usec;
- double measurementTime = secsDiff + usecsDiff/1000000.0;
- *env << "elapsed_measurement_time\t" << measurementTime << "\n";
-
- *env << "kBytes_received_total\t" << curQOSRecord->kBytesTotal << "\n";
-
- *env << "measurement_sampling_interval_ms\t" << qosMeasurementIntervalMS << "\n";
-
- if (curQOSRecord->kbits_per_second_max == 0) {
- // special case: we didn't receive any data:
- *env <<
- "kbits_per_second_min\tunavailable\n"
- "kbits_per_second_ave\tunavailable\n"
- "kbits_per_second_max\tunavailable\n";
- } else {
- *env << "kbits_per_second_min\t" << curQOSRecord->kbits_per_second_min << "\n";
- *env << "kbits_per_second_ave\t"
- << (measurementTime == 0.0 ? 0.0 : 8*curQOSRecord->kBytesTotal/measurementTime) << "\n";
- *env << "kbits_per_second_max\t" << curQOSRecord->kbits_per_second_max << "\n";
- }
-
- *env << "packet_loss_percentage_min\t" << 100*curQOSRecord->packet_loss_fraction_min << "\n";
- double packetLossFraction = numPacketsExpected == 0 ? 1.0
- : 1.0 - numPacketsReceived/(double)numPacketsExpected;
- if (packetLossFraction < 0.0) packetLossFraction = 0.0;
- *env << "packet_loss_percentage_ave\t" << 100*packetLossFraction << "\n";
- *env << "packet_loss_percentage_max\t"
- << (packetLossFraction == 1.0 ? 100.0 : 100*curQOSRecord->packet_loss_fraction_max) << "\n";
-
-#ifdef SUPPORT_REAL_RTSP
- if (session->isRealNetworksRDT) {
- RealRDTSource* rdt = (RealRDTSource*)src;
- *env << "inter_packet_gap_ms_min\t" << rdt->minInterPacketGapUS()/1000.0 << "\n";
- struct timeval totalGaps = rdt->totalInterPacketGaps();
- double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0;
- unsigned totNumPacketsReceived = rdt->totNumPacketsReceived();
- *env << "inter_packet_gap_ms_ave\t"
- << (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n";
- *env << "inter_packet_gap_ms_max\t" << rdt->maxInterPacketGapUS()/1000.0 << "\n";
- } else {
-#endif
- RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB());
- // Assume that there's only one SSRC source (usually the case):
- RTPReceptionStats* stats = statsIter.next(True);
- if (stats != NULL) {
- *env << "inter_packet_gap_ms_min\t" << stats->minInterPacketGapUS()/1000.0 << "\n";
- struct timeval totalGaps = stats->totalInterPacketGaps();
- double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0;
- unsigned totNumPacketsReceived = stats->totNumPacketsReceived();
- *env << "inter_packet_gap_ms_ave\t"
- << (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n";
- *env << "inter_packet_gap_ms_max\t" << stats->maxInterPacketGapUS()/1000.0 << "\n";
- }
-#ifdef SUPPORT_REAL_RTSP
- }
-#endif
-
- curQOSRecord = curQOSRecord->fNext;
- }
- }
- }
-
- *env << "end_QOS_statistics\n";
- delete qosRecordHead;
-}
-
-void shutdown(int exitCode) {
- if (env != NULL) {
- env->taskScheduler().unscheduleDelayedTask(sessionTimerTask);
- env->taskScheduler().unscheduleDelayedTask(arrivalCheckTimerTask);
- env->taskScheduler().unscheduleDelayedTask(interPacketGapCheckTimerTask);
- env->taskScheduler().unscheduleDelayedTask(qosMeasurementTimerTask);
- }
-
- if (qosMeasurementIntervalMS > 0) {
- printQOSData(exitCode);
- }
-
- // Close our output files:
- closeMediaSinks();
-
- // Teardown, then shutdown, any outstanding RTP/RTCP subsessions
- tearDownStreams();
- Medium::close(session);
-
- // Finally, shut down our client:
- Medium::close(ourClient);
-
- // Adios...
- exit(exitCode);
-}
-
-void signalHandlerShutdown(int /*sig*/) {
- *env << "Got shutdown signal\n";
- shutdown(0);
-}
-
-void checkForPacketArrival(void* /*clientData*/) {
- if (!notifyOnPacketArrival) return; // we're not checking
-
- // Check each subsession, to see whether it has received data packets:
- unsigned numSubsessionsChecked = 0;
- unsigned numSubsessionsWithReceivedData = 0;
- unsigned numSubsessionsThatHaveBeenSynced = 0;
-
- MediaSubsessionIterator iter(*session);
- MediaSubsession* subsession;
- while ((subsession = iter.next()) != NULL) {
- RTPSource* src = subsession->rtpSource();
- if (src == NULL) continue;
- ++numSubsessionsChecked;
-
- if (src->receptionStatsDB().numActiveSourcesSinceLastReset() > 0) {
- // At least one data packet has arrived
- ++numSubsessionsWithReceivedData;
- }
- if (src->hasBeenSynchronizedUsingRTCP()) {
- ++numSubsessionsThatHaveBeenSynced;
- }
- }
-
- unsigned numSubsessionsToCheck = numSubsessionsChecked;
- // Special case for "QuickTimeFileSink"s and "AVIFileSink"s:
- // They might not use all of the input sources:
- if (qtOut != NULL) {
- numSubsessionsToCheck = qtOut->numActiveSubsessions();
- } else if (aviOut != NULL) {
- numSubsessionsToCheck = aviOut->numActiveSubsessions();
- }
-
- Boolean notifyTheUser;
- if (!syncStreams) {
- notifyTheUser = numSubsessionsWithReceivedData > 0; // easy case
- } else {
- notifyTheUser = numSubsessionsWithReceivedData >= numSubsessionsToCheck
- && numSubsessionsThatHaveBeenSynced == numSubsessionsChecked;
- // Note: A subsession with no active sources is considered to be synced
- }
- if (notifyTheUser) {
- struct timeval timeNow;
- gettimeofday(&timeNow, NULL);
- char timestampStr[100];
- sprintf(timestampStr, "%ld%03ld", timeNow.tv_sec, (long)(timeNow.tv_usec/1000));
- *env << (syncStreams ? "Synchronized d" : "D")
- << "ata packets have begun arriving [" << timestampStr << "]\007\n";
- return;
- }
-
- // No luck, so reschedule this check again, after a delay:
- int uSecsToDelay = 100000; // 100 ms
- arrivalCheckTimerTask
- = env->taskScheduler().scheduleDelayedTask(uSecsToDelay,
- (TaskFunc*)checkForPacketArrival, NULL);
-}
-
-void checkInterPacketGaps(void* /*clientData*/) {
- if (interPacketGapMaxTime == 0) return; // we're not checking
-
- // Check each subsession, counting up how many packets have been received:
- unsigned newTotNumPacketsReceived = 0;
-
- MediaSubsessionIterator iter(*session);
- MediaSubsession* subsession;
- while ((subsession = iter.next()) != NULL) {
- RTPSource* src = subsession->rtpSource();
- if (src == NULL) continue;
- newTotNumPacketsReceived += src->receptionStatsDB().totNumPacketsReceived();
- }
-
- if (newTotNumPacketsReceived == totNumPacketsReceived) {
- // No additional packets have been received since the last time we
- // checked, so end this stream:
- *env << "Closing session, because we stopped receiving packets.\n";
- interPacketGapCheckTimerTask = NULL;
- sessionAfterPlaying();
- } else {
- totNumPacketsReceived = newTotNumPacketsReceived;
- // Check again, after the specified delay:
- interPacketGapCheckTimerTask
- = env->taskScheduler().scheduleDelayedTask(interPacketGapMaxTime*1000000,
- (TaskFunc*)checkInterPacketGaps, NULL);
- }
-}
diff --git a/testProgs/Makefile.tail b/testProgs/Makefile.tail
index 8707646..7c40a8b 100644
--- a/testProgs/Makefile.tail
+++ b/testProgs/Makefile.tail
@@ -59,7 +59,7 @@ LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(LIB_SUFFIX)
GROUPSOCK_DIR = ../groupsock
GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(LIB_SUFFIX)
LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
- $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB)
+ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)
testMP3Streamer$(EXE): $(MP3_STREAMER_OBJS) $(LOCAL_LIBS)
diff --git a/testProgs/playCommon.cpp b/testProgs/playCommon.cpp
index 44a697a..82df645 100644
--- a/testProgs/playCommon.cpp
+++ b/testProgs/playCommon.cpp
@@ -712,7 +712,7 @@ int main(int argc, char** argv) {
fileSinkBufferSize, oneFilePerFrame);
} else if (strcmp(subsession->mediumName(), "video") == 0 &&
(strcmp(subsession->codecName(), "H264") == 0)) {
- // For H.264 video stream, we use a special sink that insert start_codes:
+ // For H.264 video stream, we use a special sink that insert start_codes:
fileSink = H264VideoFileSink::createNew(*env, outFileName,
fileSinkBufferSize, oneFilePerFrame);
} else {
--
liblivemedia packaging
More information about the pkg-multimedia-commits
mailing list