Repost: RTMPDump Source Code Analysis

Date: 2022-04-01 04:54:34

0: Overview of the Main Function Calls

rtmpdump is an open-source toolkit for handling RTMP streaming media; it supports rtmp://, rtmpt://, rtmpe://, rtmpte://, and rtmps://, and an Android build is also provided.

I recently studied the call relationships among its internal functions.

The call graphs of several key functions are shown below.

Download(), the function RTMPDump uses to download an RTMP stream:

[call-graph figure: Download()]

Connect(), the function used to establish the network connection (NetConnection):

[call-graph figure: Connect()]

The function used to establish the network stream (NetStream):

[call-graph figure: NetStream setup]

RTMPDump source code (Linux): http://download.csdn.net/detail/leixiaohua1020/6376561

RTMPDump source code (VC 2005 project): http://download.csdn.net/detail/leixiaohua1020/6563163

1: The main() Function

rtmpdump is a toolkit for handling RTMP streaming media; it supports rtmp://, rtmpt://, rtmpe://, rtmpte://, rtmps://, and so on. When I was studying the RTMP protocol I could not find any write-up of its source code, so I had to analyze it myself. I am now writing up what I have learned; not all of the conclusions are necessarily correct, but I want to record them for the time being.

The overall flow of downloading a stream with RTMPDump looks roughly like this:

RTMP_Init();          // initialize the RTMP structure
InitSockets();        // initialize sockets
RTMP_ParseURL();      // parse the input URL
RTMP_SetupStream();   // apply various settings
fopen();              // open the output file for writing
RTMP_Connect();       // establish the NetConnection
RTMP_ConnectStream(); // establish the NetStream
Download();           // download function
RTMP_Close();         // close the connection
fclose();             // close the file
CleanupSockets();     // clean up sockets

Download() itself mainly uses RTMP_Read() to fetch the data.
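
The same overall flow can also be driven through libRTMP's public API. Below is a minimal, hedged sketch (not the rtmpdump code itself): it uses RTMP_SetupURL(), which wraps the RTMP_ParseURL()/RTMP_SetupStream() pair called explicitly above; the URL and output filename are made up for the example, and socket initialization (InitSockets/WSAStartup on Windows) is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <librtmp/rtmp.h>

int main(void)
{
    /* hypothetical URL and output file, just for illustration */
    char url[] = "rtmp://localhost/vod/sample";
    FILE *out = fopen("sample.flv", "wb");
    if (!out) return 1;

    RTMP *r = RTMP_Alloc();
    RTMP_Init(r);                          /* initialize the RTMP structure  */
    if (!RTMP_SetupURL(r, url) ||          /* parse URL + fill Link fields   */
        !RTMP_Connect(r, NULL) ||          /* handshake + NetConnection      */
        !RTMP_ConnectStream(r, 0))         /* createStream + play            */
    {
        fprintf(stderr, "connect failed\n");
        RTMP_Free(r);
        fclose(out);
        return 1;
    }

    char buf[64 * 1024];
    int n;
    while ((n = RTMP_Read(r, buf, sizeof(buf))) > 0)   /* FLV-formatted data */
        fwrite(buf, 1, (size_t)n, out);

    RTMP_Close(r);
    RTMP_Free(r);
    fclose(out);
    return 0;
}

Compile and link against librtmp (for example gcc demo.c -lrtmp); the loop ends when RTMP_Read() no longer returns a positive byte count.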

Note: see also the earlier article "RTMP流媒体播放过程" (the RTMP streaming playback process).

Below is the RTMPDump source code with my own annotations. Note the following points:

1. This copy of RTMPDump has been ported into a VC 2010 MFC project, so main() has been renamed rtmpdump() and its parameters have changed: a handle to an MFC dialog is passed in. The functionality is largely unchanged (once a console program is ported into MFC, main() is no longer the program entry point, so what it is renamed to does not matter).

2. There are many information-gathering statements of the form rtmp.dlg->AppendCInfo("开始初始化Socket..."); I added these myself in order to collect RTMP information, and they do not affect program execution.

int rtmpdump(LPVOID lpParam, int argc, char **argv)
{
  extern char *optarg;
  // must be reset, otherwise getopt() can only be run once
  extern int optind;
  optind = 1;

  int nStatus = RD_SUCCESS;
  double percent = 0;
  double duration = 0.0;

  int nSkipKeyFrames = DEF_SKIPFRM;   // skip this number of keyframes when resuming
  int bOverrideBufferTime = FALSE;    // if the user specifies a buffer time override this is true
  int bStdoutMode = TRUE;             // if true print the stream directly to stdout, messages go to stderr
  int bResume = FALSE;                // true in resume mode
  uint32_t dSeek = 0;                 // seek position in resume mode, 0 otherwise
  uint32_t bufferTime = DEF_BUFTIME;

  // meta header and initial frame for the resume mode (they are read from the file
  // and compared with the stream we are trying to continue)
  char *metaHeader = 0;
  uint32_t nMetaHeaderSize = 0;

  // video keyframe for matching
  char *initialFrame = 0;
  uint32_t nInitialFrameSize = 0;
  int initialFrameType = 0;           // type: audio or video

  AVal hostname = { 0, 0 };
  AVal playpath = { 0, 0 };
  AVal subscribepath = { 0, 0 };
  int port = -1;
  int protocol = RTMP_PROTOCOL_UNDEFINED;
  int retries = 0;
  int bLiveStream = FALSE;            // is it a live stream? then we can't seek/resume
  int bHashes = FALSE;                // display byte counters not hashes by default

  long int timeout = DEF_TIMEOUT;     // timeout connection after 120 seconds
  uint32_t dStartOffset = 0;          // seek position in non-live mode
  uint32_t dStopOffset = 0;
  RTMP rtmp = { 0 };

  AVal swfUrl = { 0, 0 };
  AVal tcUrl = { 0, 0 };
  AVal pageUrl = { 0, 0 };
  AVal app = { 0, 0 };
  AVal auth = { 0, 0 };
  AVal swfHash = { 0, 0 };
  uint32_t swfSize = 0;
  AVal flashVer = { 0, 0 };
  AVal sockshost = { 0, 0 };

#ifdef CRYPTO
  int swfAge = 30;                    /* 30 days for SWF cache by default */
  int swfVfy = 0;
  unsigned char hash[RTMP_SWF_HASHLEN];
#endif

  char *flvFile = 0;

  signal(SIGINT, sigIntHandler);
  signal(SIGTERM, sigIntHandler);
#ifndef WIN32
  signal(SIGHUP, sigIntHandler);
  signal(SIGPIPE, sigIntHandler);
  signal(SIGQUIT, sigIntHandler);
#endif

  RTMP_debuglevel = RTMP_LOGINFO;

  // look for the "--quiet" option first
  int index = 0;
  while (index < argc)
  {
    if (strcmp(argv[index], "--quiet") == 0
        || strcmp(argv[index], "-q") == 0)
      RTMP_debuglevel = RTMP_LOGCRIT;
    index++;
  }
#define RTMPDUMP_VERSION "1.0"
RTMP_LogPrintf("RTMP流媒体下载 %s\n", RTMPDUMP_VERSION);
RTMP_LogPrintf
("2012 雷霄骅 中国传媒大学/信息工程学院/通信与信息系统/数字电视技术\n");
//RTMP_LogPrintf("输入 -h 获取命令选项\n");
RTMP_Init(&rtmp);
//句柄-----------------------------
rtmp.dlg=(CSpecialPRTMPDlg *)lpParam;
//---------------------------------
//----------------------
rtmp.dlg->AppendCInfo("开始初始化Socket...");
//-----------------------------
if (!InitSockets())
{
//----------------------
rtmp.dlg->AppendCInfo("初始化Socket失败!");
//-----------------------------
RTMP_Log(RTMP_LOGERROR,
"Couldn't load sockets support on your platform, exiting!");
return RD_FAILED;
}
//----------------------
rtmp.dlg->AppendCInfo("成功初始化Socket");
//-----------------------------
/* sleep(30); */ int opt;
/* struct option longopts[] = {
{"help", 0, NULL, 'h'},
{"host", 1, NULL, 'n'},
{"port", 1, NULL, 'c'},
{"socks", 1, NULL, 'S'},
{"protocol", 1, NULL, 'l'},
{"playpath", 1, NULL, 'y'},
{"playlist", 0, NULL, 'Y'},
{"rtmp", 1, NULL, 'r'},
{"swfUrl", 1, NULL, 's'},
{"tcUrl", 1, NULL, 't'},
{"pageUrl", 1, NULL, 'p'},
{"app", 1, NULL, 'a'},
{"auth", 1, NULL, 'u'},
{"conn", 1, NULL, 'C'},
#ifdef CRYPTO
{"swfhash", 1, NULL, 'w'},
{"swfsize", 1, NULL, 'x'},
{"swfVfy", 1, NULL, 'W'},
{"swfAge", 1, NULL, 'X'},
#endif
{"flashVer", 1, NULL, 'f'},
{"live", 0, NULL, 'v'},
{"flv", 1, NULL, 'o'},
{"resume", 0, NULL, 'e'},
{"timeout", 1, NULL, 'm'},
{"buffer", 1, NULL, 'b'},
{"skip", 1, NULL, 'k'},
{"subscribe", 1, NULL, 'd'},
{"start", 1, NULL, 'A'},
{"stop", 1, NULL, 'B'},
{"token", 1, NULL, 'T'},
{"hashes", 0, NULL, '#'},
{"debug", 0, NULL, 'z'},
{"quiet", 0, NULL, 'q'},
{"verbose", 0, NULL, 'V'},
{0, 0, 0, 0}
};*/
//分析命令行参数,注意用法。
//选项都是一个字母,后面有冒号的代表该选项还有相关参数
//一直循环直到获取所有的opt
while ((opt =
getopt/*_long*/(argc, argv,
"hVveqzr:s:t:p:a:b:f:o:u:C:n:c:l:y:Ym:k:d:A:B:T:w:x:W:X:S:#"/*,
longopts, NULL*/)) != -)
{
//不同的选项做不同的处理
switch (opt)
{
case 'h':
usage(argv[]);
return RD_SUCCESS;
#ifdef CRYPTO
case 'w':
{
int res = hex2bin(optarg, &swfHash.av_val);
if (res != RTMP_SWF_HASHLEN)
{
swfHash.av_val = NULL;
RTMP_Log(RTMP_LOGWARNING,
"Couldn't parse swf hash hex string, not hexstring or not %d bytes, ignoring!", RTMP_SWF_HASHLEN);
}
swfHash.av_len = RTMP_SWF_HASHLEN;
break;
}
case 'x':
{
int size = atoi(optarg);
if (size <= )
{
RTMP_Log(RTMP_LOGERROR, "SWF Size must be at least 1, ignoring\n");
}
else
{
swfSize = size;
}
break;
}
case 'W':
STR2AVAL(swfUrl, optarg);
swfVfy = ;
break;
case 'X':
{
int num = atoi(optarg);
if (num < )
{
RTMP_Log(RTMP_LOGERROR, "SWF Age must be non-negative, ignoring\n");
}
else
{
swfAge = num;
}
}
break;
#endif
case 'k':
nSkipKeyFrames = atoi(optarg);
if (nSkipKeyFrames < )
{
RTMP_Log(RTMP_LOGERROR,
"Number of keyframes skipped must be greater or equal zero, using zero!");
nSkipKeyFrames = ;
}
else
{
RTMP_Log(RTMP_LOGDEBUG, "Number of skipped key frames for resume: %d",
nSkipKeyFrames);
}
break;
case 'b':
{
int32_t bt = atol(optarg);
if (bt < )
{
RTMP_Log(RTMP_LOGERROR,
"Buffer time must be greater than zero, ignoring the specified value %d!",
bt);
}
else
{
bufferTime = bt;
bOverrideBufferTime = TRUE;
}
break;
}
//直播流
case 'v':
//----------------
rtmp.dlg->AppendCInfo("该RTMP的URL是一个直播流");
//----------------
bLiveStream = TRUE; // no seeking or resuming possible!
break;
case 'd':
STR2AVAL(subscribepath, optarg);
break;
case 'n':
STR2AVAL(hostname, optarg);
break;
case 'c':
port = atoi(optarg);
break;
case 'l':
protocol = atoi(optarg);
if (protocol < RTMP_PROTOCOL_RTMP || protocol > RTMP_PROTOCOL_RTMPTS)
{
RTMP_Log(RTMP_LOGERROR, "Unknown protocol specified: %d", protocol);
return RD_FAILED;
}
break;
case 'y':
STR2AVAL(playpath, optarg);
break;
case 'Y':
RTMP_SetOpt(&rtmp, &av_playlist, (AVal *)&av_true);
break;
//路径参数-r
case 'r':
{
AVal parsedHost, parsedApp, parsedPlaypath;
unsigned int parsedPort = ;
int parsedProtocol = RTMP_PROTOCOL_UNDEFINED;
//解析URL。注optarg指向参数(URL)
RTMP_LogPrintf("RTMP URL : %s\n",optarg);
//----------------
rtmp.dlg->AppendCInfo("解析RTMP的URL...");
//----------------
if (!RTMP_ParseURL
(optarg, &parsedProtocol, &parsedHost, &parsedPort,
&parsedPlaypath, &parsedApp))
{
//----------------
rtmp.dlg->AppendCInfo("解析RTMP的URL失败!");
//----------------
RTMP_Log(RTMP_LOGWARNING, "无法解析 url (%s)!",
optarg);
}
else
{
//----------------
rtmp.dlg->AppendCInfo("解析RTMP的URL成功");
//----------------
//把解析出来的数据赋值
if (!hostname.av_len)
hostname = parsedHost;
if (port == -)
port = parsedPort;
if (playpath.av_len == && parsedPlaypath.av_len)
{
playpath = parsedPlaypath;
}
if (protocol == RTMP_PROTOCOL_UNDEFINED)
protocol = parsedProtocol;
if (app.av_len == && parsedApp.av_len)
{
app = parsedApp;
}
}
break;
}
case 's':
STR2AVAL(swfUrl, optarg);
break;
case 't':
STR2AVAL(tcUrl, optarg);
break;
case 'p':
STR2AVAL(pageUrl, optarg);
break;
case 'a':
STR2AVAL(app, optarg);
break;
case 'f':
STR2AVAL(flashVer, optarg);
break;
//指定输出文件
case 'o':
flvFile = optarg;
if (strcmp(flvFile, "-"))
bStdoutMode = FALSE; break;
case 'e':
bResume = TRUE;
break;
case 'u':
STR2AVAL(auth, optarg);
break;
case 'C': {
AVal av;
STR2AVAL(av, optarg);
if (!RTMP_SetOpt(&rtmp, &av_conn, &av))
{
RTMP_Log(RTMP_LOGERROR, "Invalid AMF parameter: %s", optarg);
return RD_FAILED;
}
}
break;
case 'm':
timeout = atoi(optarg);
break;
case 'A':
dStartOffset = (int) (atof(optarg) * 1000.0);
break;
case 'B':
dStopOffset = (int) (atof(optarg) * 1000.0);
break;
case 'T': {
AVal token;
STR2AVAL(token, optarg);
RTMP_SetOpt(&rtmp, &av_token, &token);
}
break;
case '#':
bHashes = TRUE;
break;
case 'q':
RTMP_debuglevel = RTMP_LOGCRIT;
break;
case 'V':
RTMP_debuglevel = RTMP_LOGDEBUG;
break;
case 'z':
RTMP_debuglevel = RTMP_LOGALL;
break;
case 'S':
STR2AVAL(sockshost, optarg);
break;
default:
RTMP_LogPrintf("unknown option: %c\n", opt);
usage(argv[]);
return RD_FAILED;
break;
}
} if (!hostname.av_len)
{
RTMP_Log(RTMP_LOGERROR,
"您必须指定 主机名(hostname) (--host) 或 url (-r \"rtmp://host[:port]/playpath\") 包含 a hostname");
return RD_FAILED;
}
if (playpath.av_len == )
{
RTMP_Log(RTMP_LOGERROR,
"您必须指定 播放路径(playpath) (--playpath) 或 url (-r \"rtmp://host[:port]/playpath\") 包含 a playpath");
return RD_FAILED;
} if (protocol == RTMP_PROTOCOL_UNDEFINED)
{
RTMP_Log(RTMP_LOGWARNING,
"您没有指定 协议(protocol) (--protocol) 或 rtmp url (-r), 默认协议 RTMP");
protocol = RTMP_PROTOCOL_RTMP;
}
if (port == -)
{
RTMP_Log(RTMP_LOGWARNING,
"您没有指定 端口(port) (--port) 或 rtmp url (-r), 默认端口 1935");
port = ;
}
if (port == )
{
if (protocol & RTMP_FEATURE_SSL)
port = ;
else if (protocol & RTMP_FEATURE_HTTP)
port = ;
else
port = ;
} if (flvFile == )
{
RTMP_Log(RTMP_LOGWARNING,
"请指定一个输出文件 (-o filename), using stdout");
bStdoutMode = TRUE;
} if (bStdoutMode && bResume)
{
RTMP_Log(RTMP_LOGWARNING,
"Can't resume in stdout mode, ignoring --resume option");
bResume = FALSE;
} if (bLiveStream && bResume)
{
RTMP_Log(RTMP_LOGWARNING, "Can't resume live stream, ignoring --resume option");
bResume = FALSE;
} #ifdef CRYPTO
if (swfVfy)
{
if (RTMP_HashSWF(swfUrl.av_val, (unsigned int *)&swfSize, hash, swfAge) == )
{
swfHash.av_val = (char *)hash;
swfHash.av_len = RTMP_SWF_HASHLEN;
}
} if (swfHash.av_len == && swfSize > )
{
RTMP_Log(RTMP_LOGWARNING,
"Ignoring SWF size, supply also the hash with --swfhash");
swfSize = ;
} if (swfHash.av_len != && swfSize == )
{
RTMP_Log(RTMP_LOGWARNING,
"Ignoring SWF hash, supply also the swf size with --swfsize");
swfHash.av_len = ;
swfHash.av_val = NULL;
}
#endif if (tcUrl.av_len == )
{
char str[] = { }; tcUrl.av_len = snprintf(str, , "%s://%.*s:%d/%.*s",
RTMPProtocolStringsLower[protocol], hostname.av_len,
hostname.av_val, port, app.av_len, app.av_val);
tcUrl.av_val = (char *) malloc(tcUrl.av_len + );
strcpy(tcUrl.av_val, str);
} int first = ; // User defined seek offset
if (dStartOffset > )
{
//直播流
if (bLiveStream)
{
RTMP_Log(RTMP_LOGWARNING,
"Can't seek in a live stream, ignoring --start option");
dStartOffset = ;
}
}
//----------------
rtmp.dlg->AppendCInfo("开始初始化RTMP连接的参数...");
//----------------
//设置
RTMP_SetupStream(&rtmp, protocol, &hostname, port, &sockshost, &playpath,
&tcUrl, &swfUrl, &pageUrl, &app, &auth, &swfHash, swfSize,
&flashVer, &subscribepath, dSeek, dStopOffset, bLiveStream, timeout);
//此处设置参数-----------------
rtmp.dlg->AppendCInfo("成功初始化RTMP连接的参数");
//-----------------------------
char *temp=(char *)malloc(MAX_URL_LENGTH); memcpy(temp,rtmp.Link.hostname.av_val,rtmp.Link.hostname.av_len);
temp[rtmp.Link.hostname.av_len]='\0';
rtmp.dlg->AppendB_R_L_Info("主机名",temp); itoa(rtmp.Link.port,temp,);
rtmp.dlg->AppendB_R_L_Info("端口号",temp); memcpy(temp,rtmp.Link.app.av_val,rtmp.Link.app.av_len);
temp[rtmp.Link.app.av_len]='\0';
rtmp.dlg->AppendB_R_L_Info("应用程序",temp); memcpy(temp,rtmp.Link.playpath.av_val,rtmp.Link.playpath.av_len);
temp[rtmp.Link.playpath.av_len]='\0';
rtmp.dlg->AppendB_R_L_Info("路径",temp); //----------------------------- /* Try to keep the stream moving if it pauses on us */
if (!bLiveStream && !(protocol & RTMP_FEATURE_HTTP))
rtmp.Link.lFlags |= RTMP_LF_BUFX; off_t size = ; // ok,我们必须获得timestamp of the last keyframe (only keyframes are seekable) / last audio frame (audio only streams)
if (bResume)
{
//打开文件,输出的文件(Resume)
nStatus =
OpenResumeFile(flvFile, &file, &size, &metaHeader, &nMetaHeaderSize,
&duration);
if (nStatus == RD_FAILED)
goto clean; if (!file)
{
// file does not exist, so go back into normal mode
bResume = FALSE; // we are back in fresh file mode (otherwise finalizing file won't be done)
}
else
{
//获取最后一个关键帧
nStatus = GetLastKeyframe(file, nSkipKeyFrames,
&dSeek, &initialFrame,
&initialFrameType, &nInitialFrameSize);
if (nStatus == RD_FAILED)
{
RTMP_Log(RTMP_LOGDEBUG, "Failed to get last keyframe.");
goto clean;
} if (dSeek == )
{
RTMP_Log(RTMP_LOGDEBUG,
"Last keyframe is first frame in stream, switching from resume to normal mode!");
bResume = FALSE;
}
}
}
//如果输出文件不存在
if (!file)
{
if (bStdoutMode)
{
//直接输出到stdout
file = stdout;
SET_BINMODE(file);
}
else
{
//打开一个文件
//w+b 读写打开或建立一个二进制文件,允许读和写。
//-----------------
rtmp.dlg->AppendCInfo("创建输出文件...");
//-----------------------------
file = fopen(flvFile, "w+b");
if (file == )
{
//-----------------
rtmp.dlg->AppendCInfo("创建输出文件失败!");
//-----------------------------
RTMP_LogPrintf("Failed to open file! %s\n", flvFile);
return RD_FAILED;
}
rtmp.dlg->AppendCInfo("成功创建输出文件");
}
} #ifdef _DEBUG
netstackdump = fopen("netstackdump", "wb");
netstackdump_read = fopen("netstackdump_read", "wb");
#endif while (!RTMP_ctrlC)
{
RTMP_Log(RTMP_LOGDEBUG, "Setting buffer time to: %dms", bufferTime);
//设置Buffer时间
//-----------------
rtmp.dlg->AppendCInfo("设置缓冲(Buffer)的时间");
//-----------------------------
RTMP_SetBufferMS(&rtmp, bufferTime);
//第一次执行
if (first)
{
first = ;
RTMP_LogPrintf("开始建立连接!\n");
//-----------------
rtmp.dlg->AppendCInfo("开始建立连接(NetConnection)...");
//-----------------------------
//建立连接(Connect)
if (!RTMP_Connect(&rtmp, NULL))
{
//-----------------
rtmp.dlg->AppendCInfo("建立连接(NetConnection)失败!");
//-----------------------------
nStatus = RD_FAILED;
break;
}
//-----------------
rtmp.dlg->AppendCInfo("成功建立连接(NetConnection)");
//-----------------------------
//RTMP_Log(RTMP_LOGINFO, "已链接..."); // User defined seek offset
if (dStartOffset > )
{
// Don't need the start offset if resuming an existing file
if (bResume)
{
RTMP_Log(RTMP_LOGWARNING,
"Can't seek a resumed stream, ignoring --start option");
dStartOffset = ;
}
else
{
dSeek = dStartOffset;
}
} // Calculate the length of the stream to still play
if (dStopOffset > )
{
// Quit if start seek is past required stop offset
if (dStopOffset <= dSeek)
{
RTMP_LogPrintf("Already Completed\n");
nStatus = RD_SUCCESS;
break;
}
}
//创建流(Stream)(发送connect命令消息后处理传来的数据)
itoa(rtmp.m_inChunkSize,temp,);
rtmp.dlg->AppendB_R_Info("输入Chunk大小",temp);
itoa(rtmp.m_outChunkSize,temp,);
rtmp.dlg->AppendB_R_Info("输出Chunk大小",temp);
itoa(rtmp.m_stream_id,temp,);
rtmp.dlg->AppendB_R_Info("Stream ID",temp);
itoa(rtmp.m_nBufferMS,temp,);
rtmp.dlg->AppendB_R_Info("Buffer时长(ms)",temp);
itoa(rtmp.m_nServerBW,temp,);
rtmp.dlg->AppendB_R_Info("ServerBW",temp);
itoa(rtmp.m_nClientBW,temp,);
rtmp.dlg->AppendB_R_Info("ClientBW",temp);
itoa((int)rtmp.m_fEncoding,temp,);
rtmp.dlg->AppendB_R_Info("命令消息编码方法",temp);
itoa((int)rtmp.m_fDuration,temp,);
rtmp.dlg->AppendB_R_Info("时长(s)",temp); rtmp.dlg->ShowBInfo();
free(temp);
//-----------------
rtmp.dlg->AppendCInfo("开始建立网络流(NetStream)");
//-----------------------------
if (!RTMP_ConnectStream(&rtmp, dSeek))
{
//-----------------
rtmp.dlg->AppendCInfo("建立网络流(NetStream)失败!");
//-----------------
nStatus = RD_FAILED;
break;
}
//-----------------
rtmp.dlg->AppendCInfo("成功建立网络流(NetStream)!");
//-----------------
}
else
{
nInitialFrameSize = ; if (retries)
{
RTMP_Log(RTMP_LOGERROR, "Failed to resume the stream\n\n");
if (!RTMP_IsTimedout(&rtmp))
nStatus = RD_FAILED;
else
nStatus = RD_INCOMPLETE;
break;
}
RTMP_Log(RTMP_LOGINFO, "Connection timed out, trying to resume.\n\n");
/* Did we already try pausing, and it still didn't work? */
if (rtmp.m_pausing == )
{
/* Only one try at reconnecting... */
retries = ;
dSeek = rtmp.m_pauseStamp;
if (dStopOffset > )
{
if (dStopOffset <= dSeek)
{
RTMP_LogPrintf("Already Completed\n");
nStatus = RD_SUCCESS;
break;
}
}
if (!RTMP_ReconnectStream(&rtmp, dSeek))
{
RTMP_Log(RTMP_LOGERROR, "Failed to resume the stream\n\n");
if (!RTMP_IsTimedout(&rtmp))
nStatus = RD_FAILED;
else
nStatus = RD_INCOMPLETE;
break;
}
}
else if (!RTMP_ToggleStream(&rtmp))
{
RTMP_Log(RTMP_LOGERROR, "Failed to resume the stream\n\n");
if (!RTMP_IsTimedout(&rtmp))
nStatus = RD_FAILED;
else
nStatus = RD_INCOMPLETE;
break;
}
bResume = TRUE;
}
//----------------- //-----------------
rtmp.dlg->AppendCInfo("开始将媒体数据写入文件");
//-----------------
//下载,写入文件
nStatus = Download(&rtmp, file, dSeek, dStopOffset, duration, bResume,
metaHeader, nMetaHeaderSize, initialFrame,
initialFrameType, nInitialFrameSize,
nSkipKeyFrames, bStdoutMode, bLiveStream, bHashes,
bOverrideBufferTime, bufferTime, &percent);
free(initialFrame);
initialFrame = NULL; /* If we succeeded, we're done.
*/
if (nStatus != RD_INCOMPLETE || !RTMP_IsTimedout(&rtmp) || bLiveStream)
break;
}
//当下载完的时候
if (nStatus == RD_SUCCESS)
{
//-----------------
rtmp.dlg->AppendCInfo("写入文件完成");
//-----------------
RTMP_LogPrintf("Download complete\n");
}
//没下载完的时候
else if (nStatus == RD_INCOMPLETE)
{
//-----------------
rtmp.dlg->AppendCInfo("写入文件可能不完整");
//-----------------
RTMP_LogPrintf
("Download may be incomplete (downloaded about %.2f%%), try resuming\n",
percent);
}
//后续清理工作
clean:
//-----------------
rtmp.dlg->AppendCInfo("关闭连接");
//-----------------
RTMP_Log(RTMP_LOGDEBUG, "Closing connection.\n");
RTMP_Close(&rtmp);
rtmp.dlg->AppendCInfo("关闭文件");
if (file != )
fclose(file);
rtmp.dlg->AppendCInfo("关闭Socket");
CleanupSockets(); #ifdef _DEBUG
if (netstackdump != )
fclose(netstackdump);
if (netstackdump_read != )
fclose(netstackdump_read);
#endif
return nStatus;
}

InitSockets() is very simple; it just initializes the socket layer:

// initialize sockets (Winsock needs explicit startup on Windows)
int
InitSockets()
{
#ifdef WIN32
  WORD version;
  WSADATA wsaData;

  version = MAKEWORD(1, 1);
  return (WSAStartup(version, &wsaData) == 0);
#else
  return TRUE;
#endif
}

CleanupSockets() is even simpler:

inline void
CleanupSockets()
{
#ifdef WIN32
WSACleanup();
#endif
}

Download() is considerably more complex:

int
Download(RTMP * rtmp, // connected RTMP object
FILE * file, uint32_t dSeek, uint32_t dStopOffset, double duration, int bResume, char *metaHeader, uint32_t nMetaHeaderSize, char *initialFrame, int initialFrameType, uint32_t nInitialFrameSize, int nSkipKeyFrames, int bStdoutMode, int bLiveStream, int bHashes, int bOverrideBufferTime, uint32_t bufferTime, double *percent) // percentage downloaded [out]
{
int32_t now, lastUpdate;
  int bufferSize = 64 * 1024;
  char *buffer = (char *) malloc(bufferSize);
  int nRead = 0;
  //long ftell(FILE *stream);
  //returns the current position in the file
  RTMP_LogPrintf("开始下载!\n");
  off_t size = ftello(file);
  unsigned long lastPercent = 0;

  //timestamp
  rtmp->m_read.timestamp = dSeek;

  *percent = 0.0;

  if (rtmp->m_read.timestamp)
{
RTMP_Log(RTMP_LOGDEBUG, "Continuing at TS: %d ms\n", rtmp->m_read.timestamp);
}
//是直播
if (bLiveStream)
{
RTMP_LogPrintf("直播流\n");
}
else
{
// print initial status
// Workaround to exit with 0 if the file is fully (> 99.9%) downloaded
if (duration > )
{
if ((double) rtmp->m_read.timestamp >= (double) duration * 999.0)
{
RTMP_LogPrintf("Already Completed at: %.3f sec Duration=%.3f sec\n",
(double) rtmp->m_read.timestamp / 1000.0,
(double) duration / 1000.0);
return RD_SUCCESS;
}
else
{
*percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
*percent = ((double) (int) (*percent * 10.0)) / 10.0;
RTMP_LogPrintf("%s download at: %.3f kB / %.3f sec (%.1f%%)\n",
bResume ? "Resuming" : "Starting",
(double) size / 1024.0, (double) rtmp->m_read.timestamp / 1000.0,
*percent);
}
}
else
{
RTMP_LogPrintf("%s download at: %.3f kB\n",
bResume ? "Resuming" : "Starting",
(double) size / 1024.0);
}
} if (dStopOffset > )
RTMP_LogPrintf("For duration: %.3f sec\n", (double) (dStopOffset - dSeek) / 1000.0); //各种设置参数到rtmp连接
if (bResume && nInitialFrameSize > )
rtmp->m_read.flags |= RTMP_READ_RESUME;
rtmp->m_read.initialFrameType = initialFrameType;
rtmp->m_read.nResumeTS = dSeek;
rtmp->m_read.metaHeader = metaHeader;
rtmp->m_read.initialFrame = initialFrame;
rtmp->m_read.nMetaHeaderSize = nMetaHeaderSize;
rtmp->m_read.nInitialFrameSize = nInitialFrameSize; now = RTMP_GetTime();
lastUpdate = now - ;
do
{
//从rtmp中把bufferSize(64k)个数据读入buffer
nRead = RTMP_Read(rtmp, buffer, bufferSize);
//RTMP_LogPrintf("nRead: %d\n", nRead);
if (nRead > )
{
//函数:size_t fwrite(const void* buffer,size_t size,size_t count,FILE* stream);
//向文件读入写入一个数据块。返回值:返回实际写入的数据块数目
//(1)buffer:是一个指针,对fwrite来说,是要输出数据的地址。
//(2)size:要写入内容的单字节数;
//(3)count:要进行写入size字节的数据项的个数;
//(4)stream:目标文件指针。
//(5)返回实际写入的数据项个数count。
//关键。把buffer里面的数据写成文件
if (fwrite(buffer, sizeof(unsigned char), nRead, file) !=
(size_t) nRead)
{
RTMP_Log(RTMP_LOGERROR, "%s: Failed writing, exiting!", __FUNCTION__);
free(buffer);
return RD_FAILED;
}
//记录已经写入的字节数
size += nRead; //RTMP_LogPrintf("write %dbytes (%.1f kB)\n", nRead, nRead/1024.0);
if (duration <= ) // if duration unknown try to get it from the stream (onMetaData)
duration = RTMP_GetDuration(rtmp); if (duration > )
{
// make sure we claim to have enough buffer time!
if (!bOverrideBufferTime && bufferTime < (duration * 1000.0))
{
bufferTime = (uint32_t) (duration * 1000.0) + ; // 再加5s以确保buffertime足够长 RTMP_Log(RTMP_LOGDEBUG,
"Detected that buffer time is less than duration, resetting to: %dms",
bufferTime);
//重设Buffer长度
RTMP_SetBufferMS(rtmp, bufferTime);
//给服务器发送UserControl消息通知Buffer改变
RTMP_UpdateBufferMS(rtmp);
}
//计算百分比
*percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
*percent = ((double) (int) (*percent * 10.0)) / 10.0;
if (bHashes)
{
if (lastPercent + <= *percent)
{
RTMP_LogStatus("#");
lastPercent = (unsigned long) *percent;
}
}
else
{
//设置显示数据的更新间隔200ms
now = RTMP_GetTime();
if (abs(now - lastUpdate) > )
{
RTMP_LogStatus("\r%.3f kB / %.2f sec (%.1f%%)",
(double) size / 1024.0,
(double) (rtmp->m_read.timestamp) / 1000.0, *percent);
lastUpdate = now;
}
}
}
else
{
//现在距离开机的毫秒数
now = RTMP_GetTime();
//每间隔200ms刷新一次数据
if (abs(now - lastUpdate) > )
{
if (bHashes)
RTMP_LogStatus("#");
else
//size为已写入文件的字节数
RTMP_LogStatus("\r%.3f kB / %.2f sec", (double) size / 1024.0,
(double) (rtmp->m_read.timestamp) / 1000.0);
lastUpdate = now;
}
}
}
#ifdef _DEBUG
else
{
RTMP_Log(RTMP_LOGDEBUG, "zero read!");
}
#endif }
while (!RTMP_ctrlC && nRead > - && RTMP_IsConnected(rtmp) && !RTMP_IsTimedout(rtmp));
free(buffer);
if (nRead < )
//nRead是读取情况
nRead = rtmp->m_read.status; /* Final status update */
if (!bHashes)
{
if (duration > )
{
*percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
*percent = ((double) (int) (*percent * 10.0)) / 10.0;
//输出
RTMP_LogStatus("\r%.3f kB / %.2f sec (%.1f%%)",
(double) size / 1024.0,
(double) (rtmp->m_read.timestamp) / 1000.0, *percent);
}
else
{
RTMP_LogStatus("\r%.3f kB / %.2f sec", (double) size / 1024.0,
(double) (rtmp->m_read.timestamp) / 1000.0);
}
} RTMP_Log(RTMP_LOGDEBUG, "RTMP_Read returned: %d", nRead);
//读取错误
if (bResume && nRead == -)
{
RTMP_LogPrintf("Couldn't resume FLV file, try --skip %d\n\n",
nSkipKeyFrames + );
return RD_FAILED;
}
//读取正确
if (nRead == -)
return RD_SUCCESS;
//没读完...
if ((duration > && *percent < 99.9) || RTMP_ctrlC || nRead <
|| RTMP_IsTimedout(rtmp))
{
return RD_INCOMPLETE;
} return RD_SUCCESS;
}

The above is what I have been able to understand of rtmpdump.c.

2: Parsing the RTMP URL: RTMP_ParseURL()

The previous part analyzed RTMPDump's main() function. An essential prerequisite for fetching RTMP media data is parsing the RTMP URL; without that step, no matter how capable the rest of the program is, it gets nowhere. Let us now dissect the function that does it: RTMP_ParseURL().

First, a quick review of the format of an RTMP URL:

rtmp://localhost/vod/mp4:sample1_1500kbps.f4v

The part before "://" is the protocol, which can be rtmp, rtmpt, rtmps, and so on.

After that comes the server address.

Then the port number (optional; the default is 1935).

Then the name of the application, here "vod".

And finally the path of the media file.
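
To see these pieces come out of the parser, RTMP_ParseURL() can be called directly. A minimal sketch, assuming the standard librtmp headers; the exact playpath that comes back depends on RTMP_ParsePlaypath(), discussed below:

#include <stdio.h>
#include <librtmp/rtmp.h>   /* declares RTMP_ParseURL and AVal */

int main(void)
{
    AVal host, playpath, app;
    unsigned int port = 0;
    int protocol = 0;

    /* the sample URL from above */
    const char *url = "rtmp://localhost/vod/mp4:sample1_1500kbps.f4v";

    if (RTMP_ParseURL(url, &protocol, &host, &port, &playpath, &app))
    {
        printf("protocol id : %d\n", protocol);
        printf("host        : %.*s\n", host.av_len, host.av_val);
        printf("port        : %u (0 means use the default 1935)\n", port);
        printf("app         : %.*s\n", app.av_len, app.av_val);
        printf("playpath    : %.*s\n", playpath.av_len, playpath.av_val);
    }
    return 0;
}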

I will not go into more detail about the URL format here (see the relevant documentation). Below is the annotated code (the whole of parseurl.c):

/*
* 本文件主要包含了对输入URL的解析
*/
#include "stdafx.h"
#include <stdlib.h>
#include <string.h> #include <assert.h>
#include <ctype.h> #include "rtmp_sys.h"
#include "log.h" /*解析URL,得到协议名称(protocol),主机名称(host),应用程序名称(app)
*
*/
int RTMP_ParseURL(const char *url, int *protocol, AVal *host, unsigned int *port,
AVal *playpath, AVal *app)
{
	char *p, *end, *col, *ques, *slash;

	RTMP_Log(RTMP_LOGDEBUG, "Parsing...");

	*protocol = RTMP_PROTOCOL_RTMP;
	*port = 0;
	playpath->av_len = 0;
	playpath->av_val = NULL;
	app->av_len = 0;
	app->av_val = NULL;

	/* string parsing */
	/* look for "://" */
	//prototype: char *strstr(char *str1, char *str2);
	//finds the first occurrence of str2 within str1 (excluding str2's terminating NUL)
	//returns a pointer to that position, or NULL if it is not found
	p = strstr((char *)url, "://");
	if(!p) {
		RTMP_Log(RTMP_LOGERROR, "RTMP URL: No :// in url!");
		return FALSE;
	}
	{
	//pointer subtraction: len is the length of the string before "://"
	int len = (int)(p-url);

	//determine which protocol is in use
	//by comparing strings
	if(len == 4 && strncasecmp(url, "rtmp", 4)==0)
		*protocol = RTMP_PROTOCOL_RTMP;
	else if(len == 5 && strncasecmp(url, "rtmpt", 5)==0)
		*protocol = RTMP_PROTOCOL_RTMPT;
	else if(len == 5 && strncasecmp(url, "rtmps", 5)==0)
		*protocol = RTMP_PROTOCOL_RTMPS;
	else if(len == 5 && strncasecmp(url, "rtmpe", 5)==0)
		*protocol = RTMP_PROTOCOL_RTMPE;
	else if(len == 5 && strncasecmp(url, "rtmfp", 5)==0)
		*protocol = RTMP_PROTOCOL_RTMFP;
	else if(len == 6 && strncasecmp(url, "rtmpte", 6)==0)
		*protocol = RTMP_PROTOCOL_RTMPTE;
	else if(len == 6 && strncasecmp(url, "rtmpts", 6)==0)
		*protocol = RTMP_PROTOCOL_RTMPTS;
	else {
		RTMP_Log(RTMP_LOGWARNING, "Unknown protocol!\n");
		goto parsehost;
	}
	}

	RTMP_Log(RTMP_LOGDEBUG, "Parsed protocol: %d", *protocol);

parsehost:
	//get the host name
	//skip over "://"
	p+=3;

	/* make sure a hostname is actually there */
	if(*p==0) {
		RTMP_Log(RTMP_LOGWARNING, "No hostname in URL!");
		return FALSE;
	}
//原型:char *strchr(const char *s,char c);
//功能:查找字符串s中首次出现字符c的位置
//说明:返回首次出现c的位置的指针,如果s中不存在c则返回NULL。
end = p + strlen(p);//指向结尾的指针
col = strchr(p, ':');//指向冒号(第一个)的指针
ques = strchr(p, '?');//指向问号(第一个)的指针
slash = strchr(p, '/');//指向斜杠(第一个)的指针 {
int hostlen;
if(slash)
hostlen = slash - p;
else
hostlen = end - p;
if(col && col -p < hostlen)
hostlen = col - p; if(hostlen < ) {
host->av_val = p;
host->av_len = hostlen;
RTMP_Log(RTMP_LOGDEBUG, "Parsed host : %.*s", hostlen, host->av_val);
} else {
RTMP_Log(RTMP_LOGWARNING, "Hostname exceeds 255 characters!");
} p+=hostlen;
} /* 获取端口号 */
if(*p == ':') {
unsigned int p2;
p++;
p2 = atoi(p);
if(p2 > ) {
RTMP_Log(RTMP_LOGWARNING, "Invalid port number!");
} else {
*port = p2;
}
} if(!slash) {
RTMP_Log(RTMP_LOGWARNING, "No application or playpath in URL!");
return TRUE;
}
p = slash+; {
/* 获取应用程序(application)
*
* rtmp://host[:port]/app[/appinstance][/...]
* application = app[/appinstance]
*/ char *slash2, *slash3 = NULL;//指向第二个斜杠,第三个斜杠的指针
int applen, appnamelen; slash2 = strchr(p, '/');//指向第二个斜杠
if(slash2)
slash3 = strchr(slash2+, '/');//指向第三个斜杠,注意slash2之所以+1是因为让其后移一位 applen = end-p; /* ondemand, pass all parameters as app */
appnamelen = applen; /* ondemand length */ if(ques && strstr(p, "slist=")) { /* whatever it is, the '?' and slist= means we need to use everything as app and parse plapath from slist= */
appnamelen = ques-p;
}
else if(strncmp(p, "ondemand/", )==) {
/* app = ondemand/foobar, only pass app=ondemand */
applen = ;
appnamelen = ;
}
else { /* app!=ondemand, so app is app[/appinstance] */
if(slash3)
appnamelen = slash3-p;
else if(slash2)
appnamelen = slash2-p; applen = appnamelen;
} app->av_val = p;
app->av_len = applen;
RTMP_Log(RTMP_LOGDEBUG, "Parsed app : %.*s", applen, p); p += appnamelen;
} if (*p == '/')
p++; if (end-p) {
AVal av = {p, end-p};
RTMP_ParsePlaypath(&av, playpath);
} return TRUE;
} /*
* 从URL中获取播放路径(playpath)。播放路径是URL中“rtmp://host:port/app/”后面的部分
*
* 获取FMS能够识别的播放路径
* mp4 流: 前面添加 "mp4:", 删除扩展名
* mp3 流: 前面添加 "mp3:", 删除扩展名
* flv 流: 删除扩展名
*/
void RTMP_ParsePlaypath(AVal *in, AVal *out) {
int addMP4 = ;
int addMP3 = ;
int subExt = ;
const char *playpath = in->av_val;
const char *temp, *q, *ext = NULL;
const char *ppstart = playpath;
char *streamname, *destptr, *p; int pplen = in->av_len; out->av_val = NULL;
out->av_len = ; if ((*ppstart == '?') &&
(temp=strstr(ppstart, "slist=")) != ) {
ppstart = temp+;
pplen = strlen(ppstart); temp = strchr(ppstart, '&');
if (temp) {
pplen = temp-ppstart;
}
} q = strchr(ppstart, '?');
if (pplen >= ) {
if (q)
ext = q-;
else
ext = &ppstart[pplen-];
if ((strncmp(ext, ".f4v", ) == ) ||
(strncmp(ext, ".mp4", ) == )) {
addMP4 = ;
subExt = ;
/* Only remove .flv from rtmp URL, not slist params */
} else if ((ppstart == playpath) &&
(strncmp(ext, ".flv", ) == )) {
subExt = ;
} else if (strncmp(ext, ".mp3", ) == ) {
addMP3 = ;
subExt = ;
}
} streamname = (char *)malloc((pplen++)*sizeof(char));
if (!streamname)
return; destptr = streamname;
if (addMP4) {
if (strncmp(ppstart, "mp4:", )) {
strcpy(destptr, "mp4:");
destptr += ;
} else {
subExt = ;
}
} else if (addMP3) {
if (strncmp(ppstart, "mp3:", )) {
strcpy(destptr, "mp3:");
destptr += ;
} else {
subExt = ;
}
} for (p=(char *)ppstart; pplen >;) {
/* skip extension */
if (subExt && p == ext) {
p += ;
pplen -= ;
continue;
}
if (*p == '%') {
unsigned int c;
sscanf(p+, "%02x", &c);
*destptr++ = c;
pplen -= ;
p += ;
} else {
*destptr++ = *p++;
pplen--;
}
}
*destptr = '\0'; out->av_val = streamname;
out->av_len = destptr - streamname;
}

3: AMF Encoding

The previous part analyzed how RTMPDump (libRTMP) parses RTMP URLs; here is a brief look at the AMF encoding side of its source code.

AMF encoding is widely used in Adobe's Flash and Flex systems. Since the RTMP protocol is also Adobe's, it too uses AMF for communication. Exactly how AMF is used is not discussed in detail here. For RTMPDump to download and save RTMP streams, it has to be able to encode and decode AMF-formatted data.

amf.c is where RTMPDump's AMF handling functions live. Its source is reproduced below; I will not explain it in detail for now and may fill that in later.
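
Before the listing, here is a tiny standalone sketch of the AMF0 String layout that the comments below describe (a 0x02 type marker, a 2-byte big-endian length, then the UTF-8 bytes). It does not use librtmp at all; it simply reproduces the 02 00 08 73 68 61 6E 67 67 75 61 example by hand.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* encode an AMF0 string: 0x02, big-endian 16-bit length, UTF-8 payload */
static size_t amf0_encode_string(uint8_t *out, const char *s)
{
    size_t len = strlen(s);
    out[0] = 0x02;                      /* AMF0 type marker: String */
    out[1] = (uint8_t)(len >> 8);       /* high byte of the length (big-endian) */
    out[2] = (uint8_t)(len & 0xff);     /* low byte of the length */
    memcpy(out + 3, s, len);
    return 3 + len;
}

int main(void)
{
    uint8_t buf[64];
    size_t n = amf0_encode_string(buf, "shanggua");
    for (size_t i = 0; i < n; i++)
        printf("%02X ", buf[i]);        /* prints: 02 00 08 73 68 61 6E 67 67 75 61 */
    printf("\n");
    return 0;
}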

#include "stdafx.h"
/* This file mainly contains the operations on AMF objects.
 *-------------------------------------
 * AMF data types:
 * Type                          Byte code
 * Number                        0x00
 * Boolean                       0x01
 * String                        0x02
 * Object                        0x03
 * MovieClip                     0x04
 * Null                          0x05
 * Undefined                     0x06
 * Reference                     0x07
 * MixedArray                    0x08
 * EndOfObject                   0x09
 * Array                         0x0a
 * Date                          0x0b
 * LongString                    0x0c
 * Unsupported                   0x0d
 * Recordset                     0x0e
 * XML                           0x0f
 * TypedObject (Class instance)  0x10
 * AMF3 data                     0x11
 *--------------------------------------
 * Examples:
 * 0. Number is a double represented in 8 data bytes; for example the hex bytes
 *    00 40 10 00 00 00 00 00 00 (type marker plus 8-byte big-endian value) represent the double 4.0.
 * 1. Boolean corresponds to a bool and uses 1 data byte, 00 for false and 01 for true,
 *    just as in C; for example hex 01 01 means true.
 * 2. String occupies 1 type byte, 2 bytes giving the UTF-8 length, and then the UTF-8 content.
 *    For example the hex bytes 02 00 08 73 68 61 6E 67 67 75 61 are a string of length 8 whose
 *    content 73 68 61 6E 67 67 75 61 is "shanggua".
 * 3. Object corresponds to a hashtable whose keys are UTF-8 strings and whose values are other
 *    AMF types; the object is terminated by the 3 bytes 00 00 09.
 * 5. Null is an empty object and occupies a single byte, the Null type marker 0x05.
 * 6. Undefined also occupies a single byte, 0x06.
 * 8. MixedArray is also hashtable-like; unlike 3 it declares the size of the hashtable up front.
 */

#include <string.h>
#include <assert.h>
#include <stdlib.h>

#include "rtmp_sys.h"
#include "amf.h"
#include "log.h"
#include "bytes.h"

static const AMFObjectProperty AMFProp_Invalid = { {0, 0}, AMF_INVALID };
static const AVal AV_empty = { 0, 0 };

//Big-Endian: the most significant byte (MSB) is stored at the lowest address, i.e. high-order
//bytes come first in memory. This matches how humans write numbers, unlike the usual machine order.
//Network byte order: the TCP/IP protocols define their byte order as Big-Endian, so the byte
//order used by TCP/IP is commonly called network byte order.
//Host order: usually Little-Endian, so when two hosts communicate over TCP/IP the appropriate
//functions must be called to convert between host order (Little-Endian) and network order (Big-Endian).

/* AMF data uses Big-Endian (network order); the host is typically Little-Endian */

unsigned short
AMF_DecodeInt16(const char *data)
{
unsigned char *c = (unsigned char *) data;
unsigned short val;
	val = (c[0] << 8) | c[1];	//byte-swap
	return val;
}

unsigned int
AMF_DecodeInt24(const char *data)
{
	unsigned char *c = (unsigned char *) data;
	unsigned int val;
	val = (c[0] << 16) | (c[1] << 8) | c[2];
	return val;
}

unsigned int
AMF_DecodeInt32(const char *data)
{
	unsigned char *c = (unsigned char *)data;
	unsigned int val;
	val = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
	return val;
}

void
AMF_DecodeString(const char *data, AVal *bv)
{
	bv->av_len = AMF_DecodeInt16(data);
	bv->av_val = (bv->av_len > 0) ? (char *)data + 2 : NULL;
}

void
AMF_DecodeLongString(const char *data, AVal *bv)
{
	bv->av_len = AMF_DecodeInt32(data);
	bv->av_val = (bv->av_len > 0) ? (char *)data + 4 : NULL;
}

double
AMF_DecodeNumber(const char *data)
{
double dVal;
#if __FLOAT_WORD_ORDER == __BYTE_ORDER
#if __BYTE_ORDER == __BIG_ENDIAN
memcpy(&dVal, data, );
#elif __BYTE_ORDER == __LITTLE_ENDIAN
unsigned char *ci, *co;
ci = (unsigned char *)data;
co = (unsigned char *)&dVal;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
#endif
#else
#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */
unsigned char *ci, *co;
ci = (unsigned char *)data;
co = (unsigned char *)&dVal;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */
unsigned char *ci, *co;
ci = (unsigned char *)data;
co = (unsigned char *)&dVal;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
#endif
#endif
return dVal;
} int
AMF_DecodeBoolean(const char *data)
{
	return *data != 0;
}

char *
AMF_EncodeInt16(char *output, char *outend, short nVal)
{
	if (output+2 > outend)
		return NULL;

	output[1] = nVal & 0xff;
	output[0] = nVal >> 8;
	return output+2;
}

//AMF-encode a 3-byte int; AMF uses big-endian byte order
char *
AMF_EncodeInt24(char *output, char *outend, int nVal)
{
	if (output+3 > outend)
		return NULL;
	//byte order is reversed relative to the (little-endian) host
	output[2] = nVal & 0xff;
	output[1] = nVal >> 8;
	output[0] = nVal >> 16;
	//return a pointer just past the encoded data
	return output+3;
}

char *
AMF_EncodeInt32(char *output, char *outend, int nVal)
{
	if (output+4 > outend)
		return NULL;

	output[3] = nVal & 0xff;
	output[2] = nVal >> 8;
	output[1] = nVal >> 16;
	output[0] = nVal >> 24;
	return output+4;
}

char *
AMF_EncodeString(char *output, char *outend, const AVal *bv)
{
if ((bv->av_len < && output + + + bv->av_len > outend) ||
output + + + bv->av_len > outend)
return NULL; if (bv->av_len < )
{
*output++ = AMF_STRING; output = AMF_EncodeInt16(output, outend, bv->av_len);
}
else
{
*output++ = AMF_LONG_STRING; output = AMF_EncodeInt32(output, outend, bv->av_len);
}
memcpy(output, bv->av_val, bv->av_len);
output += bv->av_len; return output;
} char *
AMF_EncodeNumber(char *output, char *outend, double dVal)
{
if (output++ > outend)
return NULL; *output++ = AMF_NUMBER; /* type: Number */ #if __FLOAT_WORD_ORDER == __BYTE_ORDER
#if __BYTE_ORDER == __BIG_ENDIAN
memcpy(output, &dVal, );
#elif __BYTE_ORDER == __LITTLE_ENDIAN
{
unsigned char *ci, *co;
ci = (unsigned char *)&dVal;
co = (unsigned char *)output;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
}
#endif
#else
#if __BYTE_ORDER == __LITTLE_ENDIAN /* __FLOAT_WORD_ORER == __BIG_ENDIAN */
{
unsigned char *ci, *co;
ci = (unsigned char *)&dVal;
co = (unsigned char *)output;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
}
#else /* __BYTE_ORDER == __BIG_ENDIAN && __FLOAT_WORD_ORER == __LITTLE_ENDIAN */
{
unsigned char *ci, *co;
ci = (unsigned char *)&dVal;
co = (unsigned char *)output;
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
co[] = ci[];
}
#endif
#endif return output+;
} char *
AMF_EncodeBoolean(char *output, char *outend, int bVal)
{
if (output+ > outend)
return NULL; *output++ = AMF_BOOLEAN; *output++ = bVal ? 0x01 : 0x00; return output;
} char *
AMF_EncodeNamedString(char *output, char *outend, const AVal *strName, const AVal *strValue)
{
if (output++strName->av_len > outend)
return NULL;
output = AMF_EncodeInt16(output, outend, strName->av_len); memcpy(output, strName->av_val, strName->av_len);
output += strName->av_len; return AMF_EncodeString(output, outend, strValue);
} char *
AMF_EncodeNamedNumber(char *output, char *outend, const AVal *strName, double dVal)
{
if (output++strName->av_len > outend)
return NULL;
output = AMF_EncodeInt16(output, outend, strName->av_len); memcpy(output, strName->av_val, strName->av_len);
output += strName->av_len; return AMF_EncodeNumber(output, outend, dVal);
} char *
AMF_EncodeNamedBoolean(char *output, char *outend, const AVal *strName, int bVal)
{
if (output++strName->av_len > outend)
return NULL;
output = AMF_EncodeInt16(output, outend, strName->av_len); memcpy(output, strName->av_val, strName->av_len);
output += strName->av_len; return AMF_EncodeBoolean(output, outend, bVal);
} void
AMFProp_GetName(AMFObjectProperty *prop, AVal *name)
{
*name = prop->p_name;
} void
AMFProp_SetName(AMFObjectProperty *prop, AVal *name)
{
prop->p_name = *name;
} AMFDataType
AMFProp_GetType(AMFObjectProperty *prop)
{
return prop->p_type;
} double
AMFProp_GetNumber(AMFObjectProperty *prop)
{
return prop->p_vu.p_number;
} int
AMFProp_GetBoolean(AMFObjectProperty *prop)
{
return prop->p_vu.p_number != ;
} void
AMFProp_GetString(AMFObjectProperty *prop, AVal *str)
{
*str = prop->p_vu.p_aval;
} void
AMFProp_GetObject(AMFObjectProperty *prop, AMFObject *obj)
{
*obj = prop->p_vu.p_object;
} int
AMFProp_IsValid(AMFObjectProperty *prop)
{
return prop->p_type != AMF_INVALID;
} char *
AMFProp_Encode(AMFObjectProperty *prop, char *pBuffer, char *pBufEnd)
{
if (prop->p_type == AMF_INVALID)
return NULL; if (prop->p_type != AMF_NULL && pBuffer + prop->p_name.av_len + + >= pBufEnd)
return NULL; if (prop->p_type != AMF_NULL && prop->p_name.av_len)
{
*pBuffer++ = prop->p_name.av_len >> ;
*pBuffer++ = prop->p_name.av_len & 0xff;
memcpy(pBuffer, prop->p_name.av_val, prop->p_name.av_len);
pBuffer += prop->p_name.av_len;
} switch (prop->p_type)
{
case AMF_NUMBER:
pBuffer = AMF_EncodeNumber(pBuffer, pBufEnd, prop->p_vu.p_number);
break; case AMF_BOOLEAN:
pBuffer = AMF_EncodeBoolean(pBuffer, pBufEnd, prop->p_vu.p_number != );
break; case AMF_STRING:
pBuffer = AMF_EncodeString(pBuffer, pBufEnd, &prop->p_vu.p_aval);
break; case AMF_NULL:
if (pBuffer+ >= pBufEnd)
return NULL;
*pBuffer++ = AMF_NULL;
break; case AMF_OBJECT:
pBuffer = AMF_Encode(&prop->p_vu.p_object, pBuffer, pBufEnd);
break; default:
RTMP_Log(RTMP_LOGERROR, "%s, invalid type. %d", __FUNCTION__, prop->p_type);
pBuffer = NULL;
}; return pBuffer;
} #define AMF3_INTEGER_MAX 268435455
#define AMF3_INTEGER_MIN -268435456 int
AMF3ReadInteger(const char *data, int32_t *valp)
{
int i = ;
int32_t val = ; while (i <= )
{ /* handle first 3 bytes */
if (data[i] & 0x80)
{ /* byte used */
val <<= ; /* shift up */
val |= (data[i] & 0x7f); /* add bits */
i++;
}
else
{
break;
}
} if (i > )
{ /* use 4th byte, all 8bits */
val <<= ;
val |= data[]; /* range check */
if (val > AMF3_INTEGER_MAX)
val -= ( << );
}
else
{ /* use 7bits of last unparsed byte (0xxxxxxx) */
val <<= ;
val |= data[i];
} *valp = val; return i > ? : i + ;
} int
AMF3ReadString(const char *data, AVal *str)
{
int32_t ref = ;
int len;
assert(str != ); len = AMF3ReadInteger(data, &ref);
data += len; if ((ref & 0x1) == )
{ /* reference: 0xxx */
uint32_t refIndex = (ref >> );
RTMP_Log(RTMP_LOGDEBUG,
"%s, string reference, index: %d, not supported, ignoring!",
__FUNCTION__, refIndex);
return len;
}
else
{
uint32_t nSize = (ref >> ); str->av_val = (char *)data;
str->av_len = nSize; return len + nSize;
}
return len;
} int
AMF3Prop_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize,
int bDecodeName)
{
int nOriginalSize = nSize;
AMF3DataType type; prop->p_name.av_len = ;
prop->p_name.av_val = NULL; if (nSize == || !pBuffer)
{
RTMP_Log(RTMP_LOGDEBUG, "empty buffer/no buffer pointer!");
return -;
} /* decode name */
if (bDecodeName)
{
AVal name;
int nRes = AMF3ReadString(pBuffer, &name); if (name.av_len <= )
return nRes; prop->p_name = name;
pBuffer += nRes;
nSize -= nRes;
} /* decode */
type = (AMF3DataType) *pBuffer++;
nSize--; switch (type)
{
case AMF3_UNDEFINED:
case AMF3_NULL:
prop->p_type = AMF_NULL;
break;
case AMF3_FALSE:
prop->p_type = AMF_BOOLEAN;
prop->p_vu.p_number = 0.0;
break;
case AMF3_TRUE:
prop->p_type = AMF_BOOLEAN;
prop->p_vu.p_number = 1.0;
break;
case AMF3_INTEGER:
{
int32_t res = ;
int len = AMF3ReadInteger(pBuffer, &res);
prop->p_vu.p_number = (double)res;
prop->p_type = AMF_NUMBER;
nSize -= len;
break;
}
case AMF3_DOUBLE:
if (nSize < )
return -;
prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
prop->p_type = AMF_NUMBER;
nSize -= ;
break;
case AMF3_STRING:
case AMF3_XML_DOC:
case AMF3_XML:
{
int len = AMF3ReadString(pBuffer, &prop->p_vu.p_aval);
prop->p_type = AMF_STRING;
nSize -= len;
break;
}
case AMF3_DATE:
{
int32_t res = ;
int len = AMF3ReadInteger(pBuffer, &res); nSize -= len;
pBuffer += len; if ((res & 0x1) == )
{ /* reference */
uint32_t nIndex = (res >> );
RTMP_Log(RTMP_LOGDEBUG, "AMF3_DATE reference: %d, not supported!", nIndex);
}
else
{
if (nSize < )
return -; prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
nSize -= ;
prop->p_type = AMF_NUMBER;
}
break;
}
case AMF3_OBJECT:
{
int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
if (nRes == -)
return -;
nSize -= nRes;
prop->p_type = AMF_OBJECT;
break;
}
case AMF3_ARRAY:
case AMF3_BYTE_ARRAY:
default:
RTMP_Log(RTMP_LOGDEBUG, "%s - AMF3 unknown/unsupported datatype 0x%02x, @0x%08X",
__FUNCTION__, (unsigned char)(*pBuffer), pBuffer);
return -;
} return nOriginalSize - nSize;
}
//对AMF数据类型解析
int
AMFProp_Decode(AMFObjectProperty *prop, const char *pBuffer, int nSize,
int bDecodeName)
{
int nOriginalSize = nSize;
int nRes; prop->p_name.av_len = ;
prop->p_name.av_val = NULL; if (nSize == || !pBuffer)
{
RTMP_Log(RTMP_LOGDEBUG, "%s: Empty buffer/no buffer pointer!", __FUNCTION__);
return -;
} if (bDecodeName && nSize < )
{ /* at least name (length + at least 1 byte) and 1 byte of data */
RTMP_Log(RTMP_LOGDEBUG,
"%s: Not enough data for decoding with name, less than 4 bytes!",
__FUNCTION__);
return -;
} if (bDecodeName)
{
unsigned short nNameSize = AMF_DecodeInt16(pBuffer);
if (nNameSize > nSize - )
{
RTMP_Log(RTMP_LOGDEBUG,
"%s: Name size out of range: namesize (%d) > len (%d) - 2",
__FUNCTION__, nNameSize, nSize);
return -;
} AMF_DecodeString(pBuffer, &prop->p_name);
nSize -= + nNameSize;
pBuffer += + nNameSize;
} if (nSize == )
{
return -;
} nSize--; prop->p_type = (AMFDataType) *pBuffer++;
switch (prop->p_type)
{
//Number数据类型
case AMF_NUMBER:
if (nSize < )
return -;
prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
nSize -= ;
break;
//Boolean数据类型
case AMF_BOOLEAN:
if (nSize < )
return -;
prop->p_vu.p_number = (double)AMF_DecodeBoolean(pBuffer);
nSize--;
break;
//String数据类型
case AMF_STRING:
{
unsigned short nStringSize = AMF_DecodeInt16(pBuffer); if (nSize < (long)nStringSize + )
return -;
AMF_DecodeString(pBuffer, &prop->p_vu.p_aval);
nSize -= ( + nStringSize);
break;
}
//Object数据类型
case AMF_OBJECT:
{
int nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
if (nRes == -)
return -;
nSize -= nRes;
break;
}
case AMF_MOVIECLIP:
{
RTMP_Log(RTMP_LOGERROR, "AMF_MOVIECLIP reserved!");
return -;
break;
}
case AMF_NULL:
case AMF_UNDEFINED:
case AMF_UNSUPPORTED:
prop->p_type = AMF_NULL;
break;
case AMF_REFERENCE:
{
RTMP_Log(RTMP_LOGERROR, "AMF_REFERENCE not supported!");
return -;
break;
}
case AMF_ECMA_ARRAY:
{
nSize -= ; /* next comes the rest, mixed array has a final 0x000009 mark and names, so its an object */
nRes = AMF_Decode(&prop->p_vu.p_object, pBuffer + , nSize, TRUE);
if (nRes == -)
return -;
nSize -= nRes;
prop->p_type = AMF_OBJECT;
break;
}
case AMF_OBJECT_END:
{
return -;
break;
}
case AMF_STRICT_ARRAY:
{
unsigned int nArrayLen = AMF_DecodeInt32(pBuffer);
nSize -= ; nRes = AMF_DecodeArray(&prop->p_vu.p_object, pBuffer + , nSize,
nArrayLen, FALSE);
if (nRes == -)
return -;
nSize -= nRes;
prop->p_type = AMF_OBJECT;
break;
}
case AMF_DATE:
{
RTMP_Log(RTMP_LOGDEBUG, "AMF_DATE"); if (nSize < )
return -; prop->p_vu.p_number = AMF_DecodeNumber(pBuffer);
prop->p_UTCoffset = AMF_DecodeInt16(pBuffer + ); nSize -= ;
break;
}
case AMF_LONG_STRING:
{
unsigned int nStringSize = AMF_DecodeInt32(pBuffer);
if (nSize < (long)nStringSize + )
return -;
AMF_DecodeLongString(pBuffer, &prop->p_vu.p_aval);
nSize -= ( + nStringSize);
prop->p_type = AMF_STRING;
break;
}
case AMF_RECORDSET:
{
RTMP_Log(RTMP_LOGERROR, "AMF_RECORDSET reserved!");
return -;
break;
}
case AMF_XML_DOC:
{
RTMP_Log(RTMP_LOGERROR, "AMF_XML_DOC not supported!");
return -;
break;
}
case AMF_TYPED_OBJECT:
{
RTMP_Log(RTMP_LOGERROR, "AMF_TYPED_OBJECT not supported!");
return -;
break;
}
case AMF_AVMPLUS:
{
int nRes = AMF3_Decode(&prop->p_vu.p_object, pBuffer, nSize, TRUE);
if (nRes == -)
return -;
nSize -= nRes;
prop->p_type = AMF_OBJECT;
break;
}
default:
RTMP_Log(RTMP_LOGDEBUG, "%s - unknown datatype 0x%02x, @0x%08X", __FUNCTION__,
prop->p_type, pBuffer - );
return -;
} return nOriginalSize - nSize;
} void
AMFProp_Dump(AMFObjectProperty *prop)
{
char strRes[];
char str[];
AVal name; if (prop->p_type == AMF_INVALID)
{
RTMP_Log(RTMP_LOGDEBUG, "Property: INVALID");
return;
} if (prop->p_type == AMF_NULL)
{
RTMP_Log(RTMP_LOGDEBUG, "Property: NULL");
return;
} if (prop->p_name.av_len)
{
name = prop->p_name;
}
else
{
name.av_val = "no-name.";
name.av_len = sizeof("no-name.") - ;
}
if (name.av_len > )
name.av_len = ; snprintf(strRes, , "Name: %18.*s, ", name.av_len, name.av_val); if (prop->p_type == AMF_OBJECT)
{
RTMP_Log(RTMP_LOGDEBUG, "Property: <%sOBJECT>", strRes);
AMF_Dump(&prop->p_vu.p_object);
return;
} switch (prop->p_type)
{
case AMF_NUMBER:
snprintf(str, , "NUMBER:\t%.2f", prop->p_vu.p_number);
break;
case AMF_BOOLEAN:
snprintf(str, , "BOOLEAN:\t%s",
prop->p_vu.p_number != 0.0 ? "TRUE" : "FALSE");
break;
case AMF_STRING:
snprintf(str, , "STRING:\t%.*s", prop->p_vu.p_aval.av_len,
prop->p_vu.p_aval.av_val);
break;
case AMF_DATE:
snprintf(str, , "DATE:\ttimestamp: %.2f, UTC offset: %d",
prop->p_vu.p_number, prop->p_UTCoffset);
break;
default:
snprintf(str, , "INVALID TYPE 0x%02x", (unsigned char)prop->p_type);
} RTMP_Log(RTMP_LOGDEBUG, "Property: <%s%s>", strRes, str);
} void
AMFProp_Reset(AMFObjectProperty *prop)
{
if (prop->p_type == AMF_OBJECT)
AMF_Reset(&prop->p_vu.p_object);
else
{
prop->p_vu.p_aval.av_len = ;
prop->p_vu.p_aval.av_val = NULL;
}
prop->p_type = AMF_INVALID;
} /* AMFObject */ char *
AMF_Encode(AMFObject *obj, char *pBuffer, char *pBufEnd)
{
int i; if (pBuffer+ >= pBufEnd)
return NULL; *pBuffer++ = AMF_OBJECT; for (i = ; i < obj->o_num; i++)
{
char *res = AMFProp_Encode(&obj->o_props[i], pBuffer, pBufEnd);
if (res == NULL)
{
RTMP_Log(RTMP_LOGERROR, "AMF_Encode - failed to encode property in index %d",
i);
break;
}
else
{
pBuffer = res;
}
} if (pBuffer + >= pBufEnd)
return NULL; /* no room for the end marker */ pBuffer = AMF_EncodeInt24(pBuffer, pBufEnd, AMF_OBJECT_END); return pBuffer;
} int
AMF_DecodeArray(AMFObject *obj, const char *pBuffer, int nSize,
int nArrayLen, int bDecodeName)
{
int nOriginalSize = nSize;
int bError = FALSE; obj->o_num = ;
obj->o_props = NULL;
while (nArrayLen > )
{
AMFObjectProperty prop;
int nRes;
nArrayLen--; nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName);
if (nRes == -)
bError = TRUE;
else
{
nSize -= nRes;
pBuffer += nRes;
AMF_AddProp(obj, &prop);
}
}
if (bError)
return -; return nOriginalSize - nSize;
} int
AMF3_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bAMFData)
{
int nOriginalSize = nSize;
int32_t ref;
int len; obj->o_num = ;
obj->o_props = NULL;
if (bAMFData)
{
if (*pBuffer != AMF3_OBJECT)
RTMP_Log(RTMP_LOGERROR,
"AMF3 Object encapsulated in AMF stream does not start with AMF3_OBJECT!");
pBuffer++;
nSize--;
} ref = ;
len = AMF3ReadInteger(pBuffer, &ref);
pBuffer += len;
nSize -= len; if ((ref & ) == )
{ /* object reference, 0xxx */
uint32_t objectIndex = (ref >> ); RTMP_Log(RTMP_LOGDEBUG, "Object reference, index: %d", objectIndex);
}
else /* object instance */
{
int32_t classRef = (ref >> ); AMF3ClassDef cd = { {, }
};
AMFObjectProperty prop; if ((classRef & 0x1) == )
{ /* class reference */
uint32_t classIndex = (classRef >> );
RTMP_Log(RTMP_LOGDEBUG, "Class reference: %d", classIndex);
}
else
{
int32_t classExtRef = (classRef >> );
int i; cd.cd_externalizable = (classExtRef & 0x1) == ;
cd.cd_dynamic = ((classExtRef >> ) & 0x1) == ; cd.cd_num = classExtRef >> ; /* class name */ len = AMF3ReadString(pBuffer, &cd.cd_name);
nSize -= len;
pBuffer += len; /*std::string str = className; */ RTMP_Log(RTMP_LOGDEBUG,
"Class name: %s, externalizable: %d, dynamic: %d, classMembers: %d",
cd.cd_name.av_val, cd.cd_externalizable, cd.cd_dynamic,
cd.cd_num); for (i = ; i < cd.cd_num; i++)
{
AVal memberName;
len = AMF3ReadString(pBuffer, &memberName);
RTMP_Log(RTMP_LOGDEBUG, "Member: %s", memberName.av_val);
AMF3CD_AddProp(&cd, &memberName);
nSize -= len;
pBuffer += len;
}
} /* add as referencable object */ if (cd.cd_externalizable)
{
int nRes;
AVal name = AVC("DEFAULT_ATTRIBUTE"); RTMP_Log(RTMP_LOGDEBUG, "Externalizable, TODO check"); nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE);
if (nRes == -)
RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!",
__FUNCTION__);
else
{
nSize -= nRes;
pBuffer += nRes;
} AMFProp_SetName(&prop, &name);
AMF_AddProp(obj, &prop);
}
else
{
int nRes, i;
for (i = ; i < cd.cd_num; i++) /* non-dynamic */
{
nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, FALSE);
if (nRes == -)
RTMP_Log(RTMP_LOGDEBUG, "%s, failed to decode AMF3 property!",
__FUNCTION__); AMFProp_SetName(&prop, AMF3CD_GetProp(&cd, i));
AMF_AddProp(obj, &prop); pBuffer += nRes;
nSize -= nRes;
}
if (cd.cd_dynamic)
{
int len = ; do
{
nRes = AMF3Prop_Decode(&prop, pBuffer, nSize, TRUE);
AMF_AddProp(obj, &prop); pBuffer += nRes;
nSize -= nRes; len = prop.p_name.av_len;
}
while (len > );
}
}
RTMP_Log(RTMP_LOGDEBUG, "class object!");
}
return nOriginalSize - nSize;
}
//解AMF编码的Object数据类型
int
AMF_Decode(AMFObject *obj, const char *pBuffer, int nSize, int bDecodeName)
{
int nOriginalSize = nSize;
int bError = FALSE; /* if there is an error while decoding - try to at least find the end mark AMF_OBJECT_END */ obj->o_num = ;
obj->o_props = NULL;
while (nSize > )
{
AMFObjectProperty prop;
int nRes; if (nSize >= && AMF_DecodeInt24(pBuffer) == AMF_OBJECT_END)
{
nSize -= ;
bError = FALSE;
break;
} if (bError)
{
RTMP_Log(RTMP_LOGERROR,
"DECODING ERROR, IGNORING BYTES UNTIL NEXT KNOWN PATTERN!");
nSize--;
pBuffer++;
continue;
}
//解Object里的Property
nRes = AMFProp_Decode(&prop, pBuffer, nSize, bDecodeName);
if (nRes == -)
bError = TRUE;
else
{
nSize -= nRes;
pBuffer += nRes;
AMF_AddProp(obj, &prop);
}
} if (bError)
return -; return nOriginalSize - nSize;
} void
AMF_AddProp(AMFObject *obj, const AMFObjectProperty *prop)
{
if (!(obj->o_num & 0x0f))
obj->o_props = (AMFObjectProperty *)
realloc(obj->o_props, (obj->o_num + ) * sizeof(AMFObjectProperty));
obj->o_props[obj->o_num++] = *prop;
} int
AMF_CountProp(AMFObject *obj)
{
return obj->o_num;
} AMFObjectProperty *
AMF_GetProp(AMFObject *obj, const AVal *name, int nIndex)
{
if (nIndex >= )
{
if (nIndex <= obj->o_num)
return &obj->o_props[nIndex];
}
else
{
int n;
for (n = ; n < obj->o_num; n++)
{
if (AVMATCH(&obj->o_props[n].p_name, name))
return &obj->o_props[n];
}
} return (AMFObjectProperty *)&AMFProp_Invalid;
} void
AMF_Dump(AMFObject *obj)
{
int n;
RTMP_Log(RTMP_LOGDEBUG, "(object begin)");
for (n = ; n < obj->o_num; n++)
{
AMFProp_Dump(&obj->o_props[n]);
}
RTMP_Log(RTMP_LOGDEBUG, "(object end)");
} void
AMF_Reset(AMFObject *obj)
{
int n;
for (n = ; n < obj->o_num; n++)
{
AMFProp_Reset(&obj->o_props[n]);
}
free(obj->o_props);
obj->o_props = NULL;
obj->o_num = ;
} /* AMF3ClassDefinition */ void
AMF3CD_AddProp(AMF3ClassDef *cd, AVal *prop)
{
if (!(cd->cd_num & 0x0f))
cd->cd_props = (AVal *)realloc(cd->cd_props, (cd->cd_num + ) * sizeof(AVal));
cd->cd_props[cd->cd_num++] = *prop;
} AVal *
AMF3CD_GetProp(AMF3ClassDef *cd, int nIndex)
{
if (nIndex >= cd->cd_num)
return (AVal *)&AV_empty;
return &cd->cd_props[nIndex];
}

Reference material:

AMF3 introduction (Chinese translation): http://download.csdn.net/detail/leixiaohua1020/6389977

4: The First Step of the Connection: the Handshake

This part analyzes the first step RTMPDump (libRTMP) takes when connecting to an RTMP server: the handshake.

The overall RTMP connection process was analyzed earlier in "RTMP流媒体播放过程" (the RTMP streaming playback process).

Rather than repeating that, let us look at the function in handshake.h that implements the handshake.

Note: handshake.h contains a large amount of code, but much of it handles the encrypted variants of the protocol such as rtmps, so it is not analyzed in depth here; only the plain RTMP handshake is considered.
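
For the plain (unencrypted, non-FP9) case, the handshake amounts to sending C0+C1, reading S0+S1+S2, and replying with C2, where C2 simply echoes S1. The sketch below is a standalone toy that only shows how the C0+C1 bytes are laid out, mirroring what HandShake() builds; the send/recv plumbing and the FP9/crypto paths are omitted, and the buffer and function names are my own.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define RTMP_SIG_SIZE 1536   /* size of C1/S1/C2/S2 */

/* Fill buf with C0 (1 byte) followed by C1 (1536 bytes) for a plain RTMP handshake. */
static void build_c0c1(uint8_t buf[1 + RTMP_SIG_SIZE])
{
    buf[0] = 0x03;                           /* C0: requested RTMP version 3 */

    uint8_t *c1 = buf + 1;
    uint32_t uptime = (uint32_t)time(NULL);  /* C1 bytes 0-3: a timestamp */
    c1[0] = (uint8_t)(uptime >> 24);         /* stored big-endian */
    c1[1] = (uint8_t)(uptime >> 16);
    c1[2] = (uint8_t)(uptime >> 8);
    c1[3] = (uint8_t)uptime;
    memset(c1 + 4, 0, 4);                    /* C1 bytes 4-7: zero in the simple handshake */
    for (int i = 8; i < RTMP_SIG_SIZE; i++)  /* C1 bytes 8-1535: (pseudo)random filler */
        c1[i] = (uint8_t)rand();
}

int main(void)
{
    uint8_t pkt[1 + RTMP_SIG_SIZE];
    srand((unsigned)time(NULL));
    build_c0c1(pkt);
    /* a real client now sends pkt, reads S0+S1+S2, and replies with C2 = copy of S1 */
    return 0;
}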

static int
HandShake(RTMP * r, int FP9HandShake)
{
  int i, offalg = 0;
  int dhposClient = 0;
  int digestPosClient = 0;
  int encrypted = r->Link.protocol & RTMP_FEATURE_ENC;

  RC4_handle keyIn = 0;
  RC4_handle keyOut = 0;

  int32_t *ip;
  uint32_t uptime;

  uint8_t clientbuf[RTMP_SIG_SIZE + 4], *clientsig = clientbuf + 4;
  uint8_t serversig[RTMP_SIG_SIZE], client2[RTMP_SIG_SIZE], *reply;
  uint8_t type;
  getoff *getdh = NULL, *getdig = NULL;

  if (encrypted || r->Link.SWFSize)
    FP9HandShake = TRUE;
  else
    //plain handshake
    FP9HandShake = FALSE;

  r->Link.rc4keyIn = r->Link.rc4keyOut = 0;

  if (encrypted)
    {
      clientsig[-1] = 0x06;	/* 0x08 is RTMPE as well */
      offalg = 1;
    }
  else
    //0x03 is the RTMP protocol version requested by the client
    //note the "-1" index: clientsig points into the middle of clientbuf, so this is legal
    //this is the C0 field (1 byte)
    clientsig[-1] = 0x03;

  uptime = htonl(RTMP_GetTime());
  //void *memcpy(void *dest, const void *src, int n);
  //copies n bytes starting at src into the memory starting at dest
  //copy the 4 bytes of uptime (it is exactly 4 bytes) to the address clientsig points to
  //this is the timestamp field of C1 (4 bytes)
  memcpy(clientsig, &uptime, 4);

  if (FP9HandShake)
{
/* set version to at least 9.0.115.0 */
if (encrypted)
{
clientsig[] = ;
clientsig[] = ;
}
else
{
clientsig[] = ;
clientsig[] = ;
}
clientsig[] = ;
clientsig[] = ; RTMP_Log(RTMP_LOGDEBUG, "%s: Client type: %02X", __FUNCTION__, clientsig[-]);
getdig = digoff[offalg];
getdh = dhoff[offalg];
}
else
{
//void *memset(void *s, int ch, size_t n);将s中前n个字节替换为ch并返回s;
//将clientsig[4]开始的4个字节替换为0
//这是C1的字段
memset(&clientsig[], , );
} /* generate random data */
#ifdef _DEBUG
//将clientsig+8开始的1528个字节替换为0(这是一种简单的方法)
//这是C1中的random字段
memset(clientsig+, , RTMP_SIG_SIZE-);
#else
//实际中使用rand()循环生成1528字节的伪随机数
ip = (int32_t *)(clientsig+);
for (i = ; i < RTMP_SIG_SIZE/; i++)
*ip++ = rand();
#endif /* set handshake digest */
if (FP9HandShake)
{
if (encrypted)
{
/* generate Diffie-Hellmann parameters */
r->Link.dh = DHInit();
if (!r->Link.dh)
{
RTMP_Log(RTMP_LOGERROR, "%s: Couldn't initialize Diffie-Hellmann!",
__FUNCTION__);
return FALSE;
} dhposClient = getdh(clientsig, RTMP_SIG_SIZE);
RTMP_Log(RTMP_LOGDEBUG, "%s: DH pubkey position: %d", __FUNCTION__, dhposClient); if (!DHGenerateKey((DH *)r->Link.dh))
{
RTMP_Log(RTMP_LOGERROR, "%s: Couldn't generate Diffie-Hellmann public key!",
__FUNCTION__);
return FALSE;
} if (!DHGetPublicKey((DH *)r->Link.dh, &clientsig[dhposClient], ))
{
RTMP_Log(RTMP_LOGERROR, "%s: Couldn't write public key!", __FUNCTION__);
return FALSE;
}
} digestPosClient = getdig(clientsig, RTMP_SIG_SIZE); /* reuse this value in verification */
RTMP_Log(RTMP_LOGDEBUG, "%s: Client digest offset: %d", __FUNCTION__,
digestPosClient); CalculateDigest(digestPosClient, clientsig, GenuineFPKey, ,
&clientsig[digestPosClient]); RTMP_Log(RTMP_LOGDEBUG, "%s: Initial client digest: ", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, clientsig + digestPosClient,
SHA256_DIGEST_LENGTH);
} #ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG, "Clientsig: ");
RTMP_LogHex(RTMP_LOGDEBUG, clientsig, RTMP_SIG_SIZE);
#endif
//发送数据报C0+C1
//从clientsig-1开始发,长度1536+1,两个包合并
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。发送握手数据C0+C1");
//-----------------------------
if (!WriteN(r, (char *)clientsig-, RTMP_SIG_SIZE + ))
return FALSE;
//读取数据报,长度1,存入type
//是服务器的S0,表示服务器使用的RTMP版本
if (ReadN(r, (char *)&type, ) != ) /* 0x03 or 0x06 */
return FALSE;
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。接收握手数据S0");
//-----------------------------
RTMP_Log(RTMP_LOGDEBUG, "%s: Type Answer : %02X", __FUNCTION__, type);
//客户端要求的版本和服务器提供的版本不同
if (type != clientsig[-])
RTMP_Log(RTMP_LOGWARNING, "%s: Type mismatch: client sent %d, server answered %d",
__FUNCTION__, clientsig[-], type);
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。成功接收握手数据S0,服务器和客户端版本相同");
//-----------------------------
//客户端和服务端随机序列长度是否相同
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。接收握手数据S1");
//-----------------------------
if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
return FALSE; /* decode server response */
//把serversig的前四个字节赋值给uptime
memcpy(&uptime, serversig, );
//大端转小端
uptime = ntohl(uptime); RTMP_Log(RTMP_LOGDEBUG, "%s: Server Uptime : %d", __FUNCTION__, uptime);
RTMP_Log(RTMP_LOGDEBUG, "%s: FMS Version : %d.%d.%d.%d", __FUNCTION__, serversig[],
serversig[], serversig[], serversig[]); if (FP9HandShake && type == && !serversig[])
FP9HandShake = FALSE; #ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG, "Server signature:");
RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE);
#endif if (FP9HandShake)
{
uint8_t digestResp[SHA256_DIGEST_LENGTH];
uint8_t *signatureResp = NULL; /* we have to use this signature now to find the correct algorithms for getting the digest and DH positions */
int digestPosServer = getdig(serversig, RTMP_SIG_SIZE); if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, ))
{
RTMP_Log(RTMP_LOGWARNING, "Trying different position for server digest!");
offalg ^= ;
getdig = digoff[offalg];
getdh = dhoff[offalg];
digestPosServer = getdig(serversig, RTMP_SIG_SIZE); if (!VerifyDigest(digestPosServer, serversig, GenuineFMSKey, ))
{
RTMP_Log(RTMP_LOGERROR, "Couldn't verify the server digest"); /* continuing anyway will probably fail */
return FALSE;
}
} /* generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, key are the last 32 bytes of the server handshake) */
if (r->Link.SWFSize)
{
const char swfVerify[] = { 0x01, 0x01 };
char *vend = r->Link.SWFVerificationResponse+sizeof(r->Link.SWFVerificationResponse); memcpy(r->Link.SWFVerificationResponse, swfVerify, );
AMF_EncodeInt32(&r->Link.SWFVerificationResponse[], vend, r->Link.SWFSize);
AMF_EncodeInt32(&r->Link.SWFVerificationResponse[], vend, r->Link.SWFSize);
HMACsha256(r->Link.SWFHash, SHA256_DIGEST_LENGTH,
&serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
SHA256_DIGEST_LENGTH,
(uint8_t *)&r->Link.SWFVerificationResponse[]);
} /* do Diffie-Hellmann Key exchange for encrypted RTMP */
if (encrypted)
{
/* compute secret key */
uint8_t secretKey[] = { };
int len, dhposServer; dhposServer = getdh(serversig, RTMP_SIG_SIZE);
RTMP_Log(RTMP_LOGDEBUG, "%s: Server DH public key offset: %d", __FUNCTION__,
dhposServer);
len = DHComputeSharedSecretKey((DH *)r->Link.dh, &serversig[dhposServer],
, secretKey);
if (len < )
{
RTMP_Log(RTMP_LOGDEBUG, "%s: Wrong secret key position!", __FUNCTION__);
return FALSE;
} RTMP_Log(RTMP_LOGDEBUG, "%s: Secret key: ", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, secretKey, ); InitRC4Encryption(secretKey,
(uint8_t *) & serversig[dhposServer],
(uint8_t *) & clientsig[dhposClient],
&keyIn, &keyOut);
} reply = client2;
#ifdef _DEBUG
memset(reply, 0xff, RTMP_SIG_SIZE);
#else
ip = (int32_t *)reply;
for (i = ; i < RTMP_SIG_SIZE/; i++)
*ip++ = rand();
#endif
/* calculate response now */
signatureResp = reply+RTMP_SIG_SIZE-SHA256_DIGEST_LENGTH; HMACsha256(&serversig[digestPosServer], SHA256_DIGEST_LENGTH,
GenuineFPKey, sizeof(GenuineFPKey), digestResp);
HMACsha256(reply, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digestResp,
SHA256_DIGEST_LENGTH, signatureResp); /* some info output */
RTMP_Log(RTMP_LOGDEBUG,
"%s: Calculated digest key from secure key and server digest: ",
__FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, digestResp, SHA256_DIGEST_LENGTH); #ifdef FP10
if (type == )
{
uint8_t *dptr = digestResp;
uint8_t *sig = signatureResp;
/* encrypt signatureResp */
for (i=; i<SHA256_DIGEST_LENGTH; i+=)
rtmpe8_sig(sig+i, sig+i, dptr[i] % );
}
#if 0
else if (type == ))
{
uint8_t *dptr = digestResp;
uint8_t *sig = signatureResp;
/* encrypt signatureResp */
for (i=; i<SHA256_DIGEST_LENGTH; i+=)
rtmpe9_sig(sig+i, sig+i, dptr[i] % );
}
#endif
#endif
RTMP_Log(RTMP_LOGDEBUG, "%s: Client signature calculated:", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, signatureResp, SHA256_DIGEST_LENGTH);
}
else
{
//直接赋值
reply = serversig;
#if 0
uptime = htonl(RTMP_GetTime());
memcpy(reply+, &uptime, );
#endif
} #ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG, "%s: Sending handshake response: ",
__FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, reply, RTMP_SIG_SIZE);
#endif
//把reply中的1536字节数据发送出去
//对应C2
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。发送握手数据C2");
//-----------------------------
if (!WriteN(r, (char *)reply, RTMP_SIG_SIZE))
return FALSE; /* 2nd part of handshake */
//读取1536字节数据到serversig
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。读取握手数据S2");
//-----------------------------
if (ReadN(r, (char *)serversig, RTMP_SIG_SIZE) != RTMP_SIG_SIZE)
return FALSE; #ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG, "%s: 2nd handshake: ", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, serversig, RTMP_SIG_SIZE);
#endif if (FP9HandShake)
{
uint8_t signature[SHA256_DIGEST_LENGTH];
uint8_t digest[SHA256_DIGEST_LENGTH]; if (serversig[] == && serversig[] == && serversig[] ==
&& serversig[] == )
{
RTMP_Log(RTMP_LOGDEBUG,
"%s: Wait, did the server just refuse signed authentication?",
__FUNCTION__);
}
RTMP_Log(RTMP_LOGDEBUG, "%s: Server sent signature:", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
SHA256_DIGEST_LENGTH); /* verify server response */
HMACsha256(&clientsig[digestPosClient], SHA256_DIGEST_LENGTH,
GenuineFMSKey, sizeof(GenuineFMSKey), digest);
HMACsha256(serversig, RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH, digest,
SHA256_DIGEST_LENGTH, signature); /* show some information */
RTMP_Log(RTMP_LOGDEBUG, "%s: Digest key: ", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, digest, SHA256_DIGEST_LENGTH); #ifdef FP10
if (type == )
{
uint8_t *dptr = digest;
uint8_t *sig = signature;
/* encrypt signature */
for (i=; i<SHA256_DIGEST_LENGTH; i+=)
rtmpe8_sig(sig+i, sig+i, dptr[i] % );
}
#if 0
else if (type == )
{
uint8_t *dptr = digest;
uint8_t *sig = signature;
/* encrypt signatureResp */
for (i=; i<SHA256_DIGEST_LENGTH; i+=)
rtmpe9_sig(sig+i, sig+i, dptr[i] % );
}
#endif
#endif
RTMP_Log(RTMP_LOGDEBUG, "%s: Signature calculated:", __FUNCTION__);
RTMP_LogHex(RTMP_LOGDEBUG, signature, SHA256_DIGEST_LENGTH);
if (memcmp
(signature, &serversig[RTMP_SIG_SIZE - SHA256_DIGEST_LENGTH],
SHA256_DIGEST_LENGTH) != )
{
RTMP_Log(RTMP_LOGWARNING, "%s: Server not genuine Adobe!", __FUNCTION__);
return FALSE;
}
else
{
RTMP_Log(RTMP_LOGDEBUG, "%s: Genuine Adobe Flash Media Server", __FUNCTION__);
} if (encrypted)
{
char buff[RTMP_SIG_SIZE];
/* set keys for encryption from now on */
r->Link.rc4keyIn = keyIn;
r->Link.rc4keyOut = keyOut; /* update the keystreams */
if (r->Link.rc4keyIn)
{
RC4_encrypt((RC4_KEY *)r->Link.rc4keyIn, RTMP_SIG_SIZE, (uint8_t *) buff);
} if (r->Link.rc4keyOut)
{
RC4_encrypt((RC4_KEY *)r->Link.rc4keyOut, RTMP_SIG_SIZE, (uint8_t *) buff);
}
}
}
else
{
//int memcmp(const void *buf1, const void *buf2, unsigned int count); 当buf1=buf2时,返回值=0
//比较serversig和clientsig是否相等
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。比较握手数据签名");
//-----------------------------
if (memcmp(serversig, clientsig, RTMP_SIG_SIZE) != )
{
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。握手数据签名不匹配!");
//-----------------------------
RTMP_Log(RTMP_LOGWARNING, "%s: client signature does not match!",
__FUNCTION__);
}
}
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。握手成功");
//-----------------------------
RTMP_Log(RTMP_LOGDEBUG, "%s: Handshaking finished....", __FUNCTION__);
return TRUE;
}

5: Establishing a Streaming Connection (NetConnection part)

This section analyzes the function calls RTMPdump makes while establishing a streaming connection.

The overall connection procedure was already outlined in an earlier article:

RTMP流媒体播放过程

and so was its call graph:

RTMPDump源代码分析 0: 主要函数调用分析

so the details are not repeated here. The work essentially comes down to two functions:

RTMP_Connect()

RTMP_ConnectStream()

The first builds the RTMP NetConnection, the second builds the NetStream on top of it. They are normally called in that order, as in the sketch below.
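As a quick orientation, here is a minimal sketch of how the two calls are typically driven through the public librtmp API. It is not rtmpdump's own main(); the URL is a placeholder and the error handling is reduced to the bare minimum.

/* Minimal sketch (not rtmpdump's main): build a NetConnection, then a NetStream.
 * The URL is a placeholder; on Windows the sockets library must be initialized first. */
#include <stdio.h>
#include <librtmp/rtmp.h>
#include <librtmp/log.h>

int connect_sketch(char *url)           /* e.g. "rtmp://host/app/playpath" (hypothetical) */
{
    RTMP *r = RTMP_Alloc();
    if (!r)
        return -1;
    RTMP_Init(r);                        /* fill the RTMP structure with defaults */
    RTMP_LogSetLevel(RTMP_LOGINFO);

    if (!RTMP_SetupURL(r, url))          /* parse the rtmp:// URL into r->Link */
        goto fail;
    if (!RTMP_Connect(r, NULL))          /* socket + handshake + "connect" => NetConnection */
        goto fail;
    if (!RTMP_ConnectStream(r, 0))       /* createStream + play => NetStream */
        goto fail;

    printf("NetStream is ready, RTMP_Read() can be called now\n");
    RTMP_Close(r);
    RTMP_Free(r);
    return 0;

fail:
    RTMP_Close(r);
    RTMP_Free(r);
    return -1;
}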

Let's look at RTMP_Connect() first:

Note: the source pasted below is my modified RTMPdump. I added progress-reporting calls of the form r->dlg->AppendCInfo("建立连接:第0次连接。开始建立Socket连接"); they do not affect how the program runs and can be ignored.

RTMP_Connect()

//连接
int
RTMP_Connect(RTMP *r, RTMPPacket *cp)
{
  //Socket结构体
  struct sockaddr_in service;
  if (!r->Link.hostname.av_len)
    return FALSE;

  memset(&service, 0, sizeof(struct sockaddr_in));
  service.sin_family = AF_INET;
  if (r->Link.socksport)
    {
      //加入地址信息
      /* 使用SOCKS连接 */
      if (!add_addr_info(&service, &r->Link.sockshost, r->Link.socksport))
        return FALSE;
    }
  else
    {
      /* 直接连接 */
      if (!add_addr_info(&service, &r->Link.hostname, r->Link.port))
        return FALSE;
    }
  //-----------------
  r->dlg->AppendCInfo("建立连接:第0次连接。开始建立Socket连接");
  //-----------------------------
  if (!RTMP_Connect0(r, (struct sockaddr *)&service))
    {
      r->dlg->AppendCInfo("建立连接:第0次连接。建立Socket连接失败");
      return FALSE;
    }
  //-----------------
  r->dlg->AppendCInfo("建立连接:第0次连接。建立Socket连接成功");
  //-----------------------------
  r->m_bSendCounter = TRUE;

  return RTMP_Connect1(r, cp);
}

As we can see, RTMP_Connect() in turn calls two functions, RTMP_Connect0() and RTMP_Connect1(). Let's take them in order:

RTMP_Connect0()

//sockaddr是Linux网络编程的地址结构体一种,其定义如下:
//struct sockaddr{
// unsigned short sa_family;
// char sa_data[14];
//};
//说明:sa_family:是地址家族,也称作,协议族,一般都是“AF_xxx”的形式。通常大多用的是都是AF_INET。
// sa_data:是14字节协议地址。
//有时不使用sockaddr,而使用sockaddr_in(多用在windows)(等价)
//struct sockaddr_in {
// short int sin_family; /* Address family */
// unsigned short int sin_port; /* Port number */
// struct in_addr sin_addr; /* Internet address */
// unsigned char sin_zero[8]; /* Same size as struct sockaddr */
//};
//union {
// struct{
// unsigned char s_b1,s_b2,s_b3,s_b4;
// } S_un_b;
// struct {
// unsigned short s_w1,s_w2;
// } S_un_w;
// unsigned long S_addr;
// } S_un;
//} in_addr;
//第0次连接,建立Socket连接
int
RTMP_Connect0(RTMP *r, struct sockaddr * service)
{
int on = ;
r->m_sb.sb_timedout = FALSE;
r->m_pausing = ;
r->m_fDuration = 0.0;
//创建一个Socket,并把Socket序号赋值给相应变量
//-----------------
r->dlg->AppendCInfo("建立连接:第0次连接。create一个Socket");
//-----------------------------
r->m_sb.sb_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (r->m_sb.sb_socket != -)
{ //定义函数 int connect (int sockfd,struct sockaddr * serv_addr,int addrlen);
//函数说明 connect()用来将参数sockfd 的Socket(刚刚创建)连至参数serv_addr
//指定的网络地址。参数addrlen为sockaddr的结构长度。
//连接
RTMP_LogPrintf("建立Socket连接!\n");
//-----------------
r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket");
//-----------------------------
if (connect(r->m_sb.sb_socket, service, sizeof(struct sockaddr)) < )
{
//-----------------
r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket失败");
//-----------------------------
int err = GetSockError();
RTMP_Log(RTMP_LOGERROR, "%s, failed to connect socket. %d (%s)",
__FUNCTION__, err, strerror(err));
RTMP_Close(r);
return FALSE;
}
//-----------------
r->dlg->AppendCInfo("建立连接:第0次连接。connect该Socket成功");
//-----------------------------
//指定了端口号。注:这不是必需的。
if (r->Link.socksport)
{
RTMP_Log(RTMP_LOGDEBUG, "%s ... SOCKS negotiation", __FUNCTION__);
//谈判?发送数据报以进行谈判?!
if (!SocksNegotiate(r))
{
RTMP_Log(RTMP_LOGERROR, "%s, SOCKS negotiation failed.", __FUNCTION__);
RTMP_Close(r);
return FALSE;
}
}
}
else
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to create socket. Error: %d", __FUNCTION__,
GetSockError());
return FALSE;
} /* set timeout */
//超时
{
SET_RCVTIMEO(tv, r->Link.timeout);
if (setsockopt
(r->m_sb.sb_socket, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)))
{
RTMP_Log(RTMP_LOGERROR, "%s, Setting socket timeout to %ds failed!",
__FUNCTION__, r->Link.timeout);
}
} setsockopt(r->m_sb.sb_socket, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)); return TRUE;
}

So RTMP_Connect0() only creates the TCP socket, connects it, and tunes it (receive timeout, TCP_NODELAY); it does not start the RTMP protocol itself. A sketch of those two socket options follows.
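A POSIX-flavored sketch of the two setsockopt() calls made at the end of RTMP_Connect0(). On Windows, SO_RCVTIMEO expects a DWORD of milliseconds instead of a struct timeval, which is why librtmp hides the difference behind its SET_RCVTIMEO macro.

/* Sketch only: set a receive timeout and disable Nagle on an already connected socket. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/time.h>

static int tune_socket(int fd, int timeout_sec)
{
    struct timeval tv = { timeout_sec, 0 };
    int on = 1;

    /* give up on recv() after timeout_sec seconds */
    if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) != 0)
        return -1;

    /* send small RTMP chunks immediately instead of waiting to coalesce them */
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) != 0)
        return -1;

    return 0;
}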

Now for RTMP_Connect1(), the function that really establishes the RTMP connection:

RTMP_Connect1()

//第1次连接,从握手开始
int
RTMP_Connect1(RTMP *r, RTMPPacket *cp)
{
if (r->Link.protocol & RTMP_FEATURE_SSL)
{
#if defined(CRYPTO) && !defined(NO_SSL)
TLS_client(RTMP_TLS_ctx, r->m_sb.sb_ssl);
TLS_setfd((SSL *)r->m_sb.sb_ssl, r->m_sb.sb_socket);
if (TLS_connect((SSL *)r->m_sb.sb_ssl) < )
{
RTMP_Log(RTMP_LOGERROR, "%s, TLS_Connect failed", __FUNCTION__);
RTMP_Close(r);
return FALSE;
}
#else
RTMP_Log(RTMP_LOGERROR, "%s, no SSL/TLS support", __FUNCTION__);
RTMP_Close(r);
return FALSE; #endif
}
//使用HTTP
if (r->Link.protocol & RTMP_FEATURE_HTTP)
{
r->m_msgCounter = ;
r->m_clientID.av_val = NULL;
r->m_clientID.av_len = ;
HTTP_Post(r, RTMPT_OPEN, "", );
HTTP_read(r, );
r->m_msgCounter = ;
}
RTMP_Log(RTMP_LOGDEBUG, "%s, ... connected, handshaking", __FUNCTION__);
//握手----------------
r->dlg->AppendCInfo("建立连接:第1次连接。开始握手(HandShake)");
//-----------------------------
RTMP_LogPrintf("开始握手(HandShake)!\n");
if (!HandShake(r, TRUE))
{
//----------------
r->dlg->AppendCInfo("建立连接:第1次连接。握手(HandShake)失败!");
//-----------------------------
RTMP_Log(RTMP_LOGERROR, "%s, handshake failed.", __FUNCTION__);
RTMP_Close(r);
return FALSE;
}
//----------------
r->dlg->AppendCInfo("建立连接:第1次连接。握手(HandShake)成功");
//-----------------------------
RTMP_LogPrintf("握手(HandShake)完毕!\n");
RTMP_Log(RTMP_LOGDEBUG, "%s, handshaked", __FUNCTION__);
//发送“connect”命令--------------
//----------------
r->dlg->AppendCInfo("建立连接:第1次连接。开始建立网络连接(NetConnection)");
//-----------------------------
RTMP_LogPrintf("开始建立网络连接(NetConnection)!\n");
//----------------
r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Connect)。");
//-----------------------------
if (!SendConnectPacket(r, cp))
{
//----------------
r->dlg->AppendCInfo("建立连接:第1次连接。建立网络连接(NetConnection)失败!");
//-----------------------------
RTMP_Log(RTMP_LOGERROR, "%s, RTMP connect failed.", __FUNCTION__);
RTMP_Close(r);
return FALSE;
}
//----------------
r->dlg->AppendCInfo("建立连接:第1次连接。建立网络连接(NetConnection)成功");
//-----------------------------
RTMP_LogPrintf("命令消息“Connect”发送完毕!\n");
return TRUE;
}

This function does the following:

HandShake() performs the handshake, which was analyzed earlier: RTMPdump 源代码分析 4: 连接第一步——握手(Hand Shake)

SendConnectPacket() sends the packet carrying the "connect" command, which starts building the RTMP NetConnection; its internals are analyzed later, in the section on sending messages.

That completes the analysis of RTMP_Connect().

6: Establishing a Streaming Connection (NetStream part 1)

The previous section analyzed how RTMPdump builds a NetConnection: RTMPdump 源代码分析 5: 建立一个流媒体连接 (NetConnection部分)

Without further ado, let's look at RTMP_ConnectStream(), which builds a NetStream on top of that NetConnection.

RTMP_ConnectStream()

//创建流
int
RTMP_ConnectStream(RTMP *r, int seekTime)
{
  RTMPPacket packet = { 0 };

  /* seekTime was already set by SetupStream / SetupURL.
   * This is only needed by ReconnectStream.
   */
  if (seekTime > 0)
    r->Link.seekTime = seekTime;

  r->m_mediaChannel = 0;

  while (!r->m_bPlaying && RTMP_IsConnected(r) && RTMP_ReadPacket(r, &packet))
    {
      if (RTMPPacket_IsReady(&packet))
        {
          if (!packet.m_nBodySize)
            continue;
          if ((packet.m_packetType == RTMP_PACKET_TYPE_AUDIO) ||
              (packet.m_packetType == RTMP_PACKET_TYPE_VIDEO) ||
              (packet.m_packetType == RTMP_PACKET_TYPE_INFO))
            {
              RTMP_Log(RTMP_LOGWARNING, "Received FLV packet before play()! Ignoring.");
              RTMPPacket_Free(&packet);
              continue;
            }
          //处理Packet!
          //----------------
          r->dlg->AppendCInfo("建立网络流:处理收到的数据。开始处理收到的数据");
          //-----------------------------
          RTMP_ClientPacket(r, &packet);
          //----------------
          r->dlg->AppendCInfo("建立网络流:处理收到的数据。处理完毕,清除数据。");
          //-----------------------------
          RTMPPacket_Free(&packet);
        }
    }

  return r->m_bPlaying;
}

At first glance this function looks short, but it is actually quite involved; in my view it is considerably more complex than RTMP_Connect().

The key is the while loop: the body runs as long as all three conditions hold, and the loop is only left when an error occurs or the NetStream has been fully established (m_bPlaying becomes TRUE).

Two functions inside this loop are especially important:

RTMP_ReadPacket()

RTMP_ClientPacket()

The first reads a message (Message) packet that arrived over the socket but does not interpret it; the second processes the message and sends the appropriate response. Together they implement the receive-then-respond cycle, as in the sketch below.
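A sketch of that cycle in isolation; it is essentially the core of the loop above with the pre-play FLV check left out, and assumes r is already connected.

/* Sketch: pull chunks off the socket and dispatch each complete message. */
#include <librtmp/rtmp.h>

static int receive_until_playing(RTMP *r)
{
    RTMPPacket packet = { 0 };

    while (!r->m_bPlaying && RTMP_IsConnected(r) && RTMP_ReadPacket(r, &packet))
    {
        if (!RTMPPacket_IsReady(&packet))   /* message still split across several chunks */
            continue;
        if (!packet.m_nBodySize)            /* nothing to do for an empty body */
            continue;

        RTMP_ClientPacket(r, &packet);      /* interpret the message and answer it */
        RTMPPacket_Free(&packet);           /* release the body buffer */
    }
    return r->m_bPlaying;
}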

Now let's look at RTMP_ReadPacket():

//读取收下来的Chunk
int
RTMP_ReadPacket(RTMP *r, RTMPPacket *packet)
{
//packet 存读取完后的的数据
//Chunk Header最大值18
uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = { };
//header 指向的是从Socket中收下来的数据
char *header = (char *)hbuf;
int nSize, hSize, nToRead, nChunk;
int didAlloc = FALSE; RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket);
//收下来的数据存入hbuf
if (ReadN(r, (char *)hbuf, ) == )
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header", __FUNCTION__);
return FALSE;
}
//块类型fmt
packet->m_headerType = (hbuf[] & 0xc0) >> ;
//块流ID(2-63)
packet->m_nChannel = (hbuf[] & 0x3f);
header++;
//块流ID第1字节为0时,块流ID占2个字节
if (packet->m_nChannel == )
{
if (ReadN(r, (char *)&hbuf[], ) != )
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 2nd byte",
__FUNCTION__);
return FALSE;
}
//计算块流ID(64-319)
packet->m_nChannel = hbuf[];
packet->m_nChannel += ;
header++;
}
//块流ID第1字节为0时,块流ID占3个字节
else if (packet->m_nChannel == )
{
int tmp;
if (ReadN(r, (char *)&hbuf[], ) != )
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 3nd byte",
__FUNCTION__);
return FALSE;
}
tmp = (hbuf[] << ) + hbuf[];
//计算块流ID(64-65599)
packet->m_nChannel = tmp + ;
RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel);
header += ;
}
//ChunkHeader的大小(4种)
nSize = packetSize[packet->m_headerType]; if (nSize == RTMP_LARGE_HEADER_SIZE) /* if we get a full header the timestamp is absolute */
packet->m_hasAbsTimestamp = TRUE; //11字节的完整ChunkMsgHeader的TimeStamp是绝对值 else if (nSize < RTMP_LARGE_HEADER_SIZE)
{ /* using values from the last message of this channel */
if (r->m_vecChannelsIn[packet->m_nChannel])
memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel],
sizeof(RTMPPacket));
} nSize--; if (nSize > && ReadN(r, header, nSize) != nSize)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header. type: %x",
__FUNCTION__, (unsigned int)hbuf[]);
return FALSE;
} hSize = nSize + (header - (char *)hbuf); if (nSize >= )
{
//TimeStamp(注意 BigEndian to SmallEndian)(11,7,3字节首部都有)
packet->m_nTimeStamp = AMF_DecodeInt24(header); /*RTMP_Log(RTMP_LOGDEBUG, "%s, reading RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */
//消息长度(11,7字节首部都有)
if (nSize >= )
{
packet->m_nBodySize = AMF_DecodeInt24(header + );
packet->m_nBytesRead = ;
RTMPPacket_Free(packet);
//(11,7字节首部都有)
if (nSize > )
{
//Msg type ID
packet->m_packetType = header[];
//Msg Stream ID
if (nSize == )
packet->m_nInfoField2 = DecodeInt32LE(header + );
}
}
//Extend TimeStamp
if (packet->m_nTimeStamp == 0xffffff)
{
if (ReadN(r, header + nSize, ) != )
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp",
__FUNCTION__);
return FALSE;
}
packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize);
hSize += ;
}
} RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize); if (packet->m_nBodySize > && packet->m_body == NULL)
{
if (!RTMPPacket_Alloc(packet, packet->m_nBodySize))
{
RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
return FALSE;
}
didAlloc = TRUE;
packet->m_headerType = (hbuf[] & 0xc0) >> ;
} nToRead = packet->m_nBodySize - packet->m_nBytesRead;
nChunk = r->m_inChunkSize;
if (nToRead < nChunk)
nChunk = nToRead; /* Does the caller want the raw chunk? */
if (packet->m_chunk)
{
packet->m_chunk->c_headerSize = hSize;
memcpy(packet->m_chunk->c_header, hbuf, hSize);
packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead;
packet->m_chunk->c_chunkSize = nChunk;
} if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet body. len: %lu",
__FUNCTION__, packet->m_nBodySize);
return FALSE;
} RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk); packet->m_nBytesRead += nChunk; /* keep the packet as ref for other packets on this channel */
if (!r->m_vecChannelsIn[packet->m_nChannel])
r->m_vecChannelsIn[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(RTMPPacket));
//读取完毕
if (RTMPPacket_IsReady(packet))
{
/* make packet's timestamp absolute */
if (!packet->m_hasAbsTimestamp)
packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */ r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp; /* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */
/* arrives and requests to re-use some info (small packet header) */
r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL;
r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = ;
r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE; /* can only be false if we reuse header */
}
else
{
packet->m_body = NULL; /* so it won't be erased on free */
} return TRUE;
}

Note that what is received here is actually a chunk (Chunk), not a message (Message): when a message travels over the network it is split into chunks.

So what this function parses is a chunk.

For background, see: RTMP规范简单分析

I won't walk through the parsing line by line; it simply follows the RTMP specification, decoding the header byte by byte. The sketch below illustrates the first step, the chunk basic header.
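A standalone sketch of that first step: the 1- to 3-byte chunk basic header carries the chunk type (fmt) and the chunk stream id, decoded exactly the way RTMP_ReadPacket() does above.

/* Sketch: decode an RTMP chunk basic header from a byte buffer.
 * Returns the number of header bytes consumed, or -1 if buf is too short. */
#include <stdint.h>

static int decode_basic_header(const uint8_t *buf, int len, int *fmt, int *csid)
{
    if (len < 1)
        return -1;

    *fmt  = (buf[0] & 0xc0) >> 6;   /* 0..3 selects the 12/8/4/1-byte message header */
    *csid =  buf[0] & 0x3f;         /* ids 2..63 fit directly in the first byte */

    if (*csid == 0)                 /* 2-byte form: ids 64..319 */
    {
        if (len < 2)
            return -1;
        *csid = buf[1] + 64;
        return 2;
    }
    if (*csid == 1)                 /* 3-byte form: ids 64..65599 */
    {
        if (len < 3)
            return -1;
        *csid = (buf[2] << 8) + buf[1] + 64;
        return 3;
    }
    return 1;                       /* 1-byte form */
}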

7: Establishing a Streaming Connection (NetStream part 2)

As noted above, two functions are especially important here:

RTMP_ReadPacket()

RTMP_ClientPacket()

The first has already been analyzed. Now for the second, whose job is to process a received message (Message) and respond to it.

Here is the annotated code:

//处理接收到的Chunk
int
RTMP_ClientPacket(RTMP *r, RTMPPacket *packet)
{
int bHasMediaPacket = ;
switch (packet->m_packetType)
{
//RTMP消息类型ID=1,设置块大小
case 0x01:
/* chunk size */
//----------------
r->dlg->AppendCInfo("处理收到的数据。消息 Set Chunk Size (typeID=1)。");
//-----------------------------
RTMP_LogPrintf("处理消息 Set Chunk Size (typeID=1)\n");
HandleChangeChunkSize(r, packet);
break;
//RTMP消息类型ID=3,致谢
case 0x03:
/* bytes read report */
RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__);
break;
//RTMP消息类型ID=4,用户控制
case 0x04:
/* ctrl */
//----------------
r->dlg->AppendCInfo("处理收到的数据。消息 User Control (typeID=4)。");
//-----------------------------
RTMP_LogPrintf("处理消息 User Control (typeID=4)\n");
HandleCtrl(r, packet);
break;
//RTMP消息类型ID=5
case 0x05:
/* server bw */
//----------------
r->dlg->AppendCInfo("处理收到的数据。消息 Window Acknowledgement Size (typeID=5)。");
//-----------------------------
RTMP_LogPrintf("处理消息 Window Acknowledgement Size (typeID=5)\n");
HandleServerBW(r, packet);
break;
//RTMP消息类型ID=6
case 0x06:
/* client bw */
//----------------
r->dlg->AppendCInfo("处理收到的数据。消息 Set Peer Bandwidth (typeID=6)。");
//-----------------------------
RTMP_LogPrintf("处理消息 Set Peer Bandwidth (typeID=6)\n");
HandleClientBW(r, packet);
break;
//RTMP消息类型ID=8,音频数据
case 0x08:
/* audio data */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */
HandleAudio(r, packet);
bHasMediaPacket = ;
if (!r->m_mediaChannel)
r->m_mediaChannel = packet->m_nChannel;
if (!r->m_pausing)
r->m_mediaStamp = packet->m_nTimeStamp;
break;
//RTMP消息类型ID=9,视频数据
case 0x09:
/* video data */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */
HandleVideo(r, packet);
bHasMediaPacket = ;
if (!r->m_mediaChannel)
r->m_mediaChannel = packet->m_nChannel;
if (!r->m_pausing)
r->m_mediaStamp = packet->m_nTimeStamp;
break;
//RTMP消息类型ID=15,AMF3编码,忽略
case 0x0F: /* flex stream send */
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex stream send, size %lu bytes, not supported, ignoring",
__FUNCTION__, packet->m_nBodySize);
break;
//RTMP消息类型ID=16,AMF3编码,忽略
case 0x10: /* flex shared object */
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex shared object, size %lu bytes, not supported, ignoring",
__FUNCTION__, packet->m_nBodySize);
break;
//RTMP消息类型ID=17,AMF3编码,忽略
case 0x11: /* flex message */
{
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex message, size %lu bytes, not fully supported",
__FUNCTION__, packet->m_nBodySize);
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ /* some DEBUG code */
#if 0
RTMP_LIB_AMFObject obj;
int nRes = obj.Decode(packet.m_body+, packet.m_nBodySize-);
if(nRes < ) {
RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__);
/*return; */
} obj.Dump();
#endif if (HandleInvoke(r, packet->m_body + , packet->m_nBodySize - ) == )
bHasMediaPacket = ;
break;
}
//RTMP消息类型ID=18,AMF0编码,数据消息
case 0x12:
/* metadata (notify) */ RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__,
packet->m_nBodySize);
//处理元数据,暂时注释
/*
if (HandleMetadata(r, packet->m_body, packet->m_nBodySize))
bHasMediaPacket = 1;
break;
*/
//RTMP消息类型ID=19,AMF0编码,忽略
case 0x13:
RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring",
__FUNCTION__);
break;
//RTMP消息类型ID=20,AMF0编码,命令消息
//处理命令消息!
case 0x14:
//----------------
r->dlg->AppendCInfo("处理收到的数据。消息 命令 (AMF0编码) (typeID=20)。");
//-----------------------------
/* invoke */
RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__,
packet->m_nBodySize);
RTMP_LogPrintf("处理命令消息 (typeID=20,AMF0编码)\n");
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */ if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == )
bHasMediaPacket = ;
break;
//RTMP消息类型ID=22
case 0x16:
{
/* go through FLV packets and handle metadata packets */
unsigned int pos = ;
uint32_t nTimeStamp = packet->m_nTimeStamp; while (pos + < packet->m_nBodySize)
{
uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + ); /* size without header (11) and prevTagSize (4) */ if (pos + + dataSize + > packet->m_nBodySize)
{
RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!");
break;
}
if (packet->m_body[pos] == 0x12)
{
HandleMetadata(r, packet->m_body + pos + , dataSize);
}
else if (packet->m_body[pos] == || packet->m_body[pos] == )
{
nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + );
nTimeStamp |= (packet->m_body[pos + ] << );
}
pos += ( + dataSize + );
}
if (!r->m_pausing)
r->m_mediaStamp = nTimeStamp; /* FLV tag(s) */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */
bHasMediaPacket = ;
break;
}
default:
RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__,
packet->m_packetType);
#ifdef _DEBUG
RTMP_LogHex(RTMP_LOGDEBUG, (const uint8_t *)packet->m_body, packet->m_nBodySize);
#endif
} return bHasMediaPacket;
}

The code is heavily annotated, and the overall idea is straightforward: respond differently depending on the type of the received message (Message). For example, type 0x01 is the Set Chunk Size protocol message, so the corresponding handler HandleChangeChunkSize() is called; what that boils down to is sketched below.
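A sketch of what that particular handler amounts to (the real HandleChangeChunkSize() in librtmp is essentially this plus a debug log line): the message body is one 4-byte big-endian integer that becomes the new inbound chunk size.

/* Sketch: apply a Set Chunk Size (type 0x01) message body.
 * The decoded value replaces the default inbound chunk size of 128 bytes. */
#include <stdint.h>

static int apply_set_chunk_size(const uint8_t *body, unsigned int body_size,
                                int *in_chunk_size)
{
    if (body_size < 4)
        return 0;                                   /* malformed: need 4 bytes */

    *in_chunk_size = (body[0] << 24) | (body[1] << 16) |
                     (body[2] << 8)  |  body[3];    /* big-endian 32-bit value */
    return 1;
}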

This function can fairly be called the heart of the program: every received message passes through it, and its switch decides which handler deals with the message.

Pay particular attention to type 0x14, i.e. message type ID 20: the AMF0-encoded command message. It is extremely common in an RTMP session and carries control commands such as play, pause and stop. Let's look at how it is handled.

As the case shows, it calls HandleInvoke() to process the AMF0-encoded command sent by the server. Here are the details:

/* Returns 0 for OK/Failed/error, 1 for 'Stop or Complete' */
static int
HandleInvoke(RTMP *r, const char *body, unsigned int nBodySize)
{
AMFObject obj;
AVal method;
int txn;
int ret = , nRes;
if (body[] != 0x02) /* make sure it is a string method name we start with */
{
RTMP_Log(RTMP_LOGWARNING, "%s, Sanity failed. no string method in invoke packet",
__FUNCTION__);
return ;
} nRes = AMF_Decode(&obj, body, nBodySize, FALSE);
if (nRes < )
{
RTMP_Log(RTMP_LOGERROR, "%s, error decoding invoke packet", __FUNCTION__);
return ;
} AMF_Dump(&obj);
AMFProp_GetString(AMF_GetProp(&obj, NULL, ), &method);
txn = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, ));
RTMP_Log(RTMP_LOGDEBUG, "%s, server invoking <%s>", __FUNCTION__, method.av_val); if (AVMATCH(&method, &av__result))
{
AVal methodInvoked = {};
int i; for (i=; i<r->m_numCalls; i++) {
if (r->m_methodCalls[i].num == txn) {
methodInvoked = r->m_methodCalls[i].name;
AV_erase(r->m_methodCalls, &r->m_numCalls, i, FALSE);
break;
}
}
if (!methodInvoked.av_val) {
RTMP_Log(RTMP_LOGDEBUG, "%s, received result id %d without matching request",
__FUNCTION__, txn);
goto leave;
}
//----------------
char temp_str[];
sprintf(temp_str,"接收数据。消息 %s 的 Result",methodInvoked.av_val);
r->dlg->AppendCInfo(temp_str);
//-----------------------------
RTMP_Log(RTMP_LOGDEBUG, "%s, received result for method call <%s>", __FUNCTION__,
methodInvoked.av_val); if (AVMATCH(&methodInvoked, &av_connect))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Result (Connect)");
//-----------------------------
if (r->Link.token.av_len)
{
AMFObjectProperty p;
if (RTMP_FindFirstMatchingProperty(&obj, &av_secureToken, &p))
{
DecodeTEA(&r->Link.token, &p.p_vu.p_aval);
SendSecureTokenResponse(r, &p.p_vu.p_aval);
}
}
if (r->Link.protocol & RTMP_FEATURE_WRITE)
{
SendReleaseStream(r);
SendFCPublish(r);
}
else
{
//----------------
r->dlg->AppendCInfo("发送数据。消息 Window Acknowledgement Size (typeID=5)。");
//-----------------------------
RTMP_LogPrintf("发送消息Window Acknowledgement Size(typeID=5)\n");
RTMP_SendServerBW(r);
RTMP_SendCtrl(r, , , );
}
//----------------
r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (CreateStream)。");
//-----------------------------
RTMP_LogPrintf("发送命令消息“CreateStream” (typeID=20)\n");
RTMP_SendCreateStream(r); if (!(r->Link.protocol & RTMP_FEATURE_WRITE))
{
/* Send the FCSubscribe if live stream or if subscribepath is set */
if (r->Link.subscribepath.av_len)
SendFCSubscribe(r, &r->Link.subscribepath);
else if (r->Link.lFlags & RTMP_LF_LIVE)
SendFCSubscribe(r, &r->Link.playpath);
}
}
else if (AVMATCH(&methodInvoked, &av_createStream))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Result (CreateStream)");
//-----------------------------
r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, )); if (r->Link.protocol & RTMP_FEATURE_WRITE)
{
SendPublish(r);
}
else
{
if (r->Link.lFlags & RTMP_LF_PLST)
SendPlaylist(r);
//----------------
r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Play)。");
//-----------------------------
RTMP_LogPrintf("发送命令消息“play” (typeID=20)\n");
SendPlay(r);
RTMP_SendCtrl(r, , r->m_stream_id, r->m_nBufferMS);
}
}
else if (AVMATCH(&methodInvoked, &av_play) ||
AVMATCH(&methodInvoked, &av_publish))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Result (Play or Publish)");
//-----------------------------
r->m_bPlaying = TRUE;
}
free(methodInvoked.av_val);
}
else if (AVMATCH(&method, &av_onBWDone))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","onBWDone");
//-----------------------------
if (!r->m_nBWCheckCounter)
SendCheckBW(r);
}
else if (AVMATCH(&method, &av_onFCSubscribe))
{
/* SendOnFCSubscribe(); */
}
else if (AVMATCH(&method, &av_onFCUnsubscribe))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","onFCUnsubscribe");
//-----------------------------
RTMP_Close(r);
ret = ;
}
else if (AVMATCH(&method, &av_ping))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Ping");
//-----------------------------
SendPong(r, txn);
}
else if (AVMATCH(&method, &av__onbwcheck))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","onBWcheck");
//-----------------------------
SendCheckBWResult(r, txn);
}
else if (AVMATCH(&method, &av__onbwdone))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","onBWdone");
//-----------------------------
int i;
for (i = ; i < r->m_numCalls; i++)
if (AVMATCH(&r->m_methodCalls[i].name, &av__checkbw))
{
AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
break;
}
}
else if (AVMATCH(&method, &av__error))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","error");
//-----------------------------
RTMP_Log(RTMP_LOGERROR, "rtmp server sent error");
}
else if (AVMATCH(&method, &av_close))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","close");
//-----------------------------
RTMP_Log(RTMP_LOGERROR, "rtmp server requested close");
RTMP_Close(r);
}
else if (AVMATCH(&method, &av_onStatus))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","onStatus");
//-----------------------------
AMFObject obj2;
AVal code, level;
AMFProp_GetObject(AMF_GetProp(&obj, NULL, ), &obj2);
AMFProp_GetString(AMF_GetProp(&obj2, &av_code, -), &code);
AMFProp_GetString(AMF_GetProp(&obj2, &av_level, -), &level); RTMP_Log(RTMP_LOGDEBUG, "%s, onStatus: %s", __FUNCTION__, code.av_val);
if (AVMATCH(&code, &av_NetStream_Failed)
|| AVMATCH(&code, &av_NetStream_Play_Failed)
|| AVMATCH(&code, &av_NetStream_Play_StreamNotFound)
|| AVMATCH(&code, &av_NetConnection_Connect_InvalidApp))
{
r->m_stream_id = -;
RTMP_Close(r);
RTMP_Log(RTMP_LOGERROR, "Closing connection: %s", code.av_val);
} else if (AVMATCH(&code, &av_NetStream_Play_Start))
{
int i;
r->m_bPlaying = TRUE;
for (i = ; i < r->m_numCalls; i++)
{
if (AVMATCH(&r->m_methodCalls[i].name, &av_play))
{
AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
break;
}
}
} else if (AVMATCH(&code, &av_NetStream_Publish_Start))
{
int i;
r->m_bPlaying = TRUE;
for (i = ; i < r->m_numCalls; i++)
{
if (AVMATCH(&r->m_methodCalls[i].name, &av_publish))
{
AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
break;
}
}
} /* Return 1 if this is a Play.Complete or Play.Stop */
else if (AVMATCH(&code, &av_NetStream_Play_Complete)
|| AVMATCH(&code, &av_NetStream_Play_Stop)
|| AVMATCH(&code, &av_NetStream_Play_UnpublishNotify))
{
RTMP_Close(r);
ret = ;
} else if (AVMATCH(&code, &av_NetStream_Seek_Notify))
{
r->m_read.flags &= ~RTMP_READ_SEEKING;
} else if (AVMATCH(&code, &av_NetStream_Pause_Notify))
{
if (r->m_pausing == || r->m_pausing == )
{
RTMP_SendPause(r, FALSE, r->m_pauseStamp);
r->m_pausing = ;
}
}
}
else if (AVMATCH(&method, &av_playlist_ready))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","playlist_ready");
//-----------------------------
int i;
for (i = ; i < r->m_numCalls; i++)
{
if (AVMATCH(&r->m_methodCalls[i].name, &av_set_playlist))
{
AV_erase(r->m_methodCalls, &r->m_numCalls, i, TRUE);
break;
}
}
}
else
{ }
leave:
AMF_Reset(&obj);
return ret;
} int
RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name,
AMFObjectProperty * p)
{
int n;
/* this is a small object search to locate the "duration" property */
for (n = ; n < obj->o_num; n++)
{
AMFObjectProperty *prop = AMF_GetProp(obj, NULL, n); if (AVMATCH(&prop->p_name, name))
{
*p = *prop;
return TRUE;
} if (prop->p_type == AMF_OBJECT)
{
if (RTMP_FindFirstMatchingProperty(&prop->p_vu.p_object, name, p))
return TRUE;
}
}
return FALSE;
}

The function proceeds in three main steps:

1. Call AMF_Decode() to decode the AMF-encoded command data.

2. Call AMFProp_GetString() to extract the command name as a string.

3. Call AVMATCH() to compare that string and branch to the appropriate handling, for example:

AVMATCH(&methodInvoked, &av_connect)
AVMATCH(&methodInvoked, &av_createStream)
AVMATCH(&methodInvoked, &av_play)
AVMATCH(&methodInvoked, &av_publish)
AVMATCH(&method, &av_onBWDone)

and so on; a minimal sketch of this decode-and-match pattern follows.
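A sketch of steps 1-3 in isolation, using librtmp's AMF helpers from amf.h. The body/size arguments are assumed to be an invoke message body as handed to HandleInvoke(); only two method names are matched here for brevity.

/* Sketch: decode an AMF0 invoke body and branch on the method name. */
#include <stdio.h>
#include <string.h>
#include <librtmp/amf.h>

static const AVal av_result_name   = AVC("_result");
static const AVal av_onStatus_name = AVC("onStatus");

static void inspect_invoke(const char *body, unsigned int body_size)
{
    AMFObject obj;
    AVal method;
    double txn;

    if (body_size < 1 || body[0] != 0x02)      /* must start with an AMF0 string */
        return;
    if (AMF_Decode(&obj, body, (int)body_size, 0) < 0)
        return;

    AMFProp_GetString(AMF_GetProp(&obj, NULL, 0), &method);   /* command name */
    txn = AMFProp_GetNumber(AMF_GetProp(&obj, NULL, 1));      /* transaction id */

    if (AVMATCH(&method, &av_result_name))
        printf("_result for transaction %d\n", (int)txn);
    else if (AVMATCH(&method, &av_onStatus_name))
        printf("onStatus notification\n");

    AMF_Reset(&obj);                            /* free the decoded object */
}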

The concrete handling is shown in the excerpt below. As an example, take the "createStream" branch, which is normally reached after the NetConnection has been established and before play:

else if (AVMATCH(&methodInvoked, &av_createStream))
{
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Result (CreateStream)");
//-----------------------------
r->m_stream_id = (int)AMFProp_GetNumber(AMF_GetProp(&obj, NULL, )); if (r->Link.protocol & RTMP_FEATURE_WRITE)
{
SendPublish(r);
}
else
{
if (r->Link.lFlags & RTMP_LF_PLST)
SendPlaylist(r);
//----------------
r->dlg->AppendCInfo("发送数据。消息 命令 (typeID=20) (Play)。");
//-----------------------------
RTMP_LogPrintf("发送命令消息“play” (typeID=20)\n");
SendPlay(r);
RTMP_SendCtrl(r, , r->m_stream_id, r->m_nBufferMS);
}
}

As the code shows, the program first extracts the stream_id and then sends two messages (Message), via SendPlaylist() and SendPlay(), to request the playlist and to start playing the stream.

8: Sending Messages (Message)

The previous sections covered RTMPDump's higher-level functions, such as how a NetConnection and a NetStream are built, but not how the data being sent and received is handled underneath. This section looks at that internal implementation: how messages (Message) are actually sent and received.

Let's start with sending.

The "connect" command is sent by SendConnectPacket()

The "createStream" command is sent by RTMP_SendCreateStream()

The "releaseStream" command is sent by SendReleaseStream()

The "publish" command is sent by SendPublish()

The "deleteStream" command is sent by SendDeleteStream()

The "pause" command is sent by RTMP_SendPause()

The rest are not listed one by one; the naming follows two patterns, RTMP_Send***() or Send***(), where *** is the command name.

SendConnectPacket() sends the first command message of every session. Its body is comparatively large and contains many AMF-encoded fields, so I won't analyze it in detail; here is the code:

//发送“connect”命令
static int
SendConnectPacket(RTMP *r, RTMPPacket *cp)
{
RTMPPacket packet;
char pbuf[], *pend = pbuf + sizeof(pbuf);
char *enc; if (cp)
return RTMP_SendPacket(r, cp, TRUE); packet.m_nChannel = 0x03; /* control channel (invoke) */
packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
packet.m_packetType = 0x14; /* INVOKE */
packet.m_nTimeStamp = ;
packet.m_nInfoField2 = ;
packet.m_hasAbsTimestamp = ;
packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; enc = packet.m_body;
enc = AMF_EncodeString(enc, pend, &av_connect);
enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
*enc++ = AMF_OBJECT; enc = AMF_EncodeNamedString(enc, pend, &av_app, &r->Link.app);
if (!enc)
return FALSE;
if (r->Link.protocol & RTMP_FEATURE_WRITE)
{
enc = AMF_EncodeNamedString(enc, pend, &av_type, &av_nonprivate);
if (!enc)
return FALSE;
}
if (r->Link.flashVer.av_len)
{
enc = AMF_EncodeNamedString(enc, pend, &av_flashVer, &r->Link.flashVer);
if (!enc)
return FALSE;
}
if (r->Link.swfUrl.av_len)
{
enc = AMF_EncodeNamedString(enc, pend, &av_swfUrl, &r->Link.swfUrl);
if (!enc)
return FALSE;
}
if (r->Link.tcUrl.av_len)
{
enc = AMF_EncodeNamedString(enc, pend, &av_tcUrl, &r->Link.tcUrl);
if (!enc)
return FALSE;
}
if (!(r->Link.protocol & RTMP_FEATURE_WRITE))
{
enc = AMF_EncodeNamedBoolean(enc, pend, &av_fpad, FALSE);
if (!enc)
return FALSE;
enc = AMF_EncodeNamedNumber(enc, pend, &av_capabilities, 15.0);
if (!enc)
return FALSE;
enc = AMF_EncodeNamedNumber(enc, pend, &av_audioCodecs, r->m_fAudioCodecs);
if (!enc)
return FALSE;
enc = AMF_EncodeNamedNumber(enc, pend, &av_videoCodecs, r->m_fVideoCodecs);
if (!enc)
return FALSE;
enc = AMF_EncodeNamedNumber(enc, pend, &av_videoFunction, 1.0);
if (!enc)
return FALSE;
if (r->Link.pageUrl.av_len)
{
enc = AMF_EncodeNamedString(enc, pend, &av_pageUrl, &r->Link.pageUrl);
if (!enc)
return FALSE;
}
}
if (r->m_fEncoding != 0.0 || r->m_bSendEncoding)
{ /* AMF0, AMF3 not fully supported yet */
enc = AMF_EncodeNamedNumber(enc, pend, &av_objectEncoding, r->m_fEncoding);
if (!enc)
return FALSE;
}
if (enc + >= pend)
return FALSE;
*enc++ = ;
*enc++ = ; /* end of object - 0x00 0x00 0x09 */
*enc++ = AMF_OBJECT_END; /* add auth string */
if (r->Link.auth.av_len)
{
enc = AMF_EncodeBoolean(enc, pend, r->Link.lFlags & RTMP_LF_AUTH);
if (!enc)
return FALSE;
enc = AMF_EncodeString(enc, pend, &r->Link.auth);
if (!enc)
return FALSE;
}
if (r->Link.extras.o_num)
{
int i;
for (i = ; i < r->Link.extras.o_num; i++)
{
enc = AMFProp_Encode(&r->Link.extras.o_props[i], enc, pend);
if (!enc)
return FALSE;
}
}
packet.m_nBodySize = enc - packet.m_body;
//----------------
r->dlg->AppendMLInfo(,,"命令消息","Connect");
//-----------------------------
return RTMP_SendPacket(r, &packet, TRUE);
}

RTMP_SendCreateStream() is comparatively simple:

//发送“createStream”命令
int
RTMP_SendCreateStream(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[256], *pend = pbuf + sizeof(pbuf);
  char *enc;

  packet.m_nChannel = 0x03;      /* control channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
  packet.m_packetType = 0x14;    /* INVOKE */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;

  enc = packet.m_body;
  enc = AMF_EncodeString(enc, pend, &av_createStream);
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  *enc++ = AMF_NULL;             /* NULL */

  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(,,"命令消息","CreateStream");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, TRUE);
}

SendReleaseStream() is also simple; I have annotated part of it:

//发送releaseStream命令
static int
SendReleaseStream(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[1024], *pend = pbuf + sizeof(pbuf);
  char *enc;

  packet.m_nChannel = 0x03;      /* control channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
  packet.m_packetType = 0x14;    /* INVOKE */
  packet.m_nTimeStamp = 0;
  packet.m_nInfoField2 = 0;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;

  enc = packet.m_body;
  //对“releaseStream”字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &av_releaseStream);
  //对事务ID进行AMF编码
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  //命令对象为空
  *enc++ = AMF_NULL;
  //对播放路径字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &r->Link.playpath);
  if (!enc)
    return FALSE;

  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(,,"命令消息","ReleaseStream");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, FALSE);
}

Next, SendPublish(), which sends the "publish" command:

//发送publish命令
static int
SendPublish(RTMP *r)
{
  RTMPPacket packet;
  char pbuf[1024], *pend = pbuf + sizeof(pbuf);
  char *enc;

  //块流ID为4
  packet.m_nChannel = 0x04;      /* source channel (invoke) */
  packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
  //命令消息,类型20
  packet.m_packetType = 0x14;    /* INVOKE */
  packet.m_nTimeStamp = 0;
  //流ID
  packet.m_nInfoField2 = r->m_stream_id;
  packet.m_hasAbsTimestamp = 0;
  packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;

  //指向Chunk的负载
  enc = packet.m_body;
  //对“publish”字符串进行AMF编码
  enc = AMF_EncodeString(enc, pend, &av_publish);
  enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);
  //命令对象为空
  *enc++ = AMF_NULL;
  enc = AMF_EncodeString(enc, pend, &r->Link.playpath);
  if (!enc)
    return FALSE;

  /* FIXME: should we choose live based on Link.lFlags & RTMP_LF_LIVE? */
  enc = AMF_EncodeString(enc, pend, &av_live);
  if (!enc)
    return FALSE;

  packet.m_nBodySize = enc - packet.m_body;
  //----------------
  r->dlg->AppendMLInfo(,,"命令消息","Publish");
  //-----------------------------
  return RTMP_SendPacket(r, &packet, TRUE);
}

The other commands are not listed one by one; the common pattern is always the same: declare an RTMPPacket structure, fill in its fields, and hand it to RTMP_SendPacket() for transmission. A short sketch of that pattern follows the structure definition below.

The RTMPPacket structure is defined as follows; one RTMPPacket corresponds to one chunk (Chunk) in the RTMP specification.

//Chunk信息
typedef struct RTMPPacket
{
uint8_t m_headerType;//ChunkMsgHeader的类型(4种)
uint8_t m_packetType;//Message type ID(1-7协议控制;8,9音视频;10以后为AMF编码消息)
uint8_t m_hasAbsTimestamp; /* Timestamp 是绝对值还是相对值? */
int m_nChannel; //块流ID
uint32_t m_nTimeStamp; // Timestamp
int32_t m_nInfoField2; /* last 4 bytes in a long header,消息流ID */
uint32_t m_nBodySize; //消息长度
uint32_t m_nBytesRead;
RTMPChunk *m_chunk;
char *m_body;
} RTMPPacket;
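A sketch of that declare-fill-send pattern, modeled on the send functions above. The "pause" command is used only as an example; av_pause_cmd is a locally defined AVal, not the static table entry used inside rtmp.c, and the exact arguments the real RTMP_SendPause() encodes may differ slightly.

/* Sketch: build a minimal AMF0 invoke ("pause") and hand it to RTMP_SendPacket().
 * Same recipe as RTMP_SendCreateStream() above. */
#include <librtmp/rtmp.h>

static const AVal av_pause_cmd = AVC("pause");

static int send_pause_sketch(RTMP *r, int do_pause, double position_ms)
{
    RTMPPacket packet;
    char pbuf[256], *pend = pbuf + sizeof(pbuf);
    char *enc;

    packet.m_nChannel = 0x08;                    /* video control channel */
    packet.m_headerType = RTMP_PACKET_SIZE_MEDIUM;
    packet.m_packetType = 0x14;                  /* AMF0 command (invoke) */
    packet.m_nTimeStamp = 0;
    packet.m_nInfoField2 = 0;
    packet.m_hasAbsTimestamp = 0;
    packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE; /* leave room for the chunk header */

    enc = packet.m_body;
    enc = AMF_EncodeString(enc, pend, &av_pause_cmd);      /* command name */
    enc = AMF_EncodeNumber(enc, pend, ++r->m_numInvokes);  /* transaction id */
    *enc++ = AMF_NULL;                                     /* no command object */
    enc = AMF_EncodeBoolean(enc, pend, do_pause);          /* pause or resume */
    enc = AMF_EncodeNumber(enc, pend, position_ms);        /* position in ms */
    if (!enc)
        return FALSE;

    packet.m_nBodySize = enc - packet.m_body;
    return RTMP_SendPacket(r, &packet, TRUE);
}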

Now let's look at RTMP_SendPacket(); every RTMPPacket (i.e. every chunk) is sent through this function.

//自己编一个数据报发送出去!
//非常常用
int
RTMP_SendPacket(RTMP *r, RTMPPacket *packet, int queue)
{
const RTMPPacket *prevPacket = r->m_vecChannelsOut[packet->m_nChannel];
uint32_t last = ;
int nSize;
int hSize, cSize;
char *header, *hptr, *hend, hbuf[RTMP_MAX_HEADER_SIZE], c;
uint32_t t;
char *buffer, *tbuf = NULL, *toff = NULL;
int nChunkSize;
int tlen;
//不是完整ChunkMsgHeader
if (prevPacket && packet->m_headerType != RTMP_PACKET_SIZE_LARGE)
{
/* compress a bit by using the prev packet's attributes */
//获取ChunkMsgHeader的类型
//前一个Chunk和这个Chunk对比
if (prevPacket->m_nBodySize == packet->m_nBodySize
&& prevPacket->m_packetType == packet->m_packetType
&& packet->m_headerType == RTMP_PACKET_SIZE_MEDIUM)
packet->m_headerType = RTMP_PACKET_SIZE_SMALL; if (prevPacket->m_nTimeStamp == packet->m_nTimeStamp
&& packet->m_headerType == RTMP_PACKET_SIZE_SMALL)
packet->m_headerType = RTMP_PACKET_SIZE_MINIMUM;
//上一个packet的TimeStamp
last = prevPacket->m_nTimeStamp;
} if (packet->m_headerType > ) /* sanity */
{
RTMP_Log(RTMP_LOGERROR, "sanity failed!! trying to send header of type: 0x%02x.",
(unsigned char)packet->m_headerType);
return FALSE;
}
//chunk包头大小;packetSize[] = { 12, 8, 4, 1 }
nSize = packetSize[packet->m_headerType];
hSize = nSize; cSize = ;
//相对的TimeStamp
t = packet->m_nTimeStamp - last; if (packet->m_body)
{
//Header的Start
//m_body是指向负载数据首地址的指针;“-”号用于指针前移
header = packet->m_body - nSize;
//Header的End
hend = packet->m_body;
}
else
{
header = hbuf + ;
hend = hbuf + sizeof(hbuf);
}
//当ChunkStreamID大于319时
if (packet->m_nChannel > )
//ChunkBasicHeader是3个字节
cSize = ;
//当ChunkStreamID大于63时
else if (packet->m_nChannel > )
//ChunkBasicHeader是2个字节
cSize = ;
if (cSize)
{
//header指针指向ChunkMsgHeader
header -= cSize;
//hsize加上ChunkBasicHeader的长度
hSize += cSize;
}
//相对TimeStamp大于0xffffff,此时需要使用ExtendTimeStamp
if (nSize > && t >= 0xffffff)
{
header -= ;
hSize += ;
} hptr = header;
//把ChunkBasicHeader的Fmt类型左移6位
c = packet->m_headerType << ;
switch (cSize)
{
//把ChunkBasicHeader的低6位设置成ChunkStreamID
case :
c |= packet->m_nChannel;
break;
//同理,但低6位设置成000000
case :
break;
//同理,但低6位设置成000001
case :
c |= ;
break;
}
//可以拆分成两句*hptr=c;hptr++,此时hptr指向第2个字节
*hptr++ = c;
//CSize>0,即ChunkBasicHeader大于1字节
if (cSize)
{
//将要放到第2字节的内容tmp
int tmp = packet->m_nChannel - ;
//获取低位存储与第2字节
*hptr++ = tmp & 0xff;
//ChunkBasicHeader是最大的3字节时
if (cSize == )
//获取高位存储于最后1个字节(注意:排序使用大端序列,和主机相反)
*hptr++ = tmp >> ;
}
//ChunkMsgHeader。注意一共有4种,包含的字段数不同。
//TimeStamp(3B)
if (nSize > )
{
//相对TimeStamp和绝对TimeStamp?
hptr = AMF_EncodeInt24(hptr, hend, t > 0xffffff ? 0xffffff : t);
}
//MessageLength+MessageTypeID(4B)
if (nSize > )
{
//MessageLength
hptr = AMF_EncodeInt24(hptr, hend, packet->m_nBodySize);
//MessageTypeID
*hptr++ = packet->m_packetType;
}
//MessageStreamID(4B)
if (nSize > )
hptr += EncodeInt32LE(hptr, packet->m_nInfoField2); //ExtendedTimeStamp
if (nSize > && t >= 0xffffff)
hptr = AMF_EncodeInt32(hptr, hend, t);
//负载长度,指向负载的指针
nSize = packet->m_nBodySize;
buffer = packet->m_body;
//Chunk大小,默认128字节
nChunkSize = r->m_outChunkSize; RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d, size=%d", __FUNCTION__, r->m_sb.sb_socket,
nSize);
/* send all chunks in one HTTP request */
//使用HTTP
if (r->Link.protocol & RTMP_FEATURE_HTTP)
{
//nSize:Message负载长度;nChunkSize:Chunk长度;
//例nSize:307,nChunkSize:128;
//可分为(307+128-1)/128=3个
//为什么+nChunkSize-1?因为除法会只取整数部分!
int chunks = (nSize+nChunkSize-) / nChunkSize;
//Chunk个数超过一个
if (chunks > )
{
//注意:CSize=1表示ChunkBasicHeader是2字节
//消息分n块后总的开销:
//n个ChunkBasicHeader,1个ChunkMsgHeader,1个Message负载
//实际中只有第一个Chunk是完整的,剩下的只有ChunkBasicHeader
tlen = chunks * (cSize + ) + nSize + hSize;
//分配内存
tbuf = (char *) malloc(tlen);
if (!tbuf)
return FALSE;
toff = tbuf;
}
//消息的负载+头
}
while (nSize + hSize)
{
int wrote;
//消息负载<Chunk大小(不用分块)
if (nSize < nChunkSize)
//Chunk可能小于设定值
nChunkSize = nSize; RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)header, hSize);
RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)buffer, nChunkSize);
if (tbuf)
{
//void *memcpy(void *dest, const void *src, int n);
//由src指向地址为起始地址的连续n个字节的数据复制到以dest指向地址为起始地址的空间内
memcpy(toff, header, nChunkSize + hSize);
toff += nChunkSize + hSize;
}
else
{
wrote = WriteN(r, header, nChunkSize + hSize);
if (!wrote)
return FALSE;
}
//消息负载长度-Chunk负载长度
nSize -= nChunkSize;
//Buffer指针前移1个Chunk负载长度
buffer += nChunkSize;
hSize = ; //如果消息没有发完
if (nSize > )
{
//ChunkBasicHeader
header = buffer - ;
hSize = ;
if (cSize)
{
header -= cSize;
hSize += cSize;
}
//ChunkBasicHeader第1个字节
*header = (0xc0 | c);
//ChunkBasicHeader大于1字节
if (cSize)
{
int tmp = packet->m_nChannel - ;
header[] = tmp & 0xff;
if (cSize == )
header[] = tmp >> ;
}
}
}
if (tbuf)
{
//
int wrote = WriteN(r, tbuf, toff-tbuf);
free(tbuf);
tbuf = NULL;
if (!wrote)
return FALSE;
} /* we invoked a remote method */
if (packet->m_packetType == 0x14)
{
AVal method;
char *ptr;
ptr = packet->m_body + ;
AMF_DecodeString(ptr, &method);
RTMP_Log(RTMP_LOGDEBUG, "Invoking %s", method.av_val);
/* keep it in call queue till result arrives */
if (queue) {
int txn;
ptr += + method.av_len;
txn = (int)AMF_DecodeNumber(ptr);
AV_queue(&r->m_methodCalls, &r->m_numCalls, &method, txn);
}
} if (!r->m_vecChannelsOut[packet->m_nChannel])
r->m_vecChannelsOut[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
memcpy(r->m_vecChannelsOut[packet->m_nChannel], packet, sizeof(RTMPPacket));
return TRUE;
}

At first glance this function looks very complex, but it really just encodes the data into chunks (Chunk) that follow the RTMP specification; the details can be checked against the spec.

I won't go through the chunk encoding step by step (a small sketch of the splitting arithmetic is given below); the function to note here is WriteN(), which actually pushes the bytes out.
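A sketch of just that splitting arithmetic: a message body of nSize bytes goes out as ceil(nSize / nChunkSize) chunks, where only the first chunk carries a full header and every following chunk is prefixed by a type-3 basic header (fmt = 3, i.e. 0xC0 | csid for small chunk stream ids).

/* Sketch: how many chunks a message needs, and what the continuation header looks like.
 * Pure arithmetic; mirrors "(nSize + nChunkSize - 1) / nChunkSize" in RTMP_SendPacket(). */
#include <stdio.h>

int main(void)
{
    int nSize = 307;        /* message body size, the example used in the comments above */
    int nChunkSize = 128;   /* default RTMP chunk size */
    int csid = 3;           /* chunk stream id of the control channel */

    int chunks = (nSize + nChunkSize - 1) / nChunkSize;    /* round up: 307 -> 3 chunks */
    unsigned char cont = 0xc0 | csid;                      /* fmt=3 continuation header: 0xC3 */

    printf("%d bytes => %d chunks, continuation basic header 0x%02X\n",
           nSize, chunks, cont);
    return 0;
}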

Here is WriteN():

//发送数据时调用(连接,buffer,长度)
static int
WriteN(RTMP *r, const char *buffer, int n)
{
  const char *ptr = buffer;
#ifdef CRYPTO
  char *encrypted = 0;
  char buf[RTMP_BUFFER_CACHE_SIZE];

  if (r->Link.rc4keyOut)
    {
      if (n > sizeof(buf))
        encrypted = (char *)malloc(n);
      else
        encrypted = (char *)buf;
      ptr = encrypted;
      RC4_encrypt2((RC4_KEY *)r->Link.rc4keyOut, n, buffer, ptr);
    }
#endif

  while (n > 0)
    {
      int nBytes;
      //因连接方式的不同而调用不同函数
      //如果使用的是HTTP协议进行连接
      if (r->Link.protocol & RTMP_FEATURE_HTTP)
        nBytes = HTTP_Post(r, RTMPT_SEND, ptr, n);
      else
        nBytes = RTMPSockBuf_Send(&r->m_sb, ptr, n);
      /*RTMP_Log(RTMP_LOGDEBUG, "%s: %d\n", __FUNCTION__, nBytes); */
      //发送失败
      if (nBytes < 0)
        {
          int sockerr = GetSockError();
          RTMP_Log(RTMP_LOGERROR, "%s, RTMP send error %d (%d bytes)", __FUNCTION__,
              sockerr, n);

          if (sockerr == EINTR && !RTMP_ctrlC)
            continue;

          RTMP_Close(r);
          n = 1;
          break;
        }

      if (nBytes == 0)
        break;

      n -= nBytes;
      ptr += nBytes;
    }

#ifdef CRYPTO
  if (encrypted && encrypted != buf)
    free(encrypted);
#endif

  return n == 0;
}

Inside WriteN(), the actual transmission is done by RTMPSockBuf_Send(); let's look at that next (yes, the call chain is long...).

//Socket发送(指明套接字,buffer缓冲区,数据长度)
//返回所发数据量
int
RTMPSockBuf_Send(RTMPSockBuf *sb, const char *buf, int len)
{
  int rc;

#ifdef _DEBUG
  fwrite(buf, 1, len, netstackdump);
#endif

#if defined(CRYPTO) && !defined(NO_SSL)
  if (sb->sb_ssl)
    {
      rc = TLS_write((SSL *)sb->sb_ssl, buf, len);
    }
  else
#endif
    {
      //向一个已连接的套接口发送数据。
      //int send( SOCKET s, const char * buf, int len, int flags);
      //s:已连接套接口的描述字;buf:待发送数据的缓冲区;
      //len:缓冲区中数据的长度;flags:调用方式;rc:实际发送的字节数。
      rc = send(sb->sb_socket, buf, len, 0);
    }
  return rc;
}

int
RTMPSockBuf_Close(RTMPSockBuf *sb)
{
#if defined(CRYPTO) && !defined(NO_SSL)
  if (sb->sb_ssl)
    {
      TLS_shutdown((SSL *)sb->sb_ssl);
      TLS_close((SSL *)sb->sb_ssl);
      sb->sb_ssl = NULL;
    }
#endif
  return closesocket(sb->sb_socket);
}

At this point the chain of calls finally bottoms out: the data is ultimately sent with the system socket send() function.

A diagram summarizing this call chain was posted earlier and may make it easier to follow: RTMPDump源代码分析 0: 主要函数调用分析

9: Receiving Messages (Message) (Receiving Audio/Video Data)

Now let's study the source code for receiving messages (Message). The most typical case is receiving audio and video data, since video and audio are each a message type in the RTMP specification, so that is the focus here.

In RTMPdump, receiving (in other words, downloading) the audio/video data is done by RTMP_Read().

The Download() function in the rtmpdump main program simply calls RTMP_Read() in a loop to receive the data and write it out, roughly as sketched below.
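A sketch of such a download loop; the buffer size and output filename are arbitrary, and a real program would also handle resume and progress reporting the way Download() does.

/* Sketch: drain the stream with RTMP_Read() and dump it to an FLV file.
 * Assumes RTMP_Connect()/RTMP_ConnectStream() already succeeded on r. */
#include <stdio.h>
#include <librtmp/rtmp.h>

static int download_sketch(RTMP *r, const char *out_path)
{
    char buf[64 * 1024];           /* arbitrary read buffer */
    FILE *out = fopen(out_path, "wb");
    int nRead;

    if (!out)
        return -1;

    /* RTMP_Read() already prepends the 13-byte FLV file header on the first call,
     * so the output file is playable FLV as-is. */
    while ((nRead = RTMP_Read(r, buf, (int)sizeof(buf))) > 0)
    {
        if (fwrite(buf, 1, nRead, out) != (size_t)nRead)
        {
            fclose(out);
            return -1;             /* disk write failed */
        }
    }

    fclose(out);
    return nRead < 0 ? -1 : 0;     /* 0 = end of stream, <0 = read error */
}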

So let's dive in, starting with RTMP_Read():

//FLV文件头
static const char flvHeader[] = { 'F', 'L', 'V', 0x01,
0x00, /* 0x04代表有音频, 0x01代表有视频 */
0x00, 0x00, 0x00, 0x09,
0x00, 0x00, 0x00, 0x00
}; #define HEADERBUF (128*1024)
int
RTMP_Read(RTMP *r, char *buf, int size)
{
int nRead = , total = ; /* can't continue */
fail:
switch (r->m_read.status) {
case RTMP_READ_EOF:
case RTMP_READ_COMPLETE:
return ;
case RTMP_READ_ERROR: /* corrupted stream, resume failed */
SetSockError(EINVAL);
return -;
default:
break;
} /* first time thru */
if (!(r->m_read.flags & RTMP_READ_HEADER))
{
if (!(r->m_read.flags & RTMP_READ_RESUME))
{
//分配内存,指向buf的首部和尾部
char *mybuf = (char *) malloc(HEADERBUF), *end = mybuf + HEADERBUF;
int cnt = ;
//buf指向同一地址
r->m_read.buf = mybuf;
r->m_read.buflen = HEADERBUF; //把Flv的首部复制到mybuf指向的内存
//RTMP传递的多媒体数据是“砍头”的FLV文件
memcpy(mybuf, flvHeader, sizeof(flvHeader));
//m_read.buf指针后移flvheader个单位
r->m_read.buf += sizeof(flvHeader);
//buf长度增加flvheader长度
r->m_read.buflen -= sizeof(flvHeader);
//timestamp=0,不是多媒体数据
while (r->m_read.timestamp == )
{
//读取一个Packet,到r->m_read.buf
//nRead为读取结果标记
nRead = Read_1_Packet(r, r->m_read.buf, r->m_read.buflen);
//有错误
if (nRead < )
{
free(mybuf);
r->m_read.buf = NULL;
r->m_read.buflen = ;
r->m_read.status = nRead;
goto fail;
}
/* buffer overflow, fix buffer and give up */
if (r->m_read.buf < mybuf || r->m_read.buf > end) {
mybuf = (char *) realloc(mybuf, cnt + nRead);
memcpy(mybuf+cnt, r->m_read.buf, nRead);
r->m_read.buf = mybuf+cnt+nRead;
break;
}
//
//记录读取的字节数
cnt += nRead;
//m_read.buf指针后移nRead个单位
r->m_read.buf += nRead;
r->m_read.buflen -= nRead;
//当dataType=00000101时,即有视频和音频时
//说明有多媒体数据了
if (r->m_read.dataType == )
break;
}
//读入数据类型
//注意:mybuf指针位置一直没动
//mybuf[4]中第 6 位表示是否存在音频Tag。第 8 位表示是否存在视频Tag。
mybuf[] = r->m_read.dataType;
//两个指针之间的差
r->m_read.buflen = r->m_read.buf - mybuf;
r->m_read.buf = mybuf;
//这句很重要!后面memcopy
r->m_read.bufpos = mybuf;
}
//flags标明已经读完了文件头
r->m_read.flags |= RTMP_READ_HEADER;
} if ((r->m_read.flags & RTMP_READ_SEEKING) && r->m_read.buf)
{
/* drop whatever's here */
free(r->m_read.buf);
r->m_read.buf = NULL;
r->m_read.bufpos = NULL;
r->m_read.buflen = ;
} /* If there's leftover data buffered, use it up */
if (r->m_read.buf)
{
nRead = r->m_read.buflen;
if (nRead > size)
nRead = size;
//m_read.bufpos指向mybuf
memcpy(buf, r->m_read.bufpos, nRead);
r->m_read.buflen -= nRead;
if (!r->m_read.buflen)
{
free(r->m_read.buf);
r->m_read.buf = NULL;
r->m_read.bufpos = NULL;
}
else
{
r->m_read.bufpos += nRead;
}
buf += nRead;
total += nRead;
size -= nRead;
}
//接着读
while (size > && (nRead = Read_1_Packet(r, buf, size)) >= )
{
if (!nRead) continue;
buf += nRead;
total += nRead;
size -= nRead;
break;
}
if (nRead < )
r->m_read.status = nRead; if (size < )
total += size;
return total;
}

The key points are annotated in the code above, so I won't repeat them. One thing worth pointing out: the audio/video payload carried by RTMP has the same format as FLV (Flash Video) tag data, so the received data can be written straight to a file. However, it arrives without a file header: it is raw tag data, so the 13-byte FLV file header (9 header bytes plus the 4-byte PreviousTagSize0), shown as flvHeader in the code, has to be prepended before the file becomes playable in ordinary players.
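Each piece of media data then has to be framed as an FLV tag. A sketch of that framing, which is what Read_1_Packet() reconstructs for audio (0x08), video (0x09) and metadata (0x12) messages: an 11-byte tag header, the payload, and a 4-byte PreviousTagSize.

/* Sketch: wrap one RTMP media message body as an FLV tag in out[].
 * Returns the number of bytes written (11 + body_len + 4), or -1 if out is too small. */
#include <stdint.h>
#include <string.h>

static int write_flv_tag(uint8_t *out, int out_len,
                         uint8_t tag_type,          /* 0x08 audio, 0x09 video, 0x12 script */
                         const uint8_t *body, uint32_t body_len,
                         uint32_t timestamp_ms)
{
    uint32_t prev_tag_size = 11 + body_len;
    if (out_len < (int)(prev_tag_size + 4))
        return -1;

    out[0] = tag_type;
    out[1] = (body_len >> 16) & 0xff;              /* DataSize, 24-bit big-endian */
    out[2] = (body_len >> 8) & 0xff;
    out[3] = body_len & 0xff;
    out[4] = (timestamp_ms >> 16) & 0xff;          /* lower 24 bits of the timestamp */
    out[5] = (timestamp_ms >> 8) & 0xff;
    out[6] = timestamp_ms & 0xff;
    out[7] = (timestamp_ms >> 24) & 0xff;          /* TimestampExtended */
    out[8] = out[9] = out[10] = 0;                 /* StreamID is always 0 */

    memcpy(out + 11, body, body_len);

    out[11 + body_len + 0] = (prev_tag_size >> 24) & 0xff;   /* PreviousTagSize */
    out[11 + body_len + 1] = (prev_tag_size >> 16) & 0xff;
    out[11 + body_len + 2] = (prev_tag_size >> 8) & 0xff;
    out[11 + body_len + 3] = prev_tag_size & 0xff;

    return (int)(prev_tag_size + 4);
}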

Inside RTMP_Read(), the actual reading is done by Read_1_Packet(), which pulls one RTMPPacket's worth of data off the network. Here is its source code:

/* 从流媒体中读取多媒体packet。
* Returns -3 if Play.Close/Stop, -2 if fatal error, -1 if no more media
* packets, 0 if ignorable error, >0 if there is a media packet
*/
static int
Read_1_Packet(RTMP *r, char *buf, unsigned int buflen)
{
uint32_t prevTagSize = ;
int rtnGetNextMediaPacket = , ret = RTMP_READ_EOF;
RTMPPacket packet = { };
int recopy = FALSE;
unsigned int size;
char *ptr, *pend;
uint32_t nTimeStamp = ;
unsigned int len;
//获取下一个packet
rtnGetNextMediaPacket = RTMP_GetNextMediaPacket(r, &packet);
while (rtnGetNextMediaPacket)
{
char *packetBody = packet.m_body;
unsigned int nPacketLen = packet.m_nBodySize; /* Return -3 if this was completed nicely with invoke message
* Play.Stop or Play.Complete
*/
if (rtnGetNextMediaPacket == )
{
RTMP_Log(RTMP_LOGDEBUG,
"Got Play.Complete or Play.Stop from server. "
"Assuming stream is complete");
ret = RTMP_READ_COMPLETE;
break;
}
//设置dataType
r->m_read.dataType |= (((packet.m_packetType == 0x08) << ) |
(packet.m_packetType == 0x09));
//MessageID为9时,为视频数据,数据太小时。。。
if (packet.m_packetType == 0x09 && nPacketLen <= )
{
RTMP_Log(RTMP_LOGDEBUG, "ignoring too small video packet: size: %d",
nPacketLen);
ret = RTMP_READ_IGNORE;
break;
}
//MessageID为8时,为音频数据,数据太小时。。。
if (packet.m_packetType == 0x08 && nPacketLen <= )
{
RTMP_Log(RTMP_LOGDEBUG, "ignoring too small audio packet: size: %d",
nPacketLen);
ret = RTMP_READ_IGNORE;
break;
} if (r->m_read.flags & RTMP_READ_SEEKING)
{
ret = RTMP_READ_IGNORE;
break;
}
#ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, TS: %d ms, abs TS: %d",
packet.m_packetType, nPacketLen, packet.m_nTimeStamp,
packet.m_hasAbsTimestamp);
if (packet.m_packetType == 0x09)
RTMP_Log(RTMP_LOGDEBUG, "frametype: %02X", (*packetBody & 0xf0));
#endif if (r->m_read.flags & RTMP_READ_RESUME)
{
/* check the header if we get one */
//此类packet的timestamp都是0
if (packet.m_nTimeStamp == )
{
//messageID=18,数据消息(AMF0)
if (r->m_read.nMetaHeaderSize >
&& packet.m_packetType == 0x12)
{
//获取metadata
AMFObject metaObj;
int nRes =
AMF_Decode(&metaObj, packetBody, nPacketLen, FALSE);
if (nRes >= )
{
AVal metastring;
AMFProp_GetString(AMF_GetProp(&metaObj, NULL, ),
&metastring); if (AVMATCH(&metastring, &av_onMetaData))
{
/* compare */
if ((r->m_read.nMetaHeaderSize != nPacketLen) ||
(memcmp
(r->m_read.metaHeader, packetBody,
r->m_read.nMetaHeaderSize) != ))
{
ret = RTMP_READ_ERROR;
}
}
AMF_Reset(&metaObj);
if (ret == RTMP_READ_ERROR)
break;
}
} /* check first keyframe to make sure we got the right position
* in the stream! (the first non ignored frame)
*/
if (r->m_read.nInitialFrameSize > )
{
/* video or audio data */
if (packet.m_packetType == r->m_read.initialFrameType
&& r->m_read.nInitialFrameSize == nPacketLen)
{
/* we don't compare the sizes since the packet can
* contain several FLV packets, just make sure the
* first frame is our keyframe (which we are going
* to rewrite)
*/
if (memcmp
(r->m_read.initialFrame, packetBody,
r->m_read.nInitialFrameSize) == )
{
RTMP_Log(RTMP_LOGDEBUG, "Checked keyframe successfully!");
r->m_read.flags |= RTMP_READ_GOTKF;
/* ignore it! (what about audio data after it? it is
* handled by ignoring all 0ms frames, see below)
*/
ret = RTMP_READ_IGNORE;
break;
}
} /* hande FLV streams, even though the server resends the
* keyframe as an extra video packet it is also included
* in the first FLV stream chunk and we have to compare
* it and filter it out !!
*/
//MessageID=22,聚合消息
if (packet.m_packetType == 0x16)
{
/* basically we have to find the keyframe with the
* correct TS being nResumeTS
*/
unsigned int pos = ;
uint32_t ts = ; while (pos + < nPacketLen)
{
/* size without header (11) and prevTagSize (4) */
uint32_t dataSize =
AMF_DecodeInt24(packetBody + pos + );
ts = AMF_DecodeInt24(packetBody + pos + );
ts |= (packetBody[pos + ] << ); #ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG,
"keyframe search: FLV Packet: type %02X, dataSize: %d, timeStamp: %d ms",
packetBody[pos], dataSize, ts);
#endif
/* ok, is it a keyframe?:
* well doesn't work for audio!
*/
if (packetBody[pos /*6928, test 0 */ ] ==
r->m_read.initialFrameType
/* && (packetBody[11]&0xf0) == 0x10 */ )
{
if (ts == r->m_read.nResumeTS)
{
RTMP_Log(RTMP_LOGDEBUG,
"Found keyframe with resume-keyframe timestamp!");
if (r->m_read.nInitialFrameSize != dataSize
|| memcmp(r->m_read.initialFrame,
packetBody + pos + ,
r->m_read.
nInitialFrameSize) != )
{
RTMP_Log(RTMP_LOGERROR,
"FLV Stream: Keyframe doesn't match!");
ret = RTMP_READ_ERROR;
break;
}
r->m_read.flags |= RTMP_READ_GOTFLVK; /* skip this packet?
* check whether skippable:
*/
if (pos + + dataSize + > nPacketLen)
{
RTMP_Log(RTMP_LOGWARNING,
"Non skipable packet since it doesn't end with chunk, stream corrupt!");
ret = RTMP_READ_ERROR;
break;
}
packetBody += (pos + + dataSize + );
nPacketLen -= (pos + + dataSize + ); goto stopKeyframeSearch; }
else if (r->m_read.nResumeTS < ts)
{
/* the timestamp ts will only increase with
* further packets, wait for seek
*/
goto stopKeyframeSearch;
}
}
pos += ( + dataSize + );
}
if (ts < r->m_read.nResumeTS)
{
RTMP_Log(RTMP_LOGERROR,
"First packet does not contain keyframe, all "
"timestamps are smaller than the keyframe "
"timestamp; probably the resume seek failed?");
}
stopKeyframeSearch:
;
if (!(r->m_read.flags & RTMP_READ_GOTFLVK))
{
RTMP_Log(RTMP_LOGERROR,
"Couldn't find the seeked keyframe in this chunk!");
ret = RTMP_READ_IGNORE;
break;
}
}
}
} if (packet.m_nTimeStamp >
&& (r->m_read.flags & (RTMP_READ_GOTKF|RTMP_READ_GOTFLVK)))
{
/* another problem is that the server can actually change from
* 09/08 video/audio packets to an FLV stream or vice versa and
* our keyframe check will prevent us from going along with the
* new stream if we resumed.
*
* in this case set the 'found keyframe' variables to true.
* We assume that if we found one keyframe somewhere and were
* already beyond TS > 0 we have written data to the output
* which means we can accept all forthcoming data including the
* change between 08/09 <-> FLV packets
*/
r->m_read.flags |= (RTMP_READ_GOTKF|RTMP_READ_GOTFLVK);
} /* skip till we find our keyframe
* (seeking might put us somewhere before it)
*/
if (!(r->m_read.flags & RTMP_READ_GOTKF) &&
packet.m_packetType != 0x16)
{
RTMP_Log(RTMP_LOGWARNING,
"Stream does not start with requested frame, ignoring data... ");
r->m_read.nIgnoredFrameCounter++;
if (r->m_read.nIgnoredFrameCounter > MAX_IGNORED_FRAMES)
ret = RTMP_READ_ERROR; /* fatal error, couldn't continue stream */
else
ret = RTMP_READ_IGNORE;
break;
}
/* ok, do the same for FLV streams */
if (!(r->m_read.flags & RTMP_READ_GOTFLVK) &&
packet.m_packetType == 0x16)
{
RTMP_Log(RTMP_LOGWARNING,
"Stream does not start with requested FLV frame, ignoring data... ");
r->m_read.nIgnoredFlvFrameCounter++;
if (r->m_read.nIgnoredFlvFrameCounter > MAX_IGNORED_FRAMES)
ret = RTMP_READ_ERROR;
else
ret = RTMP_READ_IGNORE;
break;
} /* we have to ignore the 0ms frames since these are the first
* keyframes; we've got these so don't mess around with multiple
* copies sent by the server to us! (if the keyframe is found at a
* later position there is only one copy and it will be ignored by
* the preceding if clause)
*/
if (!(r->m_read.flags & RTMP_READ_NO_IGNORE) &&
packet.m_packetType != 0x16)
{ /* exclude type 0x16 (FLV) since it can
* contain several FLV packets */
if (packet.m_nTimeStamp == )
{
ret = RTMP_READ_IGNORE;
break;
}
else
{
/* stop ignoring packets */
r->m_read.flags |= RTMP_READ_NO_IGNORE;
}
}
}
/* calculate packet size and allocate slop buffer if necessary */
size = nPacketLen +
((packet.m_packetType == 0x08 || packet.m_packetType == 0x09
|| packet.m_packetType == 0x12) ? 11 : 0) +
(packet.m_packetType != 0x16 ? 4 : 0);
if (size + 4 > buflen)
{
/* the extra 4 is for the case of an FLV stream without a last
* prevTagSize (we need extra 4 bytes to append it) */
r->m_read.buf = (char *) malloc(size + 4);
if (r->m_read.buf == 0)
{
RTMP_Log(RTMP_LOGERROR, "Couldn't allocate memory!");
ret = RTMP_READ_ERROR; /* fatal error */
break;
}
recopy = TRUE;
ptr = r->m_read.buf;
}
else
{
ptr = buf;
}
pend = ptr + size + 4;
/* use to return timestamp of last processed packet */
/* audio (0x08), video (0x09) or metadata (0x12) packets :
* construct 11 byte header then add rtmp packet's data */
if (packet.m_packetType == 0x08 || packet.m_packetType == 0x09
|| packet.m_packetType == 0x12)
{
nTimeStamp = r->m_read.nResumeTS + packet.m_nTimeStamp;
prevTagSize = 11 + nPacketLen;
*ptr = packet.m_packetType;
ptr++;
ptr = AMF_EncodeInt24(ptr, pend, nPacketLen);
#if 0
if(packet.m_packetType == 0x09) { /* video */
/* H264 fix: */
if((packetBody[0] & 0x0f) == 7) { /* CodecId = H264 */
uint8_t packetType = *(packetBody+1);
uint32_t ts = AMF_DecodeInt24(packetBody+2); /* composition time */
int32_t cts = (ts+0xff800000)^0xff800000;
RTMP_Log(RTMP_LOGDEBUG, "cts : %d\n", cts);
nTimeStamp -= cts;
/* get rid of the composition time */
CRTMP::EncodeInt24(packetBody+2, 0);
}
RTMP_Log(RTMP_LOGDEBUG, "VIDEO: nTimeStamp: 0x%08X (%d)\n", nTimeStamp, nTimeStamp);
}
#endif
ptr = AMF_EncodeInt24(ptr, pend, nTimeStamp);
*ptr = (char)((nTimeStamp & 0xFF000000) >> 24);
ptr++;
/* stream id */
ptr = AMF_EncodeInt24(ptr, pend, 0);
}
memcpy(ptr, packetBody, nPacketLen);
len = nPacketLen;
/* correct tagSize and obtain timestamp if we have an FLV stream */
if (packet.m_packetType == 0x16)
{
unsigned int pos = 0;
int delta;
/* grab first timestamp and see if it needs fixing */
// nTimeStamp = AMF_DecodeInt24(packetBody + 4);
// nTimeStamp |= (packetBody[7] << 24);
// delta = packet.m_nTimeStamp - nTimeStamp;
while (pos + 11 < nPacketLen)
{
/* size without header (11) and without prevTagSize (4) */
uint32_t dataSize = AMF_DecodeInt24(packetBody + pos + 1);
nTimeStamp = AMF_DecodeInt24(packetBody + pos + 4);
nTimeStamp |= (packetBody[pos + 7] << 24);
// if (delta)
// {
// nTimeStamp += delta;
// AMF_EncodeInt24(ptr+pos+4, pend, nTimeStamp);
// ptr[pos+7] = nTimeStamp>>24;
// }
/* set data type */
r->m_read.dataType |= (((*(packetBody + pos) == 0x08) << 2) |
(*(packetBody + pos) == 0x09));
if (pos + 11 + dataSize + 4 > nPacketLen)
{
if (pos + 11 + dataSize > nPacketLen)
{
RTMP_Log(RTMP_LOGERROR,
"Wrong data size (%lu), stream corrupted, aborting!",
dataSize);
ret = RTMP_READ_ERROR;
break;
}
RTMP_Log(RTMP_LOGWARNING, "No tagSize found, appending!"); /* we have to append a last tagSize! */
prevTagSize = dataSize + ;
AMF_EncodeInt32(ptr + pos + + dataSize, pend,
prevTagSize);
size += ;
len += ;
}
else
{
prevTagSize =
AMF_DecodeInt32(packetBody + pos + 11 + dataSize);
#ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG,
"FLV Packet: type %02X, dataSize: %lu, tagSize: %lu, timeStamp: %lu ms",
(unsigned char)packetBody[pos], dataSize, prevTagSize,
nTimeStamp);
#endif
if (prevTagSize != (dataSize + 11))
{
#ifdef _DEBUG
RTMP_Log(RTMP_LOGWARNING,
"Tag and data size are not consitent, writing tag size according to dataSize+11: %d",
dataSize + 11);
#endif
prevTagSize = dataSize + 11;
AMF_EncodeInt32(ptr + pos + 11 + dataSize, pend,
prevTagSize);
}
}
pos += prevTagSize + 4; /*(11+dataSize+4); */
}
}
ptr += len;
if (packet.m_packetType != 0x16)
{
/* FLV tag packets contain their own prevTagSize */
AMF_EncodeInt32(ptr, pend, prevTagSize);
}
/* In non-live this nTimeStamp can contain an absolute TS.
* Update ext timestamp with this absolute offset in non-live mode
* otherwise report the relative one
*/
/* RTMP_Log(RTMP_LOGDEBUG, "type: %02X, size: %d, pktTS: %dms, TS: %dms, bLiveStream: %d", packet.m_packetType, nPacketLen, packet.m_nTimeStamp, nTimeStamp, r->Link.lFlags & RTMP_LF_LIVE); */
r->m_read.timestamp = (r->Link.lFlags & RTMP_LF_LIVE) ? packet.m_nTimeStamp : nTimeStamp;
ret = size;
break;
}
if (rtnGetNextMediaPacket)
RTMPPacket_Free(&packet);
if (recopy)
{
len = ret > buflen ? buflen : ret;
memcpy(buf, r->m_read.buf, len);
r->m_read.bufpos = r->m_read.buf + len;
r->m_read.buflen = ret - len;
}
return ret;
}

This function does quite a lot; the important parts have already been annotated, so it is not broken down further here. Inside Read_1_Packet(), the function that actually reads the audio/video data from the network is RTMP_GetNextMediaPacket(). Its source code is shown below:

int
RTMP_GetNextMediaPacket(RTMP *r, RTMPPacket *packet)
{
int bHasMediaPacket = 0;
while (!bHasMediaPacket && RTMP_IsConnected(r)
&& RTMP_ReadPacket(r, packet))
{
if (!RTMPPacket_IsReady(packet))
{
continue;
}
bHasMediaPacket = RTMP_ClientPacket(r, packet);
if (!bHasMediaPacket)
{
RTMPPacket_Free(packet);
}
else if (r->m_pausing == 3)
{
if (packet->m_nTimeStamp <= r->m_mediaStamp)
{
bHasMediaPacket = 0;
#ifdef _DEBUG
RTMP_Log(RTMP_LOGDEBUG,
"Skipped type: %02X, size: %d, TS: %d ms, abs TS: %d, pause: %d ms",
packet->m_packetType, packet->m_nBodySize,
packet->m_nTimeStamp, packet->m_hasAbsTimestamp,
r->m_mediaStamp);
#endif
continue;
}
r->m_pausing = 0;
}
}
if (bHasMediaPacket)
r->m_bPlaying = TRUE;
else if (r->m_sb.sb_timedout && !r->m_pausing)
r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel];
return bHasMediaPacket;
}

Two functions are particularly important here: RTMP_ReadPacket() and RTMP_ClientPacket(). The former reads data from the network, while the latter processes that data. This part is very similar to what happens when the NetStream is established; see: RTMPdump(libRTMP) 源代码分析 6: 建立一个流媒体连接 (NetStream部分 1).

RTMP_ClientPacket() has already been analyzed earlier, so it is not repeated here. The focus here is RTMP_ReadPacket(); its source code follows.

//Read one chunk received from the network
int
RTMP_ReadPacket(RTMP *r, RTMPPacket *packet)
{
//packet holds the data after the read completes
//maximum chunk header size is 18 bytes (RTMP_MAX_HEADER_SIZE)
uint8_t hbuf[RTMP_MAX_HEADER_SIZE] = { 0 };
//header points at the data received from the socket
char *header = (char *)hbuf;
int nSize, hSize, nToRead, nChunk;
int didAlloc = FALSE;
RTMP_Log(RTMP_LOGDEBUG2, "%s: fd=%d", __FUNCTION__, r->m_sb.sb_socket);
//read the first byte of the chunk basic header into hbuf
if (ReadN(r, (char *)hbuf, 1) == 0)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header", __FUNCTION__);
return FALSE;
}
//chunk type (fmt), top 2 bits
packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
//chunk stream ID (2-63), low 6 bits
packet->m_nChannel = (hbuf[0] & 0x3f);
header++;
//when the 6-bit field is 0, the chunk stream ID occupies 2 bytes
if (packet->m_nChannel == 0)
{
if (ReadN(r, (char *)&hbuf[1], 1) != 1)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 2nd byte",
__FUNCTION__);
return FALSE;
}
//compute the chunk stream ID (64-319)
packet->m_nChannel = hbuf[1];
packet->m_nChannel += 64;
header++;
}
//when the 6-bit field is 1, the chunk stream ID occupies 3 bytes
else if (packet->m_nChannel == 1)
{
int tmp;
if (ReadN(r, (char *)&hbuf[1], 2) != 2)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header 3nd byte",
__FUNCTION__);
return FALSE;
}
tmp = (hbuf[2] << 8) + hbuf[1];
//compute the chunk stream ID (64-65599)
packet->m_nChannel = tmp + 64;
RTMP_Log(RTMP_LOGDEBUG, "%s, m_nChannel: %0x", __FUNCTION__, packet->m_nChannel);
header += 2;
}
//size of the chunk message header (4 possibilities)
nSize = packetSize[packet->m_headerType];
if (nSize == RTMP_LARGE_HEADER_SIZE) /* if we get a full header the timestamp is absolute */
packet->m_hasAbsTimestamp = TRUE; //the timestamp in a full 11-byte chunk message header is absolute
else if (nSize < RTMP_LARGE_HEADER_SIZE)
{ /* using values from the last message of this channel */
if (r->m_vecChannelsIn[packet->m_nChannel])
memcpy(packet, r->m_vecChannelsIn[packet->m_nChannel],
sizeof(RTMPPacket));
}
nSize--;
if (nSize > 0 && ReadN(r, header, nSize) != nSize)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet header. type: %x",
__FUNCTION__, (unsigned int)hbuf[0]);
return FALSE;
}
hSize = nSize + (header - (char *)hbuf);
if (nSize >= 3)
{
//timestamp (big endian; present in the 11-, 7- and 3-byte headers)
packet->m_nTimeStamp = AMF_DecodeInt24(header);
/*RTMP_Log(RTMP_LOGDEBUG, "%s, reading RTMP packet chunk on channel %x, headersz %i, timestamp %i, abs timestamp %i", __FUNCTION__, packet.m_nChannel, nSize, packet.m_nTimeStamp, packet.m_hasAbsTimestamp); */
//message length (present in the 11- and 7-byte headers)
if (nSize >= 6)
{
packet->m_nBodySize = AMF_DecodeInt24(header + 3);
packet->m_nBytesRead = 0;
RTMPPacket_Free(packet);
//(present in the 11- and 7-byte headers)
if (nSize > 6)
{
//message type ID
packet->m_packetType = header[6];
//message stream ID (only in the 11-byte header)
if (nSize == 11)
packet->m_nInfoField2 = DecodeInt32LE(header + 7);
}
}
//extended timestamp
if (packet->m_nTimeStamp == 0xffffff)
{
if (ReadN(r, header + nSize, 4) != 4)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read extended timestamp",
__FUNCTION__);
return FALSE;
}
packet->m_nTimeStamp = AMF_DecodeInt32(header + nSize);
hSize += 4;
}
}
RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)hbuf, hSize);
if (packet->m_nBodySize > 0 && packet->m_body == NULL)
{
if (!RTMPPacket_Alloc(packet, packet->m_nBodySize))
{
RTMP_Log(RTMP_LOGDEBUG, "%s, failed to allocate packet", __FUNCTION__);
return FALSE;
}
didAlloc = TRUE;
packet->m_headerType = (hbuf[0] & 0xc0) >> 6;
}
nToRead = packet->m_nBodySize - packet->m_nBytesRead;
nChunk = r->m_inChunkSize;
if (nToRead < nChunk)
nChunk = nToRead;
/* Does the caller want the raw chunk? */
if (packet->m_chunk)
{
packet->m_chunk->c_headerSize = hSize;
memcpy(packet->m_chunk->c_header, hbuf, hSize);
packet->m_chunk->c_chunk = packet->m_body + packet->m_nBytesRead;
packet->m_chunk->c_chunkSize = nChunk;
}
if (ReadN(r, packet->m_body + packet->m_nBytesRead, nChunk) != nChunk)
{
RTMP_Log(RTMP_LOGERROR, "%s, failed to read RTMP packet body. len: %lu",
__FUNCTION__, packet->m_nBodySize);
return FALSE;
}
RTMP_LogHexString(RTMP_LOGDEBUG2, (uint8_t *)packet->m_body + packet->m_nBytesRead, nChunk);
packet->m_nBytesRead += nChunk;
/* keep the packet as ref for other packets on this channel */
if (!r->m_vecChannelsIn[packet->m_nChannel])
r->m_vecChannelsIn[packet->m_nChannel] = (RTMPPacket *) malloc(sizeof(RTMPPacket));
memcpy(r->m_vecChannelsIn[packet->m_nChannel], packet, sizeof(RTMPPacket));
//the whole message has been read
if (RTMPPacket_IsReady(packet))
{
/* make packet's timestamp absolute */
if (!packet->m_hasAbsTimestamp)
packet->m_nTimeStamp += r->m_channelTimestamp[packet->m_nChannel]; /* timestamps seem to be always relative!! */
r->m_channelTimestamp[packet->m_nChannel] = packet->m_nTimeStamp;
/* reset the data from the stored packet. we keep the header since we may use it later if a new packet for this channel */
/* arrives and requests to re-use some info (small packet header) */
r->m_vecChannelsIn[packet->m_nChannel]->m_body = NULL;
r->m_vecChannelsIn[packet->m_nChannel]->m_nBytesRead = 0;
r->m_vecChannelsIn[packet->m_nChannel]->m_hasAbsTimestamp = FALSE; /* can only be false if we reuse header */
}
else
{
packet->m_body = NULL; /* so it won't be erased on free */
}
return TRUE;
}

The function looks long, but it is not complicated; it is essentially simple, repetitive work: read the data more or less byte by byte and parse it according to the RTMP specification (see the specification for the field-by-field details). A small self-contained sketch of the basic-header decoding follows.
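
As an illustration of that byte-by-byte parsing, here is a minimal, self-contained sketch, not libRTMP code, of how the chunk basic header that RTMP_ReadPacket() deals with is decoded. The helper name decode_basic_header is made up for this example; it only shows the fmt/csid rules implemented above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, for illustration only: decode the chunk basic header
 * from a byte buffer (which must contain enough bytes). It returns the number
 * of header bytes consumed and fills in fmt (chunk type 0-3) and csid
 * (chunk stream ID), following the same 1/2/3-byte rules as RTMP_ReadPacket(). */
static int decode_basic_header(const uint8_t *p, int *fmt, uint32_t *csid)
{
    *fmt = (p[0] & 0xc0) >> 6;      /* top 2 bits: chunk type (fmt) */
    uint32_t id = p[0] & 0x3f;      /* low 6 bits: chunk stream ID field */
    if (id == 0) {                  /* 2-byte form: csid = 64 + second byte */
        *csid = 64 + p[1];
        return 2;
    }
    if (id == 1) {                  /* 3-byte form: csid = 64 + b1 + b2*256 */
        *csid = 64 + p[1] + ((uint32_t)p[2] << 8);
        return 3;
    }
    *csid = id;                     /* 1-byte form: csid in 2..63 */
    return 1;
}

int main(void)
{
    const uint8_t hdr[] = { 0x04 }; /* fmt=0 (full 11-byte message header follows), csid=4 */
    int fmt; uint32_t csid;
    int used = decode_basic_header(hdr, &fmt, &csid);
    printf("fmt=%d csid=%u bytes=%d\n", fmt, csid, used);
    return 0;
}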

Inside RTMP_ReadPacket(), the function that actually pulls data from the socket is ReadN(). Its source code:

//Read data via HTTP (RTMPT) or directly from the socket
static int
ReadN(RTMP *r, char *buffer, int n)
{
int nOriginalSize = n;
int avail;
char *ptr;
r->m_sb.sb_timedout = FALSE;
#ifdef _DEBUG
memset(buffer, 0, n);
#endif
ptr = buffer;
while (n > 0)
{
int nBytes = 0, nRead;
if (r->Link.protocol & RTMP_FEATURE_HTTP)
{
while (!r->m_resplen)
{
if (r->m_sb.sb_size < 144)
{
if (!r->m_unackd)
HTTP_Post(r, RTMPT_IDLE, "", 1);
if (RTMPSockBuf_Fill(&r->m_sb) < 1)
{
if (!r->m_sb.sb_timedout)
RTMP_Close(r);
return 0;
}
}
HTTP_read(r, 0);
}
if (r->m_resplen && !r->m_sb.sb_size)
RTMPSockBuf_Fill(&r->m_sb);
avail = r->m_sb.sb_size;
if (avail > r->m_resplen)
avail = r->m_resplen;
}
else
{
avail = r->m_sb.sb_size;
if (avail == 0)
{
if (RTMPSockBuf_Fill(&r->m_sb) < 1)
{
if (!r->m_sb.sb_timedout)
RTMP_Close(r);
return 0;
}
avail = r->m_sb.sb_size;
}
}
nRead = ((n < avail) ? n : avail);
if (nRead > 0)
{
memcpy(ptr, r->m_sb.sb_start, nRead);
r->m_sb.sb_start += nRead;
r->m_sb.sb_size -= nRead;
nBytes = nRead;
r->m_nBytesIn += nRead;
if (r->m_bSendCounter
&& r->m_nBytesIn > r->m_nBytesInSent + r->m_nClientBW / 2)
SendBytesReceived(r);
}
/*RTMP_Log(RTMP_LOGDEBUG, "%s: %d bytes\n", __FUNCTION__, nBytes); */
#ifdef _DEBUG
fwrite(ptr, 1, nBytes, netstackdump_read);
#endif
if (nBytes == 0)
{
RTMP_Log(RTMP_LOGDEBUG, "%s, RTMP socket closed by peer", __FUNCTION__);
/*goto again; */
RTMP_Close(r);
break;
}
if (r->Link.protocol & RTMP_FEATURE_HTTP)
r->m_resplen -= nBytes;
#ifdef CRYPTO
if (r->Link.rc4keyIn)
{
RC4_encrypt((RC4_KEY *)r->Link.rc4keyIn, nBytes, ptr);
}
#endif
n -= nBytes;
ptr += nBytes;
}
return nOriginalSize - n;
}

Inside ReadN(), the function that receives data from the socket is RTMPSockBuf_Fill(); its code is below (yet another layer of calls).

//Calls the socket recv() function to receive data
int
RTMPSockBuf_Fill(RTMPSockBuf *sb)
{
int nBytes;
if (!sb->sb_size)
sb->sb_start = sb->sb_buf;
while (1)
{
//free space in the buffer: total size - unprocessed bytes - already-processed bytes
//|-----processed-----|-----unprocessed-----|---------free space---------|
//sb_buf sb_start sb_start+sb_size
nBytes = sizeof(sb->sb_buf) - sb->sb_size - (sb->sb_start - sb->sb_buf);
#if defined(CRYPTO) && !defined(NO_SSL)
if (sb->sb_ssl)
{
nBytes = TLS_read((SSL *)sb->sb_ssl, sb->sb_start + sb->sb_size, nBytes);
}
else
#endif
{
//int recv(SOCKET s, char *buf, int len, int flags);
//s: descriptor identifying a connected socket
//buf: buffer that receives the data
//len: length of the buffer
//flags: flags that influence the behaviour of the call
//the region starting at sb_start (next byte to process) + sb_size (bytes not yet processed) is free and can be filled
nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0);
}
if (nBytes != -1)
{
//more unprocessed bytes are now available
sb->sb_size += nBytes;
}
else
{
int sockerr = GetSockError();
RTMP_Log(RTMP_LOGDEBUG, "%s, recv returned %d. GetSockError(): %d (%s)",
__FUNCTION__, nBytes, sockerr, strerror(sockerr));
if (sockerr == EINTR && !RTMP_ctrlC)
continue;
if (sockerr == EWOULDBLOCK || sockerr == EAGAIN)
{
sb->sb_timedout = TRUE;
nBytes = 0;
}
}
break;
}
return nBytes;
}

As the RTMPSockBuf_Fill() code shows, the standard socket recv() call is what ultimately receives the data sent over the RTMP connection. The sketch below ties the whole call chain together from the application's point of view.
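
To summarize the chain (RTMP_Read() -> Read_1_Packet() -> RTMP_GetNextMediaPacket() -> RTMP_ReadPacket() -> ReadN() -> RTMPSockBuf_Fill() -> recv()), the following is a minimal, hedged sketch of how an application can drive this read path. It is not the rtmpdump program itself: it uses the RTMP_SetupURL() convenience API, the URL and output file name are placeholders, and error handling is reduced to a bare minimum.

#include <stdio.h>
#include "librtmp/rtmp.h"

int main(void)
{
    char url[] = "rtmp://example.com/app/stream";  /* placeholder URL */
    RTMP *r = RTMP_Alloc();
    RTMP_Init(r);
    /* On Windows, WinSock must be initialized (WSAStartup) before this point. */
    if (!RTMP_SetupURL(r, url) ||
        !RTMP_Connect(r, NULL) ||      /* NetConnection */
        !RTMP_ConnectStream(r, 0))     /* NetStream */
    {
        RTMP_Free(r);
        return 1;
    }
    FILE *out = fopen("out.flv", "wb"); /* placeholder output file */
    if (out != NULL)
    {
        char buf[64 * 1024];
        int nRead;
        /* every RTMP_Read() call ends up in recv() via the chain analyzed above */
        while ((nRead = RTMP_Read(r, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, nRead, out);
        fclose(out);
    }
    RTMP_Close(r);
    RTMP_Free(r);
    return 0;
}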

10: Handling the various messages (Message)

Having written this series of posts in a row (most of the material comes from RTMP work done last year, so this is also a review of past knowledge), the main functionality of RTMPdump (libRTMP) has largely been covered; what remains is to fill in a few gaps. The part not yet examined carefully is how the various messages (Message) are handled, so this section studies that in detail.

Let us look again at the "soul" of RTMPdump (libRTMP), RTMP_ClientPacket(), which performs most of the message handling.

//Process the received message
int
RTMP_ClientPacket(RTMP *r, RTMPPacket *packet)
{
int bHasMediaPacket = 0;
switch (packet->m_packetType)
{
//RTMP message type ID = 1: Set Chunk Size
case 0x01:
/* chunk size */
//----------------
r->dlg->AppendCInfo("Processing received data. Message: Set Chunk Size (typeID=1).");
//-----------------------------
RTMP_LogPrintf("Handling message Set Chunk Size (typeID=1)\n");
HandleChangeChunkSize(r, packet);
break;
//RTMP message type ID = 3: Acknowledgement
case 0x03:
/* bytes read report */
RTMP_Log(RTMP_LOGDEBUG, "%s, received: bytes read report", __FUNCTION__);
break;
//RTMP message type ID = 4: User Control
case 0x04:
/* ctrl */
//----------------
r->dlg->AppendCInfo("Processing received data. Message: User Control (typeID=4).");
//-----------------------------
RTMP_LogPrintf("Handling message User Control (typeID=4)\n");
HandleCtrl(r, packet);
break;
//RTMP message type ID = 5
case 0x05:
/* server bw */
//----------------
r->dlg->AppendCInfo("Processing received data. Message: Window Acknowledgement Size (typeID=5).");
//-----------------------------
RTMP_LogPrintf("Handling message Window Acknowledgement Size (typeID=5)\n");
HandleServerBW(r, packet);
break;
//RTMP message type ID = 6
case 0x06:
/* client bw */
//----------------
r->dlg->AppendCInfo("Processing received data. Message: Set Peer Bandwidth (typeID=6).");
//-----------------------------
RTMP_LogPrintf("Handling message Set Peer Bandwidth (typeID=6)\n");
HandleClientBW(r, packet);
break;
//RTMP message type ID = 8: audio data
case 0x08:
/* audio data */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: audio %lu bytes", __FUNCTION__, packet.m_nBodySize); */
HandleAudio(r, packet);
bHasMediaPacket = 1;
if (!r->m_mediaChannel)
r->m_mediaChannel = packet->m_nChannel;
if (!r->m_pausing)
r->m_mediaStamp = packet->m_nTimeStamp;
break;
//RTMP message type ID = 9: video data
case 0x09:
/* video data */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: video %lu bytes", __FUNCTION__, packet.m_nBodySize); */
HandleVideo(r, packet);
bHasMediaPacket = 1;
if (!r->m_mediaChannel)
r->m_mediaChannel = packet->m_nChannel;
if (!r->m_pausing)
r->m_mediaStamp = packet->m_nTimeStamp;
break;
//RTMP message type ID = 15 (0x0F): AMF3 data message, ignored
case 0x0F: /* flex stream send */
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex stream send, size %lu bytes, not supported, ignoring",
__FUNCTION__, packet->m_nBodySize);
break;
//RTMP message type ID = 16 (0x10): AMF3 shared object, ignored
case 0x10: /* flex shared object */
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex shared object, size %lu bytes, not supported, ignoring",
__FUNCTION__, packet->m_nBodySize);
break;
//RTMP message type ID = 17 (0x11): AMF3 command message
case 0x11: /* flex message */
{
RTMP_Log(RTMP_LOGDEBUG,
"%s, flex message, size %lu bytes, not fully supported",
__FUNCTION__, packet->m_nBodySize);
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */
/* some DEBUG code */
#if 0
RTMP_LIB_AMFObject obj;
int nRes = obj.Decode(packet.m_body+1, packet.m_nBodySize-1);
if(nRes < 0) {
RTMP_Log(RTMP_LOGERROR, "%s, error decoding AMF3 packet", __FUNCTION__);
/*return; */
}
obj.Dump();
#endif
if (HandleInvoke(r, packet->m_body + 1, packet->m_nBodySize - 1) == 1)
bHasMediaPacket = 2;
break;
}
//RTMP message type ID = 18 (0x12): AMF0 data message
case 0x12:
/* metadata (notify) */
RTMP_Log(RTMP_LOGDEBUG, "%s, received: notify %lu bytes", __FUNCTION__,
packet->m_nBodySize);
//metadata handling, temporarily commented out (so this case falls through to case 0x13)
/*
if (HandleMetadata(r, packet->m_body, packet->m_nBodySize))
bHasMediaPacket = 1;
break;
*/
//RTMP message type ID = 19 (0x13): AMF0 shared object, ignored
case 0x13:
RTMP_Log(RTMP_LOGDEBUG, "%s, shared object, not supported, ignoring",
__FUNCTION__);
break;
//RTMP message type ID = 20 (0x14): AMF0 command message
//handle command messages!
case 0x14:
//----------------
r->dlg->AppendCInfo("Processing received data. Message: command (AMF0 encoding) (typeID=20).");
//-----------------------------
/* invoke */
RTMP_Log(RTMP_LOGDEBUG, "%s, received: invoke %lu bytes", __FUNCTION__,
packet->m_nBodySize);
RTMP_LogPrintf("Handling command message (typeID=20, AMF0 encoding)\n");
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */
if (HandleInvoke(r, packet->m_body, packet->m_nBodySize) == 1)
bHasMediaPacket = 2;
break;
//RTMP message type ID = 22 (0x16): aggregate message (FLV tags)
case 0x16:
{
/* go through FLV packets and handle metadata packets */
unsigned int pos = 0;
uint32_t nTimeStamp = packet->m_nTimeStamp;
while (pos + 11 < packet->m_nBodySize)
{
uint32_t dataSize = AMF_DecodeInt24(packet->m_body + pos + 1); /* size without header (11) and prevTagSize (4) */
if (pos + 11 + dataSize + 4 > packet->m_nBodySize)
{
RTMP_Log(RTMP_LOGWARNING, "Stream corrupt?!");
break;
}
if (packet->m_body[pos] == 0x12)
{
HandleMetadata(r, packet->m_body + pos + 11, dataSize);
}
else if (packet->m_body[pos] == 0x08 || packet->m_body[pos] == 0x09)
{
nTimeStamp = AMF_DecodeInt24(packet->m_body + pos + 4);
nTimeStamp |= (packet->m_body[pos + 7] << 24);
}
pos += (11 + dataSize + 4);
}
if (!r->m_pausing)
r->m_mediaStamp = nTimeStamp;
/* FLV tag(s) */
/*RTMP_Log(RTMP_LOGDEBUG, "%s, received: FLV tag(s) %lu bytes", __FUNCTION__, packet.m_nBodySize); */
bHasMediaPacket = 1;
break;
}
default:
RTMP_Log(RTMP_LOGDEBUG, "%s, unknown packet type received: 0x%02x", __FUNCTION__,
packet->m_packetType);
#ifdef _DEBUG
RTMP_LogHex(RTMP_LOGDEBUG, (const uint8_t *)packet->m_body, packet->m_nBodySize);
#endif
}
return bHasMediaPacket;
}

As analyzed earlier, when the message type ID is 0x14 (20), i.e. an AMF0-encoded command message, HandleInvoke() is called to process it.

Reference: RTMPdump(libRTMP) 源代码分析 7: 建立一个流媒体连接 (NetStream部分 2)

That message type is therefore not analyzed again here; instead, let us look at the other message types, since many messages have to be processed between initiating an RTMP connection and receiving the audio/video data.

Reference: RTMP流媒体播放过程

Below, in ascending order of message type ID, is how each kind of received message is handled.

A message with type ID 0x01 is "Set Chunk Size"; it is handled by HandleChangeChunkSize(), which is very simple:

static void
HandleChangeChunkSize(RTMP *r, const RTMPPacket *packet)
{
if (packet->m_nBodySize >= 4)
{
r->m_inChunkSize = AMF_DecodeInt32(packet->m_body);
RTMP_Log(RTMP_LOGDEBUG, "%s, received: chunk size change to %d", __FUNCTION__,
r->m_inChunkSize);
}
}

A message with type ID 0x03 is "Acknowledgement"; libRTMP has no dedicated handler for it and only logs it (a hedged sketch of decoding its payload is given below).
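
For illustration only, this is not part of libRTMP and the function name HandleBytesReadReport is hypothetical: decoding this message would amount to reading a single 4-byte big-endian sequence number, the total number of bytes received by the peer so far.

#include <stdint.h>

/* Hypothetical handler, for illustration only (libRTMP itself just logs this
 * message): the body of an Acknowledgement (type 0x03) is a 4-byte
 * big-endian sequence number = total bytes received so far. */
static uint32_t HandleBytesReadReport(const uint8_t *body, unsigned int bodySize)
{
    if (bodySize < 4)
        return 0;
    return ((uint32_t)body[0] << 24) | ((uint32_t)body[1] << 16) |
           ((uint32_t)body[2] << 8)  |  (uint32_t)body[3];
}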

A message with type ID 0x04 is "User Control"; it is handled by HandleCtrl(). Messages of this kind appear very frequently. The function body is shown below; the meaning of each user-control event is documented in the RTMP specification and is not repeated here.

Note: there is a long English comment in the middle of this function (explaining the BufferEmpty/BufferReady handling) that is worth reading.

//Handle a User Control message. User Control messages are sent by the server.
static void
HandleCtrl(RTMP *r, const RTMPPacket *packet)
{
short nType = -1;
unsigned int tmp;
if (packet->m_body && packet->m_nBodySize >= 2)
//event type (2 bytes)
nType = AMF_DecodeInt16(packet->m_body);
RTMP_Log(RTMP_LOGDEBUG, "%s, received ctrl. type: %d, len: %d", __FUNCTION__, nType,
packet->m_nBodySize);
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */
if (packet->m_nBodySize >= 6)
{
//handle each event type differently
switch (nType)
{
//Stream Begin
case 0:
//stream ID
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Begin %d", __FUNCTION__, tmp);
break;
//Stream EOF
case 1:
//stream ID
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream EOF %d", __FUNCTION__, tmp);
if (r->m_pausing == 1)
r->m_pausing = 2;
break;
//Stream Dry
case 2:
//stream ID
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream Dry %d", __FUNCTION__, tmp);
break;
//Stream IsRecorded
case 4:
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream IsRecorded %d", __FUNCTION__, tmp);
break;
//ping from the server
case 6: /* server ping. reply with pong. */
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Ping %d", __FUNCTION__, tmp);
RTMP_SendCtrl(r, 0x07, tmp, 0);
break;
/* FMS 3.5 servers send the following two controls to let the client
* know when the server has sent a complete buffer. I.e., when the
* server has sent an amount of data equal to m_nBufferMS in duration.
* The server meters its output so that data arrives at the client
* in realtime and no faster.
*
* The rtmpdump program tries to set m_nBufferMS as large as
* possible, to force the server to send data as fast as possible.
* In practice, the server appears to cap this at about 1 hour's
* worth of data. After the server has sent a complete buffer, and
* sends this BufferEmpty message, it will wait until the play
* duration of that buffer has passed before sending a new buffer.
* The BufferReady message will be sent when the new buffer starts.
* (There is no BufferReady message for the very first buffer;
* presumably the Stream Begin message is sufficient for that
* purpose.)
*
* If the network speed is much faster than the data bitrate, then
* there may be long delays between the end of one buffer and the
* start of the next.
*
* Since usually the network allows data to be sent at
* faster than realtime, and rtmpdump wants to download the data
* as fast as possible, we use this RTMP_LF_BUFX hack: when we
* get the BufferEmpty message, we send a Pause followed by an
* Unpause. This causes the server to send the next buffer immediately
* instead of waiting for the full duration to elapse. (That's
* also the purpose of the ToggleStream function, which rtmpdump
* calls if we get a read timeout.)
*
* Media player apps don't need this hack since they are just
* going to play the data in realtime anyway. It also doesn't work
* for live streams since they obviously can only be sent in
* realtime. And it's all moot if the network speed is actually
* slower than the media bitrate.
*/
//Stream BufferEmpty
case 31:
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferEmpty %d", __FUNCTION__, tmp);
if (!(r->Link.lFlags & RTMP_LF_BUFX))
break;
if (!r->m_pausing)
{
r->m_pauseStamp = r->m_channelTimestamp[r->m_mediaChannel];
RTMP_SendPause(r, TRUE, r->m_pauseStamp);
r->m_pausing = 1;
}
else if (r->m_pausing == 2)
{
RTMP_SendPause(r, FALSE, r->m_pauseStamp);
r->m_pausing = 3;
}
break;
//Stream BufferReady
case 32:
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream BufferReady %d", __FUNCTION__, tmp);
break;
default:
tmp = AMF_DecodeInt32(packet->m_body + 2);
RTMP_Log(RTMP_LOGDEBUG, "%s, Stream xx %d", __FUNCTION__, tmp);
break;
}
}
if (nType == 0x1A)
{
RTMP_Log(RTMP_LOGDEBUG, "%s, SWFVerification ping received: ", __FUNCTION__);
if (packet->m_nBodySize > 2 && packet->m_body[2] > 0x01)
{
RTMP_Log(RTMP_LOGERROR,
"%s: SWFVerification Type %d request not supported! Patches welcome...",
__FUNCTION__, packet->m_body[2]);
}
#ifdef CRYPTO
/*RTMP_LogHex(packet.m_body, packet.m_nBodySize); */
/* respond with HMAC SHA256 of decompressed SWF, key is the 30byte player key, also the last 30 bytes of the server handshake are applied */
else if (r->Link.SWFSize)
{
RTMP_SendCtrl(r, 0x1B, 0, 0);
}
else
{
RTMP_Log(RTMP_LOGERROR,
"%s: Ignoring SWFVerification request, use --swfVfy!",
__FUNCTION__);
}
#else
RTMP_Log(RTMP_LOGERROR,
"%s: Ignoring SWFVerification request, no CRYPTO support!",
__FUNCTION__);
#endif
}
}

A message with type ID 0x05 is "Window Acknowledgement Size"; it is handled by HandleServerBW(). Note that although Adobe's published specification calls this message "Window Acknowledgement Size", the groups that reverse-engineered RTMP before the specification was released had always called it "ServerBW"; it is only a naming difference. The handling code is simple:

static void
HandleServerBW(RTMP *r, const RTMPPacket *packet)
{
r->m_nServerBW = AMF_DecodeInt32(packet->m_body);
RTMP_Log(RTMP_LOGDEBUG, "%s: server BW = %d", __FUNCTION__, r->m_nServerBW);
}

A message with type ID 0x06 is "Set Peer Bandwidth"; it is handled by HandleClientBW(). As with the previous message, Adobe's official documentation calls it "Set Peer Bandwidth", while the pre-specification reverse-engineering community called it "ClientBW". The handler is not complicated either:

static void
HandleClientBW(RTMP *r, const RTMPPacket *packet)
{
r->m_nClientBW = AMF_DecodeInt32(packet->m_body);
if (packet->m_nBodySize > 4)
r->m_nClientBW2 = packet->m_body[4];
else
r->m_nClientBW2 = -1;
RTMP_Log(RTMP_LOGDEBUG, "%s: client BW = %d %d", __FUNCTION__, r->m_nClientBW,
r->m_nClientBW2);
}

Messages with type ID 0x08 carry audio data; they are not analyzed further here.

Messages with type ID 0x09 carry video data; they are not analyzed further here.

Messages with type IDs 0x0F-0x11 carry AMF3-encoded data, shared-object and command messages.

Messages with type IDs 0x12-0x14 carry AMF0-encoded data, shared-object and command messages.

Note: the message with type ID 0x14 is the most important one; it carries AMF0-encoded commands and has already been analyzed. A small recap of these type IDs follows.
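
As a recap of the message type IDs covered in this section, here is a small lookup helper. It is not part of libRTMP, just an illustrative sketch; the names are taken from the Adobe RTMP specification.

/* Hypothetical helper, for illustration only: map the RTMP message type IDs
 * discussed in this section to human-readable names. */
static const char *RTMPMsgTypeName(int typeID)
{
    switch (typeID) {
    case 0x01: return "Set Chunk Size";
    case 0x03: return "Acknowledgement";
    case 0x04: return "User Control";
    case 0x05: return "Window Acknowledgement Size";
    case 0x06: return "Set Peer Bandwidth";
    case 0x08: return "Audio";
    case 0x09: return "Video";
    case 0x0F: return "Data (AMF3)";
    case 0x10: return "Shared Object (AMF3)";
    case 0x11: return "Command (AMF3)";
    case 0x12: return "Data (AMF0)";
    case 0x13: return "Shared Object (AMF0)";
    case 0x14: return "Command (AMF0)";
    case 0x16: return "Aggregate (FLV tags)";
    default:   return "unknown";
    }
}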