Live555: X264 Stream Live source based on "testOnDemandRTSPServer"（Live555：基于“testOnDemandRTSPServer”的X264 Stream Live源）
问题描述
我正在尝试创建一个 rtsp 服务器来流式传输我的程序的 OpenGL 输出.我看了一下 如何编写 Live555 FramedSource 以允许我直播 H.264,但我需要单播流.所以我查看了 testOnDemandRTSPServer.使用相同的代码失败.据我了解,我需要提供存储 h264 帧的内存,以便 OnDemandServer 可以按需读取它们.
I am trying to create a rtsp Server that streams the OpenGL output of my program. I had a look at How to write a Live555 FramedSource to allow me to stream H.264 live, but I need the stream to be unicast. So I had a look at testOnDemandRTSPServer. Using the same Code fails. To my understanding I need to provide memory in which I store my h264 frames so the OnDemandServer can read them on Demand.
H264VideoStreamServerMediaSubsession.cpp
H264VideoStreamServerMediaSubsession.cpp
// Factory: the only public way to construct this subsession type.
H264VideoStreamServerMediaSubsession*
H264VideoStreamServerMediaSubsession::createNew(UsageEnvironment& env,
                                                Boolean reuseFirstSource)
{
    H264VideoStreamServerMediaSubsession* subsession =
        new H264VideoStreamServerMediaSubsession(env, reuseFirstSource);
    return subsession;
}
// Constructor: forwards to OnDemandServerMediaSubsession and clears the
// SDP-discovery state (cached aux line, completion flag, temporary sink).
H264VideoStreamServerMediaSubsession::H264VideoStreamServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource)
: OnDemandServerMediaSubsession(env, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}
// Destructor: free the cached aux SDP line (strDup allocates with new char[]).
H264VideoStreamServerMediaSubsession::~H264VideoStreamServerMediaSubsession() {
delete[] fAuxSDPLine;
}
static void afterPlayingDummy(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->afterPlayingDummy1();
}
// The dummy playback ended before the aux line appeared: cancel the pending
// poll task and end the nested event loop running in getAuxSDPLine().
void H264VideoStreamServerMediaSubsession::afterPlayingDummy1() {
// Unschedule any pending 'checking' task:
envir().taskScheduler().unscheduleDelayedTask(nextTask());
// Signal the event loop that we're done:
setDoneFlag();
}
static void checkForAuxSDPLine(void* clientData) {
H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
subsess->checkForAuxSDPLine1();
}
// Polling step run inside getAuxSDPLine()'s nested event loop: once the dummy
// RTP sink has seen SPS/PPS it can produce the SDP "a=fmtp:" line; cache it
// and signal completion, otherwise re-schedule this check after 100 ms.
void H264VideoStreamServerMediaSubsession::checkForAuxSDPLine1() {
char const* dasl;
if (fAuxSDPLine != NULL) {
// Already cached by an earlier pass — signal the event loop that we're done:
setDoneFlag();
} else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
fAuxSDPLine = strDup(dasl);
fDummyRTPSink = NULL;
// Signal the event loop that we're done:
setDoneFlag();
} else {
// try again after a brief delay:
int uSecsToDelay = 100000; // 100 ms
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
(TaskFunc*)checkForAuxSDPLine, this);
}
}
// Returns the stream-specific SDP lines (profile-level-id and
// sprop-parameter-sets). For H.264 these are only known once the encoder has
// emitted SPS/PPS, so we start playing into the RTP sink and spin a nested
// event loop (bounded by fDoneFlag) until the line becomes available.
char const* H264VideoStreamServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)
if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
// Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
// until we start reading the file. This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
// and we need to start reading data from our file until this changes.
fDummyRTPSink = rtpSink;
// Start reading the file:
fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
// Check whether the sink's 'auxSDPLine()' is ready:
checkForAuxSDPLine(this);
}
// Nested event loop: returns when setDoneFlag() is called by the poll above.
envir().taskScheduler().doEventLoop(&fDoneFlag);
return fAuxSDPLine;
}
// Build the per-client input chain: wrap the H264FramedSource owned by the
// parent view in an H264VideoStreamFramer.
// NOTE(review): 'this->parent' is presumably set right after createNew() by
// the server thread; confirm it is non-NULL before the first client connects.
FramedSource* H264VideoStreamServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
estBitrate = 500; // kb
megamol::remotecontrol::View3D_MRC *parent = (megamol::remotecontrol::View3D_MRC*)this->parent;
return H264VideoStreamFramer::createNew(envir(), parent->h264FramedSource);
}
// Build the RTP sink that packetizes H.264 NAL units for this stream.
RTPSink* H264VideoStreamServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/)
{
    RTPSink* videoSink = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
    return videoSink;
}
FramedSource.cpp
FramedSource.cpp
// Factory for the OpenGL-fed framed source.
H264FramedSource* H264FramedSource::createNew(UsageEnvironment& env,
                                              unsigned preferredFrameSize,
                                              unsigned playTimePerFrame)
{
    H264FramedSource* source =
        new H264FramedSource(env, preferredFrameSize, playTimePerFrame);
    return source;
}
// Live framed source fed by AddToBuffer(): converts RGBA frames to I420,
// encodes them with x264 and queues the resulting NAL units for Live555.
//
// Fixes vs. the original:
//  * fPreferredFrameSize was initialized from fMaxSize (a base-class member
//    with no meaningful value at construction time) instead of the
//    'preferredFrameSize' constructor argument.
//  * '&param' had been mangled to '¶m' (HTML-entity corruption) in three calls.
H264FramedSource::H264FramedSource(UsageEnvironment& env,
                                   unsigned preferredFrameSize,
                                   unsigned playTimePerFrame)
    : FramedSource(env),
      fPreferredFrameSize(preferredFrameSize), // was: fMaxSize (bug)
      fPlayTimePerFrame(playTimePerFrame),
      fLastPlayTime(0),
      fCurIndex(0)
{
    x264_param_default_preset(&param, "veryfast", "zerolatency");
    param.i_threads = 1;
    param.i_width = 1024;
    param.i_height = 768;
    param.i_fps_num = 30; // 30 fps
    param.i_fps_den = 1;
    // Intra refresh: keyframe at most every 60 frames, plus periodic intra.
    param.i_keyint_max = 60;
    param.b_intra_refresh = 1;
    // Rate control: CRF with an upper quality bound.
    param.rc.i_rc_method = X264_RC_CRF;
    param.rc.f_rf_constant = 25;
    param.rc.f_rf_constant_max = 35;
    param.i_sps_id = 7;
    // For streaming: repeat SPS/PPS before every keyframe, Annex-B start codes.
    param.b_repeat_headers = 1;
    param.b_annexb = 1;
    x264_param_apply_profile(&param, "baseline");
    param.i_log_level = X264_LOG_ERROR;
    encoder = x264_encoder_open(&param);
    pic_in.i_type = X264_TYPE_AUTO;
    pic_in.i_qpplus1 = 0;
    pic_in.img.i_csp = X264_CSP_I420;
    pic_in.img.i_plane = 3;
    x264_picture_alloc(&pic_in, X264_CSP_I420, 1024, 768);
    // RGBA (OpenGL readback) -> I420 (x264 does not accept RGB input).
    convertCtx = sws_getContext(1024, 768, PIX_FMT_RGBA, 1024, 768, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    // Event trigger used by AddToBuffer() to wake the Live555 event loop.
    eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
}
// Destructor: release the event trigger registered in the constructor.
// NOTE(review): 'encoder' and 'convertCtx' are not released here — looks like
// a leak; confirm whether x264_encoder_close()/sws_freeContext() should run.
H264FramedSource::~H264FramedSource()
{
envir().taskScheduler().deleteEventTrigger(eventTriggerId);
eventTriggerId = 0;
}
// Called from the render thread with one RGBA frame (1024x768, 4 bytes/px):
// converts it to I420, encodes it, queues the produced NAL units and wakes
// the Live555 event loop via the event trigger.
//
// Fixes vs. the original:
//  * After the first successful encode, x264_encoder_headers() was called
//    into the same 'nals' pointer, overwriting (and losing) the just-encoded
//    frame's NALs before they were queued. With b_repeat_headers=1 the
//    SPS/PPS are already emitted in-stream, so that call was both redundant
//    and destructive.
//  * Removed the intermediate heap copy of the input buffer; sws_scale()
//    only reads from it, so it can consume 'buf' directly.
void H264FramedSource::AddToBuffer(uint8_t* buf, int surfaceSizeInBytes)
{
    (void)surfaceSizeInBytes; // geometry is fixed at 1024x768 RGBA
    int srcstride = 1024 * 4; // bytes per RGBA row
    sws_scale(convertCtx, &buf, &srcstride, 0, 768, pic_in.img.plane, pic_in.img.i_stride);

    x264_nal_t* nals = NULL;
    int i_nals = 0;
    int frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
    if (frame_size >= 0)
    {
        // NOTE(review): x264_nal_t::p_payload points into an encoder-owned
        // buffer that is only valid until the next x264_encoder_encode();
        // the queue should copy the payload — confirm m_queue semantics.
        for (int i = 0; i < i_nals; ++i)
        {
            m_queue.push(nals[i]);
        }
    }
    envir().taskScheduler().triggerEvent(eventTriggerId, this);
}
// Live555 entry point: the downstream sink wants data — attempt delivery of
// a queued NAL immediately.
void H264FramedSource::doGetNextFrame()
{
deliverFrame();
}
void H264FramedSource::deliverFrame0(void* clientData)
{
((H264FramedSource*)clientData)->deliverFrame();
}
void H264FramedSource::deliverFrame()
{
x264_nal_t nalToDeliver;
if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
gettimeofday(&fPresentationTime, NULL);
} else {
// Increment by the play time of the previous data:
unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
fPresentationTime.tv_sec += uSeconds/1000000;
fPresentationTime.tv_usec = uSeconds%1000000;
}
// Remember the play time of this data:
fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
fDurationInMicroseconds = fLastPlayTime;
} else {
// We don't know a specific play time duration for this data,
// so just record the current time as being the 'presentation time':
gettimeofday(&fPresentationTime, NULL);
}
if(!m_queue.empty())
{
m_queue.wait_and_pop(nalToDeliver);
uint8_t* newFrameDataStart = (uint8_t*)0xD15EA5E;
newFrameDataStart = (uint8_t*)(nalToDeliver.p_payload);
unsigned newFrameSize = nalToDeliver.i_payload;
// Deliver the data here:
if (newFrameSize > fMaxSize) {
fFrameSize = fMaxSize;
fNumTruncatedBytes = newFrameSize - fMaxSize;
}
else {
fFrameSize = newFrameSize;
}
memcpy(fTo, nalToDeliver.p_payload, nalToDeliver.i_payload);
FramedSource::afterGetting(this);
}
}
RTSP-Server Thread 的相关部分
Relevant part of the RTSP-Server Thread
// Relevant part of the RTSP-server thread: create the server, register the
// live H.264 subsession and enter the (never-returning) event loop.
RTSPServer* rtspServer = RTSPServer::createNew(*(parent->env), 8554, NULL);
if (rtspServer == NULL) {
    // Fixed: the "\n" escape had been broken across two source lines,
    // leaving a literal newline inside the string (invalid C++).
    *(parent->env) << "Failed to create RTSP server: " << (parent->env)->getResultMsg() << "\n";
    exit(1);
}
char const* streamName = "Stream";
// NOTE(review): 'sms' is used below, but its creation (presumably
// ServerMediaSession::createNew(..., streamName, ...)) is not shown in this
// fragment — confirm it exists in the surrounding code.
parent->h264FramedSource = H264FramedSource::createNew(*(parent->env), 0, 0);
H264VideoStreamServerMediaSubsession *h264VideoStreamServerMediaSubsession = H264VideoStreamServerMediaSubsession::createNew(*(parent->env), true);
h264VideoStreamServerMediaSubsession->parent = parent;
sms->addSubsession(h264VideoStreamServerMediaSubsession);
rtspServer->addServerMediaSession(sms);
parent->env->taskScheduler().doEventLoop(); // does not return
一旦连接存在,渲染循环调用
Once a connection exists the render loop calls
h264FramedSource->AddToBuffer(videoData, 1024*768*4);
推荐答案
您必须做的第一件事是围绕 x264 编码器编写一个包装器,您可以使用它来对具有良好给定接口的 RGB 数据进行编码.下面的课程将让您了解如何做到这一点.我已经使用这个类来编码我从我的 opencv 捕获中获得的 RAW BGR 帧.
First thing You have to do is write a wrapper around x264 Encoder which you can use to encode RGB data with nice given interface. the following class will give you the idea how to do that. i have used this class to encode RAW BGR frame which i was getting from my opencv capture.
x264Encoder.h
// x264Encoder.h — thin wrapper around the x264 encoder: feed BGR frames in,
// take encoded NAL units out of a queue.
//
// Fix vs. the original: the include paths had lost their separators during
// HTML scraping ("opencv2opencv.hpp", "x264x264.h").
#ifdef __cplusplus
#define __STDINT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <iostream>
#include <concurrent_queue.h>
#include "opencv2/opencv.hpp" // was: "opencv2opencv.hpp" (separator lost)
#include <queue>
#include <stdint.h>
extern "C" {
#include "x264/x264.h" // was: "x264x264.h" — adjust to your x264 install layout
}

class x264Encoder
{
public:
    x264Encoder(void);
    ~x264Encoder(void);

public:
    // Open the encoder (fixed 640x480 @ 25 fps, baseline) and the converter.
    void initilize();
    // Close the encoder and free the colour-space converter.
    void unInitilize();
    // Encode one BGR24 frame; resulting NALs go to the output queue.
    void encodeFrame(cv::Mat& image);
    // True when at least one encoded NAL is waiting to be consumed.
    bool isNalsAvailableInOutputQueue();
    // Pop and return the oldest NAL unit (queue must be non-empty).
    x264_nal_t getNalUnit();

private:
    // Use this context to convert your BGR Image to YUV image since x264 do not support RGB input
    SwsContext* convertContext;
    std::queue<x264_nal_t> outputQueue;
    x264_param_t parameters;
    x264_picture_t picture_in,picture_out;
    x264_t* encoder;
};
x264Encoder.cpp
#include "x264Encoder.h"
// Trivial constructor — all real setup happens in initilize().
x264Encoder::x264Encoder(void)
{
}
// Trivial destructor.
// NOTE(review): unInitilize() is not called here — callers must call it
// explicitly or the encoder/converter are leaked; confirm intended ownership.
x264Encoder::~x264Encoder(void)
{
}
// Open the x264 encoder: fixed 640x480 @ 25 fps, baseline profile, CRF rate
// control with VBV caps, zerolatency tuning. Also creates the BGR24 -> I420
// colour-space converter.
//
// Fix vs. the original: '&parameters' had been mangled to '¶meters'
// (HTML-entity corruption) in three calls.
void x264Encoder::initilize()
{
    x264_param_default_preset(&parameters, "veryfast", "zerolatency");
    parameters.i_log_level = X264_LOG_INFO;
    parameters.i_threads = 1;
    parameters.i_width = 640;
    parameters.i_height = 480;
    parameters.i_fps_num = 25;
    parameters.i_fps_den = 1;
    parameters.i_keyint_max = 25;
    parameters.b_intra_refresh = 1;
    parameters.rc.i_rc_method = X264_RC_CRF;
    parameters.rc.i_vbv_buffer_size = 1000000;
    parameters.rc.i_vbv_max_bitrate = 90000;
    parameters.rc.f_rf_constant = 25;
    parameters.rc.f_rf_constant_max = 35;
    parameters.i_sps_id = 7;
    // The following two values you should keep at 1:
    parameters.b_repeat_headers = 1; // to get header before every I-Frame
    parameters.b_annexb = 1; // put start code in front of nal. we will remove start code later
    x264_param_apply_profile(&parameters, "baseline");
    encoder = x264_encoder_open(&parameters);
    x264_picture_alloc(&picture_in, X264_CSP_I420, parameters.i_width, parameters.i_height);
    picture_in.i_type = X264_TYPE_AUTO;
    picture_in.img.i_csp = X264_CSP_I420;
    // The converter is set up for BGR24 -> YUV420 because the OpenCV video
    // capture yields BGR24 images; adapt to your input pixel format.
    convertContext = sws_getContext(parameters.i_width, parameters.i_height, PIX_FMT_BGR24, parameters.i_width, parameters.i_height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
}
// Release encoder resources; call exactly once after initilize().
void x264Encoder::unInitilize()
{
x264_encoder_close(encoder);
sws_freeContext(convertContext);
}
// Convert one BGR24 frame to I420 and push it through the encoder; every NAL
// unit produced is appended to the output queue.
//
// Fix vs. the original: 'nals' was left uninitialized before the encode call.
void x264Encoder::encodeFrame(cv::Mat& image)
{
    int srcStride = parameters.i_width * 3; // BGR24: 3 bytes per pixel
    sws_scale(convertContext, &(image.data), &srcStride, 0, parameters.i_height, picture_in.img.plane, picture_in.img.i_stride);
    x264_nal_t* nals = NULL;
    int i_nals = 0;
    int frameSize = x264_encoder_encode(encoder, &nals, &i_nals, &picture_in, &picture_out);
    if (frameSize > 0)
    {
        // NOTE(review): x264_nal_t::p_payload points into an encoder-internal
        // buffer reused by the next x264_encoder_encode() call; consumers must
        // drain the queue before the next encodeFrame() — confirm callers do.
        for (int i = 0; i < i_nals; i++)
        {
            outputQueue.push(nals[i]);
        }
    }
}
// True when the encoder has produced NAL units not yet consumed.
// Simplified from an if/else pair that returned the same boolean.
bool x264Encoder::isNalsAvailableInOutputQueue()
{
    return !outputQueue.empty();
}
// Pop and return the oldest NAL unit. Precondition: the queue is non-empty
// (callers check isNalsAvailableInOutputQueue() first).
x264_nal_t x264Encoder::getNalUnit()
{
    x264_nal_t front = outputQueue.front();
    outputQueue.pop();
    return front;
}
现在我们有了编码器,它将拍摄 BGR 图片并对其进行编码.我的编码器将对帧进行编码并将所有输出 nals 放入输出队列,该队列将由 Live555 流式传输.要实现实时视频源,您必须创建两个类,它们将是(OnDemandServerMediaSubsession 的子类和另一个 FramedSource)的子类.两者都有 live555 媒体库.这个类也将向多个客户端提供数据.
要创建这两个类的子类,您可以参考以下类.
Now we have the encoder, which will take a BGR picture and encode it. It encodes each frame and puts all the output NALs into an output queue, which Live555 then streams. To implement a live video source you have to create two classes: a subclass of OnDemandServerMediaSubsession and a subclass of FramedSource, both of which are part of the live555 media library. These classes will also serve data to more than one client.
To create subclass of these two classes you can refer the following classes.
H264LiveServerMediaSession.h(OnDemandServerMediaSubsession 的子类)
H264LiveServerMediaSession.h (Subclass of OnDemandServerMediaSubsession)
#include "liveMedia.hh"
#include "OnDemandServerMediaSubsession.hh"
#include "LiveSourceWithx264.h"
// Live H.264 subsession: same structure as the file-based Live555
// H264VideoFileServerMediaSubsession, but createNewStreamSource() builds a
// LiveSourceWithx264 instead of reading a file.
class H264LiveServerMediaSession:public OnDemandServerMediaSubsession
{
public:
static H264LiveServerMediaSession* createNew(UsageEnvironment& env, bool reuseFirstSource);
// Continuations used while discovering the SDP aux line (see the .cpp).
void checkForAuxSDPLine1();
void afterPlayingDummy1();
protected:
H264LiveServerMediaSession(UsageEnvironment& env, bool reuseFirstSource);
virtual ~H264LiveServerMediaSession(void);
// Ends the nested event loop run inside getAuxSDPLine().
void setDoneFlag() { fDoneFlag = ~0; }
protected:
virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
private:
char* fAuxSDPLine;   // cached SDP "a=fmtp:" line (owned; from strDup)
char fDoneFlag;      // watch variable for the nested event loop
RTPSink* fDummySink; // temporary sink used only during SDP discovery
};
H264LiveServerMediaSession.cpp
#include "H264LiveServerMediaSession.h"
// Factory — the only way to construct this subsession.
H264LiveServerMediaSession* H264LiveServerMediaSession::createNew(UsageEnvironment& env, bool reuseFirstSource)
{
    H264LiveServerMediaSession* session = new H264LiveServerMediaSession(env, reuseFirstSource);
    return session;
}
// Constructor: clears the SDP-discovery state; real work happens on demand.
H264LiveServerMediaSession::H264LiveServerMediaSession(UsageEnvironment& env, bool reuseFirstSource):OnDemandServerMediaSubsession(env,reuseFirstSource),fAuxSDPLine(NULL), fDoneFlag(0), fDummySink(NULL)
{
}
// Free the cached aux SDP line (strDup allocates with new char[]).
H264LiveServerMediaSession::~H264LiveServerMediaSession(void)
{
delete[] fAuxSDPLine;
}
static void afterPlayingDummy(void* clientData)
{
H264LiveServerMediaSession *session = (H264LiveServerMediaSession*)clientData;
session->afterPlayingDummy1();
}
// The dummy playback finished: cancel any pending poll task and end the
// nested event loop running in getAuxSDPLine().
void H264LiveServerMediaSession::afterPlayingDummy1()
{
envir().taskScheduler().unscheduleDelayedTask(nextTask());
setDoneFlag();
}
static void checkForAuxSDPLine(void* clientData)
{
H264LiveServerMediaSession* session = (H264LiveServerMediaSession*)clientData;
session->checkForAuxSDPLine1();
}
// Poll until the dummy sink can produce the SDP "a=fmtp:" line (it needs
// SPS/PPS from the encoder first): cache it and stop the nested event loop,
// otherwise try again in 100 ms.
void H264LiveServerMediaSession::checkForAuxSDPLine1()
{
char const* dasl;
if(fAuxSDPLine != NULL)
{
// Already cached by an earlier pass.
setDoneFlag();
}
else if(fDummySink != NULL && (dasl = fDummySink->auxSDPLine()) != NULL)
{
fAuxSDPLine = strDup(dasl);
fDummySink = NULL;
setDoneFlag();
}
else
{
// Not ready yet: re-schedule this check after a brief delay.
int uSecsDelay = 100000; // 100 ms
nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsDelay, (TaskFunc*)checkForAuxSDPLine, this);
}
}
// Return the stream's SDP aux line. For H.264 it is only known once the
// encoder has produced SPS/PPS, so start playing into the sink and run a
// nested event loop (bounded by fDoneFlag) until checkForAuxSDPLine1()
// caches the line.
char const* H264LiveServerMediaSession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource)
{
if(fAuxSDPLine != NULL) return fAuxSDPLine; // cached from a previous client
if(fDummySink == NULL) // not already being set up for a concurrent client
{
fDummySink = rtpSink;
fDummySink->startPlaying(*inputSource, afterPlayingDummy, this);
checkForAuxSDPLine(this);
}
// Nested event loop: returns once setDoneFlag() fires.
envir().taskScheduler().doEventLoop(&fDoneFlag);
return fAuxSDPLine;
}
// Build the per-client source chain: a fresh LiveSourceWithx264 wrapped in an
// H264VideoStreamDiscreteFramer (discrete because the source delivers one NAL
// at a time, with start codes already stripped).
FramedSource* H264LiveServerMediaSession::createNewStreamSource(unsigned clientSessionID, unsigned& estBitRate)
{
// Based on encoder configuration i kept it 90000
estBitRate = 90000;
LiveSourceWithx264 *source = LiveSourceWithx264::createNew(envir());
// are you trying to keep the reference of the source somewhere? you shouldn't.
// Live555 will create and delete this class object many times. if you store it somewhere
// you will get memory access violation. instead you should configure you source to always read from your data source
return H264VideoStreamDiscreteFramer::createNew(envir(),source);
}
// Build the RTP sink that packetizes H.264 NAL units for this stream.
RTPSink* H264LiveServerMediaSession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource)
{
    RTPSink* videoSink = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
    return videoSink;
}
现在我们必须继承 LiveMedia 中的 FramedSource 类.对于模型,您可以参考 live555 库中的 DeviceSource.cpp.下面将展示我是如何做到的.
LiveSourceWithx264.h
Now we have to subclass the FramedSource class which is there in LiveMedia. For Model you can refer to DeviceSource.cpp in live555 library. Following will show how i have done it.
LiveSourceWithx264.h
// LiveSourceWithx264.h — FramedSource that captures frames from an OpenCV
// device, encodes them with the x264Encoder wrapper and delivers NAL units
// to Live555.
//
// Fix vs. the original: the OpenCV include path had lost its separator
// during HTML scraping ("opencv2opencv.hpp").
#include <queue>
#include "x264Encoder.h"
#include "opencv2/opencv.hpp" // was: "opencv2opencv.hpp"

class LiveSourceWithx264:public FramedSource
{
public:
    static LiveSourceWithx264* createNew(UsageEnvironment& env);
    // Trigger id shared by all instances; used to wake the event loop.
    static EventTriggerId eventTriggerId;
protected:
    LiveSourceWithx264(UsageEnvironment& env);
    virtual ~LiveSourceWithx264(void);
private:
    // Live555 entry point: called whenever the sink wants the next frame.
    virtual void doGetNextFrame();
    static void deliverFrame0(void* clientData);
    void deliverFrame();
    // Capture + encode one camera frame, filling nalQueue.
    void encodeNewFrame();
    static unsigned referenceCount; // number of live instances
    std::queue<x264_nal_t> nalQueue;
    timeval currentTime;
    // videoCaptureDevice is my BGR data source. You can have according to your need
    cv::VideoCapture videoCaptureDevice;
    cv::Mat rawImage;
    // Remember the x264 encoder wrapper we wrote in the start
    x264Encoder *encoder;
};
LiveSourceWithx264.cpp
#include "LiveSourceWithx264.h"
// Factory for the live capture source.
LiveSourceWithx264* LiveSourceWithx264::createNew(UsageEnvironment& env)
{
    LiveSourceWithx264* src = new LiveSourceWithx264(env);
    return src;
}
// Definitions of the statics shared by all instances: the delivery event
// trigger and the live-instance count.
EventTriggerId LiveSourceWithx264::eventTriggerId = 0;
unsigned LiveSourceWithx264::referenceCount = 0;
// Construct one live source: open the capture device, create and initialise
// the x264 wrapper, and lazily create the shared delivery event trigger.
// (An empty 'if (referenceCount == 0) {}' placeholder was removed.)
LiveSourceWithx264::LiveSourceWithx264(UsageEnvironment& env) : FramedSource(env)
{
    ++referenceCount;
    videoCaptureDevice.open(0); // default camera
    encoder = new x264Encoder();
    encoder->initilize();
    if (eventTriggerId == 0)
    {
        // One trigger shared by every instance of this class.
        eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
    }
}
// Tear down one live source.
//
// Fixes vs. the original:
//  * 'encoder' (allocated with new in the constructor) was never deleted —
//    memory leak on every teardown.
//  * The shared event trigger was deleted unconditionally; with several
//    concurrent instances the survivors were left holding a dangling trigger
//    id. Delete it only when the last instance goes away.
LiveSourceWithx264::~LiveSourceWithx264(void)
{
    --referenceCount;
    videoCaptureDevice.release();
    encoder->unInitilize();
    delete encoder;
    encoder = NULL;
    if (referenceCount == 0)
    {
        envir().taskScheduler().deleteEventTrigger(eventTriggerId);
        eventTriggerId = 0;
    }
}
// Pull frames from the capture device until a non-empty image arrives, encode
// it, and move every produced NAL from the encoder's queue into ours.
void LiveSourceWithx264::encodeNewFrame()
{
rawImage.data = NULL;
// Busy-wait (with 100 ms pauses) until the camera yields a frame.
while(rawImage.data == NULL)
{
videoCaptureDevice >> rawImage;
cv::waitKey(100);
}
// Got new image to stream
assert(rawImage.data != NULL);
encoder->encodeFrame(rawImage);
// Take all nals from encoder output queue to our input queue
while(encoder->isNalsAvailableInOutputQueue() == true)
{
x264_nal_t nal = encoder->getNalUnit();
nalQueue.push(nal);
}
}
void LiveSourceWithx264::deliverFrame0(void* clientData)
{
((LiveSourceWithx264*)clientData)->deliverFrame();
}
void LiveSourceWithx264::doGetNextFrame()
{
if(nalQueue.empty() == true)
{
encodeNewFrame();
gettimeofday(¤tTime,NULL);
deliverFrame();
}
else
{
deliverFrame();
}
}
// Deliver one NAL unit to Live555: strip the Annex-B start code (the RTP
// packetizer must not see it), truncate to fMaxSize if needed and copy the
// payload into the sink-provided buffer.
//
// Fixes vs. the original:
//  * Guard against an empty queue: front()/pop() on an empty std::queue is
//    undefined behaviour (possible if deliverFrame() is reached via the
//    event trigger rather than doGetNextFrame()).
//  * Renamed the misspelled local 'trancate' to 'startCodeLength'.
void LiveSourceWithx264::deliverFrame()
{
    if (!isCurrentlyAwaitingData()) return;
    if (nalQueue.empty()) return; // nothing buffered yet

    x264_nal_t nal = nalQueue.front();
    nalQueue.pop();
    assert(nal.p_payload != NULL);

    // The start code in front of every NAL might be 0x00000001 or 0x000001;
    // detect it, skip it, and pass only the remaining bytes to Live555.
    int startCodeLength = 0;
    if (nal.i_payload >= 4 && nal.p_payload[0] == 0 && nal.p_payload[1] == 0 && nal.p_payload[2] == 0 && nal.p_payload[3] == 1)
    {
        startCodeLength = 4;
    }
    else if (nal.i_payload >= 3 && nal.p_payload[0] == 0 && nal.p_payload[1] == 0 && nal.p_payload[2] == 1)
    {
        startCodeLength = 3;
    }

    if (nal.i_payload - startCodeLength > fMaxSize)
    {
        fFrameSize = fMaxSize;
        fNumTruncatedBytes = nal.i_payload - startCodeLength - fMaxSize;
    }
    else
    {
        fFrameSize = nal.i_payload - startCodeLength;
    }
    fPresentationTime = currentTime;
    memmove(fTo, nal.p_payload + startCodeLength, fFrameSize);
    FramedSource::afterGetting(this);
}
现在我们完成了类的实现.现在要进行流设置,您可以按照 testOnDemandRTSPServer.cpp 示例进行操作.这是我进行设置的主要位置
Now we are done with the classes implementation. Now to do the streaming setup you can follow same as testOnDemandRTSPServer.cpp sample. Here is my main where i did the setup
#include <iostream>
#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
#include <GroupsockHelper.hh>
#include "H264LiveServerMediaSession.h"
#include "opencv2/opencv.hpp" // fixed: path separator lost ("opencv2opencv.hpp")
#include "x264Encoder.h"

// Streaming setup, following the testOnDemandRTSPServer.cpp sample: one RTSP
// server on port 8554 serving the live x264 subsession under stream "usb1".
int main(int argc, char* argv[])
{
    TaskScheduler* taskSchedular = BasicTaskScheduler::createNew();
    BasicUsageEnvironment* usageEnvironment = BasicUsageEnvironment::createNew(*taskSchedular);
    RTSPServer* rtspServer = RTSPServer::createNew(*usageEnvironment, 8554, NULL);
    if (rtspServer == NULL)
    {
        // Fixed: the "\n" escapes below had been broken across source lines,
        // leaving literal newlines inside the string literals (invalid C++).
        *usageEnvironment << "Failed to create rtsp server ::" << usageEnvironment->getResultMsg() << "\n";
        exit(1);
    }
    std::string streamName = "usb1";
    ServerMediaSession* sms = ServerMediaSession::createNew(*usageEnvironment, streamName.c_str(), streamName.c_str(), "Live H264 Stream");
    H264LiveServerMediaSession* liveSubSession = H264LiveServerMediaSession::createNew(*usageEnvironment, true);
    sms->addSubsession(liveSubSession);
    rtspServer->addServerMediaSession(sms);
    char* url = rtspServer->rtspURL(sms);
    *usageEnvironment << "Play the stream using url " << url << "\n";
    delete[] url;
    taskSchedular->doEventLoop(); // does not return
    return 0;
}
并且您拥有 LiveSource 的 URL.我有我的 USB 摄像头 :)
and you have the URL for Your LiveSource. I had for my USB Cam :)
这篇关于Live555:基于“testOnDemandRTSPServer"的X264 Stream Live源的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持编程学习网!
本文标题为:Live555:基于“testOnDemandRTSPServer"的X264 Stream Live源
- 如何对自定义类的向量使用std::find()? 2022-11-07
- Stroustrup 的 Simple_window.h 2022-01-01
- STL 中有 dereference_iterator 吗? 2022-01-01
- 静态初始化顺序失败 2022-01-01
- 近似搜索的工作原理 2021-01-01
- 与 int by int 相比,为什么执行 float by float 矩阵乘法更快? 2021-01-01
- 从python回调到c++的选项 2022-11-16
- C++ 协变模板 2021-01-01
- 一起使用 MPI 和 OpenCV 时出现分段错误 2022-01-01
- 使用/clr 时出现 LNK2022 错误 2022-01-01