
[Repost] Implementing a Streaming Media Server with V4L2 + swscale + x264 + live555

Posted: 2014-03-04 20:22:55

   I'm writing this post partly because I don't take many notes while working on projects and I'm afraid I'll forget things later, and partly so that others looking for material on this topic can take a slightly shorter path. I won't claim this approach is particularly good, but it should at least have some reference value. The post assumes a certain amount of background; for readers who are not yet familiar with V4L2, swscale, x264 or live555, I list below the material I studied at the time (hopefully the links stay valid), and I thank the authors of those posts.

   Here are the links; reading them through carefully will definitely pay off.

    A very good article on V4L2 (Video for Linux Two): http://www.cnblogs.com/lixiaoming90/archive/2012/08/25/2657019.html . After reading it carefully, capturing video with V4L2 should not be a problem.

    swscale is part of the FFmpeg libraries; it handles pixel-format conversion as well as scaling of images. This article introduces how to use swscale: http://blog.csdn.net/leixiaohua1020/article/details/14215391 .

    x264 performs the H.264 encoding. Note that x264 expects its input in I420 (i.e. planar YUV 4:2:0) format, so swscale can be used to convert the raw camera frames into it (a minimal conversion sketch follows these links). http://www.cnblogs.com/fojian/archive/2012/09/01/2666627.html .

    live555 is a streaming-media framework, mainly implementing the RTSP protocol. It is well written (so everyone says), but reading the live555 source code still takes some effort. I won't go into the details of how live555 works here; the material is linked, with thanks to its author: http://blog.csdn.net/niu_gao/article/category/1066093 .
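    Since the I420 requirement is the part that usually trips people up, here is a minimal, self-contained sketch of the conversion step, written against the same legacy FFmpeg AVPicture/swscale API that the code below relies on. It is only an illustration: the YUYV source format matches a typical UVC webcam, and the helper name and sizes are assumptions of mine, not project code.

/*
 * yuyv_to_i420_sketch.cpp -- minimal YUYV422 -> YUV420P conversion with swscale
 * (a sketch using the legacy AVPicture API, not part of the project itself).
 */
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

// yuyvData points at one packed YUYV frame (for example a mmap'ed V4L2 buffer),
// bytesPerLine is the stride reported by the driver, and i420Out was allocated
// beforehand with avpicture_alloc(&i420Out, PIX_FMT_YUV420P, width, height).
bool convertToI420(unsigned char *yuyvData, int bytesPerLine, int width, int height,
        AVPicture &i420Out)
{
    AVPicture src;
    src.data[0] = yuyvData;                        // packed YUYV is a single plane
    src.data[1] = src.data[2] = src.data[3] = NULL;
    src.linesize[0] = bytesPerLine;
    src.linesize[1] = src.linesize[2] = src.linesize[3] = 0;

    SwsContext *ctx = sws_getContext(width, height, PIX_FMT_YUYV422,
            width, height, PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL, NULL, NULL);
    if (ctx == NULL)
        return false;
    int rows = sws_scale(ctx, src.data, src.linesize, 0, height,
            i420Out.data, i420Out.linesize);       // fills the three I420 planes
    sws_freeContext(ctx);
    return rows == height;                         // the I420 planes can now feed x264
}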

   Now let me walk through the implementation of the whole server.

/*
 * V4L2.cpp
 *
 *  Created on: 2013-12-17
 *      Author: ny
 */

#include "V4L2.h"

V4L2::V4L2()
{
    fd = -1;
    buffers = NULL;
    width = 0;
    height = 0;
    CLEAR(fmt);       // set up the default capture format
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; //V4L2_PIX_FMT_YUYV;
    fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
}

V4L2::~V4L2()
{
    close(fd);
}

int V4L2::getWidth()
{
    return width;
}

int V4L2::getHeight()
{
    return height;
}

bool V4L2::setSize(int width, int height)
{
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    if (ioctl(fd, VIDIOC_S_FMT, &fmt) == -1)
    {
        printf("Can not VIDIOC_S_FMT\n");
        return false;
    }
    getSizeInfo();
    return true;
}

void V4L2::getSizeInfo()
{
    if (ioctl(fd, VIDIOC_G_FMT, &fmt) == -1)
    {
        printf("Can not VIDIOC_G_FMT\n");
        return;
    }
    this->width = fmt.fmt.pix.width;
    this->height = fmt.fmt.pix.height;
}

bool V4L2::initDev(const char * devName, int width, int height)
{
    v4l2_capability cap;

    fd = open(devName, O_RDWR, 0); // open the device
    if (fd == -1)
    {
        printf("Can not open %s\n", devName);
        return false;
    }
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == -1) // query the device's capabilities
    {
        printf("Can not get Capability\n");
        return false;
    }
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
    {
        printf("Can not capture video\n");
        return false;
    }
    if (!(cap.capabilities & V4L2_CAP_STREAMING))
    {
        printf("does not support streaming\n");
    }
    if (!setSize(width, height))
        return false;
    printf("fmt.fmt.pix.bytesperline:%d\n", fmt.fmt.pix.bytesperline);
    printf("format:%c%c%c%c\n", (fmt.fmt.pix.pixelformat & 0xff),
            ((fmt.fmt.pix.pixelformat >> 8) & 0xff),
            ((fmt.fmt.pix.pixelformat >> 16) & 0xff),
            ((fmt.fmt.pix.pixelformat >> 24) & 0xff));

    return initMmap();
}
bool V4L2::initMmap()
{
    struct v4l2_requestbuffers req;
    unsigned int n_buffers;
    CLEAR(req);

    req.count = 4; // request frame buffers from the kernel; 4 is a typical choice
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;

    if (-1 == ioctl(fd, VIDIOC_REQBUFS, &req)) // ask the kernel to allocate the buffers
    {
        if (EINVAL == errno)
        {
            printf("device does not support memory mapping\n");
            exit(EXIT_FAILURE);
        } else
            printf("VIDIOC_REQBUFS failed\n");
    }
    }

    buffers = (buffer *) calloc(req.count, sizeof(buffer)); // allocate our per-buffer bookkeeping array
    if (!buffers)
    {
        printf("Out of memory\n");
        exit(EXIT_FAILURE);
    }

    for (n_buffers = 0; n_buffers < req.count; n_buffers++) // query each kernel buffer and map it into user space
    {
        struct v4l2_buffer buf;
        CLEAR(buf);

        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;

        if (-1 == ioctl(fd, VIDIOC_QUERYBUF, &buf))
            printf("VIDIOC_QUERYBUF failed\n");

        buffers[n_buffers].length = buf.length;   // remember each buffer's length
        printf("buf.length %d\n", buffers[n_buffers].length);
        buffers[n_buffers].start = mmap(NULL, buf.length,
                PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, buf.m.offset);    // map the kernel buffer into our address space

        if (MAP_FAILED == buffers[n_buffers].start)
            printf("mmap failed\n");
    }
    return true;
}
bool V4L2::startStream()
{
    unsigned int n_buffers;
    enum v4l2_buf_type type;
    /* queue all of the requested frame buffers so they can receive captured data */
    for (n_buffers = 0; n_buffers < 4; n_buffers++)
    {
        v4l2_buffer buf;
        CLEAR(buf);

        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;

        if (-1 == ioctl(fd, VIDIOC_QBUF, &buf))   // enqueue the buffer
        {
            printf("fail VIDIOC_QBUF\n");
            return false;
        }
    }

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    if (-1 == ioctl(fd, VIDIOC_STREAMON, &type))   // start streaming
    {
        printf("fail VIDIOC_STREAMON\n");
        return false;
    } else
        printf("StreamOn success!\n");
    return true;
}
// The camera delivers YUYV frames, so the pixel format has to be converted. The converted
// image is stored in pPictureDes in the format given by FMT, and its size is determined by
// widht_des and height_des.
bool V4L2::readFrame(AVPicture & pPictureDes, AVPixelFormat FMT, int widht_des,
        int height_des)
{
    v4l2_buffer buf;
    AVPicture pPictureSrc;
    SwsContext * pSwsCtx;
    CLEAR(buf);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    if (-1 == ioctl(fd, VIDIOC_DQBUF, &buf))   // dequeue a filled buffer
    {
        printf("fail VIDIOC_DQBUF\n");
        return false;
    }
    pPictureSrc.data[0] = (unsigned char *) buffers[buf.index].start;
    pPictureSrc.data[1] = pPictureSrc.data[2] = pPictureSrc.data[3] = NULL;
    pPictureSrc.linesize[0] = fmt.fmt.pix.bytesperline;
    int i = 0;
    for (i = 1; i < 8; i++)
    {
        pPictureSrc.linesize[i] = 0;
    }
    pSwsCtx = sws_getContext(width, height, PIX_FMT_YUYV422, widht_des,
            height_des, FMT,
            SWS_BICUBIC, NULL, NULL, NULL);
    int rs = sws_scale(pSwsCtx, pPictureSrc.data, pPictureSrc.linesize, 0,
            height, pPictureDes.data, pPictureDes.linesize);
    if (rs == -1)
    {
        printf("Can open to change to des image");
        return false;
    }
    sws_freeContext(pSwsCtx);
    if (-1 == ioctl(fd, VIDIOC_QBUF, &buf))    // re-queue the buffer
    {
        printf("fail VIDIOC_QBUF\n");
        return false;
    }
    return true;
}

bool V4L2::stopStream()
{
    enum v4l2_buf_type type;
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl(fd, VIDIOC_STREAMOFF, &type))
    {
        perror("Fail to ioctl ‘VIDIOC_STREAMOFF‘");
        //exit(EXIT_FAILURE);
        return false;
    }
    return true;
}

    The code above implements the camera capture. Pay attention to the function bool V4L2::readFrame(AVPicture & pPictureDes, AVPixelFormat FMT, int widht_des, int height_des): its first parameter is an AVPicture, an FFmpeg structure that holds an image, and it has to be initialized outside the function (the swscale article linked above explains why). Also keep in mind that anything allocated on the stack inside the function is destroyed when the function returns.
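    As a hedged, caller-side sketch of how that destination picture might be prepared (the 640x480 size and the device path are only examples; in this project the equivalent allocation actually happens in the V4L2FramedSource constructor shown further down):

// Sketch: the destination AVPicture is allocated once by the caller; readFrame()
// then fills those same buffers on every call.
AVPicture picture;
avpicture_alloc(&picture, PIX_FMT_YUV420P, 640, 480);   // data[] and linesize[] are set up here

V4L2 cam;
if (cam.initDev("/dev/video0", 640, 480) && cam.startStream())
{
    // Each call dequeues one YUYV frame and converts it into the caller-owned I420 buffers.
    cam.readFrame(picture, PIX_FMT_YUV420P, cam.getWidth(), cam.getHeight());
}

cam.stopStream();
avpicture_free(&picture);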

/*
 * H264Encode.cpp
 *
 *  Created on: 2014-01-04
 *      Author: ny
 */

#include <H264Encode.h>

H264Encode::H264Encode()
{
    i_pts = 0;
    x264EnCoder = NULL;
    pPicOut = NULL;
    nnal = 0;
    nals = NULL;
}

H264Encode::~H264Encode()
{
}

void H264Encode::x264_init(AVPicture picture, int width, int height)
{
    x264_param_default_preset(&param, "veryfast", "zerolatency");

    param.i_width = width;
    param.i_height = height;
    param.i_fps_num = 25;
    param.i_fps_den = 1;

    param.i_keyint_max = 25;
    param.b_intra_refresh = 1;

    param.b_annexb = 1;

    x264_param_apply_profile(&param, "baseline");
    x264EnCoder = x264_encoder_open(&param);

    x264_picture_alloc(&xPic, X264_CSP_I420, width, height);

    xPic.img.plane[0] = picture.data[0];
    xPic.img.plane[1] = picture.data[1];
    xPic.img.plane[2] = picture.data[2];
    pPicOut = new x264_picture_t;
}

void H264Encode::x264_encode()
{
    xPic.i_pts = i_pts++;
    x264_encoder_encode(x264EnCoder, &nals, &nnal, &xPic, pPicOut);
}

    There is not much to explain here; it simply follows the standard x264 flow. The point worth noting is that encoding a single frame may produce several NALs: nals is a pointer to the array of NALs, nnal says how many NALs that frame produced, and the actual data is stored inside each NAL.
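    As a small usage sketch (assuming an H264Encode instance called encoder that has already been set up with x264_init; the raw .h264 dump file is purely for illustration), a caller could walk the NAL array like this after each encode call:

// After encoder.x264_encode() has run, nals/nnal describe the NALs of that one frame.
FILE *dump = fopen("out.h264", "wb");          // example sink for the Annex-B bytes
encoder.x264_encode();                         // wraps x264_encoder_encode()
for (int i = 0; i < encoder.nnal; i++)
{
    x264_nal_t &nal = encoder.nals[i];
    // nal.p_payload points at the NAL bytes (including the start code, since b_annexb = 1)
    // and nal.i_payload is their length.
    fwrite(nal.p_payload, 1, nal.i_payload, dump);
}
fclose(dump);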

/*
 * H264OnDemandServerMediaSubsession.cpp
 *
 *  Created on: 2014-01-04
 *      Author: ny
 */

#include <H264OnDemandServerMediaSubsession.h>
#include <V4L2FramedSource.h>
#include <live/H264VideoStreamFramer.hh>
#include <live/H264VideoRTPSink.hh>

H264OnDemandServerMediaSubsession::H264OnDemandServerMediaSubsession(
        UsageEnvironment& env, FramedSource * source) :
        OnDemandServerMediaSubsession(env, true)
{
    mp_source = source;
    mp_sdp_line = NULL;
    mp_dummy_rtpsink = NULL;
    m_done = 0;
}

H264OnDemandServerMediaSubsession::~H264OnDemandServerMediaSubsession()
{
}

void H264OnDemandServerMediaSubsession::chkForAuxSDPLine1()
{
    if (mp_dummy_rtpsink->auxSDPLine())
        m_done = 0xff;
    else
    {
        int delay = 100 * 1000;   // 100ms
        nextTask() = envir().taskScheduler().scheduleDelayedTask(delay,
                chkForAuxSDPLine, this);
    }
}

const char * H264OnDemandServerMediaSubsession::getAuxSDPLine(RTPSink *sink,
        FramedSource *source)
{
    if (mp_sdp_line)
        return mp_sdp_line;

    mp_dummy_rtpsink = sink;
    mp_dummy_rtpsink->startPlaying(*source, 0, 0);
    //mp_dummy_rtpsink->startPlaying(*source, afterPlayingDummy, this);
    chkForAuxSDPLine(this);
    m_done = 0;
    envir().taskScheduler().doEventLoop(&m_done);
    mp_sdp_line = strdup(mp_dummy_rtpsink->auxSDPLine());
    mp_dummy_rtpsink->stopPlaying();

    return mp_sdp_line;
}

RTPSink * H264OnDemandServerMediaSubsession::createNewRTPSink(
        Groupsock *rtpsock, unsigned char type, FramedSource *source)
{
    return H264VideoRTPSink::createNew(envir(), rtpsock, type);
}

FramedSource * H264OnDemandServerMediaSubsession::createNewStreamSource(
        unsigned sid, unsigned &bitrate)
{
    bitrate = 500;
    return H264VideoStreamFramer::createNew(envir(),
            mp_source);
}
char const* H264OnDemandServerMediaSubsession::sdpLines()
{
    return fSDPLines = (char *)
        "m=video 0 RTP/AVP 96\r\n"
        "c=IN IP4 0.0.0.0\r\n"
        "b=AS:96\r\n"
        "a=rtpmap:96 H264/90000\r\n"
        "a=fmtp:96 packetization-mode=1;profile-level-id=000000;sprop-parameter-sets=H264\r\n"
        "a=control:track1\r\n";
}

    The H264OnDemandServerMediaSubsession class inherits from live555's OnDemandServerMediaSubsession class; its main purpose is to provide the SDP description of the stream.

The sdpLines() function needs to be rewritten to match your own setup. I won't explain the meaning of every field here; once you are familiar with the RTSP and SDP protocols their meaning becomes clear. A sketch of how the parameter-set fields could be filled in from the encoder follows.
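The dummy-RTPSink machinery in getAuxSDPLine() above is how live555 would normally produce these parameters automatically; the hard-coded sdpLines() bypasses it, which is why profile-level-id and sprop-parameter-sets are left as placeholders. As a hedged alternative (buildSpropParameterSets is a hypothetical helper of mine, not part of the project), the real values could be pulled out of x264 and base64-encoded with live555's base64Encode():

#include <live/Base64.hh>    // live555's base64Encode()
#include <x264.h>

// Hypothetical helper: extract SPS/PPS from the opened encoder and base64-encode them.
// The two returned strings, joined with a comma, form the sprop-parameter-sets value.
static void buildSpropParameterSets(x264_t *encoder, char *&spsB64, char *&ppsB64)
{
    x264_nal_t *headers = NULL;
    int count = 0;
    spsB64 = ppsB64 = NULL;
    x264_encoder_headers(encoder, &headers, &count);   // emits the SPS, PPS and SEI NALs
    for (int i = 0; i < count; i++)
    {
        // With b_annexb = 1 the header NALs carry a 4-byte start code; skip it.
        unsigned char *p = headers[i].p_payload + 4;
        unsigned len = headers[i].i_payload - 4;
        if (headers[i].i_type == NAL_SPS)
            spsB64 = base64Encode((char const *) p, len);
        else if (headers[i].i_type == NAL_PPS)
            ppsB64 = base64Encode((char const *) p, len);
    }
}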
<PRE class=cpp name="code" snippet_file_name="blog_20140221_5_1115034" code_snippet_id="198354">/*
 * V4L2FramedSource.cpp
 *
 *  Created on: 2014-01-04
 *      Author: ny
 */

#include <V4L2FramedSource.h>

int V4L2FramedSource::nalIndex = 0;

V4L2FramedSource::V4L2FramedSource(UsageEnvironment & env) :
        FramedSource(env)
{
    v4l2 = new V4L2();
    pEncode = new H264Encode();
    mp_token = NULL;
    printf("creater\n");
    v4l2->initDev("/dev/video0", 640, 480);
    avpicture_alloc(&Picture, PIX_FMT_YUV420P, v4l2->getWidth(),
            v4l2->getHeight());
    v4l2->startStream();

    pEncode->x264_init(Picture, 640, 480);

}

V4L2FramedSource::~V4L2FramedSource()
{
}

unsigned V4L2FramedSource::maxFrameSize() const
{
    return 40 * 1024;
}

void V4L2FramedSource::doGetNextFrame()
{
    /*double delay = 1000.0 / 25;
     int to_delay = delay * 1000;    // us
     mp_token = envir().taskScheduler().scheduleDelayedTask(to_delay,
     getNextFrame, this);*/
    if (V4L2FramedSource::nalIndex == pEncode->nnal)
    {
        v4l2->readFrame(Picture, PIX_FMT_YUV420P, v4l2->getWidth(),
                v4l2->getHeight());
        pEncode->x264_encode();
        V4L2FramedSource::nalIndex = 0;
        gettimeofday(&fPresentationTime, NULL);
    }
    memmove(fTo, pEncode->nals[V4L2FramedSource::nalIndex].p_payload,
            pEncode->nals[V4L2FramedSource::nalIndex].i_payload);
    fFrameSize = pEncode->nals[V4L2FramedSource::nalIndex].i_payload;
    V4L2FramedSource::nalIndex++;
    afterGetting(this);
}

void V4L2FramedSource::getNextFrame1()
{

}
</PRE><BR>
<BR>
<PRE></PRE>
<P></P>
<PRE></PRE>
<P></P>
<P>V4L2FramedSource类继承了FramedSource类。V4L2FramedSource是我们自定义的类,主要实现了我们的视频数据如何进入到live555里面去。首先在构造函数里面,我们对v4l2进行了初始化以及x264编码初始化。这个类最重要的就是<SPAN style="BACKGROUND-COLOR: rgb(240,240,240)">doGetNextFrame函数,live555就是通过这个函数将我们的一个nal数据加载到live555里面,然后消息循环发送出去的。我们先是将v4l2捕捉的视频数据进行H264压缩编码,这里面值得注意的是一帧图像可能压缩成几个nal的,所以我这里面在确保一帧数据完全发送完了才向v4l2要数据。然后就是数据的般移了,其中数据存在fTo里面。然后就是消息了。</SPAN></P>
<P></P>
<P></P>
<PRE class=cpp name="code" snippet_file_name="blog_20140221_6_7320023" code_snippet_id="198354">/*
 * Application.cpp
 *
 *  Created on: 2014-01-04
 *      Author: ny
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "H264OnDemandServerMediaSubsession.h"
#include "V4L2FramedSource.h"

#include <live/liveMedia.hh>
#include <live/BasicUsageEnvironment.hh>
#include <live/UsageEnvironment.hh>

UsageEnvironment* env;
static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
        char const* streamName, char const* inputFileName = "Live"); // fwd
int main()
{
    // 设置使用环境。Begin by setting up our usage environment:
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    env = BasicUsageEnvironment::createNew(*scheduler);
    UserAuthenticationDatabase* authDB = NULL;

    RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
    if (rtspServer == NULL)
    {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        exit(1);
    }

    char const* descriptionString =
            "Session streamed by \"testOnDemandRTSPServer\"";

    char const* streamName = "live";
    ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName,
            streamName, descriptionString);
    sms->addSubsession(
            new H264OnDemandServerMediaSubsession(*env,
                    new V4L2FramedSource(*env)));
    rtspServer->addServerMediaSession(sms);
    announceStream(rtspServer, sms, streamName);
    env->taskScheduler().doEventLoop(); // does not return

    return 0;
}
static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
        char const* streamName, char const* inputFileName)
{
    char* url = rtspServer->rtspURL(sms);
    UsageEnvironment& env = rtspServer->envir();
    env << "\n\"" << streamName << "\" stream, from the file \""
            << inputFileName << "\"\n";
    env << "Play this stream using the URL \"" << url << "\"\n";
    delete[] url;
}
</PRE><BR>
<BR>
<P>这是我们程序的主程序,主要是参考了live555的testOnDemandRTSPServer.cpp。主要是区别是我们自己指定了rtsp链接了,以及我们自己实现的framesource。这样本地的视频采集发送服务器就完成了,可以用开源的VLC播放进行播放了,live555支持1对多的,已经测试过可以在ipad,iphone,android同步播放,延迟在1s以内。不过要VLC设置缓存时间在200ms就可以了。</P>
<P><BR>
</P>
<P><BR>
</P>
<P><BR>
</P>
<PRE></PRE>


Original post: http://www.cnblogs.com/cslunatic/p/3579706.html
