FFmpeg 实现从设备端获取音视频流并通过RTMP推流

2024-08-27 09:36

本文主要介绍如何使用 FFmpeg 从设备端(摄像头和麦克风)获取音视频流并通过 RTMP 推流,希望能为需要实现类似功能的开发者提供参考,欢迎大家一起学习交流。

使用FFmpeg库(版本号为:4.4.2-0ubuntu0.22.04.1)实现从摄像头和麦克风获取音视频流并通过RTMP推流。
RTMP服务器使用的是SRS,我这边是跑在Ubuntu上的,最好是关闭掉系统防火墙,不然连接服务器好像会出问题,拉流端使用VLC。如果想要降低延时,请看我另外一篇博客,里面有说降低延时的方法。

代码如下:

/*
 * Capture audio/video from local devices (V4L2 camera + ALSA microphone),
 * encode to H.264 + AAC, and publish over RTMP in an FLV mux.
 */
#include <stdio.h>                     /* BUGFIX: printf was used without this include */
#include <pthread.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>             /* BUGFIX: av_opt_set was relying on transitive includes */
#include <libavutil/audio_fifo.h>

/* Per-stream state shared between main() and the video capture thread. */
typedef struct st_video
{
    enum AVPixelFormat camera_pix_fmt; /* raw pixel format delivered by the camera */
    AVStream *stream_out;              /* video stream inside the FLV output */
    AVFormatContext *context_in;       /* demuxer for the camera device */
    AVFormatContext **context_out;     /* shared RTMP/FLV muxer (owned by main) */
    int streamid;                      /* index of the video stream in context_in */
    struct SwsContext *sws_ctx;        /* camera format -> YUV420P converter */
    AVCodecContext *codec_context;     /* H.264 encoder */
    pthread_mutex_t *lock_write_frame; /* serializes av_interleaved_write_frame */
} st_video;

/* Per-stream state shared between main() and the audio capture thread. */
typedef struct st_audio
{
    AVStream *stream_out;              /* audio stream inside the FLV output */
    AVFormatContext *context_in;       /* demuxer for the ALSA capture device */
    AVFormatContext **context_out;     /* shared RTMP/FLV muxer (owned by main) */
    int streamid;                      /* index of the audio stream in context_in */
    struct SwrContext *swr_ctx;        /* capture PCM -> FLTP resampler */
    AVCodecContext *codec_context;     /* AAC encoder */
    pthread_mutex_t *lock_write_frame; /* serializes av_interleaved_write_frame */
} st_audio;

int initVideo(st_video *s_video);
int initAudio(st_audio *s_audio);
void *thread_v(void *arg);
void *thread_a(void *arg);int main(void)
{int ret = -1;const char *url = "rtmp://192.168.3.230/live/livestream"; // rtmp地址AVFormatContext *context_out = NULL;pthread_mutex_t lock_write_frame;st_video s_video;s_video.context_out = &context_out;s_video.lock_write_frame = &lock_write_frame;st_audio s_audio;s_audio.context_out = &context_out;s_audio.lock_write_frame = &lock_write_frame;// 打印ffmpeg版本信息printf("ffmpeg version: %s\n", av_version_info());// 注册所有设备avdevice_register_all();// 分配输出格式上下文avformat_alloc_output_context2(&context_out, NULL, "flv", NULL);if (!context_out){printf("avformat_alloc_output_context2 failed\n");return -1;}// 初始化视频流initVideo(&s_video);// 初始化音频流initAudio(&s_audio);// 打开urlif (!(context_out->oformat->flags & AVFMT_NOFILE)){ret = avio_open(&context_out->pb, url, AVIO_FLAG_WRITE);if (ret < 0){printf("avio_open error (errmsg '%s')\n", av_err2str(ret));return -1;}}// 写入头部信息ret = avformat_write_header(context_out, NULL);if (ret < 0){avio_close(context_out->pb);printf("avformat_write_header failed\n");return -1;}pthread_t thread1, thread2;pthread_mutex_init(&lock_write_frame, NULL);pthread_create(&thread1, NULL, thread_v, &s_video);pthread_create(&thread2, NULL, thread_a, &s_audio);pthread_join(thread1, NULL);pthread_join(thread2, NULL);pthread_mutex_destroy(&lock_write_frame);if (s_video.sws_ctx)sws_freeContext(s_video.sws_ctx);if (s_video.context_in)avformat_close_input(&s_video.context_in);if (s_video.codec_context)avcodec_free_context(&s_video.codec_context);if (s_audio.codec_context)avcodec_free_context(&s_audio.codec_context);if (s_audio.swr_ctx)swr_free(&s_audio.swr_ctx);if (s_audio.context_in)avformat_close_input(&s_audio.context_in);if (context_out && !(context_out->flags & AVFMT_NOFILE))avio_close(context_out->pb);if (context_out)avformat_free_context(context_out);return 0;
}

/*
 * Open a capture device and create a matching stream in the output muxer.
 *
 * context_in        - receives the device demuxer (closed here on error paths)
 * type              - AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO
 * streamid          - receives the index of the best stream of `type`
 * input_format_name - libavdevice demuxer name ("video4linux2", "alsa", ...)
 * device_name       - device identifier ("/dev/video0", "hw:1,0", ...)
 * options           - demuxer options, consumed by avformat_open_input
 * context_out       - output muxer in which the new stream is created
 * stream_out        - receives the newly created output stream
 *
 * Returns 0 on success, -1 on failure.
 */
int initStream(AVFormatContext **context_in, enum AVMediaType type, int *streamid,
               const char *input_format_name, const char *device_name, AVDictionary **options,
               AVFormatContext *context_out, AVStream **stream_out)
{
    /* Look up the libavdevice input format. */
    AVInputFormat *fmt = av_find_input_format(input_format_name);
    if (!fmt)
    {
        printf("av_find_input_format error\n");
        return -1;
    }
    /* Open the device. */
    if (avformat_open_input(context_in, device_name, fmt, options) != 0)
    {
        av_dict_free(options);
        printf("avformat_open_input error\n");
        return -1;
    }
    /* Probe stream parameters. */
    if (avformat_find_stream_info(*context_in, NULL) < 0)
    {
        avformat_close_input(context_in); /* BUGFIX: input leaked on this path */
        printf("avformat_find_stream_info error\n");
        return -1;
    }
    /* Pick the best stream of the requested media type. */
    *streamid = av_find_best_stream(*context_in, type, -1, -1, NULL, 0);
    if (*streamid < 0)
    {
        avformat_close_input(context_in); /* BUGFIX: input leaked on this path */
        printf("cannot find video stream\n");
        return -1;
    }
    /* Create the corresponding output stream. */
    *stream_out = avformat_new_stream(context_out, NULL);
    if (!(*stream_out))
    {
        avformat_close_input(context_in); /* BUGFIX: input leaked on this path */
        avformat_free_context(context_out);
        printf("avformat_new_stream failed\n");
        return -1;
    }
    return 0;
}

/*
 * Create the swscale converter that turns raw camera frames into the
 * encoder's pixel format, using the geometry/format recorded in the
 * input and output stream parameters.
 *
 * Returns 0 on success, -1 on failure.
 */
int initSws(struct SwsContext **sws_ctx, AVStream *stream_in, AVStream *stream_out)
{
    *sws_ctx = sws_getContext(stream_in->codecpar->width, stream_in->codecpar->height,
                              (enum AVPixelFormat)stream_in->codecpar->format,
                              stream_out->codecpar->width, stream_out->codecpar->height,
                              (enum AVPixelFormat)stream_out->codecpar->format,
                              SWS_BILINEAR, NULL, NULL, NULL);
    /* BUGFIX: the original tested `!sws_ctx` — the out-parameter itself,
     * which is never NULL — so a failed sws_getContext went undetected. */
    if (!(*sws_ctx))
    {
        printf("sws_getContext error\n");
        return -1;
    }
    return 0;
}

/*
 * Create and initialize the resampler that converts captured PCM
 * (stream_in's rate/format/layout) into what the AAC encoder expects
 * (stream_out's rate/format/layout).
 *
 * Returns 0 on success, -1 on failure.
 */
int initSwr(struct SwrContext **swr_ctx, AVStream *stream_in, AVStream *stream_out)
{
    /* codecpar->channel_layout is not filled in by the capture device, so
     * derive default layouts from the channel counts. */
    int64_t chlayout_in = av_get_default_channel_layout(stream_in->codecpar->channels);
    int64_t chlayout_out = av_get_default_channel_layout(stream_out->codecpar->channels);
    /* BUGFIX: the original passed chlayout_in as the OUTPUT layout and
     * chlayout_out as the INPUT layout — swr_alloc_set_opts takes the
     * output triplet first. Harmless for mono-in/mono-out, wrong for any
     * other channel configuration. */
    *swr_ctx = swr_alloc_set_opts(NULL,
                                  chlayout_out,
                                  (enum AVSampleFormat)stream_out->codecpar->format,
                                  stream_out->codecpar->sample_rate,
                                  chlayout_in,
                                  (enum AVSampleFormat)stream_in->codecpar->format,
                                  stream_in->codecpar->sample_rate,
                                  0, NULL);
    if (!(*swr_ctx) || swr_init(*swr_ctx) < 0)
    {
        printf("allocate resampler context failed\n");
        return -1;
    }
    return 0;
}

/*
 * Configure and open the H.264 video encoder, then copy its parameters
 * into the output stream. The encoder uses the camera's frame geometry,
 * the caller-supplied frame rate, baseline profile, and zero-latency
 * tuning (no B-frames).
 *
 * Returns 0 on success, -1 on failure.
 */
int setVcodec(AVCodecContext **codec_context, AVStream *stream_in,
              int frame_rate, AVFormatContext *context_out, AVStream *stream_out)
{
    /* Locate the H.264 encoder. */
    AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!encoder)
    {
        printf("Codec not found\n");
        return -1;
    }
    printf("codec name: %s\n", encoder->name);

    /* Allocate the encoder context. */
    *codec_context = avcodec_alloc_context3(encoder);
    if (!(*codec_context))
    {
        printf("avcodec_alloc_context3 failed\n");
        return -1;
    }
    AVCodecContext *vctx = *codec_context;

    /* Encoder parameters: geometry from the camera, timing from the
     * caller-supplied frame rate. */
    vctx->codec_id = AV_CODEC_ID_H264;
    vctx->codec_type = AVMEDIA_TYPE_VIDEO;
    vctx->pix_fmt = AV_PIX_FMT_YUV420P;
    vctx->width = stream_in->codecpar->width;
    vctx->height = stream_in->codecpar->height;
    vctx->time_base = (AVRational){1, frame_rate}; /* timestamp unit */
    vctx->framerate = (AVRational){frame_rate, 1};
    vctx->bit_rate = 750 * 1000;
    vctx->gop_size = frame_rate;  /* one keyframe per second */
    vctx->max_b_frames = 0;       /* no B-frames for low latency */
    /* x264 private options: baseline profile, zero-latency tuning. */
    av_opt_set(vctx->priv_data, "profile", "baseline", 0);
    av_opt_set(vctx->priv_data, "tune", "zerolatency", 0);

    /* Muxers that require global headers get SPS/PPS in extradata instead
     * of in-band before every keyframe. */
    if (context_out->oformat->flags & AVFMT_GLOBALHEADER)
    {
        printf("set AV_CODEC_FLAG_GLOBAL_HEADER\n");
        vctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    /* Open the encoder. */
    if (avcodec_open2(vctx, encoder, NULL) < 0)
    {
        avcodec_free_context(codec_context);
        printf("avcodec_open2 failed\n");
        return -1;
    }

    /* Export encoder parameters to the output stream. */
    if (avcodec_parameters_from_context(stream_out->codecpar, vctx) < 0)
    {
        avcodec_free_context(codec_context);
        printf("avcodec_parameters_from_context failed\n");
        return -1;
    }
    return 0;
}

/*
 * Configure and open the AAC audio encoder, then copy its parameters into
 * the output stream. Sample rate and channel count come from the capture
 * stream; the FFmpeg native AAC encoder requires planar float input.
 *
 * Returns 0 on success, -1 on failure.
 */
int setAcodec(AVCodecContext **codec_context, AVStream *stream_in,
              AVFormatContext *context_out, AVStream *stream_out)
{
    AVCodec *c = NULL;
    /* Locate the AAC encoder. */
    c = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!c)
    {
        printf("Codec not found\n");
        return -1;
    }
    printf("codec name: %s\n", c->name);
    /* Allocate the encoder context. */
    *codec_context = avcodec_alloc_context3(c);
    /* BUGFIX: the original re-tested `if (!c)` here, so a failed
     * allocation fell through and dereferenced a NULL context below. */
    if (!(*codec_context))
    {
        printf("avcodec_alloc_context3 failed\n");
        return -1;
    }
    AVCodecContext *ctx = *codec_context;
    /* Encoder parameters. */
    ctx->codec_id = AV_CODEC_ID_AAC;
    ctx->codec_type = AVMEDIA_TYPE_AUDIO;
    ctx->sample_fmt = AV_SAMPLE_FMT_FLTP; /* input format required by the native AAC encoder */
    ctx->sample_rate = stream_in->codecpar->sample_rate;
    ctx->channels = stream_in->codecpar->channels;
    ctx->channel_layout = av_get_default_channel_layout(stream_in->codecpar->channels);
    ctx->bit_rate = 64000;
    ctx->profile = FF_PROFILE_AAC_LOW;
    /* Global-header muxers get the AudioSpecificConfig in extradata. */
    if (context_out->oformat->flags & AVFMT_GLOBALHEADER)
    {
        printf("set AV_CODEC_FLAG_GLOBAL_HEADER\n");
        ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    /* Open the encoder. */
    if (avcodec_open2(ctx, c, NULL) < 0)
    {
        avcodec_free_context(codec_context);
        printf("avcodec_open2 failed\n");
        return -1;
    }
    /* Export encoder parameters to the output stream. */
    int ret = avcodec_parameters_from_context(stream_out->codecpar, ctx);
    if (ret < 0)
    {
        avcodec_free_context(codec_context);
        printf("avcodec_parameters_from_context failed\n");
        return -1;
    }
    return 0;
}

/*
 * Initialize the whole video pipeline: open the V4L2 camera, create the
 * output stream, open the H.264 encoder, and set up the pixel-format
 * converter. Fills s_video. Returns 0 on success, -1 on failure.
 */
int initVideo(st_video *s_video)
{
    /* BUGFIX: the struct arrives from the caller's stack uninitialized.
     * avformat_open_input requires *context_in to be NULL (or a valid
     * context) — a garbage pointer is undefined behavior — and main()'s
     * cleanup tests these members, so NULL them all first. */
    s_video->context_in = NULL;
    s_video->sws_ctx = NULL;
    s_video->codec_context = NULL;
    s_video->stream_out = NULL;
    s_video->streamid = -1;

    const char *input_format_name = "video4linux2"; /* v4l2 capture demuxer */
    const char *device_name = "/dev/video0";        /* camera device */
    const char *camera_resolution = "640x480";
    s_video->camera_pix_fmt = AV_PIX_FMT_YUYV422;   /* raw pixel format delivered by the camera */
    int frame_rate = 25;
    int ret = -1;
    AVDictionary *options = NULL;
    av_dict_set(&options, "video_size", camera_resolution, 0); /* request capture resolution */

    ret = initStream(&s_video->context_in, AVMEDIA_TYPE_VIDEO, &s_video->streamid,
                     input_format_name, device_name, &options,
                     *(s_video->context_out), &s_video->stream_out);
    if (ret < 0)
    {
        printf("initStream failed\n");
        return -1;
    }
    AVStream *stream_v = s_video->context_in->streams[s_video->streamid];
    printf("video stream, width: %d, height: %d, format: %s\n",
           stream_v->codecpar->width, stream_v->codecpar->height,
           av_get_pix_fmt_name((enum AVPixelFormat)stream_v->codecpar->format));
    ret = setVcodec(&s_video->codec_context, stream_v, frame_rate,
                    *(s_video->context_out), s_video->stream_out);
    if (ret < 0)
    {
        printf("setVcodec failed\n");
        return -1;
    }
    ret = initSws(&s_video->sws_ctx, stream_v, s_video->stream_out);
    if (ret < 0)
    {
        printf("initSws failed\n");
        return -1;
    }
    return 0;
}

/*
 * Initialize the whole audio pipeline: open the ALSA microphone, create
 * the output stream, open the AAC encoder, and set up the resampler.
 * Fills s_audio. Returns 0 on success, -1 on failure.
 */
int initAudio(st_audio *s_audio)
{
    /* BUGFIX: NULL-init all members first — the struct arrives from the
     * caller's stack uninitialized, avformat_open_input requires a NULL
     * *context_in, and main()'s cleanup tests these pointers. */
    s_audio->context_in = NULL;
    s_audio->swr_ctx = NULL;
    s_audio->codec_context = NULL;
    s_audio->stream_out = NULL;
    s_audio->streamid = -1;

    const char *input_format_name = "alsa";
    const char *device_name = "hw:1,0";   /* microphone device */
    const char *in_sample_rate = "16000"; /* capture sample rate */
    const char *in_channels = "1";        /* capture channel count */
    int ret = -1;
    AVDictionary *options = NULL;
    /* Request microphone capture parameters. */
    av_dict_set(&options, "sample_rate", in_sample_rate, 0);
    av_dict_set(&options, "channels", in_channels, 0);

    ret = initStream(&s_audio->context_in, AVMEDIA_TYPE_AUDIO, &s_audio->streamid,
                     input_format_name, device_name, &options,
                     *(s_audio->context_out), &s_audio->stream_out);
    if (ret < 0)
    {
        printf("initStream failed\n");
        return -1;
    }
    AVStream *stream_a = s_audio->context_in->streams[s_audio->streamid];
    printf("audio stream, sample_rate: %d, channels: %d, format: %s\n",
           stream_a->codecpar->sample_rate, stream_a->codecpar->channels,
           av_get_sample_fmt_name((enum AVSampleFormat)stream_a->codecpar->format));
    ret = setAcodec(&s_audio->codec_context, stream_a, *(s_audio->context_out), s_audio->stream_out);
    if (ret < 0)
    {
        printf("setAcodec failed\n");
        return -1;
    }
    ret = initSwr(&s_audio->swr_ctx, stream_a, s_audio->stream_out);
    if (ret < 0)
    {
        printf("initSwr failed\n");
        return -1;
    }
    return 0;
}void *thread_v(void *arg)
{int ret = -1;int64_t frame_index = 0;st_video *s_video = (st_video *)arg;AVStream *stream_v = s_video->context_in->streams[s_video->streamid];// 分配内存AVFrame *input_frame = av_frame_alloc();AVFrame *frame_yuv420p = av_frame_alloc();if (!input_frame || !frame_yuv420p){printf("av_frame_alloc error\n");goto end;}AVPacket *packet = av_packet_alloc();if (!packet){printf("av_packet_alloc failed\n");goto end;}// 设置帧格式input_frame->format = s_video->camera_pix_fmt;input_frame->width = stream_v->codecpar->width;input_frame->height = stream_v->codecpar->height;frame_yuv420p->format = AV_PIX_FMT_YUV420P;frame_yuv420p->width = stream_v->codecpar->width;frame_yuv420p->height = stream_v->codecpar->height;// 分配帧内存ret = av_frame_get_buffer(frame_yuv420p, 0);if (ret < 0){printf("av_frame_get_buffer error\n");goto end;}// 读取帧并进行转换AVPacket pkt;while (av_read_frame(s_video->context_in, &pkt) >= 0){if (pkt.stream_index == s_video->streamid){// 把读取的帧数据(AVPacket)拷贝到输入帧(AVFrame)中ret = av_image_fill_arrays(input_frame->data, input_frame->linesize, pkt.data, s_video->camera_pix_fmt,stream_v->codecpar->width, stream_v->codecpar->height, 1);if (ret < 0){av_packet_unref(&pkt);printf("av_image_fill_arrays error\n");break;}// 转换为 YUV420Psws_scale(s_video->sws_ctx, (const uint8_t *const *)input_frame->data, input_frame->linesize, 0,input_frame->height, frame_yuv420p->data, frame_yuv420p->linesize);frame_yuv420p->pts = frame_index;frame_index++;// 发送帧到编码器ret = avcodec_send_frame(s_video->codec_context, frame_yuv420p);if (ret < 0){printf("avcodec_send_frame error (errmsg '%s')\n", av_err2str(ret));break;}// 接收编码后的数据包while (ret >= 0){ret = avcodec_receive_packet(s_video->codec_context, packet);if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){break;}else if (ret < 0){printf("avcodec_receive_packet error (errmsg '%s')\n", av_err2str(ret));goto end;}packet->stream_index = s_video->stream_out->index;// 将时间戳从编码器时间基转换到流时间基av_packet_rescale_ts(packet, s_video->codec_context->time_base, 
s_video->stream_out->time_base);packet->pos = -1;// 推送到RTMP服务器pthread_mutex_lock(s_video->lock_write_frame);ret = av_interleaved_write_frame(*(s_video->context_out), packet);pthread_mutex_unlock(s_video->lock_write_frame);if (ret < 0){printf("av_interleaved_write_frame error (errmsg '%d')\n", ret);av_packet_unref(packet);goto end;}av_packet_unref(packet);}}av_packet_unref(&pkt);}end:// 释放资源if (input_frame)av_frame_free(&input_frame);if (frame_yuv420p)av_frame_free(&frame_yuv420p);if (packet)av_packet_free(&packet);return NULL;
}void *thread_a(void *arg)
{st_audio *s_audio = (st_audio *)arg;int ret = -1;int fsize = 0;int64_t pts = 0;AVFrame *frame_out = NULL;AVAudioFifo *fifo = NULL;frame_out = av_frame_alloc();if (!frame_out){printf("av_frame_alloc failed\n");goto end;}// 设置帧参数, av_frame_get_buffer 在分配缓冲区时会用到frame_out->format = s_audio->codec_context->sample_fmt;frame_out->nb_samples = s_audio->codec_context->frame_size;frame_out->channel_layout = s_audio->codec_context->channel_layout;// 分配帧缓冲区ret = av_frame_get_buffer(frame_out, 0);if (ret < 0){printf("av_frame_get_buffer failed\n");goto end;}AVStream *stream_a = s_audio->context_in->streams[s_audio->streamid];// 计算编码每帧aac所需的pcm数据的大小 = 采样个数 * 采样格式大小 * 声道数fsize = s_audio->codec_context->frame_size *av_get_bytes_per_sample(stream_a->codecpar->format) *stream_a->codecpar->channels;printf("frame size: %d\n", fsize);fifo = av_audio_fifo_alloc((enum AVSampleFormat)stream_a->codecpar->format,stream_a->codecpar->channels, s_audio->codec_context->frame_size * 5);if (!fifo){printf("av_audio_fifo_alloc failed\n");goto end;}uint8_t *buf = av_malloc(fsize);if (!buf){printf("av_malloc failed\n");goto end;}AVPacket *recv_ptk = av_packet_alloc();if (!recv_ptk){printf("av_packet_alloc failed\n");goto end;}int sample_size = av_get_bytes_per_sample(stream_a->codecpar->format);// 读取帧AVPacket read_pkt;while (av_read_frame(s_audio->context_in, &read_pkt) >= 0){if (read_pkt.stream_index == s_audio->streamid){av_audio_fifo_write(fifo, (void **)&read_pkt.buf->data,read_pkt.size / sample_size);if (av_audio_fifo_size(fifo) < s_audio->codec_context->frame_size){// 不够一帧aac编码所需的数据continue;}av_audio_fifo_read(fifo, (void **)&buf, s_audio->codec_context->frame_size);// 重采样ret = swr_convert(s_audio->swr_ctx, frame_out->data, frame_out->nb_samples,(const uint8_t **)&buf, frame_out->nb_samples);if (ret < 0){printf("swr_convert failed\n");goto end;}frame_out->pts = pts;pts += frame_out->nb_samples;// 发送帧给编码器ret = avcodec_send_frame(s_audio->codec_context, frame_out);if (ret < 
0){printf("avcodec_send_frame failed\n");goto end;}// 接收编码后的数据包while (ret >= 0){ret = avcodec_receive_packet(s_audio->codec_context, recv_ptk);if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){break;}else if (ret < 0){printf("avcodec_receive_packet error (errmsg '%s')\n", av_err2str(ret));goto end;}recv_ptk->stream_index = s_audio->stream_out->index;av_packet_rescale_ts(recv_ptk, s_audio->codec_context->time_base,s_audio->stream_out->time_base);pthread_mutex_lock(s_audio->lock_write_frame);ret = av_interleaved_write_frame(*s_audio->context_out, recv_ptk);pthread_mutex_unlock(s_audio->lock_write_frame);if (ret < 0){printf("av_interleaved_write_frame failed\n");av_packet_unref(recv_ptk);goto end;}av_packet_unref(recv_ptk);}}av_packet_unref(&read_pkt);}end:if (frame_out)av_frame_free(&frame_out);if (recv_ptk)av_packet_free(&recv_ptk);if (fifo)av_audio_fifo_free(fifo);if (buf)av_free(buf);return NULL;
}


相关博客链接:
- FFmpeg 实现从摄像头获取流并通过RTMP推流
- FFmpeg 实现从麦克风获取流并通过RTMP推流

这篇关于FFmpeg 实现从设备端获取音视频流并通过RTMP推流的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!


原文地址:
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.chinasem.cn/article/1111298

相关文章

MySQL 横向衍生表(Lateral Derived Tables)的实现

《MySQL横向衍生表(LateralDerivedTables)的实现》横向衍生表适用于在需要通过子查询获取中间结果集的场景,相对于普通衍生表,横向衍生表可以引用在其之前出现过的表名,本文就来... 目录一、横向衍生表用法示例1.1 用法示例1.2 使用建议前面我们介绍过mysql中的衍生表(From子句

Mybatis的分页实现方式

《Mybatis的分页实现方式》MyBatis的分页实现方式主要有以下几种,每种方式适用于不同的场景,且在性能、灵活性和代码侵入性上有所差异,对Mybatis的分页实现方式感兴趣的朋友一起看看吧... 目录​1. 原生 SQL 分页(物理分页)​​2. RowBounds 分页(逻辑分页)​​3. Page

Python基于微信OCR引擎实现高效图片文字识别

《Python基于微信OCR引擎实现高效图片文字识别》这篇文章主要为大家详细介绍了一款基于微信OCR引擎的图片文字识别桌面应用开发全过程,可以实现从图片拖拽识别到文字提取,感兴趣的小伙伴可以跟随小编一... 目录一、项目概述1.1 开发背景1.2 技术选型1.3 核心优势二、功能详解2.1 核心功能模块2.

MYSQL查询结果实现发送给客户端

《MYSQL查询结果实现发送给客户端》:本文主要介绍MYSQL查询结果实现发送给客户端方式,具有很好的参考价值,希望对大家有所帮助,如有错误或未考虑完全的地方,望不吝赐教... 目录mysql取数据和发数据的流程(边读边发)Sending to clientSending DataLRU(Least Rec

Java中实现线程的创建和启动的方法

《Java中实现线程的创建和启动的方法》在Java中,实现线程的创建和启动是两个不同但紧密相关的概念,理解为什么要启动线程(调用start()方法)而非直接调用run()方法,是掌握多线程编程的关键,... 目录1. 线程的生命周期2. start() vs run() 的本质区别3. 为什么必须通过 st

使用SpringBoot整合Sharding Sphere实现数据脱敏的示例

《使用SpringBoot整合ShardingSphere实现数据脱敏的示例》ApacheShardingSphere数据脱敏模块,通过SQL拦截与改写实现敏感信息加密存储,解决手动处理繁琐及系统改... 目录痛点一:痛点二:脱敏配置Quick Start——Spring 显示配置:1.引入依赖2.创建脱敏

基于Python实现一个简单的题库与在线考试系统

《基于Python实现一个简单的题库与在线考试系统》在当今信息化教育时代,在线学习与考试系统已成为教育技术领域的重要组成部分,本文就来介绍一下如何使用Python和PyQt5框架开发一个名为白泽题库系... 目录概述功能特点界面展示系统架构设计类结构图Excel题库填写格式模板题库题目填写格式表核心数据结构

C#之List集合去重复对象的实现方法

《C#之List集合去重复对象的实现方法》:本文主要介绍C#之List集合去重复对象的实现方法,具有很好的参考价值,希望对大家有所帮助,如有错误或未考虑完全的地方,望不吝赐教... 目录C# List集合去重复对象方法1、测试数据2、测试数据3、知识点补充总结C# List集合去重复对象方法1、测试数据

Linux实现线程同步的多种方式汇总

《Linux实现线程同步的多种方式汇总》本文详细介绍了Linux下线程同步的多种方法,包括互斥锁、自旋锁、信号量以及它们的使用示例,通过这些同步机制,可以解决线程安全问题,防止资源竞争导致的错误,示例... 目录什么是线程同步?一、互斥锁(单人洗手间规则)适用场景:特点:二、条件变量(咖啡厅取餐系统)工作流

SpringBoot读取ZooKeeper(ZK)属性的方法实现

《SpringBoot读取ZooKeeper(ZK)属性的方法实现》本文主要介绍了SpringBoot读取ZooKeeper(ZK)属性的方法实现,强调使用@ConfigurationProperti... 目录1. 在配置文件中定义 ZK 属性application.propertiesapplicati