Failed to use h264_v4l2 codec in ffmpeg to decode video
I am working on an embedded Linux system (kernel-5.10.24) and I want to use the ffmpeg libraries (ffmpeg-4.4.4) to do video decoding.
The C code below uses the h264_v4l2m2m decoder to decode the video:
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    if (argc < 3) {
        printf("Usage: %s <input_video> <output_video>\n", argv[0]);
        return -1;
    }
    const char *input_file = argv[1];
    const char *output_file = argv[2];
    AVFormatContext *fmt_ctx = NULL;
    AVCodecContext *codec_ctx = NULL;
    AVCodec *codec = NULL;
    AVPacket pkt;
    AVFrame *frame = NULL;
    AVFrame *rgb_frame = NULL;
    struct SwsContext *sws_ctx = NULL;
    FILE *output = NULL;
    int video_stream_index = -1;

    avformat_network_init();

    if (avformat_open_input(&fmt_ctx, input_file, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open input file %s\n", input_file);
        return -1;
    }
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        return -1;
    }

    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1) {
        fprintf(stderr, "Could not find video stream\n");
        return -1;
    }

    //// codec = avcodec_find_decoder(fmt_ctx->streams[video_stream_index]->codecpar->codec_id);
    codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        return -1;
    }
    codec_ctx = avcodec_alloc_context3(codec);
    if (!codec_ctx) {
        fprintf(stderr, "Could not allocate codec context\n");
        return -1;
    }
    if (avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream_index]->codecpar) < 0) {
        fprintf(stderr, "Failed to copy codec parameters to decoder context\n");
        return -1;
    }
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        return -1;
    }

    output = fopen(output_file, "wb");
    if (!output) {
        fprintf(stderr, "Could not open output file %s\n", output_file);
        return -1;
    }

    frame = av_frame_alloc();
    rgb_frame = av_frame_alloc();
    if (!frame || !rgb_frame) {
        fprintf(stderr, "Could not allocate frames\n");
        return -1;
    }

    int width = codec_ctx->width;
    int height = codec_ctx->height;
    int num_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
    uint8_t *buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));
    av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, buffer, AV_PIX_FMT_RGB24, width, height, 1);

    printf("XXXXXXXXXXXX width: %d, height: %d, fmt: %d\n", width, height, codec_ctx->pix_fmt);

    sws_ctx = sws_getContext(width, height, codec_ctx->pix_fmt,
                             width, height, AV_PIX_FMT_RGB24,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx) {
        fprintf(stderr, "Could not initialize the conversion context\n");
        return -1;
    }

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_index) {
            int ret = avcodec_send_packet(codec_ctx, &pkt);
            if (ret < 0) {
                fprintf(stderr, "Error sending packet for decoding\n");
                return -1;
            }
            while (ret >= 0) {
                ret = avcodec_receive_frame(codec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    fprintf(stderr, "Error during decoding\n");
                    return -1;
                }
                sws_scale(sws_ctx, (const uint8_t *const *)frame->data, frame->linesize,
                          0, height, rgb_frame->data, rgb_frame->linesize);
                fprintf(output, "P6\n%d %d\n255\n", width, height);
                fwrite(rgb_frame->data[0], 1, num_bytes, output);
            }
        }
        av_packet_unref(&pkt);
    }

    fclose(output);
    av_frame_free(&frame);
    av_frame_free(&rgb_frame);
    avcodec_free_context(&codec_ctx);
    avformat_close_input(&fmt_ctx);
    sws_freeContext(sws_ctx);
    return 0;
}
It ran, but produced error logs from swscale, as follows:
# ./test_ffmpeg ./test.mp4 /tmp/output
[h264_v4l2m2m @ 0x1d76320] Using device /dev/video0
[h264_v4l2m2m @ 0x1d76320] driver 'mysoc-vdec' on card 'msoc-vdec' in mplane mode
[h264_v4l2m2m @ 0x1d76320] requesting formats: output=H264 capture=NV12
[h264_v4l2m2m @ 0x1d76320] the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT
XXXXXXXXXXXX width: 1280, height: 720, fmt: 0
[swscaler @ 0x1dadaa0] No accelerated colorspace conversion found from yuv420p to rgb24.
[h264_v4l2m2m @ 0x1d76320] VIDIOC_G_SELECTION ioctl
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
......
Also, it only ran for about 4 seconds, while test.mp4 is about 13 seconds long.
If I do NOT specify h264_v4l2m2m as the decoder, there are no "bad src image pointers" errors and the run time matches the length of the mp4 file (the one-line swap back to the default decoder is sketched below).
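For reference, the non-v4l2m2m run just restores the commented-out decoder lookup from the code above, i.e. something like:

    /* Software-decoder variant: let ffmpeg pick the decoder from the
     * stream's codec_id instead of forcing h264_v4l2m2m. */
    codec = avcodec_find_decoder(fmt_ctx->streams[video_stream_index]->codecpar->codec_id);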
What is wrong with the above code when using h264_v4l2m2m, and how can I fix it?
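In case it helps narrow this down, here is a minimal diagnostic sketch (not part of the program above) that could be called right after a successful avcodec_receive_frame(), to log which pixel format and data pointers the decoder actually returns; av_get_pix_fmt_name() comes from libavutil/pixdesc.h, everything else reuses the headers already included. The motivation is that sws_ctx was created from codec_ctx->pix_fmt (which printed as 0, i.e. yuv420p), while the driver log shows the capture queue negotiating NV12:

    #include <libavutil/pixdesc.h>  /* for av_get_pix_fmt_name() */

    /* Diagnostic sketch: show what the decoder hands back before conversion. */
    static void dump_frame_info(const AVFrame *frame) {
        const char *fmt_name = av_get_pix_fmt_name(frame->format);
        fprintf(stderr,
                "decoded frame: format=%s (%d) %dx%d data[0]=%p data[1]=%p linesize[0]=%d\n",
                fmt_name ? fmt_name : "unknown", frame->format,
                frame->width, frame->height,
                (void *)frame->data[0], (void *)frame->data[1],
                frame->linesize[0]);
    }

Calling dump_frame_info(frame) just before the sws_scale() call would show whether frame->data[0] is NULL for the frames that trigger the "bad src image pointers" messages.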