android - How to concatenate MP4 files using libffmpeg in a C program?
I know how to do this easily with the ffmpeg command line, but how can I implement it programmatically? I'm not experienced at this. Here is some code from the internet that I used to convert .mp4 to .ts; I made a few changes, but the audio stream problem persists:
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "libavformat/avformat.h" #include "libavcodec/avcodec.h" #include "libavutil/avutil.h" #include "libavutil/rational.h" #include "libavdevice/avdevice.h" #include "libavutil/mathematics.h" #include "libswscale/swscale.h" static avstream* add_output_stream(avformatcontext* output_format_context, avstream* input_stream) { avcodeccontext* input_codec_context = null; avcodeccontext* output_codec_context = null; avstream* output_stream = null; output_stream = av_new_stream(output_format_context, 0); if (!output_stream) { printf("call av_new_stream function failed\n"); return null; } input_codec_context = input_stream->codec; output_codec_context = output_stream->codec; output_codec_context->codec_id = input_codec_context->codec_id; output_codec_context->codec_type = input_codec_context->codec_type; output_codec_context->codec_tag = input_codec_context->codec_tag; output_codec_context->bit_rate = input_codec_context->bit_rate; output_codec_context->extradata = input_codec_context->extradata; output_codec_context->extradata_size = input_codec_context->extradata_size; if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000) { output_codec_context->time_base = input_codec_context->time_base; output_codec_context->time_base.num *= input_codec_context->ticks_per_frame; } else { output_codec_context->time_base = input_stream->time_base; } switch (input_codec_context->codec_type) { case avmedia_type_audio: output_codec_context->channel_layout = input_codec_context->channel_layout; output_codec_context->sample_rate = input_codec_context->sample_rate; output_codec_context->channels = input_codec_context->channels; output_codec_context->frame_size = input_codec_context->frame_size; if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == codec_id_mp3) || 
input_codec_context->codec_id == codec_id_ac3) { output_codec_context->block_align = 0; } else { output_codec_context->block_align = input_codec_context->block_align; } break; case avmedia_type_video: output_codec_context->pix_fmt = input_codec_context->pix_fmt; output_codec_context->width = input_codec_context->width; output_codec_context->height = input_codec_context->height; output_codec_context->has_b_frames = input_codec_context->has_b_frames; if (output_format_context->oformat->flags & avfmt_globalheader) { output_codec_context->flags |= codec_flag_global_header; } break; default: break; } return output_stream; } //[[** ffmpeg.c static void write_frame(avformatcontext *s, avpacket *pkt, avcodeccontext *avctx, avbitstreamfiltercontext *bsfc){ int ret; while(bsfc){ avpacket new_pkt= *pkt; int a= av_bitstream_filter_filter(bsfc, avctx, null, &new_pkt.data, &new_pkt.size, pkt->data, pkt->size, pkt->flags & av_pkt_flag_key); if(a>0){ av_free_packet(pkt); new_pkt.destruct= av_destruct_packet; } else if(a<0){ fprintf(stderr, "%s failed stream %d, codec %s\n", bsfc->filter->name, pkt->stream_index, avctx->codec ? 
avctx->codec->name : "copy"); //print_error("", a); //if (exit_on_error) // ffmpeg_exit(1); } *pkt= new_pkt; bsfc= bsfc->next; } ret= av_interleaved_write_frame(s, pkt); if(ret < 0){ //print_error("av_interleaved_write_frame()", ret); fprintf(stderr, "av_interleaved_write_frame(%d)\n", ret); exit(1); } } //]]** int main(int argc, char* argv[]) { const char* input; const char* output; const char* output_prefix = null; char* segment_duration_check = 0; const char* index = null; char* tmp_index = null; const char* http_prefix = null; long max_tsfiles = null; double prev_segment_time = 0; double segment_duration = 0; avinputformat* ifmt = null; avoutputformat* ofmt = null; avformatcontext* ic = null; avformatcontext* oc = null; avstream* video_st = null; avstream* audio_st = null; avcodec* codec = null; avdictionary* pavdictionary = null; long frame_count = 0; if (argc != 3) { fprintf(stderr, "usage: %s inputfile outputfile\n", argv[0]); exit(1); } input = argv[1]; output = argv[2]; av_register_all(); char szerror[256] = {0}; int nret = avformat_open_input(&ic, input, ifmt, &pavdictionary); if (nret != 0) { av_strerror(nret, szerror, 256); printf(szerror); printf("\n"); printf("call avformat_open_input function failed!\n"); return 0; } if (av_find_stream_info(ic) < 0) { printf("call av_find_stream_info function failed!\n"); return 0; } ofmt = av_guess_format("mpegts", null, null); if (!ofmt) { printf("call av_guess_format function failed!\n"); return 0; } oc = avformat_alloc_context(); if (!oc) { printf("call av_guess_format function failed!\n"); return 0; } oc->oformat = ofmt; int video_index = -1, audio_index = -1; (unsigned int = 0; < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) { switch (ic->streams[i]->codec->codec_type) { case avmedia_type_video: video_index = i; ic->streams[i]->discard = avdiscard_none; video_st = add_output_stream(oc, ic->streams[i]); break; case avmedia_type_audio: audio_index = i; ic->streams[i]->discard = avdiscard_none; 
audio_st = add_output_stream(oc, ic->streams[i]); break; default: ic->streams[i]->discard = avdiscard_all; break; } } codec = avcodec_find_decoder(video_st->codec->codec_id); if (codec == null) { printf("call avcodec_find_decoder function failed!\n"); return 0; } if (avcodec_open(video_st->codec, codec) < 0) { printf("call avcodec_open function failed !\n"); return 0; } if (avio_open(&oc->pb, output, avio_flag_write) < 0) { return 0; } if (avformat_write_header(oc, &pavdictionary)) { printf("call avformat_write_header function failed.\n"); return 0; } //[[++ avbitstreamfiltercontext *bsfc = av_bitstream_filter_init("h264_mp4toannexb"); //avbitstreamfiltercontext *absfc = av_bitstream_filter_init("aac_adtstoasc"); if (!bsfc) { fprintf(stderr, "bsf init error!\n"); return -1; } //]]++ int decode_done = 0; { double segment_time = 0; avpacket packet; decode_done = av_read_frame(ic, &packet); if (decode_done < 0) break; if (av_dup_packet(&packet) < 0) { printf("call av_dup_packet function failed\n"); av_free_packet(&packet); break; } //[[** if (packet.stream_index == audio_index) { segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; nret = av_interleaved_write_frame(oc, &packet); } else if (packet.stream_index == video_index) { if (packet.flags & av_pkt_flag_key) { segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; } else { segment_time = prev_segment_time; } //nret = av_interleaved_write_frame(oc, &packet); write_frame(oc, &packet, video_st->codec, bsfc); } //]]** if (nret < 0) { printf("call av_interleaved_write_frame function failed: %d\n", nret); } else if (nret > 0) { printf("end of stream requested\n"); av_free_packet(&packet); break; } av_free_packet(&packet); frame_count++; }while(!decode_done); av_write_trailer(oc); printf("frame_count = %d\n", frame_count); av_bitstream_filter_close(bsfc); avcodec_close(video_st->codec); for(unsigned int k = 0; k < oc->nb_streams; k++) { 
av_freep(&oc->streams[k]->codec); av_freep(&oc->streams[k]); } av_free(oc); //getchar(); return 0; }
I compiled the code and got an executable file named muxts,
and then ran:
$ ./muxts vid1.mp4 vid1.ts
No error message was printed, but the audio stream is unsynchronized and noisy. Checking the .ts file using ffmpeg:
$ ffmpeg -i vid1.ts ffmpeg version 0.8.14-tessus, copyright (c) 2000-2013 ffmpeg developers built on jul 29 2013 17:05:18 llvm_gcc 4.2.1 (based on apple inc. build 5658) (llvm build 2336.1.00) configuration: --prefix=/usr/local --arch=x86_64 --as=yasm --extra-version=tessus --enable-gpl --enable-nonfree --enable-version3 --disable-ffplay --enable-libvorbis --enable-libmp3lame --enable-libx264 --enable-libxvid --enable-bzlib --enable-zlib --enable-postproc --enable-filters --enable-runtime-cpudetect --enable-debug=3 --disable-optimizations libavutil 51. 9. 1 / 51. 9. 1 libavcodec 53. 8. 0 / 53. 8. 0 libavformat 53. 5. 0 / 53. 5. 0 libavdevice 53. 1. 1 / 53. 1. 1 libavfilter 2. 23. 0 / 2. 23. 0 libswscale 2. 0. 0 / 2. 0. 0 libpostproc 51. 2. 0 / 51. 2. 0 seems stream 0 codec frame rate differs container frame rate: 180000.00 (180000/1) -> 90000.00 (180000/2) input #0, mpegts, 'vid1.ts': duration: 00:00:03.75, start: 0.000000, bitrate: 3656 kb/s program 1 metadata: service_name : service01 service_provider: ffmpeg stream #0.0[0x100]: video: h264 (baseline), yuv420p, 640x480, 90k tbr, 90k tbn, 180k tbc stream #0.1[0x101]: audio: aac, 48000 hz, mono, s16, 190 kb/s @ least 1 output file must specified
What should I do?
Once this issue is fixed, how can I concatenate multiple .ts files into a single .mp4 file?
Comments
Post a Comment