From 9c3b8a6a958c79eea18d515b347ae543fb1b27a1 Mon Sep 17 00:00:00 2001
From: ziyue <1213642868@qq.com>
Date: Mon, 8 Aug 2022 17:13:39 +0800
Subject: [PATCH] Change timestamps to 64-bit (时间戳改为64位)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 api/include/mk_frame.h          |  6 +++---
 api/include/mk_media.h          | 12 +++++------
 api/source/mk_frame.cpp         |  8 ++++----
 api/source/mk_media.cpp         | 12 +++++------
 src/Codec/Transcode.cpp         |  6 +++---
 src/Codec/Transcode.h           |  2 +-
 src/Common/Device.cpp           | 28 ++++++++++++-------------
 src/Common/Device.h             | 12 +++++------
 src/Common/Stamp.cpp            |  4 ++--
 src/Common/Stamp.h              | 14 ++++++-------
 src/Extension/AACRtp.cpp        |  4 ++--
 src/Extension/AACRtp.h          |  4 ++--
 src/Extension/Frame.h           | 36 ++++++++++++++++-----------------
 src/Extension/H264Rtmp.cpp      |  2 +-
 src/Extension/H264Rtp.cpp       | 14 ++++++-------
 src/Extension/H264Rtp.h         | 14 ++++++-------
 src/Extension/H265Rtmp.cpp      |  2 +-
 src/Extension/H265Rtp.cpp       |  6 +++---
 src/Extension/H265Rtp.h         |  6 +++---
 src/FMP4/FMP4MediaSourceMuxer.h |  2 +-
 src/Record/HlsMaker.cpp         |  4 ++--
 src/Record/HlsMaker.h           | 10 ++++-----
 src/Record/HlsMakerImp.cpp      |  2 +-
 src/Record/HlsMakerImp.h        |  2 +-
 src/Record/HlsRecorder.h        |  2 +-
 src/Record/MP4Demuxer.cpp       |  8 ++++----
 src/Record/MP4Muxer.cpp         |  2 +-
 src/Record/MP4Muxer.h           |  2 +-
 src/Record/MPEG.cpp             |  6 +++---
 src/Record/MPEG.h               |  6 +++---
 src/Rtp/Decoder.cpp             | 14 ++++++-------
 src/Rtp/PSEncoder.cpp           |  2 +-
 src/Rtp/PSEncoder.h             |  2 +-
 src/Rtp/RtpProcess.cpp          |  2 +-
 src/Rtp/RtpProcess.h            |  4 ++--
 src/Rtp/RtpSelector.cpp         |  4 ++--
 src/Rtp/RtpSelector.h           |  2 +-
 src/Rtsp/RtpCodec.cpp           |  2 +-
 src/Rtsp/RtpCodec.h             |  2 +-
 src/Rtsp/Rtsp.cpp               |  4 ++--
 src/Rtsp/Rtsp.h                 |  2 +-
 src/TS/TSMediaSource.h          |  2 +-
 src/TS/TSMediaSourceMuxer.h     |  2 +-
 tests/test_rtp.cpp              |  4 ++--
 44 files changed, 143 insertions(+), 143 deletions(-)

diff --git a/api/include/mk_frame.h b/api/include/mk_frame.h
index cbc5fe76..ec14a508 100644
--- a/api/include/mk_frame.h
+++ b/api/include/mk_frame.h
@@ -55,7 +55,7 @@ typedef void(API_CALL *on_mk_frame_data_release)(void *user_data, char *ptr);
  * @param user_data data指针free释放回调用户指针
  * @return frame对象引用
  */
-API_EXPORT mk_frame API_CALL mk_frame_create(int codec_id, uint32_t dts, uint32_t pts, const char *data, size_t size,
+API_EXPORT mk_frame API_CALL mk_frame_create(int codec_id, uint64_t dts, uint64_t pts, const char *data, size_t size,
                                              on_mk_frame_data_release cb, void *user_data);
 
 /**
@@ -104,12 +104,12 @@ API_EXPORT size_t API_CALL mk_frame_get_data_prefix_size(mk_frame frame);
 /**
  * 获取解码时间戳,单位毫秒
  */
-API_EXPORT uint32_t API_CALL mk_frame_get_dts(mk_frame frame);
+API_EXPORT uint64_t API_CALL mk_frame_get_dts(mk_frame frame);
 
 /**
  * 获取显示时间戳,单位毫秒
  */
-API_EXPORT uint32_t API_CALL mk_frame_get_pts(mk_frame frame);
+API_EXPORT uint64_t API_CALL mk_frame_get_pts(mk_frame frame);
 
 /**
  * 获取帧flag,请参考 MK_FRAME_FLAG
diff --git a/api/include/mk_media.h b/api/include/mk_media.h
index a93ededc..39a0ae80 100755
--- a/api/include/mk_media.h
+++ b/api/include/mk_media.h
@@ -99,7 +99,7 @@ API_EXPORT int API_CALL mk_media_input_frame(mk_media ctx, mk_frame frame);
  * @param pts 播放时间戳,单位毫秒
  * @return 1代表成功,0失败
  */
-API_EXPORT int API_CALL mk_media_input_h264(mk_media ctx, const void *data, int len, uint32_t dts, uint32_t pts);
+API_EXPORT int API_CALL mk_media_input_h264(mk_media ctx, const void *data, int len, uint64_t dts, uint64_t pts);
 
 /**
  * 输入单帧H265视频,帧起始字节00 00 01,00 00 00 01均可,请改用mk_media_input_frame方法
  * @param data 单帧H265数据
@@ -110,7 +110,7 @@ API_EXPORT int
API_CALL mk_media_input_h264(mk_media ctx, const void *data, int * @param pts 播放时间戳,单位毫秒 * @return 1代表成功,0失败 */ -API_EXPORT int API_CALL mk_media_input_h265(mk_media ctx, const void *data, int len, uint32_t dts, uint32_t pts); +API_EXPORT int API_CALL mk_media_input_h265(mk_media ctx, const void *data, int len, uint64_t dts, uint64_t pts); /** * 输入YUV视频数据 @@ -119,7 +119,7 @@ API_EXPORT int API_CALL mk_media_input_h265(mk_media ctx, const void *data, int * @param linesize yuv420p linesize * @param cts 视频采集时间戳,单位毫秒 */ -API_EXPORT void API_CALL mk_media_input_yuv(mk_media ctx, const char *yuv[3], int linesize[3], uint32_t cts); +API_EXPORT void API_CALL mk_media_input_yuv(mk_media ctx, const char *yuv[3], int linesize[3], uint64_t cts); /** * 输入单帧AAC音频(单独指定adts头),请改用mk_media_input_frame方法 @@ -130,7 +130,7 @@ API_EXPORT void API_CALL mk_media_input_yuv(mk_media ctx, const char *yuv[3], in * @param adts adts头,可以为null * @return 1代表成功,0失败 */ -API_EXPORT int API_CALL mk_media_input_aac(mk_media ctx, const void *data, int len, uint32_t dts, void *adts); +API_EXPORT int API_CALL mk_media_input_aac(mk_media ctx, const void *data, int len, uint64_t dts, void *adts); /** * 输入单帧PCM音频,启用ENABLE_FAAC编译时,该函数才有效 @@ -140,7 +140,7 @@ API_EXPORT int API_CALL mk_media_input_aac(mk_media ctx, const void *data, int l * @param dts 时间戳,毫秒 * @return 1代表成功,0失败 */ -API_EXPORT int API_CALL mk_media_input_pcm(mk_media ctx, void *data, int len, uint32_t pts); +API_EXPORT int API_CALL mk_media_input_pcm(mk_media ctx, void *data, int len, uint64_t pts); /** * 输入单帧OPUS/G711音频帧,请改用mk_media_input_frame方法 @@ -150,7 +150,7 @@ API_EXPORT int API_CALL mk_media_input_pcm(mk_media ctx, void *data, int len, ui * @param dts 时间戳,毫秒 * @return 1代表成功,0失败 */ -API_EXPORT int API_CALL mk_media_input_audio(mk_media ctx, const void* data, int len, uint32_t dts); +API_EXPORT int API_CALL mk_media_input_audio(mk_media ctx, const void* data, int len, uint64_t dts); /** * MediaSource.close()回调事件 diff --git a/api/source/mk_frame.cpp b/api/source/mk_frame.cpp index 0bd973e2..367e8cc3 100644 --- a/api/source/mk_frame.cpp +++ b/api/source/mk_frame.cpp @@ -70,7 +70,7 @@ private: bool _cache_able; }; -static mk_frame mk_frame_create_complex(int codec_id, uint32_t dts, uint32_t pts, uint32_t frame_flags, size_t prefix_size, +static mk_frame mk_frame_create_complex(int codec_id, uint64_t dts, uint64_t pts, uint32_t frame_flags, size_t prefix_size, char *data, size_t size, on_mk_frame_data_release cb, void *user_data) { switch (codec_id) { case CodecH264: @@ -85,7 +85,7 @@ static mk_frame mk_frame_create_complex(int codec_id, uint32_t dts, uint32_t pts } } -API_EXPORT mk_frame API_CALL mk_frame_create(int codec_id, uint32_t dts, uint32_t pts, const char *data, size_t size, +API_EXPORT mk_frame API_CALL mk_frame_create(int codec_id, uint64_t dts, uint64_t pts, const char *data, size_t size, on_mk_frame_data_release cb, void *user_data) { switch (codec_id) { @@ -146,12 +146,12 @@ API_EXPORT size_t API_CALL mk_frame_get_data_prefix_size(mk_frame frame) { return (*((Frame::Ptr *) frame))->prefixSize(); } -API_EXPORT uint32_t API_CALL mk_frame_get_dts(mk_frame frame) { +API_EXPORT uint64_t API_CALL mk_frame_get_dts(mk_frame frame) { assert(frame); return (*((Frame::Ptr *) frame))->dts(); } -API_EXPORT uint32_t API_CALL mk_frame_get_pts(mk_frame frame) { +API_EXPORT uint64_t API_CALL mk_frame_get_pts(mk_frame frame) { assert(frame); return (*((Frame::Ptr *) frame))->pts(); } diff --git a/api/source/mk_media.cpp b/api/source/mk_media.cpp index 0abe2f46..9d4f8f59 
100755 --- a/api/source/mk_media.cpp +++ b/api/source/mk_media.cpp @@ -219,37 +219,37 @@ API_EXPORT int API_CALL mk_media_input_frame(mk_media ctx, mk_frame frame){ return (*obj)->getChannel()->inputFrame(*((Frame::Ptr *) frame)); } -API_EXPORT int API_CALL mk_media_input_h264(mk_media ctx, const void *data, int len, uint32_t dts, uint32_t pts) { +API_EXPORT int API_CALL mk_media_input_h264(mk_media ctx, const void *data, int len, uint64_t dts, uint64_t pts) { assert(ctx && data && len > 0); MediaHelper::Ptr *obj = (MediaHelper::Ptr *) ctx; return (*obj)->getChannel()->inputH264((const char *) data, len, dts, pts); } -API_EXPORT int API_CALL mk_media_input_h265(mk_media ctx, const void *data, int len, uint32_t dts, uint32_t pts) { +API_EXPORT int API_CALL mk_media_input_h265(mk_media ctx, const void *data, int len, uint64_t dts, uint64_t pts) { assert(ctx && data && len > 0); MediaHelper::Ptr *obj = (MediaHelper::Ptr *) ctx; return (*obj)->getChannel()->inputH265((const char *) data, len, dts, pts); } -API_EXPORT void API_CALL mk_media_input_yuv(mk_media ctx, const char *yuv[3], int linesize[3], uint32_t cts) { +API_EXPORT void API_CALL mk_media_input_yuv(mk_media ctx, const char *yuv[3], int linesize[3], uint64_t cts) { assert(ctx && yuv && linesize); MediaHelper::Ptr *obj = (MediaHelper::Ptr *) ctx; (*obj)->getChannel()->inputYUV((char **) yuv, linesize, cts); } -API_EXPORT int API_CALL mk_media_input_aac(mk_media ctx, const void *data, int len, uint32_t dts, void *adts) { +API_EXPORT int API_CALL mk_media_input_aac(mk_media ctx, const void *data, int len, uint64_t dts, void *adts) { assert(ctx && data && len > 0 && adts); MediaHelper::Ptr *obj = (MediaHelper::Ptr *) ctx; return (*obj)->getChannel()->inputAAC((const char *) data, len, dts, (char *) adts); } -API_EXPORT int API_CALL mk_media_input_pcm(mk_media ctx, void *data , int len, uint32_t pts){ +API_EXPORT int API_CALL mk_media_input_pcm(mk_media ctx, void *data , int len, uint64_t pts){ assert(ctx && data && len > 0); MediaHelper::Ptr* obj = (MediaHelper::Ptr*) ctx; return (*obj)->getChannel()->inputPCM((char*)data, len, pts); } -API_EXPORT int API_CALL mk_media_input_audio(mk_media ctx, const void* data, int len, uint32_t dts){ +API_EXPORT int API_CALL mk_media_input_audio(mk_media ctx, const void* data, int len, uint64_t dts){ assert(ctx && data && len > 0); MediaHelper::Ptr* obj = (MediaHelper::Ptr*) ctx; return (*obj)->getChannel()->inputAudio((const char*)data, len, dts); diff --git a/src/Codec/Transcode.cpp b/src/Codec/Transcode.cpp index 26e969a2..989dcda7 100644 --- a/src/Codec/Transcode.cpp +++ b/src/Codec/Transcode.cpp @@ -420,7 +420,7 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track, int thread_num) { FFmpegDecoder::~FFmpegDecoder() { stopThread(true); if (_do_merger) { - _merger.inputFrame(nullptr, [&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool have_idr) { + _merger.inputFrame(nullptr, [&](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool have_idr) { decodeFrame(buffer->data(), buffer->size(), dts, pts, false); }); } @@ -452,7 +452,7 @@ const AVCodecContext *FFmpegDecoder::getContext() const { bool FFmpegDecoder::inputFrame_l(const Frame::Ptr &frame, bool live, bool enable_merge) { if (_do_merger && enable_merge) { - return _merger.inputFrame(frame, [&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool have_idr) { + return _merger.inputFrame(frame, [&](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool have_idr) { decodeFrame(buffer->data(), buffer->size(), dts, pts, 
live); }); } @@ -478,7 +478,7 @@ bool FFmpegDecoder::inputFrame(const Frame::Ptr &frame, bool live, bool async, b }); } -bool FFmpegDecoder::decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts, bool live) { +bool FFmpegDecoder::decodeFrame(const char *data, size_t size, uint64_t dts, uint64_t pts, bool live) { TimeTicker2(30, TraceL); auto pkt = alloc_av_packet(); diff --git a/src/Codec/Transcode.h b/src/Codec/Transcode.h index 73c37428..fc4c37d0 100644 --- a/src/Codec/Transcode.h +++ b/src/Codec/Transcode.h @@ -113,7 +113,7 @@ public: private: void onDecode(const FFmpegFrame::Ptr &frame); bool inputFrame_l(const Frame::Ptr &frame, bool live, bool enable_merge); - bool decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts, bool live); + bool decodeFrame(const char *data, size_t size, uint64_t dts, uint64_t pts, bool live); private: bool _do_merger = false; diff --git a/src/Common/Device.cpp b/src/Common/Device.cpp index c6be0721..f858f12f 100644 --- a/src/Common/Device.cpp +++ b/src/Common/Device.cpp @@ -28,7 +28,7 @@ using namespace std; namespace mediakit { -bool DevChannel::inputYUV(char *yuv[3], int linesize[3], uint32_t cts) { +bool DevChannel::inputYUV(char *yuv[3], int linesize[3], uint64_t cts) { #ifdef ENABLE_X264 //TimeTicker1(50); if (!_pH264Enc) { @@ -54,7 +54,7 @@ bool DevChannel::inputYUV(char *yuv[3], int linesize[3], uint32_t cts) { #endif //ENABLE_X264 } -bool DevChannel::inputPCM(char* pcData, int iDataLen, uint32_t uiStamp) { +bool DevChannel::inputPCM(char* pcData, int iDataLen, uint64_t uiStamp) { #ifdef ENABLE_FAAC if (!_pAacEnc) { _pAacEnc.reset(new AACEncoder()); @@ -77,11 +77,11 @@ bool DevChannel::inputPCM(char* pcData, int iDataLen, uint32_t uiStamp) { #endif //ENABLE_FAAC } -bool DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts) { - if(dts == 0){ - dts = (uint32_t)_aTicker[0].elapsedTime(); +bool DevChannel::inputH264(const char *data, int len, uint64_t dts, uint64_t pts) { + if (dts == 0) { + dts = _aTicker[0].elapsedTime(); } - if(pts == 0){ + if (pts == 0) { pts = dts; } @@ -96,11 +96,11 @@ bool DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts return inputFrame(frame); } -bool DevChannel::inputH265(const char *data, int len, uint32_t dts, uint32_t pts) { - if(dts == 0){ - dts = (uint32_t)_aTicker[0].elapsedTime(); +bool DevChannel::inputH265(const char *data, int len, uint64_t dts, uint64_t pts) { + if (dts == 0) { + dts = _aTicker[0].elapsedTime(); } - if(pts == 0){ + if (pts == 0) { pts = dts; } @@ -129,9 +129,9 @@ public: } }; -bool DevChannel::inputAAC(const char *data_without_adts, int len, uint32_t dts, const char *adts_header){ +bool DevChannel::inputAAC(const char *data_without_adts, int len, uint64_t dts, const char *adts_header){ if (dts == 0) { - dts = (uint32_t) _aTicker[1].elapsedTime(); + dts = _aTicker[1].elapsedTime(); } if (!adts_header) { @@ -152,9 +152,9 @@ bool DevChannel::inputAAC(const char *data_without_adts, int len, uint32_t dts, } -bool DevChannel::inputAudio(const char *data, int len, uint32_t dts){ +bool DevChannel::inputAudio(const char *data, int len, uint64_t dts){ if (dts == 0) { - dts = (uint32_t) _aTicker[1].elapsedTime(); + dts = _aTicker[1].elapsedTime(); } return inputFrame(std::make_shared(_audio->codecId, (char *) data, len, dts, 0)); } diff --git a/src/Common/Device.h b/src/Common/Device.h index f1ab266f..b39a24dd 100644 --- a/src/Common/Device.h +++ b/src/Common/Device.h @@ -75,7 +75,7 @@ public: * @param dts 
解码时间戳,单位毫秒;等于0时内部会自动生成时间戳 * @param pts 播放时间戳,单位毫秒;等于0时内部会赋值为dts */ - bool inputH264(const char *data, int len, uint32_t dts, uint32_t pts = 0); + bool inputH264(const char *data, int len, uint64_t dts, uint64_t pts = 0); /** * 输入265帧 @@ -84,7 +84,7 @@ public: * @param dts 解码时间戳,单位毫秒;等于0时内部会自动生成时间戳 * @param pts 播放时间戳,单位毫秒;等于0时内部会赋值为dts */ - bool inputH265(const char *data, int len, uint32_t dts, uint32_t pts = 0); + bool inputH265(const char *data, int len, uint64_t dts, uint64_t pts = 0); /** * 输入aac帧 @@ -93,7 +93,7 @@ public: * @param dts 时间戳,单位毫秒 * @param adts_header adts头 */ - bool inputAAC(const char *data_without_adts, int len, uint32_t dts, const char *adts_header); + bool inputAAC(const char *data_without_adts, int len, uint64_t dts, const char *adts_header); /** * 输入OPUS/G711音频帧 @@ -101,7 +101,7 @@ public: * @param len 帧数据长度 * @param dts 时间戳,单位毫秒 */ - bool inputAudio(const char *data, int len, uint32_t dts); + bool inputAudio(const char *data, int len, uint64_t dts); /** * 输入yuv420p视频帧,内部会完成编码并调用inputH264方法 @@ -109,7 +109,7 @@ public: * @param linesize yuv420p数据linesize * @param cts 采集时间戳,单位毫秒 */ - bool inputYUV(char *yuv[3], int linesize[3], uint32_t cts); + bool inputYUV(char *yuv[3], int linesize[3], uint64_t cts); /** * 输入pcm数据,内部会完成编码并调用inputAAC方法 @@ -117,7 +117,7 @@ public: * @param len pcm数据长度 * @param cts 采集时间戳,单位毫秒 */ - bool inputPCM(char *data, int len, uint32_t cts); + bool inputPCM(char *data, int len, uint64_t cts); private: MediaOriginType getOriginType(MediaSource &sender) const override; diff --git a/src/Common/Stamp.cpp b/src/Common/Stamp.cpp index 364ad95e..1e2ec1ec 100644 --- a/src/Common/Stamp.cpp +++ b/src/Common/Stamp.cpp @@ -137,7 +137,7 @@ int64_t Stamp::getRelativeStamp() const { return _relative_stamp; } -bool DtsGenerator::getDts(uint32_t pts, uint32_t &dts){ +bool DtsGenerator::getDts(uint64_t pts, uint64_t &dts){ bool ret = false; if (pts == _last_pts) { //pts未变,说明dts也不会变,返回上次dts @@ -167,7 +167,7 @@ bool DtsGenerator::getDts(uint32_t pts, uint32_t &dts){ //该算法核心思想是对pts进行排序,排序好的pts就是dts。 //排序有一定的滞后性,那么需要加上排序导致的时间戳偏移量 -bool DtsGenerator::getDts_l(uint32_t pts, uint32_t &dts){ +bool DtsGenerator::getDts_l(uint64_t pts, uint64_t &dts){ if(_sorter_max_size == 1){ //没有B帧,dts就等于pts dts = pts; diff --git a/src/Common/Stamp.h b/src/Common/Stamp.h index 85c181fd..f943fc40 100644 --- a/src/Common/Stamp.h +++ b/src/Common/Stamp.h @@ -97,20 +97,20 @@ class DtsGenerator{ public: DtsGenerator() = default; ~DtsGenerator() = default; - bool getDts(uint32_t pts, uint32_t &dts); + bool getDts(uint64_t pts, uint64_t &dts); private: - bool getDts_l(uint32_t pts, uint32_t &dts); + bool getDts_l(uint64_t pts, uint64_t &dts); private: - uint32_t _dts_pts_offset = 0; - uint32_t _last_dts = 0; - uint32_t _last_pts = 0; - uint32_t _last_max_pts = 0; + uint64_t _dts_pts_offset = 0; + uint64_t _last_dts = 0; + uint64_t _last_pts = 0; + uint64_t _last_max_pts = 0; size_t _frames_since_last_max_pts = 0; size_t _sorter_max_size = 0; size_t _count_sorter_max_size = 0; - std::set _pts_sorter; + std::set _pts_sorter; }; class NtpStamp { diff --git a/src/Extension/AACRtp.cpp b/src/Extension/AACRtp.cpp index 82557928..ccedf65e 100644 --- a/src/Extension/AACRtp.cpp +++ b/src/Extension/AACRtp.cpp @@ -53,8 +53,8 @@ bool AACRtpEncoder::inputFrame(const Frame::Ptr &frame) { return len > 0; } -void AACRtpEncoder::makeAACRtp(const void *data, size_t len, bool mark, uint32_t uiStamp) { - RtpCodec::inputRtp(makeRtp(getTrackType(), data, len, mark, uiStamp), false); +void 
AACRtpEncoder::makeAACRtp(const void *data, size_t len, bool mark, uint64_t stamp) { + RtpCodec::inputRtp(makeRtp(getTrackType(), data, len, mark, stamp), false); } ///////////////////////////////////////////////////////////////////////////////////// diff --git a/src/Extension/AACRtp.h b/src/Extension/AACRtp.h index 1efb2e24..23f48691 100644 --- a/src/Extension/AACRtp.h +++ b/src/Extension/AACRtp.h @@ -43,7 +43,7 @@ private: void flushData(); private: - uint32_t _last_dts = 0; + uint64_t _last_dts = 0; std::string _aac_cfg; FrameImp::Ptr _frame; }; @@ -77,7 +77,7 @@ public: bool inputFrame(const Frame::Ptr &frame) override; private: - void makeAACRtp(const void *pData, size_t uiLen, bool bMark, uint32_t uiStamp); + void makeAACRtp(const void *data, size_t len, bool mark, uint64_t stamp); private: unsigned char _section_buf[1600]; diff --git a/src/Extension/Frame.h b/src/Extension/Frame.h index 46a61d60..72a8a7ed 100644 --- a/src/Extension/Frame.h +++ b/src/Extension/Frame.h @@ -112,12 +112,12 @@ public: /** * 返回解码时间戳,单位毫秒 */ - virtual uint32_t dts() const = 0; + virtual uint64_t dts() const = 0; /** * 返回显示时间戳,单位毫秒 */ - virtual uint32_t pts() const { return dts(); } + virtual uint64_t pts() const { return dts(); } /** * 前缀长度,譬如264前缀为0x00 00 00 01,那么前缀长度就是4 @@ -194,8 +194,8 @@ public: char *data() const override { return (char *)_buffer.data(); } size_t size() const override { return _buffer.size(); } - uint32_t dts() const override { return _dts; } - uint32_t pts() const override { return _pts ? _pts : _dts; } + uint64_t dts() const override { return _dts; } + uint64_t pts() const override { return _pts ? _pts : _dts; } size_t prefixSize() const override { return _prefix_size; } CodecId getCodecId() const override { return _codec_id; } bool keyFrame() const override { return false; } @@ -203,8 +203,8 @@ public: public: CodecId _codec_id = CodecInvalid; - uint32_t _dts = 0; - uint32_t _pts = 0; + uint64_t _dts = 0; + uint64_t _pts = 0; size_t _prefix_size = 0; toolkit::BufferLikeString _buffer; @@ -248,7 +248,7 @@ class FrameTSInternal : public Parent { public: typedef std::shared_ptr Ptr; FrameTSInternal( - const Frame::Ptr &parent_frame, char *ptr, size_t size, size_t prefix_size, uint32_t dts, uint32_t pts) + const Frame::Ptr &parent_frame, char *ptr, size_t size, size_t prefix_size, uint64_t dts, uint64_t pts) : Parent(ptr, size, dts, pts, prefix_size) { _parent_frame = parent_frame; } @@ -361,13 +361,13 @@ public: typedef std::shared_ptr Ptr; FrameFromPtr( - CodecId codec_id, char *ptr, size_t size, uint32_t dts, uint32_t pts = 0, size_t prefix_size = 0, + CodecId codec_id, char *ptr, size_t size, uint64_t dts, uint64_t pts = 0, size_t prefix_size = 0, bool is_key = false) : FrameFromPtr(ptr, size, dts, pts, prefix_size, is_key) { _codec_id = codec_id; } - FrameFromPtr(char *ptr, size_t size, uint32_t dts, uint32_t pts = 0, size_t prefix_size = 0, bool is_key = false) { + FrameFromPtr(char *ptr, size_t size, uint64_t dts, uint64_t pts = 0, size_t prefix_size = 0, bool is_key = false) { _ptr = ptr; _size = size; _dts = dts; @@ -378,8 +378,8 @@ public: char *data() const override { return _ptr; } size_t size() const override { return _size; } - uint32_t dts() const override { return _dts; } - uint32_t pts() const override { return _pts ? _pts : dts(); } + uint64_t dts() const override { return _dts; } + uint64_t pts() const override { return _pts ? 
_pts : dts(); } size_t prefixSize() const override { return _prefix_size; } bool cacheAble() const override { return false; } bool keyFrame() const override { return _is_key; } @@ -399,8 +399,8 @@ protected: protected: bool _is_key; char *_ptr; - uint32_t _dts; - uint32_t _pts = 0; + uint64_t _dts; + uint64_t _pts = 0; size_t _size; size_t _prefix_size; CodecId _codec_id = CodecInvalid; @@ -464,8 +464,8 @@ public: } ~FrameStamp() override {} - uint32_t dts() const override { return (uint32_t)_dts; } - uint32_t pts() const override { return (uint32_t)_pts; } + uint64_t dts() const override { return (uint64_t)_dts; } + uint64_t pts() const override { return (uint64_t)_pts; } size_t prefixSize() const override { return _frame->prefixSize(); } bool keyFrame() const override { return _frame->keyFrame(); } bool configFrame() const override { return _frame->configFrame(); } @@ -498,7 +498,7 @@ public: * @param prefix 帧前缀长度 * @param offset buffer有效数据偏移量 */ - FrameWrapper(toolkit::Buffer::Ptr buf, uint32_t dts, uint32_t pts, size_t prefix, size_t offset) + FrameWrapper(toolkit::Buffer::Ptr buf, uint64_t dts, uint64_t pts, size_t prefix, size_t offset) : Parent(buf->data() + offset, buf->size() - offset, dts, pts, prefix) { _buf = std::move(buf); } @@ -512,7 +512,7 @@ public: * @param offset buffer有效数据偏移量 * @param codec 帧类型 */ - FrameWrapper(toolkit::Buffer::Ptr buf, uint32_t dts, uint32_t pts, size_t prefix, size_t offset, CodecId codec) + FrameWrapper(toolkit::Buffer::Ptr buf, uint64_t dts, uint64_t pts, size_t prefix, size_t offset, CodecId codec) : Parent(codec, buf->data() + offset, buf->size() - offset, dts, pts, prefix) { _buf = std::move(buf); } @@ -531,7 +531,7 @@ private: */ class FrameMerger { public: - using onOutput = std::function; + using onOutput = std::function; using Ptr = std::shared_ptr; enum { none = 0, diff --git a/src/Extension/H264Rtmp.cpp b/src/Extension/H264Rtmp.cpp index 8570983c..2af6c0b2 100644 --- a/src/Extension/H264Rtmp.cpp +++ b/src/Extension/H264Rtmp.cpp @@ -152,7 +152,7 @@ bool H264RtmpEncoder::inputFrame(const Frame::Ptr &frame) { _rtmp_packet->buffer.resize(5); } - return _merger.inputFrame(frame, [this](uint32_t dts, uint32_t pts, const Buffer::Ptr &, bool have_key_frame) { + return _merger.inputFrame(frame, [this](uint64_t dts, uint64_t pts, const Buffer::Ptr &, bool have_key_frame) { //flags _rtmp_packet->buffer[0] = FLV_CODEC_H264 | ((have_key_frame ? FLV_KEY_FRAME : FLV_INTER_FRAME) << 4); //not config diff --git a/src/Extension/H264Rtp.cpp b/src/Extension/H264Rtp.cpp index 34188c7b..dc13a03a 100644 --- a/src/Extension/H264Rtp.cpp +++ b/src/Extension/H264Rtp.cpp @@ -73,7 +73,7 @@ Table 1. 
Summary of NAL unit types and their payload structures 30-31 undefined - */ -bool H264RtpDecoder::singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp){ +bool H264RtpDecoder::singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp){ _frame->_buffer.assign("\x00\x00\x00\x01", 4); _frame->_buffer.append((char *) ptr, size); _frame->_pts = stamp; @@ -82,7 +82,7 @@ bool H264RtpDecoder::singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, return key; } -bool H264RtpDecoder::unpackStapA(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp) { +bool H264RtpDecoder::unpackStapA(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp) { //STAP-A 单一时间的组合包 auto have_key_frame = false; auto end = ptr + size; @@ -102,7 +102,7 @@ bool H264RtpDecoder::unpackStapA(const RtpPacket::Ptr &rtp, const uint8_t *ptr, return have_key_frame; } -bool H264RtpDecoder::mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp, uint16_t seq){ +bool H264RtpDecoder::mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp, uint16_t seq){ auto nal_suffix = *ptr & (~0x1F); FuFlags *fu = (FuFlags *) (ptr + 1); if (fu->start_bit) { @@ -197,7 +197,7 @@ H264RtpEncoder::H264RtpEncoder(uint32_t ssrc, uint32_t mtu, uint32_t sample_rate : RtpInfo(ssrc, mtu, sample_rate, pt, interleaved) { } -void H264RtpEncoder::insertConfigFrame(uint32_t pts){ +void H264RtpEncoder::insertConfigFrame(uint64_t pts){ if (!_sps || !_pps) { return; } @@ -206,7 +206,7 @@ void H264RtpEncoder::insertConfigFrame(uint32_t pts){ packRtp(_pps->data() + _pps->prefixSize(), _pps->size() - _pps->prefixSize(), pts, false, false); } -void H264RtpEncoder::packRtp(const char *ptr, size_t len, uint32_t pts, bool is_mark, bool gop_pos){ +void H264RtpEncoder::packRtp(const char *ptr, size_t len, uint64_t pts, bool is_mark, bool gop_pos){ if (len + 3 <= getMaxSize()) { //STAP-A模式打包小于MTU packRtpStapA(ptr, len, pts, is_mark, gop_pos); @@ -216,7 +216,7 @@ void H264RtpEncoder::packRtp(const char *ptr, size_t len, uint32_t pts, bool is_ } } -void H264RtpEncoder::packRtpFu(const char *ptr, size_t len, uint32_t pts, bool is_mark, bool gop_pos){ +void H264RtpEncoder::packRtpFu(const char *ptr, size_t len, uint64_t pts, bool is_mark, bool gop_pos){ auto packet_size = getMaxSize() - 2; if (len <= packet_size + 1) { //小于FU-A打包最小字节长度要求,采用STAP-A模式 @@ -256,7 +256,7 @@ void H264RtpEncoder::packRtpFu(const char *ptr, size_t len, uint32_t pts, bool i } } -void H264RtpEncoder::packRtpStapA(const char *ptr, size_t len, uint32_t pts, bool is_mark, bool gop_pos){ +void H264RtpEncoder::packRtpStapA(const char *ptr, size_t len, uint64_t pts, bool is_mark, bool gop_pos){ //如果帧长度不超过mtu,为了兼容性 webrtc,采用STAP-A模式打包 auto rtp = makeRtp(getTrackType(), nullptr, len + 3, is_mark, pts); uint8_t *payload = rtp->getPayload(); diff --git a/src/Extension/H264Rtp.h b/src/Extension/H264Rtp.h index cd81abe0..be00f7af 100644 --- a/src/Extension/H264Rtp.h +++ b/src/Extension/H264Rtp.h @@ -42,9 +42,9 @@ public: } private: - bool singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp); - bool unpackStapA(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp); - bool mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp, uint16_t seq); + bool singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp); + bool unpackStapA(const 
RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp); + bool mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp, uint16_t seq); bool decodeRtp(const RtpPacket::Ptr &rtp); H264Frame::Ptr obtainFrame(); @@ -86,11 +86,11 @@ public: bool inputFrame(const Frame::Ptr &frame) override; private: - void insertConfigFrame(uint32_t pts); + void insertConfigFrame(uint64_t pts); bool inputFrame_l(const Frame::Ptr &frame, bool is_mark); - void packRtp(const char *data, size_t len, uint32_t pts, bool is_mark, bool gop_pos); - void packRtpFu(const char *data, size_t len, uint32_t pts, bool is_mark, bool gop_pos); - void packRtpStapA(const char *data, size_t len, uint32_t pts, bool is_mark, bool gop_pos); + void packRtp(const char *data, size_t len, uint64_t pts, bool is_mark, bool gop_pos); + void packRtpFu(const char *data, size_t len, uint64_t pts, bool is_mark, bool gop_pos); + void packRtpStapA(const char *data, size_t len, uint64_t pts, bool is_mark, bool gop_pos); private: Frame::Ptr _sps; diff --git a/src/Extension/H265Rtmp.cpp b/src/Extension/H265Rtmp.cpp index 467be51a..54f9aeb5 100644 --- a/src/Extension/H265Rtmp.cpp +++ b/src/Extension/H265Rtmp.cpp @@ -172,7 +172,7 @@ bool H265RtmpEncoder::inputFrame(const Frame::Ptr &frame) { _rtmp_packet->buffer.resize(5); } - return _merger.inputFrame(frame, [this](uint32_t dts, uint32_t pts, const Buffer::Ptr &, bool have_key_frame) { + return _merger.inputFrame(frame, [this](uint64_t dts, uint64_t pts, const Buffer::Ptr &, bool have_key_frame) { //flags _rtmp_packet->buffer[0] = FLV_CODEC_H265 | ((have_key_frame ? FLV_KEY_FRAME : FLV_INTER_FRAME) << 4); //not config diff --git a/src/Extension/H265Rtp.cpp b/src/Extension/H265Rtp.cpp index 28af721b..305dcd27 100644 --- a/src/Extension/H265Rtp.cpp +++ b/src/Extension/H265Rtp.cpp @@ -69,7 +69,7 @@ H265Frame::Ptr H265RtpDecoder::obtainFrame() { | : ...OPTIONAL RTP padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -bool H265RtpDecoder::unpackAp(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp){ +bool H265RtpDecoder::unpackAp(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp){ bool have_key_frame = false; //忽略PayloadHdr CHECK_SIZE(size, 2, have_key_frame); @@ -119,7 +119,7 @@ bool H265RtpDecoder::unpackAp(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssi +---------------+ */ -bool H265RtpDecoder::mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp, uint16_t seq){ +bool H265RtpDecoder::mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp, uint16_t seq){ CHECK_SIZE(size, 4, false); auto s_bit = ptr[2] >> 7; auto e_bit = (ptr[2] >> 6) & 0x01; @@ -216,7 +216,7 @@ bool H265RtpDecoder::decodeRtp(const RtpPacket::Ptr &rtp) { } } -bool H265RtpDecoder::singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp){ +bool H265RtpDecoder::singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp){ _frame->_buffer.assign("\x00\x00\x00\x01", 4); _frame->_buffer.append((char *) ptr, size); _frame->_pts = stamp; diff --git a/src/Extension/H265Rtp.h b/src/Extension/H265Rtp.h index ad1002e1..b1982bfd 100644 --- a/src/Extension/H265Rtp.h +++ b/src/Extension/H265Rtp.h @@ -42,9 +42,9 @@ public: } private: - bool unpackAp(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp); - bool mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t 
stamp, uint16_t seq); - bool singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint32_t stamp); + bool unpackAp(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp); + bool mergeFu(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp, uint16_t seq); + bool singleFrame(const RtpPacket::Ptr &rtp, const uint8_t *ptr, ssize_t size, uint64_t stamp); bool decodeRtp(const RtpPacket::Ptr &rtp); H265Frame::Ptr obtainFrame(); diff --git a/src/FMP4/FMP4MediaSourceMuxer.h b/src/FMP4/FMP4MediaSourceMuxer.h index bc59c8ce..cad7caad 100644 --- a/src/FMP4/FMP4MediaSourceMuxer.h +++ b/src/FMP4/FMP4MediaSourceMuxer.h @@ -72,7 +72,7 @@ public: } protected: - void onSegmentData(std::string string, uint32_t stamp, bool key_frame) override { + void onSegmentData(std::string string, uint64_t stamp, bool key_frame) override { if (string.empty()) { return; } diff --git a/src/Record/HlsMaker.cpp b/src/Record/HlsMaker.cpp index a6ea1bae..7faed8e2 100644 --- a/src/Record/HlsMaker.cpp +++ b/src/Record/HlsMaker.cpp @@ -75,7 +75,7 @@ void HlsMaker::makeIndexFile(bool eof) { } -void HlsMaker::inputData(void *data, size_t len, uint32_t timestamp, bool is_idr_fast_packet) { +void HlsMaker::inputData(void *data, size_t len, uint64_t timestamp, bool is_idr_fast_packet) { if (data && len) { if (timestamp < _last_timestamp) { //时间戳回退了,切片时长重新计时 @@ -117,7 +117,7 @@ void HlsMaker::delOldSegment() { } } -void HlsMaker::addNewSegment(uint32_t stamp) { +void HlsMaker::addNewSegment(uint64_t stamp) { if (!_last_file_name.empty() && stamp - _last_seg_timestamp < _seg_duration * 1000) { //存在上个切片,并且未到分片时间 return; diff --git a/src/Record/HlsMaker.h b/src/Record/HlsMaker.h index 247c152a..78dbcff5 100644 --- a/src/Record/HlsMaker.h +++ b/src/Record/HlsMaker.h @@ -38,7 +38,7 @@ public: * @param timestamp 毫秒时间戳 * @param is_idr_fast_packet 是否为关键帧第一个包 */ - void inputData(void *data, size_t len, uint32_t timestamp, bool is_idr_fast_packet); + void inputData(void *data, size_t len, uint64_t timestamp, bool is_idr_fast_packet); /** * 是否为直播 @@ -85,7 +85,7 @@ protected: * 上一个 ts 切片写入完成, 可在这里进行通知处理 * @param duration_ms 上一个 ts 切片的时长, 单位为毫秒 */ - virtual void onFlushLastSegment(uint32_t duration_ms) {}; + virtual void onFlushLastSegment(uint64_t duration_ms) {}; /** * 关闭上个ts切片并且写入m3u8索引 @@ -109,14 +109,14 @@ private: * 添加新的ts切片 * @param timestamp */ - void addNewSegment(uint32_t timestamp); + void addNewSegment(uint64_t timestamp); private: float _seg_duration = 0; uint32_t _seg_number = 0; bool _seg_keep = false; - uint32_t _last_timestamp = 0; - uint32_t _last_seg_timestamp = 0; + uint64_t _last_timestamp = 0; + uint64_t _last_seg_timestamp = 0; uint64_t _file_index = 0; std::string _last_file_name; std::deque > _seg_dur_list; diff --git a/src/Record/HlsMakerImp.cpp b/src/Record/HlsMakerImp.cpp index cde0f129..cb36ca7e 100644 --- a/src/Record/HlsMakerImp.cpp +++ b/src/Record/HlsMakerImp.cpp @@ -130,7 +130,7 @@ void HlsMakerImp::onWriteHls(const std::string &data) { //DebugL << "\r\n" << string(data,len); } -void HlsMakerImp::onFlushLastSegment(uint32_t duration_ms) { +void HlsMakerImp::onFlushLastSegment(uint64_t duration_ms) { //关闭并flush文件到磁盘 _file = nullptr; diff --git a/src/Record/HlsMakerImp.h b/src/Record/HlsMakerImp.h index b6ef254a..6b9acffa 100644 --- a/src/Record/HlsMakerImp.h +++ b/src/Record/HlsMakerImp.h @@ -54,7 +54,7 @@ protected: void onDelSegment(uint64_t index) override; void onWriteSegment(const char *data, size_t len) override; void onWriteHls(const std::string 
&data) override; - void onFlushLastSegment(uint32_t duration_ms) override; + void onFlushLastSegment(uint64_t duration_ms) override; private: std::shared_ptr makeFile(const std::string &file,bool setbuf = false); diff --git a/src/Record/HlsRecorder.h b/src/Record/HlsRecorder.h index 0762a3e4..c5b50f45 100644 --- a/src/Record/HlsRecorder.h +++ b/src/Record/HlsRecorder.h @@ -75,7 +75,7 @@ public: } private: - void onWrite(std::shared_ptr buffer, uint32_t timestamp, bool key_pos) override { + void onWrite(std::shared_ptr buffer, uint64_t timestamp, bool key_pos) override { if (!buffer) { _hls->inputData(nullptr, 0, timestamp, key_pos); } else { diff --git a/src/Record/MP4Demuxer.cpp b/src/Record/MP4Demuxer.cpp index f4884df8..4ea3fc83 100644 --- a/src/Record/MP4Demuxer.cpp +++ b/src/Record/MP4Demuxer.cpp @@ -233,10 +233,10 @@ Frame::Ptr MP4Demuxer::makeFrame(uint32_t track_id, const Buffer::Ptr &buf, int6 offset += (frame_len + 4); } if (codec == CodecH264) { - ret = std::make_shared >(buf, (uint32_t)dts, (uint32_t)pts, 4, DATA_OFFSET); + ret = std::make_shared >(buf, (uint64_t)dts, (uint64_t)pts, 4, DATA_OFFSET); break; } - ret = std::make_shared >(buf, (uint32_t)dts, (uint32_t)pts, 4, DATA_OFFSET); + ret = std::make_shared >(buf, (uint64_t)dts, (uint64_t)pts, 4, DATA_OFFSET); break; } @@ -245,14 +245,14 @@ Frame::Ptr MP4Demuxer::makeFrame(uint32_t track_id, const Buffer::Ptr &buf, int6 assert(track); //加上adts头 dumpAacConfig(track->getAacCfg(), buf->size() - DATA_OFFSET, (uint8_t *) buf->data() + (DATA_OFFSET - ADTS_HEADER_LEN), ADTS_HEADER_LEN); - ret = std::make_shared >(buf, (uint32_t)dts, (uint32_t)pts, ADTS_HEADER_LEN, DATA_OFFSET - ADTS_HEADER_LEN, codec); + ret = std::make_shared >(buf, (uint64_t)dts, (uint64_t)pts, ADTS_HEADER_LEN, DATA_OFFSET - ADTS_HEADER_LEN, codec); break; } case CodecOpus: case CodecG711A: case CodecG711U: { - ret = std::make_shared >(buf, (uint32_t)dts, (uint32_t)pts, 0, DATA_OFFSET, codec); + ret = std::make_shared >(buf, (uint64_t)dts, (uint64_t)pts, 0, DATA_OFFSET, codec); break; } diff --git a/src/Record/MP4Muxer.cpp b/src/Record/MP4Muxer.cpp index a3ccf275..f9a13fe6 100644 --- a/src/Record/MP4Muxer.cpp +++ b/src/Record/MP4Muxer.cpp @@ -93,7 +93,7 @@ bool MP4MuxerInterface::inputFrame(const Frame::Ptr &frame) { case CodecH264: case CodecH265: { //这里的代码逻辑是让SPS、PPS、IDR这些时间戳相同的帧打包到一起当做一个帧处理, - _frame_merger.inputFrame(frame, [&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool have_idr) { + _frame_merger.inputFrame(frame, [&](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool have_idr) { track_info.stamp.revise(dts, pts, dts_out, pts_out); mp4_writer_write(_mov_writter.get(), track_info.track_id, diff --git a/src/Record/MP4Muxer.h b/src/Record/MP4Muxer.h index 50475224..25fd0447 100644 --- a/src/Record/MP4Muxer.h +++ b/src/Record/MP4Muxer.h @@ -134,7 +134,7 @@ protected: * @param stamp 切片末尾时间戳 * @param key_frame 是否有关键帧 */ - virtual void onSegmentData(std::string string, uint32_t stamp, bool key_frame) = 0; + virtual void onSegmentData(std::string string, uint64_t stamp, bool key_frame) = 0; protected: MP4FileIO::Writer createWriter() override; diff --git a/src/Record/MPEG.cpp b/src/Record/MPEG.cpp index 16dd3e4b..085e9f38 100644 --- a/src/Record/MPEG.cpp +++ b/src/Record/MPEG.cpp @@ -63,10 +63,10 @@ bool MpegMuxer::inputFrame(const Frame::Ptr &frame) { case CodecH264: case CodecH265: { //这里的代码逻辑是让SPS、PPS、IDR这些时间戳相同的帧打包到一起当做一个帧处理, - return _frame_merger.inputFrame(frame,[&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool 
have_idr) { + return _frame_merger.inputFrame(frame,[&](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool have_idr) { _key_pos = have_idr; //取视频时间戳为TS的时间戳 - _timestamp = (uint32_t) dts; + _timestamp = dts; _max_cache_size = 512 + 1.2 * buffer->size(); mpeg_muxer_input((::mpeg_muxer_t *)_context, track_id, have_idr ? 0x0001 : 0, pts * 90LL,dts * 90LL, buffer->data(), buffer->size()); flushCache(); @@ -83,7 +83,7 @@ bool MpegMuxer::inputFrame(const Frame::Ptr &frame) { default: { if (!_have_video) { //没有视频时,才以音频时间戳为TS的时间戳 - _timestamp = (uint32_t) frame->dts(); + _timestamp = frame->dts(); } _max_cache_size = 512 + 1.2 * frame->size(); mpeg_muxer_input((::mpeg_muxer_t *)_context, track_id, frame->keyFrame() ? 0x0001 : 0, frame->pts() * 90LL, frame->dts() * 90LL, frame->data(), frame->size()); diff --git a/src/Record/MPEG.h b/src/Record/MPEG.h index e1e2862a..5d4b05cd 100644 --- a/src/Record/MPEG.h +++ b/src/Record/MPEG.h @@ -52,7 +52,7 @@ protected: * @param timestamp 时间戳,单位毫秒 * @param key_pos 是否为关键帧的第一个ts/ps包,用于确保ts切片第一帧为关键帧 */ - virtual void onWrite(std::shared_ptr buffer, uint32_t timestamp, bool key_pos) = 0; + virtual void onWrite(std::shared_ptr buffer, uint64_t timestamp, bool key_pos) = 0; private: void createContext(); @@ -65,7 +65,7 @@ private: bool _have_video = false; bool _key_pos = false; uint32_t _max_cache_size = 0; - uint32_t _timestamp = 0; + uint64_t _timestamp = 0; struct mpeg_muxer_t *_context = nullptr; std::unordered_map _codec_to_trackid; FrameMerger _frame_merger{FrameMerger::h264_prefix}; @@ -90,7 +90,7 @@ public: bool inputFrame(const Frame::Ptr &frame) override { return false; } protected: - virtual void onWrite(std::shared_ptr buffer, uint32_t timestamp, bool key_pos) = 0; + virtual void onWrite(std::shared_ptr buffer, uint64_t timestamp, bool key_pos) = 0; }; }//namespace mediakit diff --git a/src/Rtp/Decoder.cpp b/src/Rtp/Decoder.cpp index fb530d78..b9827e98 100644 --- a/src/Rtp/Decoder.cpp +++ b/src/Rtp/Decoder.cpp @@ -165,8 +165,8 @@ void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t d if (!_tracks[TrackVideo]) { onTrack(std::make_shared()); } - auto frame = std::make_shared((char *) data, bytes, (uint32_t)dts, (uint32_t)pts, prefixSize((char *) data, bytes)); - _merger.inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool) { + auto frame = std::make_shared((char *) data, bytes, (uint64_t)dts, (uint64_t)pts, prefixSize((char *) data, bytes)); + _merger.inputFrame(frame,[this](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool) { onFrame(std::make_shared >(buffer, dts, pts, prefixSize(buffer->data(), buffer->size()), 0)); }); break; @@ -176,8 +176,8 @@ void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t d if (!_tracks[TrackVideo]) { onTrack(std::make_shared()); } - auto frame = std::make_shared((char *) data, bytes, (uint32_t)dts, (uint32_t)pts, prefixSize((char *) data, bytes)); - _merger.inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool) { + auto frame = std::make_shared((char *) data, bytes, (uint64_t)dts, (uint64_t)pts, prefixSize((char *) data, bytes)); + _merger.inputFrame(frame,[this](uint64_t dts, uint64_t pts, const Buffer::Ptr &buffer, bool) { onFrame(std::make_shared >(buffer, dts, pts, prefixSize(buffer->data(), buffer->size()), 0)); }); break; @@ -193,7 +193,7 @@ void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t d if (!_tracks[TrackAudio]) { onTrack(std::make_shared()); } - 
onFrame(std::make_shared(CodecAAC, (char *) data, bytes, (uint32_t)dts, 0, ADTS_HEADER_LEN)); + onFrame(std::make_shared(CodecAAC, (char *) data, bytes, (uint64_t)dts, 0, ADTS_HEADER_LEN)); break; } @@ -204,7 +204,7 @@ void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t d //G711传统只支持 8000/1/16的规格,FFmpeg貌似做了扩展,但是这里不管它了 onTrack(std::make_shared(codec, 8000, 1, 16)); } - onFrame(std::make_shared(codec, (char *) data, bytes, (uint32_t)dts)); + onFrame(std::make_shared(codec, (char *) data, bytes, (uint64_t)dts)); break; } @@ -212,7 +212,7 @@ void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t d if (!_tracks[TrackAudio]) { onTrack(std::make_shared()); } - onFrame(std::make_shared(CodecOpus, (char *) data, bytes, (uint32_t)dts)); + onFrame(std::make_shared(CodecOpus, (char *) data, bytes, (uint64_t)dts)); break; } diff --git a/src/Rtp/PSEncoder.cpp b/src/Rtp/PSEncoder.cpp index aaff629f..a3ca8086 100644 --- a/src/Rtp/PSEncoder.cpp +++ b/src/Rtp/PSEncoder.cpp @@ -32,7 +32,7 @@ PSEncoderImp::~PSEncoderImp() { InfoL << this << " " << printSSRC(_rtp_encoder->getSsrc()); } -void PSEncoderImp::onWrite(std::shared_ptr buffer, uint32_t stamp, bool key_pos) { +void PSEncoderImp::onWrite(std::shared_ptr buffer, uint64_t stamp, bool key_pos) { if (!buffer) { return; } diff --git a/src/Rtp/PSEncoder.h b/src/Rtp/PSEncoder.h index adbf76f7..1e3c13d4 100644 --- a/src/Rtp/PSEncoder.h +++ b/src/Rtp/PSEncoder.h @@ -30,7 +30,7 @@ protected: virtual void onRTP(toolkit::Buffer::Ptr rtp,bool is_key = false) = 0; protected: - void onWrite(std::shared_ptr buffer, uint32_t stamp, bool key_pos) override; + void onWrite(std::shared_ptr buffer, uint64_t stamp, bool key_pos) override; private: std::shared_ptr _rtp_encoder; diff --git a/src/Rtp/RtpProcess.cpp b/src/Rtp/RtpProcess.cpp index d224a3ee..b0027f69 100644 --- a/src/Rtp/RtpProcess.cpp +++ b/src/Rtp/RtpProcess.cpp @@ -64,7 +64,7 @@ RtpProcess::~RtpProcess() { } } -bool RtpProcess::inputRtp(bool is_udp, const Socket::Ptr &sock, const char *data, size_t len, const struct sockaddr *addr, uint32_t *dts_out) { +bool RtpProcess::inputRtp(bool is_udp, const Socket::Ptr &sock, const char *data, size_t len, const struct sockaddr *addr, uint64_t *dts_out) { auto is_busy = _busy_flag.test_and_set(); if (is_busy) { //其他线程正在执行本函数 diff --git a/src/Rtp/RtpProcess.h b/src/Rtp/RtpProcess.h index e6f9b6bd..0d729b06 100644 --- a/src/Rtp/RtpProcess.h +++ b/src/Rtp/RtpProcess.h @@ -35,7 +35,7 @@ public: * @param dts_out 解析出最新的dts * @return 是否解析成功 */ - bool inputRtp(bool is_udp, const toolkit::Socket::Ptr &sock, const char *data, size_t len, const struct sockaddr *addr , uint32_t *dts_out = nullptr); + bool inputRtp(bool is_udp, const toolkit::Socket::Ptr &sock, const char *data, size_t len, const struct sockaddr *addr , uint64_t *dts_out = nullptr); /** * 是否超时,用于超时移除对象 @@ -86,7 +86,7 @@ private: void doCachedFunc(); private: - uint32_t _dts = 0; + uint64_t _dts = 0; uint64_t _total_bytes = 0; std::unique_ptr _addr; toolkit::Socket::Ptr _sock; diff --git a/src/Rtp/RtpSelector.cpp b/src/Rtp/RtpSelector.cpp index f1c6d423..80f87110 100644 --- a/src/Rtp/RtpSelector.cpp +++ b/src/Rtp/RtpSelector.cpp @@ -25,8 +25,8 @@ void RtpSelector::clear(){ _map_rtp_process.clear(); } -bool RtpSelector::inputRtp(const Socket::Ptr &sock, const char *data, size_t data_len, - const struct sockaddr *addr,uint32_t *dts_out) { +bool RtpSelector::inputRtp(const Socket::Ptr &sock, const char *data, size_t data_len, const struct sockaddr *addr, + uint64_t 
*dts_out) { uint32_t ssrc = 0; if (!getSSRC(data, data_len, ssrc)) { WarnL << "get ssrc from rtp failed:" << data_len; diff --git a/src/Rtp/RtpSelector.h b/src/Rtp/RtpSelector.h index 70e160a7..87ac2a8a 100644 --- a/src/Rtp/RtpSelector.h +++ b/src/Rtp/RtpSelector.h @@ -64,7 +64,7 @@ public: * @return 是否成功 */ bool inputRtp(const toolkit::Socket::Ptr &sock, const char *data, size_t data_len, - const struct sockaddr *addr, uint32_t *dts_out = nullptr); + const struct sockaddr *addr, uint64_t *dts_out = nullptr); /** * 获取一个rtp处理器 diff --git a/src/Rtsp/RtpCodec.cpp b/src/Rtsp/RtpCodec.cpp index fcdb649d..21257bce 100644 --- a/src/Rtsp/RtpCodec.cpp +++ b/src/Rtsp/RtpCodec.cpp @@ -12,7 +12,7 @@ namespace mediakit{ -RtpPacket::Ptr RtpInfo::makeRtp(TrackType type, const void* data, size_t len, bool mark, uint32_t stamp) { +RtpPacket::Ptr RtpInfo::makeRtp(TrackType type, const void* data, size_t len, bool mark, uint64_t stamp) { uint16_t payload_len = (uint16_t) (len + RtpPacket::kRtpHeaderSize); auto rtp = RtpPacket::create(); rtp->setCapacity(payload_len + RtpPacket::kRtpTcpHeaderSize); diff --git a/src/Rtsp/RtpCodec.h b/src/Rtsp/RtpCodec.h index 8fe2e7e3..46988008 100644 --- a/src/Rtsp/RtpCodec.h +++ b/src/Rtsp/RtpCodec.h @@ -84,7 +84,7 @@ public: return _ssrc; } - RtpPacket::Ptr makeRtp(TrackType type,const void *data, size_t len, bool mark, uint32_t stamp); + RtpPacket::Ptr makeRtp(TrackType type,const void *data, size_t len, bool mark, uint64_t stamp); private: uint8_t _pt; diff --git a/src/Rtsp/Rtsp.cpp b/src/Rtsp/Rtsp.cpp index 52d86468..e6c64f3b 100644 --- a/src/Rtsp/Rtsp.cpp +++ b/src/Rtsp/Rtsp.cpp @@ -558,8 +558,8 @@ uint32_t RtpPacket::getStamp() const { return ntohl(getHeader()->stamp); } -uint32_t RtpPacket::getStampMS(bool ntp) const { - return ntp ? ntp_stamp & 0xFFFFFFFF : getStamp() * uint64_t(1000) / sample_rate; +uint64_t RtpPacket::getStampMS(bool ntp) const { + return ntp ? 
ntp_stamp : getStamp() * uint64_t(1000) / sample_rate; } uint32_t RtpPacket::getSSRC() const { diff --git a/src/Rtsp/Rtsp.h b/src/Rtsp/Rtsp.h index d18bc374..839c67dc 100644 --- a/src/Rtsp/Rtsp.h +++ b/src/Rtsp/Rtsp.h @@ -159,7 +159,7 @@ public: uint16_t getSeq() const; uint32_t getStamp() const; //主机字节序的时间戳,已经转换为毫秒 - uint32_t getStampMS(bool ntp = true) const; + uint64_t getStampMS(bool ntp = true) const; //主机字节序的ssrc uint32_t getSSRC() const; //有效负载,跳过csrc、ext diff --git a/src/TS/TSMediaSource.h b/src/TS/TSMediaSource.h index 5d9658b0..bbd2f498 100644 --- a/src/TS/TSMediaSource.h +++ b/src/TS/TSMediaSource.h @@ -27,7 +27,7 @@ public: ~TSPacket() override = default; public: - uint32_t time_stamp = 0; + uint64_t time_stamp = 0; }; //TS直播源 diff --git a/src/TS/TSMediaSourceMuxer.h b/src/TS/TSMediaSourceMuxer.h index 9a96f0c8..2b3e5d80 100644 --- a/src/TS/TSMediaSourceMuxer.h +++ b/src/TS/TSMediaSourceMuxer.h @@ -66,7 +66,7 @@ public: } protected: - void onWrite(std::shared_ptr buffer, uint32_t timestamp, bool key_pos) override { + void onWrite(std::shared_ptr buffer, uint64_t timestamp, bool key_pos) override { if (!buffer) { return; } diff --git a/tests/test_rtp.cpp b/tests/test_rtp.cpp index 5146d380..4826c821 100644 --- a/tests/test_rtp.cpp +++ b/tests/test_rtp.cpp @@ -34,7 +34,7 @@ static bool loadFile(const char *path){ return false; } - uint32_t timeStamp_last = 0; + uint64_t timeStamp_last = 0; uint16_t len; char rtp[0xFFFF]; struct sockaddr_storage addr = {0}; @@ -57,7 +57,7 @@ static bool loadFile(const char *path){ break; } total_size += len; - uint32_t timeStamp; + uint64_t timeStamp; RtpSelector::Instance().inputRtp(sock, rtp, len, (struct sockaddr *)&addr, &timeStamp); auto diff = timeStamp - timeStamp_last;
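
Note (illustrative aside, not part of the patch): the motivation for widening every timestamp from uint32_t to uint64_t is that a 32-bit millisecond counter overflows after 2^32 ms, roughly 49.7 days, which matters for long-running live streams and recorders, and NTP-derived millisecond stamps (now returned unmasked by RtpPacket::getStampMS) do not fit in 32 bits at all. The short C++ sketch below only demonstrates the wraparound arithmetic; the variable names are invented for the example and do not appear in the codebase.

#include <cassert>
#include <cstdint>

int main() {
    const uint64_t ms_per_day = 24ull * 60 * 60 * 1000;
    const uint64_t uptime_ms  = 50 * ms_per_day;               // a stream running for 50 days
    const uint32_t stamp32 = static_cast<uint32_t>(uptime_ms);  // truncated: wrapped past 2^32 ms
    const uint64_t stamp64 = uptime_ms;                         // keeps the real value
    assert(stamp32 != uptime_ms);  // 50 days > ~49.7 days, so the 32-bit stamp has wrapped
    assert(stamp64 == uptime_ms);  // the 64-bit stamp stays monotonic
    return 0;
}

With the wider type, stamp differences such as timeStamp - timeStamp_last in tests/test_rtp.cpp above remain well-defined across that ~49.7-day boundary.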