Mirror of https://git.ffmpeg.org/ffmpeg.git

all: fix typos found by codespell

commit 262d41c804 (parent 8d439b2483)
303 changed files with 482 additions and 481 deletions
@@ -156,6 +156,7 @@ version 6.1:
 variable-fields elements within the same parent element
 - ffprobe -output_format option added as an alias of -of

+# codespell:off

 version 6.0:
 - Radiance HDR image support
@@ -24,5 +24,5 @@ NOTICE for Package Maintainers

 - It is recommended to build FFmpeg twice, first with minimal external dependencies so
 that 3rd party packages, which depend on FFmpegs libavutil/libavfilter/libavcodec/libavformat
-can then be built. And last build FFmpeg with full dependancies (which may in turn depend on
+can then be built. And last build FFmpeg with full dependencies (which may in turn depend on
 some of these 3rd party packages). This avoids circular dependencies during build.
@@ -218,7 +218,7 @@ while (<F>) {
 # Lines of the form '} SOME_VERSION_NAME_1.0;'
 if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) {
 $glob = 'glob';
-# We tried to match symbols agains this version, but none matched.
+# We tried to match symbols against this version, but none matched.
 # Emit dummy hidden symbol to avoid marking this version WEAK.
 if ($matches_attempted && $matched_symbols == 0) {
 print " hidden:\n";
configure (10 changes)
@@ -823,7 +823,7 @@ is_in(){
 }

 # The cfg loop is very hot (several thousands iterations), and in bash also
-# potentialy quite slow. Try to abort the iterations early, preferably without
+# potentially quite slow. Try to abort the iterations early, preferably without
 # calling functions. 70%+ of the time cfg is already done or without deps.
 check_deps(){
 for cfg; do
@@ -929,7 +929,7 @@ reverse () {
 '
 }

-# keeps the last occurence of each non-unique item
+# keeps the last occurrence of each non-unique item
 unique(){
 unique_out=
 eval unique_in=\$$1
@@ -4530,7 +4530,7 @@ done

 if disabled autodetect; then

-# Unless iconv is explicitely disabled by the user, we still want to probe
+# Unless iconv is explicitly disabled by the user, we still want to probe
 # for the iconv from the libc.
 disabled iconv || enable libc_iconv

@@ -5142,7 +5142,7 @@ probe_cc(){
 disable stripping
 elif $_cc -? 2>/dev/null | grep -q 'LLVM.*Linker'; then
 # lld can emulate multiple different linkers; in ms link.exe mode,
-# the -? parameter gives the help output which contains an identifyable
+# the -? parameter gives the help output which contains an identifiable
 # string, while it gives an error in other modes.
 _type=lld-link
 # The link.exe mode doesn't have a switch for getting the version,
@@ -7794,7 +7794,7 @@ elif enabled_any msvc icl; then
 fi
 # msvcrt10 x64 incorrectly enables log2, only msvcrt12 (MSVC 2013) onwards actually has log2.
 check_cpp_condition log2 crtversion.h "_VC_CRT_MAJOR_VERSION >= 12"
-# the new SSA optimzer in VS2015 U3 is mis-optimizing some parts of the code
+# the new SSA optimizer in VS2015 U3 is mis-optimizing some parts of the code
 # Issue has been fixed in MSVC v19.00.24218.
 test_cpp_condition windows.h "_MSC_FULL_VER >= 190024218" ||
 check_cflags -d2SSAOptimizer-
@@ -613,7 +613,7 @@ API changes, most recent first:
 Deprecate AVFrame.palette_has_changed without replacement.

 2023-05-15 - 7d1d61cc5f5 - lavc 60 - avcodec.h
-Depreate AVCodecContext.ticks_per_frame in favor of
+Deprecate AVCodecContext.ticks_per_frame in favor of
 AVCodecContext.framerate (encoding) and
 AV_CODEC_PROP_FIELDS (decoding).

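As a decoding-side illustration of the replacement named in the entry above, here is a minimal sketch; the helper name is made up, only avcodec_descriptor_get() and AV_CODEC_PROP_FIELDS come from the APIchanges text:

    #include <libavcodec/codec_desc.h>

    /* Illustrative only: recover the old "ticks_per_frame == 2" behaviour for
     * interlaced-capable decoders from codec properties instead of the
     * deprecated AVCodecContext.ticks_per_frame field. */
    static int ticks_per_frame_equivalent(enum AVCodecID id)
    {
        const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
        return (desc && (desc->props & AV_CODEC_PROP_FIELDS)) ? 2 : 1;
    }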
@@ -621,7 +621,7 @@ API changes, most recent first:
 Add AV_CODEC_PROP_FIELDS.

 2023-05-15 - 8b20d0dcb5c - lavc 60 - codec.h
-Depreate AV_CODEC_CAP_SUBFRAMES without replacement.
+Deprecate AV_CODEC_CAP_SUBFRAMES without replacement.

 2023-05-07 - c2ae8e30b7f - lavc 60.11.100 - codec_par.h
 Add AVCodecParameters.framerate.
@@ -1093,7 +1093,7 @@ HTML_STYLESHEET =
 # cascading style sheets that are included after the standard style sheets
 # created by doxygen. Using this option one can overrule certain style aspects.
 # This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
+# standard style sheet and is therefore more robust against future updates.
 # Doxygen will copy the style sheet files to the output directory.
 # Note: The order of the extra stylesheet files is of importance (e.g. the last
 # stylesheet in the list overrules the setting of the previous ones in the
@@ -1636,7 +1636,7 @@ EXTRA_PACKAGES =
 # Note: Only use a user-defined header if you know what you are doing! The
 # following commands have a special meaning inside the header: $title,
 # $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
 # for the replacement values of the other commands the user is referred to
 # HTML_HEADER.
 # This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -123,7 +123,7 @@ Internally, the TC should take decisions with a majority, or using ranked-choice

 Each TC member must vote on such decision according to what is, in their view, best for the project.

-If a TC member feels they are affected by a conflict of interest with regards to the case, they should announce it and recuse themselves from the TC
+If a TC member feels they are affected by a conflict of interest with regards to the case, they should announce it and recurse themselves from the TC
 discussion and vote.

 A conflict of interest is presumed to occur when a TC member has a personal interest (e.g. financial) in a specific outcome of the case.
@@ -395,7 +395,7 @@ without this library.
 @c man end AUDIO DECODERS

 @chapter Subtitles Decoders
-@c man begin SUBTILES DECODERS
+@c man begin SUBTITLES DECODERS

 @section libaribb24

@@ -427,7 +427,7 @@ Enabled by default.
 Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption}
 library.

-Implements profiles A and C of the Japanse ARIB STD-B24 standard,
+Implements profiles A and C of the Japanese ARIB STD-B24 standard,
 Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T.

 Requires the presence of the libaribcaption headers and library
@@ -477,7 +477,7 @@ Specify comma-separated list of font family names to be used for @dfn{bitmap}
 or @dfn{ass} type subtitle rendering.
 Only first font name is used for @dfn{ass} type subtitle.

-If not specified, use internaly defined default font family.
+If not specified, use internally defined default font family.

 @item -ass_single_rect @var{boolean}
 ARIB STD-B24 specifies that some captions may be displayed at different
@@ -495,7 +495,7 @@ default behavior at compilation.

 @item -force_outline_text @var{boolean}
 Specify whether always render outline text for all characters regardless of
-the indication by charactor style.
+the indication by character style.

 The default is @var{false}.

@@ -696,4 +696,4 @@ box and an end box, typically subtitles. Default value is 0 if

 @end table

-@c man end SUBTILES DECODERS
+@c man end SUBTITLES DECODERS
@@ -990,7 +990,7 @@ to 1 (-1 means automatic setting, 1 means enabled, 0 means
 disabled). Default value is -1.

 @item merge_pmt_versions
-Re-use existing streams when a PMT's version is updated and elementary
+Reuse existing streams when a PMT's version is updated and elementary
 streams move to different PIDs. Default value is 0.

 @item max_packet_size
@@ -546,7 +546,7 @@ FFmpeg also has a defined scope - your new API must fit within it.

 @subsubheading Replacing existing APIs
 If your new API is replacing an existing one, it should be strictly superior to
-it, so that the advantages of using the new API outweight the cost to the
+it, so that the advantages of using the new API outweigh the cost to the
 callers of changing their code. After adding the new API you should then
 deprecate the old one and schedule it for removal, as described in
 @ref{Removing interfaces}.
@@ -596,7 +596,7 @@ change in @file{doc/APIchanges}.
 Backward-incompatible API or ABI changes require incrementing (bumping) the
 major version number, as described in @ref{Major version bumps}. Major
 bumps are significant events that happen on a schedule - so if your change
-strictly requires one you should add it under @code{#if} preprocesor guards that
+strictly requires one you should add it under @code{#if} preprocessor guards that
 disable it until the next major bump happens.

 New APIs that can be added without breaking API or ABI compatibility require
@@ -917,7 +917,7 @@ improves readability.
 Consider adding a regression test for your code. All new modules
 should be covered by tests. That includes demuxers, muxers, decoders, encoders
 filters, bitstream filters, parsers. If its not possible to do that, add
-an explanation why to your patchset, its ok to not test if theres a reason.
+an explanation why to your patchset, its ok to not test if there's a reason.

 @item
 If you added NASM code please check that things still work with --disable-x86asm.
@@ -1038,7 +1038,7 @@ forces a wideband cutoff for bitrates < 15 kbps, unless CELT-only
 Set channel mapping family to be used by the encoder. The default value of -1
 uses mapping family 0 for mono and stereo inputs, and mapping family 1
 otherwise. The default also disables the surround masking and LFE bandwidth
-optimzations in libopus, and requires that the input contains 8 channels or
+optimizations in libopus, and requires that the input contains 8 channels or
 fewer.

 Other values include 0 for mono and stereo, 1 for surround sound with masking
@@ -3703,7 +3703,7 @@ For encoders set this flag to ON to reduce power consumption and GPU usage.
 @end table

 @subsection Runtime Options
-Following options can be used durning qsv encoding.
+Following options can be used during qsv encoding.

 @table @option
 @item @var{global_quality}
@@ -3813,7 +3813,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
 on performance and objective visual quality metric.

 @item @var{low_delay_brc}
-Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
+Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
 more accurate bitrate control to minimize the variance of bitstream size frame
 by frame. Value: -1-default 0-off 1-on

@@ -4012,7 +4012,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
 on performance and objective visual quality metric.

 @item @var{low_delay_brc}
-Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
+Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
 more accurate bitrate control to minimize the variance of bitstream size frame
 by frame. Value: -1-default 0-off 1-on

@@ -4246,7 +4246,7 @@ Extended bitrate control.
 Depth of look ahead in number frames, available when extbrc option is enabled.

 @item @var{low_delay_brc}
-Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
+Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
 more accurate bitrate control to minimize the variance of bitstream size frame
 by frame. Value: -1-default 0-off 1-on

@@ -418,7 +418,7 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
 exit(1);
 }

-/* allocate and init a re-usable frame */
+/* allocate and init a reusable frame */
 ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
 if (!ost->frame) {
 fprintf(stderr, "Could not allocate video frame\n");
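For context, a minimal sketch of what a reusable-frame helper like the alloc_frame() call above typically does, using only public libavutil calls; this is an illustration, not the exact code from the example:

    #include <libavutil/frame.h>

    static AVFrame *alloc_reusable_frame(enum AVPixelFormat pix_fmt, int width, int height)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format = pix_fmt;
        frame->width  = width;
        frame->height = height;
        /* Allocate the buffers once; callers should use av_frame_make_writable()
         * before each reuse so shared references are not overwritten. */
        if (av_frame_get_buffer(frame, 0) < 0) {
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }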
@@ -101,7 +101,7 @@ static int dynamic_set_parameter(AVCodecContext *avctx)
 /* Set codec specific option */
 if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
 goto fail;
-/* There is no "framerate" option in commom option list. Use "-r" to set
+/* There is no "framerate" option in common option list. Use "-r" to set
 * framerate, which is compatible with ffmpeg commandline. The video is
 * assumed to be average frame rate, so set time_base to 1/framerate. */
 e = av_dict_get(opts, "r", NULL, 0);
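A hedged sketch of the "-r" handling the comment above describes: parse the rate string from the options dictionary and derive time_base as 1/framerate. The helper name is invented; av_parse_video_rate() and av_inv_q() are standard libavutil calls:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>
    #include <libavutil/error.h>
    #include <libavutil/parseutils.h>
    #include <libavutil/rational.h>

    static int apply_framerate_option(AVCodecContext *avctx, AVDictionary *opts)
    {
        const AVDictionaryEntry *e = av_dict_get(opts, "r", NULL, 0);
        AVRational framerate;

        if (!e)
            return 0;                           /* no "-r" given, keep defaults */
        if (av_parse_video_rate(&framerate, e->value) < 0)
            return AVERROR(EINVAL);
        avctx->framerate = framerate;
        avctx->time_base = av_inv_q(framerate); /* average frame rate assumed */
        return 0;
    }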
@@ -180,7 +180,7 @@ static int open_input_file(char *filename)
 decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
 break;
 default:
-fprintf(stderr, "Codec is not supportted by qsv\n");
+fprintf(stderr, "Codec is not supported by qsv\n");
 return AVERROR(EINVAL);
 }

@@ -289,7 +289,7 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
 fprintf(stderr, "Failed to set encoding parameter.\n");
 goto fail;
 }
-/* There is no "framerate" option in commom option list. Use "-r" to
+/* There is no "framerate" option in common option list. Use "-r" to
 * set framerate, which is compatible with ffmpeg commandline. The
 * video is assumed to be average frame rate, so set time_base to
 * 1/framerate. */
@@ -376,7 +376,7 @@ which they are placed.
 These examples can of course be further generalized into arbitrary remappings
 of any number of inputs into any number of outputs.

-@section Trancoding
+@section Transcoding
 @emph{Transcoding} is the process of decoding a stream and then encoding it
 again. Since encoding tends to be computationally expensive and in most cases
 degrades the stream quality (i.e. it is @emph{lossy}), you should only transcode
@@ -443,7 +443,7 @@ that simple filtergraphs are tied to their output stream, so e.g. if you have
 multiple audio streams, @option{-af} will create a separate filtergraph for each
 one.

-Taking the trancoding example from above, adding filtering (and omitting audio,
+Taking the transcoding example from above, adding filtering (and omitting audio,
 for clarity) makes it look like this:
 @verbatim
 ┌──────────┬───────────────┐
@@ -1048,7 +1048,7 @@ The following flags are available:

 @table @option
 @item recon_gain
-Wether to signal if recon_gain is present as metadata in parameter blocks within frames
+Whether to signal if recon_gain is present as metadata in parameter blocks within frames
 @end table

 @item output_gain
@@ -216,7 +216,7 @@ filter input and output pads of all the filterchains are connected.

 Leading and trailing whitespaces (space, tabs, or line feeds) separating tokens
 in the filtergraph specification are ignored. This means that the filtergraph
-can be expressed using empty lines and spaces to improve redability.
+can be expressed using empty lines and spaces to improve readability.

 For example, the filtergraph:
 @example
@@ -3383,7 +3383,7 @@ where applicable, an overall figure is also given.
 It accepts the following option:
 @table @option
 @item length
-Short window length in seconds, used for peak and trough RMS measurement.
+Short window length in seconds, used for peak and through RMS measurement.
 Default is @code{0.05} (50 milliseconds). Allowed range is @code{[0 - 10]}.

 @item metadata
@@ -3544,7 +3544,7 @@ standard RMS level measured in dBFS

 @item RMS_peak
 @item RMS_trough
-peak and trough values for RMS level measured over a short window,
+peak and through values for RMS level measured over a short window,
 measured in dBFS.

 @item Zero crossings
@@ -7904,7 +7904,7 @@ Gains are separated by white spaces and each gain is set in dBFS.
 Default is @code{0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0}.

 @item bands, b
-Set the custom bands from where custon equalizer gains are set.
+Set the custom bands from where custom equalizer gains are set.
 This must be in strictly increasing order. Only used if the preset option is set to @code{custom}.
 Bands are separated by white spaces and each band represent frequency in Hz.
 Default is @code{25 40 63 100 160 250 400 630 1000 1600 2500 4000 6300 10000 16000 24000}.
@@ -9214,7 +9214,7 @@ The default value is @code{all}.

 Repack CEA-708 closed captioning side data

-This filter fixes various issues seen with commerical encoders
+This filter fixes various issues seen with commercial encoders
 related to upstream malformed CEA-708 payloads, specifically
 incorrect number of tuples (wrong cc_count for the target FPS),
 and incorrect ordering of tuples (i.e. the CEA-608 tuples are not at
@@ -9357,19 +9357,19 @@ Mostly useful to speed-up filtering.
 @item threy
 Set Y threshold for averaging chrominance values.
 Set finer control for max allowed difference between Y components
-of current pixel and neigbour pixels.
+of current pixel and neighbour pixels.
 Default value is 200. Allowed range is from 1 to 200.

 @item threu
 Set U threshold for averaging chrominance values.
 Set finer control for max allowed difference between U components
-of current pixel and neigbour pixels.
+of current pixel and neighbour pixels.
 Default value is 200. Allowed range is from 1 to 200.

 @item threv
 Set V threshold for averaging chrominance values.
 Set finer control for max allowed difference between V components
-of current pixel and neigbour pixels.
+of current pixel and neighbour pixels.
 Default value is 200. Allowed range is from 1 to 200.

 @item distance
@@ -9768,7 +9768,7 @@ Accepts a combination of the following flags:

 @table @samp
 @item color_range
-Detect if the source countains luma pixels outside the limited (MPEG) range,
+Detect if the source contains luma pixels outside the limited (MPEG) range,
 which indicates that this is a full range YUV source.
 @item alpha_mode
 Detect if the source contains color values above the alpha channel, which
@@ -19817,7 +19817,7 @@ To enable the compilation of this filter, you need to configure FFmpeg with
 @code{--enable-libqrencode}.

 The QR code is generated from the provided text or text pattern. The
-corresponding QR code is scaled and overlayed into the video output according to
+corresponding QR code is scaled and overlaid into the video output according to
 the specified options.

 In case no text is specified, no QR code is overlaied.
@@ -23146,7 +23146,7 @@ by progressively selecting a different column from each input frame.

 The end result is a sort of inverted parallax, so that far away objects move
 much faster that the ones in the front. The ideal conditions for this video
-effect are when there is either very little motion and the backgroud is static,
+effect are when there is either very little motion and the background is static,
 or when there is a lot of motion and a very wide depth of field (e.g. wide
 panorama, while moving on a train).

@@ -23174,7 +23174,7 @@ How many columns should be inserted before end of filtering.

 Normally the filter shifts and tilts from the very first frame, and stops when
 the last one is received. However, before filtering starts, normal video may
-be preseved, so that the effect is slowly shifted in its place. Similarly,
+be preserved, so that the effect is slowly shifted in its place. Similarly,
 the last video frame may be reconstructed at the end. Alternatively it is
 possible to just start and end with black.

@@ -25495,7 +25495,7 @@ pixel formats are not RGB.
 @item fitmode, fm
 Set sample aspect ratio of video output frames.
 Can be used to configure waveform so it is not
-streched too much in one of directions.
+stretched too much in one of directions.

 @table @samp
 @item none
@@ -162,7 +162,7 @@ Then pass @code{--enable-libmp3lame} to configure to enable it.

 @section LCEVCdec

-FFmpeg can make use of the liblcevc_dec library for LCEVC enhacement layer
+FFmpeg can make use of the liblcevc_dec library for LCEVC enhancement layer
 decoding on supported bitstreams.

 Go to @url{https://github.com/v-novaltd/LCEVCdec} and follow the instructions
@@ -704,7 +704,7 @@ Win32 GDI-based screen capture device.

 This device allows you to capture a region of the display on Windows.

-Amongst options for the imput filenames are such elements as:
+Amongst options for the input filenames are such elements as:
 @example
 desktop
 @end example
@@ -319,7 +319,7 @@ This is the same as the @samp{vob} muxer with a few differences.
 @table @option
 @item muxrate @var{rate}
 Set user-defined mux rate expressed as a number of bits/s. If not
-specied the automatically computed mux rate is employed. Default value
+specified the automatically computed mux rate is employed. Default value
 is @code{0}.

 @item preload @var{delay}
@@ -772,7 +772,7 @@ Force a delay expressed in seconds after the last frame of each
 repetition. Default value is @code{0.0}.

 @item plays @var{repetitions}
-specify how many times to play the content, @code{0} causes an infinte
+specify how many times to play the content, @code{0} causes an infinite
 loop, with @code{1} there is no loop
 @end table

@@ -1770,7 +1770,7 @@ for looping indefinitely (default).
 @item final_delay @var{delay}
 Force the delay (expressed in centiseconds) after the last frame. Each frame
 ends with a delay until the next frame. The default is @code{-1}, which is a
-special value to tell the muxer to re-use the previous delay. In case of a
+special value to tell the muxer to reuse the previous delay. In case of a
 loop, you might want to customize this value to mark a pause for instance.
 @end table

@@ -1856,7 +1856,7 @@ This muxer creates an .f4m (Adobe Flash Media Manifest File) manifest, an .abst
 (Adobe Bootstrap File) for each stream, and segment files in a directory
 specified as the output.

-These needs to be accessed by an HDS player throuhg HTTPS for it to be able to
+These needs to be accessed by an HDS player through HTTPS for it to be able to
 perform playback on the generated stream.

 @subsection Options
@@ -2538,7 +2538,7 @@ these applications, audio may be played back on a wide range of devices, e.g.,
 headphones, mobile phones, tablets, TVs, sound bars, home theater systems, and
 big screens.

-This format was promoted and desgined by Alliance for Open Media.
+This format was promoted and designed by Alliance for Open Media.

 For more information about this format, see @url{https://aomedia.org/iamf/}.

@@ -71,7 +71,7 @@ client may also set a user/password for authentication. The default for both
 fields is "guest". Name of virtual host on broker can be set with vhost. The
 default value is "/".

-Muliple subscribers may stream from the broker using the command:
+Multiple subscribers may stream from the broker using the command:
 @example
 ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost]
 @end example
@@ -607,7 +607,7 @@ The resource requested by a client, when the experimental HTTP server is in use.
 The HTTP code returned to the client, when the experimental HTTP server is in use.

 @item short_seek_size
-Set the threshold, in bytes, for when a readahead should be prefered over a seek and
+Set the threshold, in bytes, for when a readahead should be preferred over a seek and
 new HTTP request. This is useful, for example, to make sure the same connection
 is used for reading large video packets with small audio packets in between.

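A sketch of setting this option programmatically when opening an HTTP input may help; the URL handling, threshold value, and helper name are illustrative assumptions, while avio_open2() and the option name come from the documentation above:

    #include <libavformat/avio.h>
    #include <libavutil/dict.h>

    static int open_with_short_seek(AVIOContext **pb, const char *url)
    {
        AVDictionary *opts = NULL;
        int ret;

        /* prefer readahead over a new request for seeks shorter than 64 KiB */
        av_dict_set(&opts, "short_seek_size", "65536", 0);
        ret = avio_open2(pb, url, AVIO_FLAG_READ, NULL, &opts);
        av_dict_free(&opts);
        return ret;
    }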
@@ -295,7 +295,7 @@ for improvements:
 - op_scale_1110
 - op_scale_1111

-This reflects the four different arangements of pixel components that are
+This reflects the four different arrangements of pixel components that are
 typically present (or absent). While best for performance, it does turn into
 a bit of a chore when implementing these kernels.

@@ -26,7 +26,7 @@ ifdef CONFIG_SHARED
 # for purely shared builds.
 # Test programs are always statically linked against their library
 # to be able to access their library's internals, even with shared builds.
-# Yet linking against dependend libraries still uses dynamic linking.
+# Yet linking against dependent libraries still uses dynamic linking.
 # This means that we are in the scenario described above.
 # In case only static libs are used, the linker will only use
 # one of these copies; this depends on the duplicated object files
@@ -1594,7 +1594,7 @@ static int dec_open(DecoderPriv *dp, AVDictionary **dec_opts,
 if (o->flags & DECODER_FLAG_BITEXACT)
 dp->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT;

-// we apply cropping outselves
+// we apply cropping ourselves
 dp->apply_cropping = dp->dec_ctx->apply_cropping;
 dp->dec_ctx->apply_cropping = 0;

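The "apply cropping ourselves" pattern referred to above can be sketched with public API calls; the helper names are illustrative and the real fftools code differs:

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    /* Before opening the decoder: keep crop metadata on output frames
     * instead of having the decoder crop them itself. */
    static void disable_decoder_cropping(AVCodecContext *dec_ctx)
    {
        dec_ctx->apply_cropping = 0;
    }

    /* Later, on a decoded frame: apply frame->crop_* manually. */
    static int crop_received_frame(AVFrame *frame)
    {
        return av_frame_apply_cropping(frame, 0);
    }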
@@ -2728,7 +2728,7 @@ static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb
 if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
 /* if we have hit the end of the current displayed subpicture,
 or if we need to initialize the system, update the
-overlayed subpicture and its start/end times */
+overlaid subpicture and its start/end times */
 sub2video_update(ifp, pts2 + 1, NULL);
 else
 sub2video_push_ref(ifp, pts2);
@@ -3141,7 +3141,7 @@ static int filter_thread(void *arg)
 goto finish;

 read_frames:
-// retrieve all newly avalable frames
+// retrieve all newly available frames
 ret = read_frames(fg, &fgt, fgt.frame);
 if (ret == AVERROR_EOF) {
 av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
@@ -1899,7 +1899,7 @@ static int send_to_mux(Scheduler *sch, SchMux *mux, unsigned stream_idx,

 update_schedule:
 // TODO: use atomics to check whether this changes trailing dts
-// to avoid locking unnecesarily
+// to avoid locking unnecessarily
 if (dts != AV_NOPTS_VALUE || !pkt) {
 pthread_mutex_lock(&sch->schedule_lock);

@@ -355,7 +355,7 @@ enum DemuxSendFlags {
 * @retval "non-negative value" success
 * @retval AVERROR_EOF all consumers for the stream are done
 * @retval AVERROR_EXIT all consumers are done, should terminate demuxing
-* @retval "anoter negative error code" other failure
+* @retval "another negative error code" other failure
 */
 int sch_demux_send(Scheduler *sch, unsigned demux_idx, struct AVPacket *pkt,
 unsigned flags);
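A sketch of how a caller might act on the return values documented above; the helper and its surroundings are illustrative assumptions, not the actual fftools demuxer code, and the Scheduler type and AVERROR_* codes are taken from this header and libavutil/error.h:

    /* Assumes the declarations from this header (Scheduler, sch_demux_send). */
    static int forward_demuxed_packet(Scheduler *sch, unsigned demux_idx,
                                      struct AVPacket *pkt)
    {
        int ret = sch_demux_send(sch, demux_idx, pkt, 0);

        if (ret == AVERROR_EOF)
            return 0;            /* consumers of this stream are done; keep demuxing others */
        if (ret == AVERROR_EXIT)
            return AVERROR_EXIT; /* every consumer is done; caller should stop demuxing */
        return ret;              /* non-negative on success, other negatives are real errors */
    }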
@@ -436,7 +436,7 @@ void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
 *
 * @retval "non-negative value" success
 * @retval AVERROR_EOF all consumers are done
-* @retval "anoter negative error code" other failure
+* @retval "another negative error code" other failure
 */
 int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx,
 struct AVFrame *frame);
@@ -2381,7 +2381,7 @@ static int open_input_file(InputFile *ifile, const char *filename,
 exit(1);

 if (do_show_log) {
-// For loging it is needed to disable at least frame threads as otherwise
+// For logging it is needed to disable at least frame threads as otherwise
 // the log information would need to be reordered and matches up to contexts and frames
 // That is in fact possible but not trivial
 av_dict_set(&codec_opts, "threads", "1", 0);
@@ -395,8 +395,8 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
 FFSWAP(struct elem_to_channel, e2c_vec[6], e2c_vec[4]); // FLc & FRc fifth (final), SiL & SiR seventh
 FFSWAP(struct elem_to_channel, e2c_vec[7], e2c_vec[6]); // LFE2 seventh (final), SiL & SiR eight (final)
 FFSWAP(struct elem_to_channel, e2c_vec[9], e2c_vec[8]); // TpFL & TpFR ninth (final), TFC tenth (final)
-FFSWAP(struct elem_to_channel, e2c_vec[11], e2c_vec[10]); // TC eleventh (final), TpSiL & TpSiR twelth
-FFSWAP(struct elem_to_channel, e2c_vec[12], e2c_vec[11]); // TpBL & TpBR twelth (final), TpSiL & TpSiR thirteenth (final)
+FFSWAP(struct elem_to_channel, e2c_vec[11], e2c_vec[10]); // TC eleventh (final), TpSiL & TpSiR twelfth
+FFSWAP(struct elem_to_channel, e2c_vec[12], e2c_vec[11]); // TpBL & TpBR twelfth (final), TpSiL & TpSiR thirteenth (final)
 } else {
 // For everything else, utilize the AV channel position define as a
 // stable sort.
@@ -1728,7 +1728,7 @@ int ff_aac_decode_ics(AACDecContext *ac, SingleChannelElement *sce,
 }
 }
 // I see no textual basis in the spec for this occurring after SSR gain
-// control, but this is what both reference and real implmentations do
+// control, but this is what both reference and real implementations do
 if (tns->present && er_syntax) {
 ret = ff_aac_decode_tns(ac, tns, gb, ics);
 if (ret < 0)
@@ -311,14 +311,14 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,

 /**
 * Scale uplims to match rate distortion to quality
-* bu applying noisy band depriorization and tonal band priorization.
+* bu applying noisy band depriorization and tonal band prioritization.
 * Maxval-energy ratio gives us an idea of how noisy/tonal the band is.
 * If maxval^2 ~ energy, then that band is mostly noise, and we can relax
 * rate distortion requirements.
 */
 memcpy(euplims, uplims, sizeof(euplims));
 for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
-/** psy already priorizes transients to some extent */
+/** psy already prioritizes transients to some extent */
 float de_psy_factor = (sce->ics.num_windows > 1) ? 8.0f / sce->ics.group_len[w] : 1.0f;
 start = w*128;
 for (g = 0; g < sce->ics.num_swb; g++) {
@@ -331,7 +331,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
 nzslope * cleanup_factor);
 energy2uplim *= de_psy_factor;
 if (!(avctx->flags & AV_CODEC_FLAG_QSCALE)) {
-/** In ABR, we need to priorize less and let rate control do its thing */
+/** In ABR, we need to prioritize less and let rate control do its thing */
 energy2uplim = sqrtf(energy2uplim);
 }
 energy2uplim = FFMAX(0.015625f, FFMIN(1.0f, energy2uplim));
@@ -345,7 +345,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
 2.0f);
 energy2uplim *= de_psy_factor;
 if (!(avctx->flags & AV_CODEC_FLAG_QSCALE)) {
-/** In ABR, we need to priorize less and let rate control do its thing */
+/** In ABR, we need to prioritize less and let rate control do its thing */
 energy2uplim = sqrtf(energy2uplim);
 }
 energy2uplim = FFMAX(0.015625f, FFMIN(1.0f, energy2uplim));
@@ -168,7 +168,7 @@ typedef struct AacPsyContext{
 * LAME psy model preset struct
 */
 typedef struct PsyLamePreset {
-int quality; ///< Quality to map the rest of the vaules to.
+int quality; ///< Quality to map the rest of the values to.
 /* This is overloaded to be both kbps per channel in ABR mode, and
 * requested quality in constant quality mode.
 */
@@ -59,7 +59,7 @@ static void sbr_turnoff(SpectralBandReplication *sbr) {
 sbr->start = 0;
 sbr->usac = 0;
 sbr->ready_for_dequant = 0;
-// Init defults used in pure upsampling mode
+// Init defaults used in pure upsampling mode
 sbr->kx[1] = 32; //Typo in spec, kx' inits to 32
 sbr->m[1] = 0;
 // Reset values for first SBR header
@@ -534,7 +534,7 @@ function ff_pix_abs16_y2_neon, export=1
 ld1 {v2.16b}, [x2], x3 // Load pix3 for first iteration
 ld1 {v0.16b}, [x1], x3 // Load pix1 for first iteration
 urhadd v30.16b, v1.16b, v2.16b // Rounding halving add, first iteration
-ld1 {v5.16b}, [x2], x3 // Load pix3 for second iteartion
+ld1 {v5.16b}, [x2], x3 // Load pix3 for second iteration
 uabal v29.8h, v0.8b, v30.8b // Absolute difference of lower half, first iteration
 uabal2 v28.8h, v0.16b, v30.16b // Absolute difference of upper half, first iteration
 ld1 {v3.16b}, [x1], x3 // Load pix1 for second iteration
@@ -606,7 +606,7 @@ function sse16_neon, export=1
 uabd v27.16b, v2.16b, v3.16b // Absolute difference, second iteration
 uadalp v17.4s, v29.8h // Pairwise add, first iteration
 ld1 {v4.16b}, [x1], x3 // Load pix1 for third iteration
-umull v26.8h, v27.8b, v27.8b // Mulitply lower half, second iteration
+umull v26.8h, v27.8b, v27.8b // Multiply lower half, second iteration
 umull2 v25.8h, v27.16b, v27.16b // Multiply upper half, second iteration
 ld1 {v5.16b}, [x2], x3 // Load pix2 for third iteration
 uadalp v17.4s, v26.8h // Pairwise add and accumulate, second iteration
@@ -616,7 +616,7 @@ function sse16_neon, export=1
 umull v23.8h, v24.8b, v24.8b // Multiply lower half, third iteration
 umull2 v22.8h, v24.16b, v24.16b // Multiply upper half, third iteration
 uadalp v17.4s, v23.8h // Pairwise add and accumulate, third iteration
-ld1 {v7.16b}, [x2], x3 // Load pix2 for fouth iteration
+ld1 {v7.16b}, [x2], x3 // Load pix2 for fourth iteration
 uadalp v17.4s, v22.8h // Pairwise add and accumulate, third iteration
 uabd v21.16b, v6.16b, v7.16b // Absolute difference, fourth iteration
 uadalp v17.4s, v28.8h // Pairwise add and accumulate, first iteration
@@ -748,7 +748,7 @@ function sse4_neon, export=1
 uabdl v28.8h, v4.8b, v5.8b // Absolute difference, third iteration
 umlal v16.4s, v29.4h, v29.4h // Multiply and accumulate, second iteration
 sub w4, w4, #4
-uabdl v27.8h, v6.8b, v7.8b // Absolue difference, fourth iteration
+uabdl v27.8h, v6.8b, v7.8b // Absolute difference, fourth iteration
 umlal v16.4s, v28.4h, v28.4h // Multiply and accumulate, third iteration
 cmp w4, #4
 umlal v16.4s, v27.4h, v27.4h // Multiply and accumulate, fourth iteration
@@ -1593,7 +1593,7 @@ function sse16_neon_dotprod, export=1
 uabd v24.16b, v4.16b, v5.16b // Absolute difference, third iteration
 ld1 {v6.16b}, [x1], x3 // Load pix1 for fourth iteration
 udot v17.4s, v24.16b, v24.16b
-ld1 {v7.16b}, [x2], x3 // Load pix2 for fouth iteration
+ld1 {v7.16b}, [x2], x3 // Load pix2 for fourth iteration
 uabd v21.16b, v6.16b, v7.16b // Absolute difference, fourth iteration
 sub w4, w4, #4 // h -= 4
 udot v17.4s, v21.16b, v21.16b
@@ -224,7 +224,7 @@ typedef struct ALSDecContext {
 int32_t *quant_cof_buffer; ///< contains all quantized parcor coefficients
 int32_t **lpc_cof; ///< coefficients of the direct form prediction filter for a channel
 int32_t *lpc_cof_buffer; ///< contains all coefficients of the direct form prediction filter
-int32_t *lpc_cof_reversed_buffer; ///< temporary buffer to set up a reversed versio of lpc_cof_buffer
+int32_t *lpc_cof_reversed_buffer; ///< temporary buffer to set up a reversed version of lpc_cof_buffer
 ALSChannelData **chan_data; ///< channel data for multi-channel correlation
 ALSChannelData *chan_data_buffer; ///< contains channel data for all channels
 int *reverted_channels; ///< stores a flag for each reverted channel
@ -1558,7 +1558,7 @@ static int read_diff_float_data(ALSDecContext *ctx, unsigned int ra_frame) {
|
||||||
if (highest_byte) {
|
if (highest_byte) {
|
||||||
for (i = 0; i < frame_length; ++i) {
|
for (i = 0; i < frame_length; ++i) {
|
||||||
if (ctx->raw_samples[c][i] != 0) {
|
if (ctx->raw_samples[c][i] != 0) {
|
||||||
//The following logic is taken from Tabel 14.45 and 14.46 from the ISO spec
|
//The following logic is taken from Table 14.45 and 14.46 from the ISO spec
|
||||||
if (av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
|
if (av_cmp_sf_ieee754(acf[c], FLOAT_1)) {
|
||||||
nbits[i] = 23 - av_log2(abs(ctx->raw_samples[c][i]));
|
nbits[i] = 23 - av_log2(abs(ctx->raw_samples[c][i]));
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@@ -656,7 +656,7 @@ static int amf_decode_frame(AVCodecContext *avctx, struct AVFrame *frame)
 }else
 return AVERROR_EOF;
 } else {
-av_log(avctx, AV_LOG_ERROR, "Unkown result from QueryOutput %d\n", res);
+av_log(avctx, AV_LOG_ERROR, "Unknown result from QueryOutput %d\n", res);
 }
 return got_frame ? 0 : AVERROR(EAGAIN);
 }

@@ -32,11 +32,11 @@ static const AVOption options[] = {

 { "usage", "Set the encoding usage", OFFSET(usage), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY, VE, .unit = "usage" },
 { "transcoding", "Generic Transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, 0, 0, VE, .unit = "usage" },
-{ "ultralowlatency", "ultra low latency trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, .unit = "usage" },
+{ "ultralowlatency", "ultra low latency transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, .unit = "usage" },
 { "lowlatency", "Low latency usecase", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY }, 0, 0, VE, .unit = "usage" },
 { "webcam", "Webcam", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_WEBCAM }, 0, 0, VE, .unit = "usage" },
-{ "high_quality", "high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" },
-{ "lowlatency_high_quality","low latency yet high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" },
+{ "high_quality", "high quality transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" },
+{ "lowlatency_high_quality","low latency yet high quality transcoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY_HIGH_QUALITY }, 0, 0, VE, .unit = "usage" },

 { "bitdepth", "Set color bit deph", OFFSET(bit_depth), AV_OPT_TYPE_INT, {.i64 = AMF_COLOR_BIT_DEPTH_UNDEFINED }, AMF_COLOR_BIT_DEPTH_UNDEFINED, AMF_COLOR_BIT_DEPTH_10, VE, .unit = "bitdepth" },
 { "8", "8 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_COLOR_BIT_DEPTH_8 }, 0, 0, VE, .unit = "bitdepth" },
@@ -88,7 +88,7 @@ static const AVOption options[] = {
 { "rc", "Set the rate control mode", OFFSET(rate_control_mode), AV_OPT_TYPE_INT, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_UNKNOWN }, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_UNKNOWN, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_HIGH_QUALITY_CBR, VE, .unit = "rc" },
 { "cqp", "Constant Quantization Parameter", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP }, 0, 0, VE, .unit = "rc" },
 { "vbr_latency", "Latency Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
-{ "vbr_peak", "Peak Contrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
+{ "vbr_peak", "Peak Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
 { "cbr", "Constant Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CBR }, 0, 0, VE, .unit = "rc" },
 { "qvbr", "Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
 { "hqvbr", "High Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_HIGH_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
@@ -148,16 +148,16 @@ static const AVOption options[] = {
 { "pa_scene_change_detection_enable", "Enable scene change detection", OFFSET(pa_scene_change_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_scene_change_detection_sensitivity", "Set the sensitivity of scene change detection", OFFSET(pa_scene_change_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH, VE, .unit = "scene_change_sensitivity" },
-{ "low", "low scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "medium", "medium scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "high", "high scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "low", "low scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "medium", "medium scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "high", "high scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },

 { "pa_static_scene_detection_enable", "Enable static scene detection", OFFSET(pa_static_scene_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_static_scene_detection_sensitivity", "Set the sensitivity of static scene detection", OFFSET(pa_static_scene_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH, VE , .unit = "static_scene_sensitivity" },
-{ "low", "low static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "medium", "medium static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "high", "high static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "low", "low static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "medium", "medium static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "high", "high static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },

 { "pa_initial_qp_after_scene_change", "The QP value that is used immediately after a scene change", OFFSET(pa_initial_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },
 { "pa_max_qp_before_force_skip", "The QP threshold to allow a skip frame", OFFSET(pa_max_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },
@@ -267,7 +267,7 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
 // Color Transfer Characteristics (AMF matches ISO/IEC)
 if(avctx->color_primaries != AVCOL_PRI_UNSPECIFIED && (pix_fmt == AV_PIX_FMT_NV12 || pix_fmt == AV_PIX_FMT_P010)){
 // if input is YUV, color_primaries are for VUI only
-// AMF VCN color coversion supports only specifc output primaries BT2020 for 10-bit and BT709 for 8-bit
+// AMF VCN color conversion supports only specific output primaries BT2020 for 10-bit and BT709 for 8-bit
 // vpp_amf supports more
 AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, avctx->color_trc);
 }
@@ -275,7 +275,7 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
 // Color Primaries (AMF matches ISO/IEC)
 if(avctx->color_primaries != AVCOL_PRI_UNSPECIFIED || pix_fmt == AV_PIX_FMT_NV12 || pix_fmt == AV_PIX_FMT_P010 )
 {
-// AMF VCN color coversion supports only specifc primaries BT2020 for 10-bit and BT709 for 8-bit
+// AMF VCN color conversion supports only specific primaries BT2020 for 10-bit and BT709 for 8-bit
 // vpp_amf supports more
 AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, avctx->color_primaries);
 }
@@ -657,7 +657,7 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
 buffer->pVtbl->Release(buffer);
 var.pInterface->pVtbl->Release(var.pInterface);

-//processing crop informaiton according to alignment
+//processing crop information according to alignment
 if (ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_AV1_CAP_WIDTH_ALIGNMENT_FACTOR_LOCAL, &var) != AMF_OK)
 // assume older driver and Navi3x
 width_alignment_factor = 64;

@@ -82,7 +82,7 @@ static const AVOption options[] = {
 { "rc", "Rate Control Method", OFFSET(rate_control_mode), AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_UNKNOWN }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_UNKNOWN, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_HIGH_QUALITY_CBR, VE, .unit = "rc" },
 { "cqp", "Constant Quantization Parameter", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP }, 0, 0, VE, .unit = "rc" },
 { "cbr", "Constant Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR }, 0, 0, VE, .unit = "rc" },
-{ "vbr_peak", "Peak Contrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
+{ "vbr_peak", "Peak Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
 { "vbr_latency", "Latency Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
 { "qvbr", "Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
 { "hqvbr", "High Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_HIGH_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
@@ -151,16 +151,16 @@ static const AVOption options[] = {
 { "pa_scene_change_detection_enable", "Enable scene change detection", OFFSET(pa_scene_change_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_scene_change_detection_sensitivity", "Set the sensitivity of scene change detection", OFFSET(pa_scene_change_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH, VE, .unit = "scene_change_sensitivity" },
-{ "low", "low scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "medium", "medium scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "high", "high scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "low", "low scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "medium", "medium scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "high", "high scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },

 { "pa_static_scene_detection_enable", "Enable static scene detection", OFFSET(pa_static_scene_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_static_scene_detection_sensitivity", "Set the sensitivity of static scene detection", OFFSET(pa_static_scene_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH, VE , .unit = "static_scene_sensitivity" },
-{ "low", "low static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "medium", "medium static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "high", "high static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "low", "low static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "medium", "medium static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "high", "high static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },

 { "pa_initial_qp_after_scene_change", "The QP value that is used immediately after a scene change", OFFSET(pa_initial_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },
 { "pa_max_qp_before_force_skip", "The QP threshold to allow a skip frame", OFFSET(pa_max_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },

@@ -74,7 +74,7 @@ static const AVOption options[] = {
 { "rc", "Set the rate control mode", OFFSET(rate_control_mode), AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_UNKNOWN }, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_UNKNOWN, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_HIGH_QUALITY_CBR, VE, .unit = "rc" },
 { "cqp", "Constant Quantization Parameter", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP }, 0, 0, VE, .unit = "rc" },
 { "cbr", "Constant Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR }, 0, 0, VE, .unit = "rc" },
-{ "vbr_peak", "Peak Contrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
+{ "vbr_peak", "Peak Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
 { "vbr_latency", "Latency Constrained Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, .unit = "rc" },
 { "qvbr", "Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
 { "hqvbr", "High Quality Variable Bitrate", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_HIGH_QUALITY_VBR }, 0, 0, VE, .unit = "rc" },
@@ -121,16 +121,16 @@ static const AVOption options[] = {
 { "pa_scene_change_detection_enable", "Enable scene change detection", OFFSET(pa_scene_change_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_scene_change_detection_sensitivity", "Set the sensitivity of scene change detection", OFFSET(pa_scene_change_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH, VE, .unit = "scene_change_sensitivity" },
-{ "low", "low scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "medium", "medium scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
-{ "high", "high scene change dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "low", "low scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "medium", "medium scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "scene_change_sensitivity" },
+{ "high", "high scene change detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_SCENE_CHANGE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "scene_change_sensitivity" },

 { "pa_static_scene_detection_enable", "Enable static scene detection", OFFSET(pa_static_scene_detection), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },

 { "pa_static_scene_detection_sensitivity", "Set the sensitivity of static scene detection", OFFSET(pa_static_scene_detection_sensitivity), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH, VE , .unit = "static_scene_sensitivity" },
-{ "low", "low static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "medium", "medium static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
-{ "high", "high static scene dectection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "low", "low static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_LOW }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "medium", "medium static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_MEDIUM }, 0, 0, VE, .unit = "static_scene_sensitivity" },
+{ "high", "high static scene detection sensitivity", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_PA_STATIC_SCENE_DETECTION_SENSITIVITY_HIGH }, 0, 0, VE, .unit = "static_scene_sensitivity" },

 { "pa_initial_qp_after_scene_change", "The QP value that is used immediately after a scene change", OFFSET(pa_initial_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },
 { "pa_max_qp_before_force_skip", "The QP threshold to allow a skip frame", OFFSET(pa_max_qp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 51, VE },
@@ -261,7 +261,7 @@ static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
 // Color Transfer Characteristics (AMF matches ISO/IEC)
 if(avctx->color_trc != AVCOL_TRC_UNSPECIFIED && (pix_fmt == AV_PIX_FMT_NV12 || pix_fmt == AV_PIX_FMT_P010)){
 // if input is YUV, color_trc is for VUI only - any value
-// AMF VCN color coversion supports only specifc output transfer characterstic SMPTE2084 for 10-bit and BT709 for 8-bit
+// AMF VCN color conversion supports only specific output transfer characteristic SMPTE2084 for 10-bit and BT709 for 8-bit
 // vpp_amf supports more
 AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, avctx->color_trc);
 }
@@ -269,7 +269,7 @@ static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
 // Color Primaries (AMF matches ISO/IEC)
 if(avctx->color_primaries != AVCOL_PRI_UNSPECIFIED && (pix_fmt == AV_PIX_FMT_NV12 || pix_fmt == AV_PIX_FMT_P010)){
 // if input is YUV, color_primaries are for VUI only
-// AMF VCN color coversion supports only specifc output primaries BT2020 for 10-bit and BT709 for 8-bit
+// AMF VCN color conversion supports only specific output primaries BT2020 for 10-bit and BT709 for 8-bit
 // vpp_amf supports more
 AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, avctx->color_primaries);
 }

@@ -108,7 +108,7 @@ typedef const struct {

 extern ConstTables ff_aptx_quant_tables[2][NB_SUBBANDS];

-/* Rounded right shift with optionnal clipping */
+/* Rounded right shift with optional clipping */
 #define RSHIFT_SIZE(size) \
 av_always_inline \
 static int##size##_t rshift##size(int##size##_t value, int shift) \

@@ -783,7 +783,7 @@ endfunc
 transpose8_4x4 d29, d3, d21, d23
 store16 d22, d23, d20, d21, d2, d3, d28, d29, r8

-@ reload multiplication coefficiens to q1
+@ reload multiplication coefficients to q1
 vld1.s16 {q1}, [r9, :128]
 .endm

@@ -100,7 +100,7 @@ function ff_hevc_sao_edge_filter_neon_8, export=1
 mov r12, r4 // r12 = height
 mov r6, r0 // r6 = r0 = dst
 mov r7, r1 // r7 = r1 = src
-vld1.8 {d0}, [r11] // edge_idx tabel load in d0 5x8bit
+vld1.8 {d0}, [r11] // edge_idx table load in d0 5x8bit
 vld1.16 {q1}, [r10] // sao_offset_val table load in q1, 5x16bit
 vmov.u8 d1, #2
 vmov.u16 q2, #1

@@ -1,5 +1,5 @@
 /*
-* SSA/ASS spliting functions
+* SSA/ASS splitting functions
 * Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.

@@ -1,5 +1,5 @@
 /*
-* SSA/ASS spliting functions
+* SSA/ASS splitting functions
 * Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.

@@ -114,7 +114,7 @@ av_cold void ff_atrac3p_init_dsp_static(void)
 * @param[in] fdsp ptr to floating-point DSP context
 * @param[in] invert_phase flag indicating 180° phase shift
 * @param[in] reg_offset region offset for trimming envelope data
-* @param[out] out receives sythesized data
+* @param[out] out receives synthesized data
 */
 static void waves_synth(Atrac3pWaveSynthParams *synth_param,
 Atrac3pWavesData *waves_info,

@@ -80,7 +80,7 @@ typedef struct ATRAC9BlockData {
 int cpe_base_channel;
 int is_signs[30];

-int reuseable;
+int reusable;

 } ATRAC9BlockData;

@@ -689,7 +689,7 @@ static int atrac9_decode_block(ATRAC9Context *s, GetBitContext *gb,
 if (!reuse_params) {
 int stereo_band, ext_band;
 const int min_band_count = s->samplerate_idx > 7 ? 1 : 3;
-b->reuseable = 0;
+b->reusable = 0;
 b->band_count = get_bits(gb, 4) + min_band_count;
 b->q_unit_cnt = at9_tab_band_q_unit_map[b->band_count];

@@ -721,9 +721,9 @@ static int atrac9_decode_block(ATRAC9Context *s, GetBitContext *gb,
 }
 b->band_ext_q_unit = at9_tab_band_q_unit_map[ext_band];
 }
-b->reuseable = 1;
+b->reusable = 1;
 }
-if (!b->reuseable) {
+if (!b->reusable) {
 av_log(s->avctx, AV_LOG_ERROR, "invalid block reused!\n");
 return AVERROR_INVALIDDATA;
 }

@@ -225,7 +225,7 @@ typedef struct RcOverride{
 #define AV_CODEC_FLAG_QPEL (1 << 4)
 /**
 * Request the encoder to output reconstructed frames, i.e.\ frames that would
-* be produced by decoding the encoded bistream. These frames may be retrieved
+* be produced by decoding the encoded bitstream. These frames may be retrieved
 * by calling avcodec_receive_frame() immediately after a successful call to
 * avcodec_receive_packet().
 *
@@ -2900,7 +2900,7 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
 *
 * @note for encoders, this function will only do something if the encoder
 * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
-* will drain any remaining packets, and can then be re-used for a different
+* will drain any remaining packets, and can then be reused for a different
 * stream (as opposed to sending a null frame which will leave the encoder
 * in a permanent EOF state after draining). This can be desirable if the
 * cost of tearing down and replacing the encoder instance is high.

@@ -80,7 +80,7 @@ static int h266_metadata_update_fragment(AVBSFContext *bsf, AVPacket *pkt,
 }
 }
 if (!ph) {
-av_log(bsf, AV_LOG_ERROR, "no avaliable picture header");
+av_log(bsf, AV_LOG_ERROR, "no available picture header");
 return AVERROR_INVALIDDATA;
 }

@@ -1184,7 +1184,7 @@ static int decode_seq_header(AVSContext *h)
 h->profile = get_bits(&h->gb, 8);
 if (h->profile != 0x20) {
 avpriv_report_missing_feature(h->avctx,
-"only supprt JiZhun profile");
+"only support JiZhun profile");
 return AVERROR_PATCHWELCOME;
 }
 h->level = get_bits(&h->gb, 8);

@@ -516,7 +516,7 @@ enum CbsDiscardFlags {
 };

 /**
-* Discard units accroding to 'skip'.
+* Discard units according to 'skip'.
 */
 void CBS_FUNC(discard_units)(CodedBitstreamContext *ctx,
 CodedBitstreamFragment *frag,

@@ -1725,7 +1725,7 @@ static int FUNC(pps) (CodedBitstreamContext *ctx, RWContext *rw,
 current->pps_pic_height_in_luma_samples !=
 sps->sps_pic_height_max_in_luma_samples)) {
 av_log(ctx->log_ctx, AV_LOG_ERROR,
-"Resoltuion change is not allowed, "
+"Resolution change is not allowed, "
 "in max resolution (%ux%u) mismatched with pps(%ux%u).\n",
 sps->sps_pic_width_max_in_luma_samples,
 sps->sps_pic_height_max_in_luma_samples,

@@ -273,7 +273,7 @@ int CBS_FUNC(write_signed)(CodedBitstreamContext *ctx, PutBitContext *pbc,
 } while (0)

 // End of a syntax element which is made up of subelements which
-// are aleady traced, so we are only showing the value.
+// are already traced, so we are only showing the value.
 #define CBS_TRACE_READ_END_VALUE_ONLY() \
 do { \
 if (ctx->trace_enable) { \
@@ -310,7 +310,7 @@ int CBS_FUNC(write_signed)(CodedBitstreamContext *ctx, PutBitContext *pbc,
 } while (0)

 // End of a syntax element which is made up of subelements which are
-// aleady traced, so we are only showing the value. This forges a
+// already traced, so we are only showing the value. This forges a
 // PutBitContext to point to the position of the start of the syntax
 // element, but the other state doesn't matter because length is zero.
 #define CBS_TRACE_WRITE_END_VALUE_ONLY() \

@@ -814,7 +814,7 @@ static int process_cc608(CCaptionSubContext *ctx, uint8_t hi, uint8_t lo)
 break;
 case 0x2e:
 /* erase buffered (non displayed) memory */
-// Only in realtime mode. In buffered mode, we re-use the inactive screen
+// Only in realtime mode. In buffered mode, we reuse the inactive screen
 // for our own buffering.
 if (ctx->real_time) {
 struct Screen *screen = ctx->screen + !ctx->active_screen;

@@ -148,7 +148,7 @@ typedef struct AVCodecParameters {
 * durations. Should be set to { 0, 1 } when some frames have differing
 * durations or if the value is not known.
 *
-* @note This field correponds to values that are stored in codec-level
+* @note This field corresponds to values that are stored in codec-level
 * headers and is typically overridden by container/transport-layer
 * timestamps, when available. It should thus be used only as a last resort,
 * when no higher-level timing information is available.

@@ -311,7 +311,7 @@ static int chs_parse_header(DCAXllDecoder *s, DCAXllChSet *c, DCAExssAsset *asse
 b->highest_pred_order = b->adapt_pred_order[i];
 }
 if (b->highest_pred_order > s->nsegsamples) {
-av_log(s->avctx, AV_LOG_ERROR, "Invalid XLL adaptive predicition order\n");
+av_log(s->avctx, AV_LOG_ERROR, "Invalid XLL adaptive prediction order\n");
 return AVERROR_INVALIDDATA;
 }

@@ -666,7 +666,7 @@ static void chs_filter_band_data(DCAXllDecoder *s, DCAXllChSet *c, int band)
 }
 }

-// Inverse pairwise channel decorrellation
+// Inverse pairwise channel decorrelation
 if (b->decor_enabled) {
 int32_t *tmp[DCA_XLL_CHANNELS_MAX];

@@ -854,7 +854,7 @@ static int init_quantization_noise(DCAEncContext *c, int noise, int forbid_zero)
 if (c->lfe_channel)
 c->consumed_bits += 72;

-/* attempt to guess the bit distribution based on the prevoius frame */
+/* attempt to guess the bit distribution based on the previous frame */
 for (ch = 0; ch < c->fullband_channels; ch++) {
 for (band = 0; band < 32; band++) {
 int snr_cb = c->peak_cb[ch][band] - c->band_masking_cb[band] - noise;

@@ -826,7 +826,7 @@ static int subband_coeffs(const DiracContext *s, int x, int y, int p,
 int level, coef = 0;
 for (level = 0; level < s->wavelet_depth; level++) {
 SliceCoeffs *o = &c[level];
-const SubBand *b = &s->plane[p].band[level][3]; /* orientation doens't matter */
+const SubBand *b = &s->plane[p].band[level][3]; /* orientation doesn't matter */
 o->top = b->height * y / s->num_y;
 o->left = b->width * x / s->num_x;
 o->tot_h = ((b->width * (x + 1)) / s->num_x) - o->left;

@@ -365,7 +365,7 @@ int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size,

 /* Container */
 if (s->cfg.dv_profile == 10 /* dav1.10 */) {
-/* DV inside AV1 re-uses an EMDF container skeleton, but with fixed
+/* DV inside AV1 reuses an EMDF container skeleton, but with fixed
 * values - so we can effectively treat this as a magic byte sequence.
 *
 * The exact fields are, as follows:

@@ -300,7 +300,7 @@ static int cmp_dm_level0(const AVDOVIColorMetadata *dm1,
 sizeof(AVDOVIColorMetadata) -offsetof(AVDOVIColorMetadata, signal_eotf));
 }

-/* Tries to re-use the static ext blocks. May reorder `ext->dm_static` */
+/* Tries to reuse the static ext blocks. May reorder `ext->dm_static` */
 static int try_reuse_ext(DOVIExt *ext, const AVDOVIMetadata *metadata)
 {
 int i, j, idx = 0;

@@ -56,7 +56,7 @@ enum EVCNALUnitType {
 EVC_RSV_VCL_NUT22 = 22,
 EVC_RSV_VCL_NUT23 = 23,
 EVC_SPS_NUT = 24, /* Sequence parameter set */
-EVC_PPS_NUT = 25, /* Picture paremeter set */
+EVC_PPS_NUT = 25, /* Picture parameter set */
 EVC_APS_NUT = 26, /* Adaptation parameter set */
 EVC_FD_NUT = 27, /* Filler data */
 EVC_SEI_NUT = 28, /* Supplemental enhancement information */

@@ -31,7 +31,7 @@
 #include "evc.h"
 #include "evc_ps.h"

-// The sturcture reflects Slice Header RBSP(raw byte sequence payload) layout
+// The structure reflects Slice Header RBSP(raw byte sequence payload) layout
 // @see ISO_IEC_23094-1 section 7.3.2.6
 //
 // The following descriptors specify the parsing process of each element

@@ -102,7 +102,7 @@ typedef struct VUIParameters {
 HRDParameters hrd_parameters;
 } VUIParameters;

-// The sturcture reflects SPS RBSP(raw byte sequence payload) layout
+// The structure reflects SPS RBSP(raw byte sequence payload) layout
 // @see ISO_IEC_23094-1 section 7.3.2.1
 //
 // The following descriptors specify the parsing process of each element

@@ -182,7 +182,7 @@ static evrc_packet_rate buf_size2bitrate(const int buf_size)
 *
 * @param avctx the AV codec context
 * @param buf_size length of the buffer
-* @param buf the bufffer
+* @param buf the buffer
 *
 * @return the bitrate on success,
 * RATE_ERRS if the bitrate cannot be satisfactorily determined

@@ -979,7 +979,7 @@ static av_cold int encode_init_internal(AVCodecContext *avctx)
 if ((ret = ff_ffv1_common_init(avctx, s)) < 0)
 return ret;

-if (s->ac == 1) // Compatbility with common command line usage
+if (s->ac == 1) // Compatibility with common command line usage
 s->ac = AC_RANGE_CUSTOM_TAB;
 else if (s->ac == AC_RANGE_DEFAULT_TAB_FORCE)
 s->ac = AC_RANGE_DEFAULT_TAB;

@@ -58,7 +58,7 @@ typedef struct FLACFrameInfo {
 * @param[out] s where parsed information is stored
 * @param[in] buffer pointer to start of 34-byte streaminfo data
 *
-* @return negative error code on faiure or >= 0 on success
+* @return negative error code on failure or >= 0 on success
 */
 int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
 const uint8_t *buffer);

@@ -211,8 +211,8 @@ static int16_t long_term_filter(AudioDSPContext *adsp, int pitch_delay_int,
 /* Compute signals with non-integer delay k (with 1/8 precision),
 where k is in [0;6] range.
 Entire delay is qual to best_delay+(k+1)/8
-This is archieved by applying an interpolation filter of
-legth 33 to source signal. */
+This is achieved by applying an interpolation filter of
+length 33 to source signal. */
 for (k = 0; k < ANALYZED_FRAC_DELAYS; k++) {
 ff_acelp_interpolate(&delayed_signal[k][0],
 &sig_scaled[RES_PREV_DATA_SIZE - best_delay_int],

@@ -36,7 +36,7 @@
 #define MB_TYPE_REF0 MB_TYPE_CODEC_SPECIFIC
 #define MB_TYPE_8x8DCT 0x01000000

-// This table must be here because scan8[constant] must be known at compiletime
+// This table must be here because scan8[constant] must be known at compile time
 static const uint8_t scan8[16 * 3 + 3] = {
 4 + 1 * 8, 5 + 1 * 8, 4 + 2 * 8, 5 + 2 * 8,
 6 + 1 * 8, 7 + 1 * 8, 6 + 2 * 8, 7 + 2 * 8,

@@ -754,7 +754,7 @@ static int decode_nal_units(H264Context *h, AVBufferRef *buf_ref,
 if (h->cur_pic_ptr->decode_error_flags) {
 /* Frame-threading in use */
 atomic_int *decode_error = h->cur_pic_ptr->decode_error_flags;
-/* Using atomics here is not supposed to provide syncronisation;
+/* Using atomics here is not supposed to provide synchronisation;
 * they are merely used to allow to set decode_error from both
 * decoding threads in case of coded slices. */
 atomic_fetch_or_explicit(decode_error, FF_DECODE_ERROR_DECODE_SLICES,

@@ -541,7 +541,7 @@ typedef struct H264Context {
 * all subsequently output fraames are also marked as recovered
 *
 * In effect, if you want all subsequent DECODED frames marked as recovered, set frame_recovered
-* If you want all subsequent DISPAYED frames marked as recovered, set the frame->recovered
+* If you want all subsequent DISPLAYED frames marked as recovered, set the frame->recovered
 */
 int frame_recovered;

@@ -96,7 +96,7 @@ static void init_slice_c(int8_t out[64][64], uint8_t h, uint8_t v,
 }
 }

-// Deblock horizontal edges by simple attentuation of values
+// Deblock horizontal edges by simple attenuation of values
 for (int y = 0; y < 64; y += 8) {
 for (int x = 0; x < 64; x++) {
 out[y + 0][x] = (out[y + 0][x] * deblock_coeff) >> 7;
@@ -239,7 +239,7 @@ int ff_h274_apply_film_grain(AVFrame *out_frame, const AVFrame *in_frame,

 uint8_t * const out = out_frame->data[c];
 const int out_stride = out_frame->linesize[c];
-int8_t * const grain = out_frame->data[c]; // re-use output buffer for grain
+int8_t * const grain = out_frame->data[c]; // reuse output buffer for grain
 const int grain_stride = out_stride;
 const uint8_t * const in = in_frame->data[c];
 const int in_stride = in_frame->linesize[c];

@@ -305,7 +305,7 @@ static av_cold int hap_init(AVCodecContext *avctx)
 }
 break;
 default:
-av_log(avctx, AV_LOG_ERROR, "Invalid compresor %02X\n", ctx->opt_compressor);
+av_log(avctx, AV_LOG_ERROR, "Invalid compressor %02X\n", ctx->opt_compressor);
 return AVERROR_INVALIDDATA;
 }
 if (corrected_chunk_count != ctx->opt_chunk_count) {

@ -3544,7 +3544,7 @@ static int decode_slice(HEVCContext *s, unsigned nal_idx, GetBitContext *gb)
|
||||||
|
|
||||||
ret = hls_slice_header(&s->sh, s, gb);
|
ret = hls_slice_header(&s->sh, s, gb);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
// hls_slice_header() does not cleanup on failure thus the state now is inconsistant so we cannot use it on depandant slices
|
// hls_slice_header() does not cleanup on failure thus the state now is inconsistent so we cannot use it on dependent slices
|
||||||
s->slice_initialized = 0;
|
s->slice_initialized = 0;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@@ -4206,7 +4206,7 @@ static void hevc_decode_flush(AVCodecContext *avctx)
static const AVOption options[] = {
{ "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
{ "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
{ "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
{ "view_ids", "Array of view IDs that should be decoded and output; a single -1 to decode all views",
.offset = OFFSET(view_ids), .type = AV_OPT_TYPE_INT | AV_OPT_TYPE_FLAG_ARRAY,

@@ -1097,7 +1097,7 @@ static void do_plc(int16_t *plc_residual, /* (o) concealed residual */
use_gain = 29491; /* 0.9 in Q15 */
}

/* Compute mixing factor of picth repeatition and noise:
/* Compute mixing factor of picth repetition and noise:
for max_per>0.7 set periodicity to 1.0
0.4<max_per<0.7 set periodicity to (maxper-0.4)/0.7-0.4)
max_per<0.4 set periodicity to 0.0

@@ -1142,7 +1142,7 @@ static void do_plc(int16_t *plc_residual, /* (o) concealed residual */
randvec[i] = s->prevResidual[pick];
}

/* pitch repeatition component */
/* pitch repetition component */
pick = i - use_lag;

if (pick < 0) {

@@ -1160,7 +1160,7 @@ static void do_plc(int16_t *plc_residual, /* (o) concealed residual */
tot_gain = SPL_MUL_16_16_RSFT(29491, use_gain, 15); /* 0.9*use_gain */
}

/* mix noise and pitch repeatition */
/* mix noise and pitch repetition */
plc_residual[i] = SPL_MUL_16_16_RSFT(tot_gain, (pitchfact * plc_residual[i] + (32767 - pitchfact) * randvec[i] + 16384) >> 15, 15);

/* Shifting down the result one step extra to ensure that no overflow

@@ -1802,7 +1802,7 @@ static int j2kenc_destroy(AVCodecContext *avctx)
return 0;
}

// taken from the libopenjpeg wraper so it matches
// taken from the libopenjpeg wrapper so it matches

#define OFFSET(x) offsetof(Jpeg2000EncoderContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

@@ -1362,7 +1362,7 @@ static int jpeg2000_decode_packet(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
}
bits_to_read = (uint8_t) (bits_to_read + cblk->lblock);
segment_bytes = get_bits(s, bits_to_read);
// Write length information for HT Refinment segment
// Write length information for HT Refinement segment
cblk->pass_lengths[1] += segment_bytes;
} else if (!(cblk->modes & (JPEG2000_CBLK_TERMALL | JPEG2000_CBLK_BYPASS))) {
// Common case for non-HT code-blocks; we have only one segment

@@ -2814,7 +2814,7 @@ static av_cold int jpeg2000_decode_init(AVCodecContext *avctx)
Jpeg2000DecoderContext *s = avctx->priv_data;

if (avctx->lowres)
av_log(avctx, AV_LOG_WARNING, "lowres is overriden by reduction_factor but set anyway\n");
av_log(avctx, AV_LOG_WARNING, "lowres is overridden by reduction_factor but set anyway\n");
if (!s->reduction_factor && avctx->lowres < JPEG2000_MAX_RESLEVELS) {
s->reduction_factor = avctx->lowres;
}

@@ -250,7 +250,7 @@ static uint8_t clut_pick_or_set(ARIBCaptionContext *ctx, int r, int g, int b, in
return c;
}

/* initialiaze CLUT with each character colors */
/* initialize CLUT with each character colors */
static void clut_init(ARIBCaptionContext *ctx, aribcc_caption_region_t *region)
{
aribcc_color_t text_color, back_color, stroke_color;

@@ -93,7 +93,7 @@ static float quality_to_distance(float quality)
}

/**
* Initalize the encoder on a per-file basis. All of these need to be set
* Initialize the encoder on a per-file basis. All of these need to be set
* once each time the encoder is reset, which is each frame for still
* images, to make the image2 muxer work. For animation this is run once.
*

@@ -44,7 +44,7 @@
#define MAX_BS_BUF (128 * 1024 * 1024)
#define MAX_NUM_FRMS (1) // supports only 1-frame in an access unit
#define FRM_IDX (0) // supports only 1-frame in an access unit
#define MAX_NUM_CC (OAPV_MAX_CC) // Max number of color componets (upto 4:4:4:4)
#define MAX_NUM_CC (OAPV_MAX_CC) // Max number of color components (upto 4:4:4:4)

/**
* The structure stores all the states associated with the instance of APV encoder

@@ -393,7 +393,7 @@ static int liboapve_encode(AVCodecContext *avctx, AVPacket *avpkt,
uint8_t *data = apv->bitb.addr;
int size = apv->stat.write;

// The encoder may return a "Raw bitstream" formated AU, including au_size.
// The encoder may return a "Raw bitstream" formatted AU, including au_size.
// Discard it as we only need the access_unit() structure.
if (size > 4 && AV_RB32(data) != APV_SIGNATURE) {
data += 4;

@@ -674,7 +674,7 @@ endfunc
vdp2.h.bu.b \out1, \in1, vr5
vdp2.h.bu.b vr12, \in2, vr5
vdp2.h.bu.b vr20, \in3, vr5
vbsrl.v \in0, \in0, 1 //Back up previous 7 loaded datas,
vbsrl.v \in0, \in0, 1 //Back up previous 7 loaded data,
vbsrl.v \in1, \in1, 1 //so just need to insert the 8th
vbsrl.v \in2, \in2, 1 //load in the next loop.
vbsrl.v \in3, \in3, 1

@@ -903,7 +903,7 @@ endfunc
xvhaddw.d.h xr7
xvhaddw.d.h xr8
xvhaddw.d.h xr9
xvbsrl.v xr14, xr14, 1 //Back up previous 7 loaded datas,
xvbsrl.v xr14, xr14, 1 //Back up previous 7 loaded data,
xvbsrl.v xr15, xr15, 1 //so just need to insert the 8th
xvbsrl.v xr16, xr16, 1 //load in next loop.
xvbsrl.v xr17, xr17, 1

@@ -332,7 +332,7 @@ ASM_PREF\name: ;

/*
* Description : Store elements of vector
* vd : Data vector to be stroed
* vd : Data vector to be stored
* rk : Address of data storage
* ra : Offset of address
* si : Index of data in vd

@@ -30,7 +30,7 @@ void ff_vc1_inv_trans_8x8_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *bloc
void ff_vc1_inv_trans_8x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_8x4_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x8_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x8_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *blokc);
void ff_vc1_inv_trans_4x8_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);

@@ -50,7 +50,7 @@ typedef struct HuffEntry {

typedef struct PTable {
int value; ///< input value
int64_t prob; ///< number of occurences of this value in input
int64_t prob; ///< number of occurrences of this value in input
} PTable;

typedef struct Slice {

@@ -2124,7 +2124,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
goto out;
} else if (s->iccnum != 0 && nummarkers != s->iccnum) {
av_log(s->avctx, AV_LOG_WARNING, "Mistmatch in coded number of ICC markers between markers\n");
av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
goto out;
} else if (seqno > nummarkers) {
av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");

@@ -30,7 +30,7 @@
*/
typedef struct PTable {
int value; ///< input value
int prob; ///< number of occurences of this value in input
int prob; ///< number of occurrences of this value in input
} PTable;

/**

@@ -179,7 +179,7 @@ void ff_mjpeg_encode_huffman_close(MJpegEncHuffmanContext *s, uint8_t bits[17],

// val_counts[0] is the fake element we added earlier.
av_assert1(val_counts[0].prob == 0 && val_counts[0].value == 256);
// The following loop puts the values with higher occurence first,
// The following loop puts the values with higher occurrence first,
// ensuring that they get the shorter codes.
for (int i = 0; i < nval; ++i)
val[i] = val_counts[nval - i].value;

@@ -42,7 +42,7 @@ typedef struct MLZDict {
int match_len;
} MLZDict;

/** MLZ data strucure
/** MLZ data structure
*/
typedef struct MLZ {
int dic_code_bit;

@@ -259,7 +259,7 @@ static int encode_sample_description(AVCodecContext *avctx)
// Build font table
// We can't build a complete font table since that would require
// scanning all dialogs first. But we can at least fill in what
// is avaiable in the ASS header
// is available in the ASS header
if (style && ass->styles_count) {
// Find unique font names
if (style->font_name) {

@@ -3592,7 +3592,7 @@ int ff_mpeg4_parse_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,

// If we have not switched to studio profile than we also did not switch bps
// that means something else (like a previous instance) outside set bps which
// would be inconsistant with the currect state, thus reset it
// would be inconsistent with the correct state, thus reset it
if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8)
s->avctx->bits_per_raw_sample = 0;

@@ -1770,7 +1770,7 @@ static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
else
s->syncword = 0xfff00000;

/* Init the first mp3 decoder in standard way, so that all tables get builded
/* Init the first mp3 decoder in standard way, so that all tables get built
* We replace avctx->priv_data with the context of the first decoder so that
* decode_init() does not have to be changed.
* Other decoders will be initialized here copying data from the first context

@@ -1828,7 +1828,7 @@ static int select_input_picture(MPVMainEncContext *const m)

if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
// input is a shared pix, so we can't modify it -> allocate a new
// one & ensure that the shared one is reuseable
// one & ensure that the shared one is reusable
av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);

ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);

@@ -28,7 +28,7 @@
/**
* @file
* The 3 alphanumeric copyright notices are md5summed they are from the original
* implementors. The original code is available from http://code.google.com/p/nelly2pcm/
* implementers. The original code is available from http://code.google.com/p/nelly2pcm/
*/

#include <stdlib.h>

@@ -28,7 +28,7 @@
/**
* @file
* The 3 alphanumeric copyright notices are md5summed they are from the original
* implementors. The original code is available from http://code.google.com/p/nelly2pcm/
* implementers. The original code is available from http://code.google.com/p/nelly2pcm/
*/

#ifndef AVCODEC_NELLYMOSER_H

@@ -28,7 +28,7 @@
/**
* @file
* The 3 alphanumeric copyright notices are md5summed they are from the original
* implementors. The original code is available from http://code.google.com/p/nelly2pcm/
* implementers. The original code is available from http://code.google.com/p/nelly2pcm/
*/

#include "libavutil/channel_layout.h"

@@ -433,7 +433,7 @@ static void celt_encode_frame(OpusEncContext *s, OpusRangeCoder *rc,

if (f->silence) {
if (f->framebits >= 16)
ff_opus_rc_enc_log(rc, 1, 15); /* Silence (if using explicit singalling) */
ff_opus_rc_enc_log(rc, 1, 15); /* Silence (if using explicit signalling) */
for (int ch = 0; ch < s->channels; ch++)
memset(s->last_quantized_energy[ch], 0.0f, sizeof(float)*CELT_MAX_BANDS);
return;

@@ -1155,7 +1155,7 @@ const uint32_t * const ff_celt_pvq_u_row[15] = {
};

/* Deemphasis constant (alpha_p), as specified in RFC6716 as 0.8500061035.
* libopus uses a slighly rounded constant, set to 0.85 exactly,
* libopus uses a slightly rounded constant, set to 0.85 exactly,
* to simplify its fixed-point version, but it's not significant to impact
* compliance. */
#define CELT_EMPH_COEFF 0.8500061035

@@ -142,7 +142,7 @@ enum AVPacketSideDataType {
AV_PKT_DATA_CPB_PROPERTIES,

/**
* Recommmends skipping the specified number of samples
* Recommends skipping the specified number of samples
* @code
* u32le number of samples to skip from start of this packet
* u32le number of samples to skip from end of this packet

@@ -447,7 +447,7 @@ static int parse_presentation_segment(AVCodecContext *avctx,
PGSSubObjectRef *const object = &ctx->presentation.objects[i];

if (buf_end - buf < 8) {
av_log(avctx, AV_LOG_ERROR, "Insufficent space for object\n");
av_log(avctx, AV_LOG_ERROR, "Insufficient space for object\n");
ctx->presentation.object_count = i;
return AVERROR_INVALIDDATA;
}

@@ -172,7 +172,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
1 byte run (=0)
2 bytes run
1 byte val
thats 5 bytes and the maximum run we can code is 65535
that's 5 bytes and the maximum run we can code is 65535

The RLE decoder can exit prematurly but it does not on any image available
Based on this the formula is assumed correct for undamaged images.

@@ -46,7 +46,7 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
register ptrdiff_t line_size_4 = line_size * (1 << 2);

// hand-unrolling the loop by 4 gains about 15%
// mininum execution time goes from 74 to 60 cycles
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450

@@ -312,7 +312,7 @@ static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, const uint
perm = vec_lvsl(0, src);
#endif
// hand-unrolling the loop by 4 gains about 15%
// mininum execution time goes from 74 to 60 cycles
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450

@@ -173,10 +173,10 @@ static int decode_header(PSDContext * s)
}
bytestream2_skip(&s->gb, len_section);

/* image ressources */
/* image resources */
len_section = bytestream2_get_be32(&s->gb);
if (len_section < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Negative size for image ressources section.\n");
av_log(s->avctx, AV_LOG_ERROR, "Negative size for image resources section.\n");
return AVERROR_INVALIDDATA;
}

@@ -696,7 +696,7 @@ static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDispla
{
int ret;

// The SDK re-uses this flag for HDR SEI parsing
// The SDK reuses this flag for HDR SEI parsing
if (mdcv->InsertPayloadToggle) {
AVMasteringDisplayMetadata *mastering;
const int mapping[3] = {2, 0, 1};

@@ -726,7 +726,7 @@ static int qsv_export_hdr_side_data(AVCodecContext *avctx, mfxExtMasteringDispla
}
}

// The SDK re-uses this flag for HDR SEI parsing
// The SDK reuses this flag for HDR SEI parsing
if (clli->InsertPayloadToggle) {
AVContentLightMetadata *light;

@@ -1002,7 +1002,7 @@ static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,

// sw_pix_fmt, coded_width/height should be set for ff_get_format(),
// assume sw_pix_fmt is NV12 and coded_width/height to be 1280x720,
// the assumption may be not corret but will be updated after header decoded if not true.
// the assumption may be not correct but will be updated after header decoded if not true.
if (q->orig_pix_fmt != AV_PIX_FMT_NONE)
pix_fmt = q->orig_pix_fmt;
if (!avctx->coded_width)

@@ -2057,7 +2057,7 @@ static int submit_frame(QSVEncContext *q, const AVFrame *frame,
}
} else {
/* make a copy if the input is not padded as libmfx requires */
/* and to make allocation continious for data[0]/data[1] */
/* and to make allocation continuous for data[0]/data[1] */
if ((frame->height & (q->height_align - 1) || frame->linesize[0] & (q->width_align - 1)) ||
((frame->format == AV_PIX_FMT_NV12 || frame->format == AV_PIX_FMT_P010 || frame->format == AV_PIX_FMT_P012) &&
(frame->data[1] - frame->data[0] != frame->linesize[0] * FFALIGN(qf->frame->height, q->height_align)))) {

@@ -1,6 +1,6 @@
/*
* AAC encoder assembly optimizations
* Copyright (c) 2023 Institue of Software Chinese Academy of Sciences (ISCAS).
* Copyright (c) 2023 Institute of Software Chinese Academy of Sciences (ISCAS).
*
* This file is part of FFmpeg.
*

Some files were not shown because too many files have changed in this diff