Mirror of https://github.com/kkroening/ffmpeg-python.git (synced 2025-04-05 04:22:51 +08:00)
Merge pull request #104 from kkroening/filter
Use `filter` as the canonical name for `filter_`
This commit is contained in commit 6523c46fa4.
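In practical terms, the change below makes `filter` the primary spelling while keeping `filter_` as a thin alias, so existing code keeps working. A minimal sketch of both spellings (hypothetical file names; assumes a local `input.mp4` exists):

```python
import ffmpeg

# Canonical spelling after this change:
stream = ffmpeg.input('input.mp4').filter('fps', fps=25, round='up')

# `filter_` is kept as an alias for callers that want to avoid shadowing
# Python's built-in `filter`, and for backwards compatibility:
stream_alt = ffmpeg.input('input.mp4').filter_('fps', fps=25, round='up')

ffmpeg.output(stream, 'output.mp4').overwrite_output().run()
```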
@@ -122,7 +122,7 @@ Alternatively, standard python help is available, such as at the python REPL pro
 Don't see the filter you're looking for? `ffmpeg-python` includes shorthand notation for some of the most commonly used filters (such as `concat`), but it's easy to use any arbitrary ffmpeg filter:
 ```python
 stream = ffmpeg.input('dummy.mp4')
-stream = ffmpeg.filter_(stream, 'fps', fps=25, round='up')
+stream = ffmpeg.filter(stream, 'fps', fps=25, round='up')
 stream = ffmpeg.output(stream, 'dummy2.mp4')
 ffmpeg.run(stream)
 ```
@@ -132,7 +132,7 @@ Or fluently:
 (
     ffmpeg
     .input('dummy.mp4')
-    .filter_('fps', fps=25, round='up')
+    .filter('fps', fps=25, round='up')
     .output('dummy2.mp4')
     .run()
 )
@@ -17,7 +17,7 @@ height = int(video_stream['height'])
 (
     ffmpeg
     .input(in_filename, ss=time)
-    .filter_('scale', width, -1)
+    .filter('scale', width, -1)
     .output(out_filename, vframes=1)
     .run()
 )
@@ -49,7 +49,7 @@ video = (
 out, _ = (
     ffmpeg
     .input(in_filename)
-    .filter_('select', 'gte(n,{})'.format(frame_num))
+    .filter('select', 'gte(n,{})'.format(frame_num))
     .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
     .run(capture_output=True)
 )
@@ -89,8 +89,8 @@ With additional filtering:
 (
     ffmpeg
     .input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25)
-    .filter_('deflicker', mode='pm', size=10)
-    .filter_('scale', size='hd1080', force_original_aspect_ratio='increase')
+    .filter('deflicker', mode='pm', size=10)
+    .filter('scale', size='hd1080', force_original_aspect_ratio='increase')
     .output('movie.mp4', crf=20, preset='slower', movflags='faststart', pix_fmt='yuv420p')
     .view(filename='filter_graph')
     .run()
@@ -106,11 +106,11 @@ in1 = ffmpeg.input('in1.mp4')
 in2 = ffmpeg.input('in2.mp4')
 v1 = in1['v'].hflip()
 a1 = in1['a']
-v2 = in2['v'].filter_('reverse').filter_('hue', s=0)
-a2 = in2['a'].filter_('areverse').filter_('aphaser')
+v2 = in2['v'].filter('reverse').filter('hue', s=0)
+a2 = in2['a'].filter('areverse').filter('aphaser')
 joined = ffmpeg.concat(v1, a1, v2, a2, v=1, a=1).node
 v3 = joined[0]
-a3 = joined[1].filter_('volume', 0.8)
+a3 = joined[1].filter('volume', 0.8)
 out = ffmpeg.output(v3, a3, 'out.mp4')
 out.run()
 ```
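In the complex-filter example above, `concat(..., v=1, a=1)` yields a node with one video and one audio output, and `.node` plus indexing is how those outputs are pulled apart before being handed to `output`. A minimal sketch of that pattern in isolation (input file names are illustrative):

```python
import ffmpeg

clip_a = ffmpeg.input('a.mp4')
clip_b = ffmpeg.input('b.mp4')

# concat with v=1, a=1 emits exactly one video and one audio stream;
# .node exposes the underlying filter node so each output can be indexed.
joined = ffmpeg.concat(clip_a.video, clip_a.audio, clip_b.video, clip_b.audio, v=1, a=1).node
video_out = joined[0]
audio_out = joined[1]

ffmpeg.output(video_out, audio_out, 'joined.mp4').run()
```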
@@ -20,7 +20,7 @@ def generate_thumbnail(in_filename, out_filename, time, width):
     (
         ffmpeg
         .input(in_filename, ss=time)
-        .filter_('scale', width, -1)
+        .filter('scale', width, -1)
         .output(out_filename, vframes=1)
         .overwrite_output()
         .run(capture_stdout=True, capture_stderr=True)
@@ -15,7 +15,7 @@ def read_frame_as_jpeg(in_filename, frame_num):
     out, err = (
         ffmpeg
         .input(in_filename)
-        .filter_('select', 'gte(n,{})'.format(frame_num))
+        .filter('select', 'gte(n,{})'.format(frame_num))
         .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
         .run(capture_stdout=True)
     )
@@ -50,7 +50,7 @@ def get_chunk_times(in_filename, silence_threshold, silence_duration, start_time
     p = _logged_popen(
         (ffmpeg
             .input(in_filename, **input_kwargs)
-            .filter_('silencedetect', n='{}dB'.format(silence_threshold), d=silence_duration)
+            .filter('silencedetect', n='{}dB'.format(silence_threshold), d=silence_duration)
             .output('-', format='null')
             .compile()
         ) + ['-nostats'], # FIXME: use .nostats() once it's implemented in ffmpeg-python.
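The hunk above leans on `.compile()`, which turns the stream graph into the raw ffmpeg argument list so flags that `ffmpeg-python` does not expose yet (here `-nostats`) can be appended before the process is launched. A minimal sketch of that pattern outside the helper script (file name and threshold are illustrative):

```python
import subprocess
import ffmpeg

# .compile() returns the argv list (['ffmpeg', '-i', 'in.mp4', ...]),
# so extra raw CLI flags can simply be concatenated onto it.
args = (
    ffmpeg
    .input('in.mp4')
    .filter('silencedetect', n='-30dB', d=2)
    .output('-', format='null')
    .compile()
) + ['-nostats']

# silencedetect reports the detected ranges on stderr.
p = subprocess.Popen(args, stderr=subprocess.PIPE)
_, err = p.communicate()
print(err.decode())
```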
@@ -25,7 +25,7 @@ def filter_multi_output(stream_spec, filter_name, *args, **kwargs):


 @filter_operator()
-def filter_(stream_spec, filter_name, *args, **kwargs):
+def filter(stream_spec, filter_name, *args, **kwargs):
     """Apply custom filter.

     ``filter_`` is normally used by higher-level filter functions such as ``hflip``, but if a filter implementation
@@ -42,11 +42,19 @@ def filter_(stream_spec, filter_name, *args, **kwargs):

     Example:

-        ``ffmpeg.input('in.mp4').filter_('hflip').output('out.mp4').run()``
+        ``ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()``
     """
     return filter_multi_output(stream_spec, filter_name, *args, **kwargs).stream()


+@filter_operator()
+def filter_(stream_spec, filter_name, *args, **kwargs):
+    """Alternate name for ``filter``, so as to not collide with the
+    built-in python ``filter`` operator.
+    """
+    return filter(stream_spec, filter_name, *args, **kwargs)
+
+
 @filter_operator()
 def split(stream):
     return FilterNode(stream, split.__name__)
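As the docstring notes, the named shorthands are thin wrappers over this generic function; a hypothetical user-level wrapper might look like the sketch below (`vflip_custom` is an invented name, not part of the library). The `filter_` alias introduced here simply gives callers a collision-free spelling when they want to keep Python's built-in `filter` unambiguous:

```python
import ffmpeg

def vflip_custom(stream):
    # Same spirit as the built-in shorthands (hflip, crop, ...):
    # forward to the generic filter function with a fixed filter name.
    return ffmpeg.filter(stream, 'vflip')

# Hypothetical usage (assumes in.mp4 exists):
out = vflip_custom(ffmpeg.input('in.mp4')).output('out.mp4')

# Python's built-in filter() is unaffected by the rename:
evens = list(filter(lambda n: n % 2 == 0, range(10)))
```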
@@ -343,7 +351,7 @@ def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs):
         kwargs['x'] = x
     if y != 0:
         kwargs['y'] = y
-    return filter_(stream, drawtext.__name__, **kwargs)
+    return filter(stream, drawtext.__name__, **kwargs)


 @filter_operator()
@@ -432,6 +440,7 @@ __all__ = [
     'crop',
     'drawbox',
     'drawtext',
+    'filter',
     'filter_',
     'filter_multi_output',
     'hflip',
@@ -53,7 +53,7 @@ class Stream(object):
 Process the audio and video portions of a stream independently::

     input = ffmpeg.input('in.mp4')
-    audio = input[:'a'].filter_("aecho", 0.8, 0.9, 1000, 0.3)
+    audio = input[:'a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
     video = input[:'v'].hflip()
     out = ffmpeg.output(audio, video, 'out.mp4')
 """
@@ -141,7 +141,7 @@ class Node(KwargReprNode):
 Process the audio and video portions of a stream independently::

     input = ffmpeg.input('in.mp4')
-    audio = input[:'a'].filter_("aecho", 0.8, 0.9, 1000, 0.3)
+    audio = input[:'a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
     video = input[:'v'].hflip()
     out = ffmpeg.output(audio, video, 'out.mp4')
 """
@@ -174,7 +174,7 @@ def test_combined_output():
 def test_filter_with_selector():
     i = ffmpeg.input(TEST_INPUT_FILE1)
     v1 = i['v'].hflip()
-    a1 = i['a'].filter_('aecho', 0.8, 0.9, 1000, 0.3)
+    a1 = i['a'].filter('aecho', 0.8, 0.9, 1000, 0.3)
     out = ffmpeg.output(a1, v1, TEST_OUTPUT_FILE1)
     assert out.get_args() == [
         '-i', TEST_INPUT_FILE1,
@@ -214,8 +214,8 @@ def _get_complex_filter_asplit_example():

     return (ffmpeg
         .concat(
-            split0.filter_('atrim', start=10, end=20),
-            split1.filter_('atrim', start=30, end=40),
+            split0.filter('atrim', start=10, end=20),
+            split1.filter('atrim', start=30, end=40),
         )
         .output(TEST_OUTPUT_FILE1)
         .overwrite_output()
@@ -484,7 +484,7 @@ def test__run__dummy_cmd_list():

 def test__filter__custom():
     stream = ffmpeg.input('dummy.mp4')
-    stream = ffmpeg.filter_(stream, 'custom_filter', 'a', 'b', kwarg1='c')
+    stream = ffmpeg.filter(stream, 'custom_filter', 'a', 'b', kwarg1='c')
     stream = ffmpeg.output(stream, 'dummy2.mp4')
     assert stream.get_args() == [
         '-i', 'dummy.mp4',
@@ -497,7 +497,7 @@ def test__filter__custom():
 def test__filter__custom_fluent():
     stream = (ffmpeg
         .input('dummy.mp4')
-        .filter_('custom_filter', 'a', 'b', kwarg1='c')
+        .filter('custom_filter', 'a', 'b', kwarg1='c')
         .output('dummy2.mp4')
     )
     assert stream.get_args() == [