diff --git a/doc/html/index.html b/doc/html/index.html index eac8967..ddca38e 100644 --- a/doc/html/index.html +++ b/doc/html/index.html @@ -132,7 +132,7 @@ etc.). Some keyword-arguments are handled specially, as shown below.
video_bitrate – parameter for -b:v
, e.g. video_bitrate=1000
.
audio_bitrate – parameter for -b:a
, e.g. audio_bitrate=200
.
format – alias for -f
parameter, e.g. format='mp4'
+
fmt – alias for -f
parameter, e.g. fmt='mp4'
(equivalent to f='mp4'
).
pipe:Run and stream input:
process = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
@@ -247,7 +247,7 @@ used with pipe:process = (
ffmpeg
.input(in_filename)
- .output('pipe':, format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = process.communicate()
@@ -257,13 +257,13 @@ used with pipe:process1 = (
ffmpeg
.input(in_filename)
- .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True)
)
process2 = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
diff --git a/examples/README.md b/examples/README.md
index dabc739..83abca2 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -31,7 +31,7 @@ height = int(video_stream['height'])
out, _ = (
ffmpeg
.input('in.mp4')
- .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True)
)
video = (
@@ -50,7 +50,7 @@ out, _ = (
ffmpeg
.input(in_filename)
.filter('select', 'gte(n,{})'.format(frame_num))
- .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
+ .output('pipe:', vframes=1, fmt='image2', vcodec='mjpeg')
.run(capture_stdout=True)
)
```
@@ -62,7 +62,7 @@ out, _ = (
```python
out, _ = (ffmpeg
.input(in_filename, **input_kwargs)
- .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
+ .output('-', fmt='s16le', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run(capture_stdout=True)
)
@@ -165,13 +165,13 @@ input_video = ffmpeg.input('input-video.mp4')
process1 = (
ffmpeg
.input(in_filename)
- .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=8)
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24', vframes=8)
.run_async(pipe_stdout=True)
)
process2 = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
@@ -208,7 +208,7 @@ process2.wait()
```python
(
ffmpeg
- .input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)
+ .input('FaceTime', fmt='avfoundation', pix_fmt='uyvy422', framerate=30)
.output('out.mp4', pix_fmt='yuv420p', vframes=100)
.run()
)
@@ -248,7 +248,7 @@ packet_size = 4096
process = (
ffmpeg
.input('rtsp://%s:8554/default')
- .output('-', format='h264')
+ .output('-', fmt='h264')
.run_async(pipe_stdout=True)
)
diff --git a/examples/facetime.py b/examples/facetime.py
index 58d083e..ae8ef42 100644
--- a/examples/facetime.py
+++ b/examples/facetime.py
@@ -2,7 +2,7 @@ import ffmpeg
(
ffmpeg
- .input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)
+ .input('FaceTime', fmt='avfoundation', pix_fmt='uyvy422', framerate=30)
.output('out.mp4', pix_fmt='yuv420p', vframes=100)
.run()
)
diff --git a/examples/ffmpeg-numpy.ipynb b/examples/ffmpeg-numpy.ipynb
index b6d991b..c30ed63 100644
--- a/examples/ffmpeg-numpy.ipynb
+++ b/examples/ffmpeg-numpy.ipynb
@@ -50,7 +50,7 @@
"out, err = (\n",
" ffmpeg\n",
" .input('in.mp4')\n",
- " .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
+ " .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')\n",
" .run(capture_stdout=True)\n",
")\n",
"video = (\n",
@@ -95,7 +95,7 @@
" out, _ = (\n",
" stream\n",
" .filter_('select', 'gte(n,{})'.format(frame_num))\n",
- " .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=1)\n",
+ " .output('pipe:', fmt='rawvideo', pix_fmt='rgb24', vframes=1)\n",
" .run(capture_stdout=True, capture_stderr=True)\n",
" )\n",
" return np.frombuffer(out, np.uint8).reshape([height, width, 3])\n",
diff --git a/examples/read_frame_as_jpeg.py b/examples/read_frame_as_jpeg.py
index 92b4fee..9e88b3c 100755
--- a/examples/read_frame_as_jpeg.py
+++ b/examples/read_frame_as_jpeg.py
@@ -16,7 +16,7 @@ def read_frame_as_jpeg(in_filename, frame_num):
ffmpeg
.input(in_filename)
.filter('select', 'gte(n,{})'.format(frame_num))
- .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
+ .output('pipe:', vframes=1, fmt='image2', vcodec='mjpeg')
.run(capture_stdout=True)
)
return out
diff --git a/examples/split_silence.py b/examples/split_silence.py
index a889db1..467d325 100755
--- a/examples/split_silence.py
+++ b/examples/split_silence.py
@@ -51,7 +51,7 @@ def get_chunk_times(in_filename, silence_threshold, silence_duration, start_time
(ffmpeg
.input(in_filename, **input_kwargs)
.filter('silencedetect', n='{}dB'.format(silence_threshold), d=silence_duration)
- .output('-', format='null')
+ .output('-', fmt='null')
.compile()
) + ['-nostats'], # FIXME: use .nostats() once it's implemented in ffmpeg-python.
stderr=subprocess.PIPE
diff --git a/examples/tensorflow_stream.py b/examples/tensorflow_stream.py
index 6c9c9c9..7433c1b 100644
--- a/examples/tensorflow_stream.py
+++ b/examples/tensorflow_stream.py
@@ -58,7 +58,7 @@ def start_ffmpeg_process1(in_filename):
args = (
ffmpeg
.input(in_filename)
- .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.compile()
)
return subprocess.Popen(args, stdout=subprocess.PIPE)
@@ -68,7 +68,7 @@ def start_ffmpeg_process2(out_filename, width, height):
logger.info('Starting ffmpeg process2')
args = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.compile()
diff --git a/examples/transcribe.py b/examples/transcribe.py
index 0b7200c..00600cf 100755
--- a/examples/transcribe.py
+++ b/examples/transcribe.py
@@ -22,7 +22,7 @@ def decode_audio(in_filename, **input_kwargs):
try:
out, err = (ffmpeg
.input(in_filename, **input_kwargs)
- .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
+ .output('-', fmt='s16le', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run(capture_stdout=True, capture_stderr=True)
)
diff --git a/ffmpeg/_ffmpeg.py b/ffmpeg/_ffmpeg.py
index 31e2b90..7cb47f6 100644
--- a/ffmpeg/_ffmpeg.py
+++ b/ffmpeg/_ffmpeg.py
@@ -26,9 +26,9 @@ def input(filename, **kwargs):
kwargs['filename'] = filename
fmt = kwargs.pop('f', None)
if fmt:
- if 'format' in kwargs:
- raise ValueError("Can't specify both `format` and `f` kwargs")
- kwargs['format'] = fmt
+ if 'fmt' in kwargs:
+ raise ValueError("Can't specify both `fmt` and `f` kwargs")
+ kwargs['fmt'] = fmt
return InputNode(input.__name__, kwargs=kwargs).stream()
@@ -69,7 +69,7 @@ def output(*streams_and_filename, **kwargs):
Args:
video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``.
audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``.
- format: alias for ``-f`` parameter, e.g. ``format='mp4'``
+ fmt: alias for ``-f`` parameter, e.g. ``fmt='mp4'``
(equivalent to ``f='mp4'``).
If multiple streams are provided, they are mapped to the same
@@ -88,9 +88,9 @@ def output(*streams_and_filename, **kwargs):
fmt = kwargs.pop('f', None)
if fmt:
- if 'format' in kwargs:
- raise ValueError("Can't specify both `format` and `f` kwargs")
- kwargs['format'] = fmt
+ if 'fmt' in kwargs:
+ raise ValueError("Can't specify both `fmt` and `f` kwargs")
+ kwargs['fmt'] = fmt
return OutputNode(streams, output.__name__, kwargs=kwargs).stream()
diff --git a/ffmpeg/_run.py b/ffmpeg/_run.py
index c3d5550..a96de31 100644
--- a/ffmpeg/_run.py
+++ b/ffmpeg/_run.py
@@ -32,7 +32,7 @@ def _get_input_args(input_node):
if input_node.name == input.__name__:
kwargs = copy.copy(input_node.kwargs)
filename = kwargs.pop('filename')
- fmt = kwargs.pop('format', None)
+ fmt = kwargs.pop('fmt', None)
video_size = kwargs.pop('video_size', None)
args = []
if fmt:
@@ -128,8 +128,8 @@ def _get_output_args(node, stream_name_map):
kwargs = copy.copy(node.kwargs)
filename = kwargs.pop('filename')
- if 'format' in kwargs:
- args += ['-f', kwargs.pop('format')]
+ if 'fmt' in kwargs:
+ args += ['-f', kwargs.pop('fmt')]
if 'video_bitrate' in kwargs:
args += ['-b:v', str(kwargs.pop('video_bitrate'))]
if 'audio_bitrate' in kwargs:
@@ -221,7 +221,7 @@ def run_async(
process = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
@@ -233,7 +233,7 @@ def run_async(
process = (
ffmpeg
.input(in_filename)
- .output('pipe':, format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = process.communicate()
@@ -243,13 +243,13 @@ def run_async(
process1 = (
ffmpeg
.input(in_filename)
- .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .output('pipe:', fmt='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True)
)
process2 = (
ffmpeg
- .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
+ .input('pipe:', fmt='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
diff --git a/ffmpeg/tests/test_ffmpeg.py b/ffmpeg/tests/test_ffmpeg.py
index 51ee258..741eca9 100644
--- a/ffmpeg/tests/test_ffmpeg.py
+++ b/ffmpeg/tests/test_ffmpeg.py
@@ -660,13 +660,13 @@ def test_pipe():
out = (
ffmpeg.input(
'pipe:0',
- format='rawvideo',
+ fmt='rawvideo',
pixel_format='rgb24',
video_size=(width, height),
framerate=10,
)
.trim(start_frame=start_frame)
- .output('pipe:1', format='rawvideo')
+ .output('pipe:1', fmt='rawvideo')
)
args = out.get_args()