mirror of
https://github.com/kkroening/ffmpeg-python.git
synced 2025-04-05 04:22:51 +08:00
Merge remote-tracking branch 'origin/master' into feature-80
Conflicts: ffmpeg/_run.py ffmpeg/tests/test_ffmpeg.py
This commit is contained in:
commit
0cc0bfaaaa
11
README.md
11
README.md
@ -95,6 +95,10 @@ $ python
|
||||
>>> import ffmpeg
|
||||
```
|
||||
|
||||
## [Examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples)
|
||||
|
||||
When in doubt, take a look at the [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples) to see if there's something that's close to whatever you're trying to do.
|
||||
|
||||
## [API Reference](https://kkroening.github.io/ffmpeg-python/)
|
||||
|
||||
API documentation is automatically generated from python docstrings and hosted on github pages: https://kkroening.github.io/ffmpeg-python/
|
||||
@ -140,6 +144,13 @@ Pull requests are welcome as well.
|
||||
|
||||
<br />
|
||||
|
||||
### Special thanks
|
||||
|
||||
- [Arne de Laat](https://github.com/153957)
|
||||
- [Davide Depau](https://github.com/depau)
|
||||
- [Dim](https://github.com/lloti)
|
||||
- [Noah Stier](https://github.com/noahstier)
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [API Reference](https://kkroening.github.io/ffmpeg-python/)
|
||||
|
BIN
doc/jupyter-screenshot.png
Normal file
BIN
doc/jupyter-screenshot.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 461 KiB |
63
examples/README.md
Normal file
63
examples/README.md
Normal file
@ -0,0 +1,63 @@
|
||||
# Examples
|
||||
|
||||
## [Get video info](https://github.com/kkroening/ffmpeg-python/blob/master/examples/video_info.py#L15)
|
||||
|
||||
```python
|
||||
probe = ffmpeg.probe(args.in_filename)
|
||||
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
|
||||
width = int(video_stream['width'])
|
||||
height = int(video_stream['height'])
|
||||
```
|
||||
|
||||
## [Convert video to numpy array](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
|
||||
|
||||
```python
|
||||
out, _ = (
|
||||
ffmpeg
|
||||
.input('in.mp4')
|
||||
.output('pipe:', format='rawvideo', pix_fmt='rgb24')
|
||||
.run(capture_stdout=True)
|
||||
)
|
||||
video = (
|
||||
np
|
||||
.frombuffer(out, np.uint8)
|
||||
.reshape([-1, height, width, 3])
|
||||
)
|
||||
```
|
||||
|
||||
## [Generate thumbnail for video](https://github.com/kkroening/ffmpeg-python/blob/master/examples/get_video_thumbnail.py#L21)
|
||||
```python
|
||||
(
|
||||
ffmpeg
|
||||
.input(in_filename, ss=time)
|
||||
.filter_('scale', width, -1)
|
||||
.output(out_filename, vframes=1)
|
||||
.run()
|
||||
)
|
||||
```
|
||||
|
||||
## [Read single video frame as jpeg through pipe](https://github.com/kkroening/ffmpeg-python/blob/master/examples/read_frame_as_jpeg.py#L16)
|
||||
```python
|
||||
out, _ = (
|
||||
ffmpeg
|
||||
.input(in_filename)
|
||||
.filter_('select', 'gte(n,{})'.format(frame_num))
|
||||
.output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
|
||||
.run(capture_output=True)
|
||||
)
|
||||
```
|
||||
|
||||
## [Convert sound to raw PCM audio](https://github.com/kkroening/ffmpeg-python/blob/master/examples/transcribe.py#L23)
|
||||
```python
|
||||
out, _ = (ffmpeg
|
||||
.input(in_filename, **input_kwargs)
|
||||
.output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
|
||||
.overwrite_output()
|
||||
.run(capture_stdout=True)
|
||||
)
|
||||
```
|
||||
|
||||
## [JupyterLab/Notebook widgets](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
|
||||
|
||||
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-screenshot.png" alt="jupyter screenshot" width="75%" />
|
||||
|
103
examples/ffmpeg-numpy.ipynb
Normal file
103
examples/ffmpeg-numpy.ipynb
Normal file
@ -0,0 +1,103 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 116,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from ipywidgets import interact\n",
|
||||
"from matplotlib import pyplot as plt\n",
|
||||
"import ffmpeg\n",
|
||||
"import ipywidgets as widgets\n",
|
||||
"import numpy as np"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 117,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"probe = ffmpeg.probe('in.mp4')\n",
|
||||
"video_info = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')\n",
|
||||
"width = int(video_info['width'])\n",
|
||||
"height = int(video_info['height'])\n",
|
||||
"num_frames = int(video_info['nb_frames'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 118,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"out, err = (\n",
|
||||
" ffmpeg\n",
|
||||
" .input('in.mp4')\n",
|
||||
" .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
|
||||
" .run(capture_stdout=True)\n",
|
||||
")\n",
|
||||
"video = (\n",
|
||||
" np\n",
|
||||
" .frombuffer(out, np.uint8)\n",
|
||||
" .reshape([-1, height, width, 3])\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 115,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "17d13d7551114fb39a1fad933cf0398a",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"interactive(children=(IntSlider(value=0, description='frame', max=209), Output()), _dom_classes=('widget-inter…"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@interact(frame=(0, num_frames))\n",
|
||||
"def show_frame(frame=0):\n",
|
||||
" plt.imshow(video[frame,:,:,:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
35
examples/get_video_thumbnail.py
Executable file
35
examples/get_video_thumbnail.py
Executable file
@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals, print_function
|
||||
import argparse
|
||||
import ffmpeg
|
||||
import sys
|
||||
|
||||
|
||||
# CLI for generating a thumbnail image from a video file.
parser = argparse.ArgumentParser(description='Generate video thumbnail')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
parser.add_argument(
    # BUG FIX: was ``type=int`` with a float default of 0.1 — fractional
    # seek offsets such as ``--time 0.5`` were rejected at parse time even
    # though the default itself was a float. The seek offset (ffmpeg ``ss``)
    # is a number of seconds and may be fractional.
    '--time', type=float, default=0.1, help='Time offset')
parser.add_argument(
    '--width', type=int, default=120,
    help='Width of output thumbnail (height automatically determined by aspect ratio)')
|
||||
|
||||
|
||||
def generate_thumbnail(in_filename, out_filename, time, width):
    """Write a single-frame thumbnail of ``in_filename`` to ``out_filename``.

    Seeks to ``time`` seconds, scales the frame to ``width`` pixels wide
    (height follows the aspect ratio via ``scale=width:-1``), and exits the
    process with status 1 if ffmpeg fails, printing ffmpeg's stderr.
    """
    # Building the stream spec never contacts ffmpeg, so it can happen
    # outside the try; only run() launches the subprocess.
    stream = ffmpeg.input(in_filename, ss=time)
    stream = stream.filter_('scale', width, -1)
    stream = stream.output(out_filename, vframes=1)
    stream = stream.overwrite_output()
    try:
        stream.run(capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as e:
        # e.stderr holds ffmpeg's captured error output as bytes.
        print(e.stderr.decode(), file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: parse the arguments declared above and generate
    # the thumbnail.
    args = parser.parse_args()
    generate_thumbnail(args.in_filename, args.out_filename, args.time, args.width)
|
28
examples/read_frame_as_jpeg.py
Executable file
28
examples/read_frame_as_jpeg.py
Executable file
@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
import argparse
|
||||
import ffmpeg
|
||||
import sys
|
||||
|
||||
|
||||
# CLI for dumping one frame of a video as jpeg to stdout.
parser = argparse.ArgumentParser(
    description='Read individual video frame into memory as jpeg and write to stdout')
parser.add_argument('in_filename', help='Input filename')
# Parse the frame number as an int so invalid input fails at the CLI
# boundary with a clear argparse error, instead of being interpolated
# verbatim into the ffmpeg ``select`` filter expression downstream.
parser.add_argument('frame_num', type=int, help='Frame number')
|
||||
|
||||
|
||||
def read_frame_as_jpeg(in_filename, frame_num):
    """Return frame number ``frame_num`` of ``in_filename`` as jpeg bytes.

    Uses the ``select`` filter to keep only frames with index >= frame_num,
    emits a single frame (``vframes=1``) as mjpeg to a pipe, and returns the
    captured stdout bytes.
    """
    select_expr = 'gte(n,{})'.format(frame_num)
    stream = ffmpeg.input(in_filename).filter_('select', select_expr)
    out, err = stream.output(
        'pipe:', vframes=1, format='image2', vcodec='mjpeg',
    ).run(capture_stdout=True)
    return out
|
||||
|
||||
|
||||
if __name__ == '__main__':
    args = parser.parse_args()
    out = read_frame_as_jpeg(args.in_filename, args.frame_num)
    # ``out`` is raw jpeg bytes; write them through the binary buffer so
    # stdout can be redirected to a file without text-mode mangling.
    # NOTE(review): ``sys.stdout.buffer`` exists on Python 3 only — confirm
    # whether Python 2 support is still intended for this example.
    sys.stdout.buffer.write(out)
|
@ -1,2 +1,4 @@
|
||||
ffmpeg-python
|
||||
gevent
|
||||
google-cloud-speech
|
||||
tqdm
|
||||
|
130
examples/show_progress.py
Executable file
130
examples/show_progress.py
Executable file
@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals, print_function
|
||||
from tqdm import tqdm
|
||||
import argparse
|
||||
import contextlib
|
||||
import ffmpeg
|
||||
import gevent
|
||||
import gevent.monkey; gevent.monkey.patch_all(thread=False)
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
import tempfile
|
||||
import textwrap
|
||||
|
||||
|
||||
# CLI definition. The description is written as an indented triple-quoted
# literal; textwrap.dedent strips the common leading whitespace so --help
# shows it flush-left.
parser = argparse.ArgumentParser(description=textwrap.dedent('''\
    Process video and report and show progress bar.

    This is an example of using the ffmpeg `-progress` option with a
    unix-domain socket to report progress in the form of a progress
    bar.

    The video processing simply consists of converting the video to
    sepia colors, but the same pattern can be applied to other use
    cases.
'''))

parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _tmpdir_scope():
    """Yield a freshly created temporary directory; remove it on exit.

    The directory (and anything written into it) is deleted even if the
    body of the ``with`` block raises.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
|
||||
|
||||
|
||||
def _do_watch_progress(filename, sock, handler):
    """Function to run in a separate gevent greenlet to read progress
    events from a unix-domain socket.

    Accepts a single connection on ``sock`` and parses the stream as
    newline-delimited ``key=value`` pairs, calling ``handler(key, value)``
    for each complete line.

    NOTE(review): ``filename`` is accepted but unused in this body.
    """
    connection, client_address = sock.accept()
    # Accumulate raw bytes; a "line" is only processed once its trailing
    # newline has arrived.
    data = b''
    try:
        while True:
            more_data = connection.recv(16)
            if not more_data:
                # Peer closed the connection — no more progress events.
                break
            data += more_data
            lines = data.split(b'\n')
            # All elements except the last are complete lines.
            for line in lines[:-1]:
                line = line.decode()
                parts = line.split('=')
                # Tolerate malformed lines: missing key/value become None.
                key = parts[0] if len(parts) > 0 else None
                value = parts[1] if len(parts) > 1 else None
                handler(key, value)
            # Keep the trailing partial line for the next recv().
            data = lines[-1]
    finally:
        connection.close()
|
||||
|
||||
|
||||
@contextlib.contextmanager
def _watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket filename is yielded from the context manager and the
    socket is closed when the context manager is exited.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value``
            argument. (The example ``show_progress`` below uses tqdm)

    Yields:
        socket_filename: the name of the socket file.
    """
    # The socket file lives in a throwaway directory that _tmpdir_scope
    # deletes on exit, so no explicit unlink is needed.
    with _tmpdir_scope() as tmpdir:
        socket_filename = os.path.join(tmpdir, 'sock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with contextlib.closing(sock):
            sock.bind(socket_filename)
            sock.listen(1)
            # Reading happens concurrently in a greenlet while the caller
            # runs ffmpeg with ``-progress unix://<socket_filename>``.
            child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
            try:
                yield socket_filename
            except:
                # Deliberately broad: whatever interrupted the caller, kill
                # the reader greenlet before re-raising the original error.
                gevent.kill(child)
                raise
|
||||
|
||||
|
||||
|
||||
@contextlib.contextmanager
def show_progress(total_duration):
    """Create a unix-domain socket to watch progress and render tqdm
    progress bar."""
    with tqdm(total=round(total_duration, 2)) as bar:
        def handler(key, value):
            if key == 'out_time_ms':
                # The value is divided by 1e6 (i.e. treated as microseconds
                # despite the "_ms" suffix) to obtain seconds.
                time = round(float(value) / 1000000., 2)
                # tqdm's update() is incremental, so pass the delta from the
                # bar's current position (bar.n).
                bar.update(time - bar.n)
            elif key == 'progress' and value == 'end':
                # Snap the bar to 100% on completion to absorb rounding.
                bar.update(bar.total - bar.n)
        with _watch_progress(handler) as socket_filename:
            yield socket_filename
|
||||
|
||||
|
||||
if __name__ == '__main__':
    args = parser.parse_args()
    # Total duration (seconds) from ffprobe sets the progress bar's range.
    total_duration = float(ffmpeg.probe(args.in_filename)['format']['duration'])

    with show_progress(total_duration) as socket_filename:
        # See https://ffmpeg.org/ffmpeg-filters.html#Examples-44
        sepia_values = [.393, .769, .189, 0, .349, .686, .168, 0, .272, .534, .131]
        try:
            (ffmpeg
                .input(args.in_filename)
                .colorchannelmixer(*sepia_values)
                # ffmpeg streams progress key=value events to this socket.
                .global_args('-progress', 'unix://{}'.format(socket_filename))
                .output(args.out_filename)
                .overwrite_output()
                .run(capture_stdout=True, capture_stderr=True)
            )
        except ffmpeg.Error as e:
            # BUG FIX: ``e.stderr`` is bytes; decode it so ffmpeg's error
            # output prints as readable text instead of a one-line bytes
            # repr (consistent with get_video_thumbnail.py).
            print(e.stderr.decode(), file=sys.stderr)
            sys.exit(1)
|
||||
|
@ -1,13 +1,11 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from __future__ import unicode_literals, print_function
|
||||
from google.cloud import speech
|
||||
from google.cloud.speech import enums
|
||||
from google.cloud.speech import types
|
||||
import argparse
|
||||
import ffmpeg
|
||||
import logging
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
@ -21,21 +19,17 @@ parser.add_argument('in_filename', help='Input filename (`-` for stdin)')
|
||||
|
||||
|
||||
def decode_audio(in_filename, **input_kwargs):
|
||||
p = subprocess.Popen(
|
||||
(ffmpeg
|
||||
try:
|
||||
out, err = (ffmpeg
|
||||
.input(in_filename, **input_kwargs)
|
||||
.output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
|
||||
.overwrite_output()
|
||||
.compile()
|
||||
),
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE
|
||||
)
|
||||
out = p.communicate()
|
||||
if p.returncode != 0:
|
||||
sys.stderr.write(out[1])
|
||||
.run(capture_stdout=True, capture_stderr=True)
|
||||
)
|
||||
except ffmpeg.Error as e:
|
||||
print(e.stderr, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return out[0]
|
||||
return out
|
||||
|
||||
|
||||
def get_transcripts(audio_data):
|
||||
|
31
examples/video_info.py
Executable file
31
examples/video_info.py
Executable file
@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals, print_function
|
||||
import argparse
|
||||
import ffmpeg
|
||||
import sys
|
||||
|
||||
|
||||
# CLI for printing basic stream information about a video file.
parser = argparse.ArgumentParser(description='Get video information')
parser.add_argument('in_filename', help='Input filename')


if __name__ == '__main__':
    args = parser.parse_args()

    try:
        probe = ffmpeg.probe(args.in_filename)
    except ffmpeg.Error as e:
        # BUG FIX: ``e.stderr`` is bytes; decode it so ffprobe's error
        # output prints as readable text instead of a bytes repr
        # (consistent with get_video_thumbnail.py).
        print(e.stderr.decode(), file=sys.stderr)
        sys.exit(1)

    # Pick the first stream tagged as video; a file may have none.
    video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
    if video_stream is None:
        print('No video stream found', file=sys.stderr)
        sys.exit(1)

    # ffprobe reports these as strings; convert to int for display.
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    num_frames = int(video_stream['nb_frames'])
    print('width: {}'.format(width))
    print('height: {}'.format(height))
    print('num_frames: {}'.format(num_frames))
|
@ -1,10 +1,9 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .dag import get_outgoing_edges, topo_sort
|
||||
from ._utils import basestring
|
||||
from builtins import str
|
||||
from functools import reduce
|
||||
from past.builtins import basestring
|
||||
import collections
|
||||
import copy
|
||||
import operator
|
||||
import subprocess
|
||||
@ -126,6 +125,11 @@ def _get_output_args(node, stream_name_map):
|
||||
args += ['-b:v', str(kwargs.pop('video_bitrate'))]
|
||||
if 'audio_bitrate' in kwargs:
|
||||
args += ['-b:a', str(kwargs.pop('audio_bitrate'))]
|
||||
if 'video_size' in kwargs:
|
||||
video_size = kwargs.pop('video_size')
|
||||
if not isinstance(video_size, basestring) and isinstance(video_size, collections.Iterable):
|
||||
video_size = '{}x{}'.format(video_size[0], video_size[1])
|
||||
args += ['-video_size', video_size]
|
||||
args += _convert_kwargs_to_cmd_line_args(kwargs)
|
||||
args += [filename]
|
||||
return args
|
||||
|
@ -237,7 +237,6 @@ def test_filter_asplit():
|
||||
]
|
||||
|
||||
|
||||
|
||||
def test__output__bitrate():
|
||||
args = (
|
||||
ffmpeg
|
||||
@ -248,6 +247,17 @@ def test__output__bitrate():
|
||||
assert args == ['-i', 'in', '-b:v', '1000', '-b:a', '200', 'out']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('video_size', [(320, 240), '320x240'])
def test__output__video_size(video_size):
    # Both the tuple form and the pre-formatted "WxH" string form of
    # ``video_size`` must produce the identical ``-video_size 320x240``
    # command-line argument.
    args = (
        ffmpeg
        .input('in')
        .output('out', video_size=video_size)
        .get_args()
    )
    assert args == ['-i', 'in', '-video_size', '320x240', 'out']
|
||||
|
||||
|
||||
def test_filter_normal_arg_escape():
|
||||
"""Test string escaping of normal filter args (e.g. ``font`` param of ``drawtext`` filter)."""
|
||||
def _get_drawtext_font_repr(font):
|
||||
|
Loading…
x
Reference in New Issue
Block a user