Compare commits

..

No commits in common. "master" and "v0.1.8" have entirely different histories.

73 changed files with 2345 additions and 4951 deletions

View File

@ -1,45 +0,0 @@
name: CI
on:
- push
- pull_request
jobs:
test:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
python-version:
- "2.7"
- "3.5"
- "3.6"
- "3.7"
- "3.8"
- "3.9"
- "3.10"
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install ffmpeg
run: |
sudo apt update
sudo apt install ffmpeg
- name: Setup pip + tox
run: |
python -m pip install --upgrade \
"pip==20.3.4; python_version < '3.6'" \
"pip==21.3.1; python_version >= '3.6'"
python -m pip install tox==3.24.5 tox-gh-actions==2.9.1
- name: Test with tox
run: tox
black:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- name: Black
run: |
# TODO: use standard `psf/black` action after dropping Python 2 support.
pip install black==21.12b0 click==8.0.2 # https://stackoverflow.com/questions/71673404
black ffmpeg --check --color --diff

1
.gitignore vendored
View File

@ -5,4 +5,3 @@ dist/
ffmpeg/tests/sample_data/out*.mp4 ffmpeg/tests/sample_data/out*.mp4
ffmpeg_python.egg-info/ ffmpeg_python.egg-info/
venv* venv*
build/

37
.travis.yml Normal file
View File

@ -0,0 +1,37 @@
language: python
before_install:
- >
[ -f ffmpeg-release/ffmpeg ] || (
curl -O https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-64bit-static.tar.xz &&
mkdir -p ffmpeg-release &&
tar Jxf ffmpeg-release-64bit-static.tar.xz --strip-components=1 -C ffmpeg-release
)
matrix:
include:
- python: 2.7
env:
- TOX_ENV=py27
- python: 3.3
env:
- TOX_ENV=py33
- python: 3.4
env:
- TOX_ENV=py34
- python: 3.5
env:
- TOX_ENV=py35
- python: 3.6
env:
- TOX_ENV=py36
- python: pypy
env:
- TOX_ENV=pypy
install:
- pip install tox
script:
- export PATH=$(readlink -f ffmpeg-release):$PATH
- tox -e $TOX_ENV
cache:
directories:
- .tox
- ffmpeg-release

208
LICENSE
View File

@ -1,201 +1,13 @@
Apache License Copyright 2017 Karl Kroening
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
1. Definitions. http://www.apache.org/licenses/LICENSE-2.0
"License" shall mean the terms and conditions for use, reproduction, Unless required by applicable law or agreed to in writing, software
and distribution as defined by Sections 1 through 9 of this document. distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"Licensor" shall mean the copyright owner or entity authorized by See the License for the specific language governing permissions and
the copyright owner that is granting the License. limitations under the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 Karl Kroening
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

235
README.md
View File

@ -1,21 +1,16 @@
# ffmpeg-python: Python bindings for FFmpeg # ffmpeg-python: Python bindings for FFmpeg
[![CI][ci-badge]][ci] [![Build status](https://travis-ci.org/kkroening/ffmpeg-python.svg?branch=master)](https://travis-ci.org/kkroening/ffmpeg-python)
[ci-badge]: https://github.com/kkroening/ffmpeg-python/actions/workflows/ci.yml/badge.svg
[ci]: https://github.com/kkroening/ffmpeg-python/actions/workflows/ci.yml
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/formula.png" alt="ffmpeg-python logo" width="60%" />
## Overview ## Overview
There are tons of Python FFmpeg wrappers out there but they seem to lack complex filter support. `ffmpeg-python` works well for simple as well as complex signal graphs. There are tons of Python FFmpeg wrappers out there but they seem to lack complex filter support. `ffmpeg-python` works well for simple as well as complex signal graphs.
## Quickstart ## Quickstart
Flip a video horizontally: Flip a video horizontally:
```python ```
import ffmpeg import ffmpeg
stream = ffmpeg.input('input.mp4') stream = ffmpeg.input('input.mp4')
stream = ffmpeg.hflip(stream) stream = ffmpeg.hflip(stream)
@ -24,10 +19,9 @@ ffmpeg.run(stream)
``` ```
Or if you prefer a fluent interface: Or if you prefer a fluent interface:
```python ```
import ffmpeg import ffmpeg
( (ffmpeg
ffmpeg
.input('input.mp4') .input('input.mp4')
.hflip() .hflip()
.output('output.mp4') .output('output.mp4')
@ -35,33 +29,35 @@ import ffmpeg
) )
``` ```
## [API reference](https://kkroening.github.io/ffmpeg-python/)
## Complex filter graphs ## Complex filter graphs
FFmpeg is extremely powerful, but its command-line interface gets really complicated rather quickly - especially when working with signal graphs and doing anything more than trivial. FFmpeg is extremely powerful, but its command-line interface gets really complicated really quickly - especially when working with signal graphs and doing anything more than trivial.
Take for example a signal graph that looks like this: Take for example a signal graph that looks like this:
![Signal graph](https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/graph1.png) ![Signal graph](https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/graph1.png)
The corresponding command-line arguments are pretty gnarly: The corresponding command-line arguments are pretty gnarly:
```bash ```
ffmpeg -i input.mp4 -i overlay.png -filter_complex "[0]trim=start_frame=10:end_frame=20[v0];\ ffmpeg -i input.mp4 \
[0]trim=start_frame=30:end_frame=40[v1];[v0][v1]concat=n=2[v2];[1]hflip[v3];\ -filter_complex "\
[v2][v3]overlay=eof_action=repeat[v4];[v4]drawbox=50:50:120:120:red:t=5[v5]"\ [0]trim=start_frame=10:end_frame=20[v0];\
-map [v5] output.mp4 [0]trim=start_frame=30:end_frame=40[v1];\
[v0][v1]concat=n=2[v2];\
[1]hflip[v3];\
[v2][v3]overlay=eof_action=repeat[v4];\
[v4]drawbox=50:50:120:120:red:t=5[v5]"\
-map [v5] output.mp4
``` ```
Maybe this looks great to you, but if you're not an FFmpeg command-line expert, it probably looks alien. Maybe this looks great to you, but if you're not an FFmpeg command-line expert, it probably looks alien.
If you're like me and find Python to be powerful and readable, it's easier with `ffmpeg-python`: If you're like me and find Python to be powerful and readable, it's easy with `ffmpeg-python`:
```python ```
import ffmpeg import ffmpeg
in_file = ffmpeg.input('input.mp4') in_file = ffmpeg.input('input.mp4')
overlay_file = ffmpeg.input('overlay.png') overlay_file = ffmpeg.input('overlay.png')
( (ffmpeg
ffmpeg
.concat( .concat(
in_file.trim(start_frame=10, end_frame=20), in_file.trim(start_frame=10, end_frame=20),
in_file.trim(start_frame=30, end_frame=40), in_file.trim(start_frame=30, end_frame=40),
@ -73,215 +69,74 @@ overlay_file = ffmpeg.input('overlay.png')
) )
``` ```
`ffmpeg-python` takes care of running `ffmpeg` with the command-line arguments that correspond to the above filter diagram, in familiar Python terms. `ffmpeg-python` takes care of running `ffmpeg` with the command-line arguments that correspond to the above filter diagram, and it's easy to see what's going on and make changes as needed.
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/screenshot.png" alt="Screenshot" align="middle" width="60%" /> <img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/screenshot.png" alt="Screenshot" align="middle" width="60%" />
Real-world signal graphs can get a heck of a lot more complex, but `ffmpeg-python` handles arbitrarily large (directed-acyclic) signal graphs. Real-world signal graphs can get a heck of a lot more complex, but `ffmpeg-python` handles them with ease.
## Installation ## Installation
### Installing `ffmpeg-python` The easiest way to acquire the latest version of `ffmpeg-python` is through pip:
The latest version of `ffmpeg-python` can be acquired via a typical pip install: ```
```bash
pip install ffmpeg-python pip install ffmpeg-python
``` ```
Or the source can be cloned and installed from locally: It's also possible to clone the source and put it on your python path (`$PYTHONPATH`, `sys.path`, etc.):
```bash ```
git clone git@github.com:kkroening/ffmpeg-python.git > git clone git@github.com:kkroening/ffmpeg-python.git
pip install -e ./ffmpeg-python > export PYTHONPATH=${PYTHONPATH}:ffmpeg-python
> python
>>> import ffmpeg
``` ```
> **Note**: `ffmpeg-python` makes no attempt to download/install FFmpeg, as `ffmpeg-python` is merely a pure-Python wrapper - whereas FFmpeg installation is platform-dependent/environment-specific, and is thus the responsibility of the user, as described below. ## [API Reference](https://kkroening.github.io/ffmpeg-python/)
### Installing FFmpeg API documentation is automatically generated from python docstrings and hosted on github pages: https://kkroening.github.io/ffmpeg-python/
Before using `ffmpeg-python`, FFmpeg must be installed and accessible via the `$PATH` environment variable.
There are a variety of ways to install FFmpeg, such as the [official download links](https://ffmpeg.org/download.html), or using your package manager of choice (e.g. `sudo apt install ffmpeg` on Debian/Ubuntu, `brew install ffmpeg` on OS X, etc.).
Regardless of how FFmpeg is installed, you can check if your environment path is set correctly by running the `ffmpeg` command from the terminal, in which case the version information should appear, as in the following example (truncated for brevity):
Alternatively, standard python help is available, such as at the python REPL prompt as follows:
``` ```
$ ffmpeg import ffmpeg
ffmpeg version 4.2.4-1ubuntu0.1 Copyright (c) 2000-2020 the FFmpeg developers help(ffmpeg)
built with gcc 9 (Ubuntu 9.3.0-10ubuntu2)
``` ```
> **Note**: The actual version information displayed here may vary from one system to another; but if a message such as `ffmpeg: command not found` appears instead of the version information, FFmpeg is not properly installed.
## [Examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples)
When in doubt, take a look at the [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples) to see if there's something that's close to whatever you're trying to do.
Here are a few:
- [Convert video to numpy array](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#convert-video-to-numpy-array)
- [Generate thumbnail for video](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#generate-thumbnail-for-video)
- [Read raw PCM audio via pipe](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#convert-sound-to-raw-pcm-audio)
- [JupyterLab/Notebook stream editor](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#jupyter-stream-editor)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-demo.gif" alt="jupyter demo" width="75%" />
- [Tensorflow/DeepDream streaming](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#tensorflow-streaming)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/dream.png" alt="deep dream streaming" width="40%" />
See the [Examples README](https://github.com/kkroening/ffmpeg-python/tree/master/examples) for additional examples.
## Custom Filters ## Custom Filters
Don't see the filter you're looking for? While `ffmpeg-python` includes shorthand notation for some of the most commonly used filters (such as `concat`), all filters can be referenced via the `.filter` operator: Don't see the filter you're looking for? `ffmpeg-python` is a work in progress, but it's easy to use any arbitrary ffmpeg filter:
```python ```
stream = ffmpeg.input('dummy.mp4') stream = ffmpeg.input('dummy.mp4')
stream = ffmpeg.filter(stream, 'fps', fps=25, round='up') stream = ffmpeg.filter_(stream, 'fps', fps=25, round='up')
stream = ffmpeg.output(stream, 'dummy2.mp4') stream = ffmpeg.output(stream, 'dummy2.mp4')
ffmpeg.run(stream) ffmpeg.run(stream)
``` ```
Or fluently: Or fluently:
```python ```
( (ffmpeg
ffmpeg
.input('dummy.mp4') .input('dummy.mp4')
.filter('fps', fps=25, round='up') .filter_('fps', fps=25, round='up')
.output('dummy2.mp4') .output('dummy2.mp4')
.run() .run()
) )
``` ```
**Special option names:** When in doubt, refer to the [existing filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py) and/or the [official ffmpeg documentation](https://ffmpeg.org/ffmpeg-filters.html).
Arguments with special names such as `-qscale:v` (variable bitrate), `-b:v` (constant bitrate), etc. can be specified as a keyword-args dictionary as follows:
```python
(
ffmpeg
.input('in.mp4')
.output('out.mp4', **{'qscale:v': 3})
.run()
)
```
**Multiple inputs:**
Filters that take multiple input streams can be used by passing the input streams as an array to `ffmpeg.filter`:
```python
main = ffmpeg.input('main.mp4')
logo = ffmpeg.input('logo.png')
(
ffmpeg
.filter([main, logo], 'overlay', 10, 10)
.output('out.mp4')
.run()
)
```
**Multiple outputs:**
Filters that produce multiple outputs can be used with `.filter_multi_output`:
```python
split = (
ffmpeg
.input('in.mp4')
.filter_multi_output('split') # or `.split()`
)
(
ffmpeg
.concat(split[0], split[1].reverse())
.output('out.mp4')
.run()
)
```
(In this particular case, `.split()` is the equivalent shorthand, but the general approach works for other multi-output filters)
**String expressions:**
Expressions to be interpreted by ffmpeg can be included as string parameters and reference any special ffmpeg variable names:
```python
(
ffmpeg
.input('in.mp4')
.filter('crop', 'in_w-2*10', 'in_h-2*20')
.input('out.mp4')
)
```
<br />
When in doubt, refer to the [existing filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py), [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples), and/or the [official ffmpeg documentation](https://ffmpeg.org/ffmpeg-filters.html).
## Frequently asked questions
**Why do I get an import/attribute/etc. error from `import ffmpeg`?**
Make sure you ran `pip install ffmpeg-python` and _**not**_ `pip install ffmpeg` (wrong) or `pip install python-ffmpeg` (also wrong).
**Why did my audio stream get dropped?**
Some ffmpeg filters drop audio streams, and care must be taken to preserve the audio in the final output. The ``.audio`` and ``.video`` operators can be used to reference the audio/video portions of a stream so that they can be processed separately and then re-combined later in the pipeline.
This dilemma is intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the way while users may refer to the official ffmpeg documentation as to why certain filters drop audio.
As usual, take a look at the [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples#audiovideo-pipeline) (*Audio/video pipeline* in particular).
**How can I find out the used command line arguments?**
You can run `stream.get_args()` before `stream.run()` to retrieve the command line arguments that will be passed to `ffmpeg`. You can also run `stream.compile()` that also includes the `ffmpeg` executable as the first argument.
**How do I do XYZ?**
Take a look at each of the links in the [Additional Resources](https://kkroening.github.io/ffmpeg-python/) section at the end of this README. If you look everywhere and can't find what you're looking for and have a question that may be relevant to other users, you may open an issue asking how to do it, while providing a thorough explanation of what you're trying to do and what you've tried so far.
Issues not directly related to `ffmpeg-python` or issues asking others to write your code for you or how to do the work of solving a complex signal processing problem for you that's not relevant to other users will be closed.
That said, we hope to continue improving our documentation and provide a community of support for people using `ffmpeg-python` to do cool and exciting things.
## Contributing ## Contributing
<img align="right" src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/logo.png" alt="ffmpeg-python logo" width="20%" /> Feel free to report any bugs or feature requests.
One of the best things you can do to help make `ffmpeg-python` better is to answer [open questions](https://github.com/kkroening/ffmpeg-python/labels/question) in the issue tracker. The questions that are answered will be tagged and incorporated into the documentation, examples, and other learning resources. It should be fairly easy to use filters that aren't explicitly built into `ffmpeg-python` but if there's a feature or filter you'd really like to see included in the library, don't hesitate to open a feature request.
If you notice things that could be better in the documentation or overall development experience, please say so in the [issue tracker](https://github.com/kkroening/ffmpeg-python/issues). And of course, feel free to report any bugs or submit feature requests. Pull requests are welcome as well.
Pull requests are welcome as well, but it wouldn't hurt to touch base in the issue tracker or hop on the [Matrix chat channel](https://riot.im/app/#/room/#ffmpeg-python:matrix.org) first.
Anyone who fixes any of the [open bugs](https://github.com/kkroening/ffmpeg-python/labels/bug) or implements [requested enhancements](https://github.com/kkroening/ffmpeg-python/labels/enhancement) is a hero, but changes should include passing tests.
### Running tests
```bash
git clone git@github.com:kkroening/ffmpeg-python.git
cd ffmpeg-python
virtualenv venv
. venv/bin/activate # (OS X / Linux)
venv\bin\activate # (Windows)
pip install -e .[dev]
pytest
```
<br />
### Special thanks
- [Fabrice Bellard](https://bellard.org/)
- [The FFmpeg team](https://ffmpeg.org/donations.html)
- [Arne de Laat](https://github.com/153957)
- [Davide Depau](https://github.com/depau)
- [Dim](https://github.com/lloti)
- [Noah Stier](https://github.com/noahstier)
## Additional Resources ## Additional Resources
- [API Reference](https://kkroening.github.io/ffmpeg-python/) - [API Reference](https://kkroening.github.io/ffmpeg-python/)
- [Examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples)
- [Filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py) - [Filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py)
- [Tests](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/tests/test_ffmpeg.py)
- [FFmpeg Homepage](https://ffmpeg.org/) - [FFmpeg Homepage](https://ffmpeg.org/)
- [FFmpeg Documentation](https://ffmpeg.org/ffmpeg.html) - [FFmpeg Documentation](https://ffmpeg.org/ffmpeg.html)
- [FFmpeg Filters Documentation](https://ffmpeg.org/ffmpeg-filters.html) - [FFmpeg Filters Documentation](https://ffmpeg.org/ffmpeg-filters.html)
- [Test cases](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/tests/test_ffmpeg.py)
- [Issue tracker](https://github.com/kkroening/ffmpeg-python/issues)
- Matrix Chat: [#ffmpeg-python:matrix.org](https://riot.im/app/#/room/#ffmpeg-python:matrix.org)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

View File

@ -1,4 +1,4 @@
# Sphinx build info version 1 # Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: f3635c9edf6e9bff1735d57d26069ada config: d3019c15b90af9d4beabe6f0fbc238a9
tags: 645f666f9bcd5a90fca523b33c5a78b7 tags: 645f666f9bcd5a90fca523b33c5a78b7

Binary file not shown.

After

Width:  |  Height:  |  Size: 673 B

View File

@ -4,7 +4,7 @@
* *
* Sphinx stylesheet -- basic theme. * Sphinx stylesheet -- basic theme.
* *
* :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details. * :license: BSD, see LICENSE for details.
* *
*/ */
@ -81,26 +81,10 @@ div.sphinxsidebar input {
font-size: 1em; font-size: 1em;
} }
div.sphinxsidebar #searchbox form.search {
overflow: hidden;
}
div.sphinxsidebar #searchbox input[type="text"] { div.sphinxsidebar #searchbox input[type="text"] {
float: left; width: 170px;
width: 80%;
padding: 0.25em;
box-sizing: border-box;
} }
div.sphinxsidebar #searchbox input[type="submit"] {
float: left;
width: 20%;
border-left: none;
padding: 0.25em;
box-sizing: border-box;
}
img { img {
border: 0; border: 0;
max-width: 100%; max-width: 100%;
@ -215,11 +199,6 @@ table.modindextable td {
/* -- general body styles --------------------------------------------------- */ /* -- general body styles --------------------------------------------------- */
div.body {
min-width: 450px;
max-width: 800px;
}
div.body p, div.body dd, div.body li, div.body blockquote { div.body p, div.body dd, div.body li, div.body blockquote {
-moz-hyphens: auto; -moz-hyphens: auto;
-ms-hyphens: auto; -ms-hyphens: auto;
@ -231,16 +210,6 @@ a.headerlink {
visibility: hidden; visibility: hidden;
} }
a.brackets:before,
span.brackets > a:before{
content: "[";
}
a.brackets:after,
span.brackets > a:after {
content: "]";
}
h1:hover > a.headerlink, h1:hover > a.headerlink,
h2:hover > a.headerlink, h2:hover > a.headerlink,
h3:hover > a.headerlink, h3:hover > a.headerlink,
@ -289,12 +258,6 @@ img.align-center, .figure.align-center, object.align-center {
margin-right: auto; margin-right: auto;
} }
img.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left { .align-left {
text-align: left; text-align: left;
} }
@ -303,10 +266,6 @@ img.align-default, .figure.align-default {
text-align: center; text-align: center;
} }
.align-default {
text-align: center;
}
.align-right { .align-right {
text-align: right; text-align: right;
} }
@ -373,16 +332,6 @@ table.docutils {
border-collapse: collapse; border-collapse: collapse;
} }
table.align-center {
margin-left: auto;
margin-right: auto;
}
table.align-default {
margin-left: auto;
margin-right: auto;
}
table caption span.caption-number { table caption span.caption-number {
font-style: italic; font-style: italic;
} }
@ -416,16 +365,6 @@ table.citation td {
border-bottom: none; border-bottom: none;
} }
th > p:first-child,
td > p:first-child {
margin-top: 0px;
}
th > p:last-child,
td > p:last-child {
margin-bottom: 0px;
}
/* -- figures --------------------------------------------------------------- */ /* -- figures --------------------------------------------------------------- */
div.figure { div.figure {
@ -466,13 +405,6 @@ table.field-list td, table.field-list th {
hyphens: manual; hyphens: manual;
} }
/* -- hlist styles ---------------------------------------------------------- */
table.hlist td {
vertical-align: top;
}
/* -- other body styles ----------------------------------------------------- */ /* -- other body styles ----------------------------------------------------- */
ol.arabic { ol.arabic {
@ -495,57 +427,11 @@ ol.upperroman {
list-style: upper-roman; list-style: upper-roman;
} }
li > p:first-child {
margin-top: 0px;
}
li > p:last-child {
margin-bottom: 0px;
}
dl.footnote > dt,
dl.citation > dt {
float: left;
}
dl.footnote > dd,
dl.citation > dd {
margin-bottom: 0em;
}
dl.footnote > dd:after,
dl.citation > dd:after {
content: "";
clear: both;
}
dl.field-list {
display: flex;
flex-wrap: wrap;
}
dl.field-list > dt {
flex-basis: 20%;
font-weight: bold;
word-break: break-word;
}
dl.field-list > dt:after {
content: ":";
}
dl.field-list > dd {
flex-basis: 70%;
padding-left: 1em;
margin-left: 0em;
margin-bottom: 0em;
}
dl { dl {
margin-bottom: 15px; margin-bottom: 15px;
} }
dd > p:first-child { dd p {
margin-top: 0px; margin-top: 0px;
} }
@ -559,14 +445,10 @@ dd {
margin-left: 30px; margin-left: 30px;
} }
dt:target, span.highlighted { dt:target, .highlighted {
background-color: #fbe54e; background-color: #fbe54e;
} }
rect.highlighted {
fill: #fbe54e;
}
dl.glossary dt { dl.glossary dt {
font-weight: bold; font-weight: bold;
font-size: 1.1em; font-size: 1.1em;
@ -618,12 +500,6 @@ dl.glossary dt {
font-style: oblique; font-style: oblique;
} }
.classifier:before {
font-style: normal;
margin: 0.5em;
content: ":";
}
abbr, acronym { abbr, acronym {
border-bottom: dotted 1px; border-bottom: dotted 1px;
cursor: help; cursor: help;

Binary file not shown.

After

Width:  |  Height:  |  Size: 756 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 829 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 641 B

View File

@ -4,7 +4,7 @@
* *
* Sphinx JavaScript utilities for all documentation. * Sphinx JavaScript utilities for all documentation.
* *
* :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details. * :license: BSD, see LICENSE for details.
* *
*/ */
@ -45,7 +45,7 @@ jQuery.urlencode = encodeURIComponent;
* it will always return arrays of strings for the value parts. * it will always return arrays of strings for the value parts.
*/ */
jQuery.getQueryParameters = function(s) { jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined') if (typeof s == 'undefined')
s = document.location.search; s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&'); var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {}; var result = {};
@ -66,54 +66,29 @@ jQuery.getQueryParameters = function(s) {
* span elements with the given class name. * span elements with the given class name.
*/ */
jQuery.fn.highlightText = function(text, className) { jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) { function highlight(node) {
if (node.nodeType === 3) { if (node.nodeType == 3) {
var val = node.nodeValue; var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text); var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
!jQuery(node.parentNode).hasClass(className) && var span = document.createElement("span");
!jQuery(node.parentNode).hasClass("nohighlight")) { span.className = className;
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length))); span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore( node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)), document.createTextNode(val.substr(pos + text.length)),
node.nextSibling)); node.nextSibling));
node.nodeValue = val.substr(0, pos); node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
var bbox = node.parentElement.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
addItems.push({
"parent": node.parentNode,
"target": rect});
}
} }
} }
else if (!jQuery(node).is("button, select, textarea")) { else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() { jQuery.each(node.childNodes, function() {
highlight(this, addItems); highlight(this);
}); });
} }
} }
var addItems = []; return this.each(function() {
var result = this.each(function() { highlight(this);
highlight(this, addItems);
}); });
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
}; };
/* /*
@ -149,30 +124,28 @@ var Documentation = {
this.fixFirefoxAnchorBug(); this.fixFirefoxAnchorBug();
this.highlightSearchWords(); this.highlightSearchWords();
this.initIndexTable(); this.initIndexTable();
if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
this.initOnKeyListeners();
}
}, },
/** /**
* i18n support * i18n support
*/ */
TRANSLATIONS : {}, TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown', LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions // gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext) // can safely bound to a different name (_ = Documentation.gettext)
gettext : function(string) { gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string]; var translated = Documentation.TRANSLATIONS[string];
if (typeof translated === 'undefined') if (typeof translated == 'undefined')
return string; return string;
return (typeof translated === 'string') ? translated : translated[0]; return (typeof translated == 'string') ? translated : translated[0];
}, },
ngettext : function(singular, plural, n) { ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular]; var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated === 'undefined') if (typeof translated == 'undefined')
return (n == 1) ? singular : plural; return (n == 1) ? singular : plural;
return translated[Documentation.PLURALEXPR(n)]; return translated[Documentation.PLURALEXPR(n)];
}, },
@ -207,7 +180,7 @@ var Documentation = {
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/ */
fixFirefoxAnchorBug : function() { fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla) if (document.location.hash)
window.setTimeout(function() { window.setTimeout(function() {
document.location.href += ''; document.location.href += '';
}, 10); }, 10);
@ -243,7 +216,7 @@ var Documentation = {
var src = $(this).attr('src'); var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7); var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle(); $('tr.cg-' + idnum).toggle();
if (src.substr(-9) === 'minus.png') if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
@ -275,7 +248,7 @@ var Documentation = {
var path = document.location.pathname; var path = document.location.pathname;
var parts = path.split(/\//); var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this === '..') if (this == '..')
parts.pop(); parts.pop();
}); });
var url = parts.join('/'); var url = parts.join('/');

View File

@ -1,10 +0,0 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
VERSION: '',
LANGUAGE: 'None',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false
};

Binary file not shown.

After

Width:  |  Height:  |  Size: 222 B

BIN
doc/html/_static/down.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 B

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -1,297 +0,0 @@
/*
* language_data.js
* ~~~~~~~~~~~~~~~~
*
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
* :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
/* Non-minified version JS is _stemmer.js if file is provided */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
}

View File

@ -4,7 +4,7 @@
* *
* Sphinx stylesheet -- nature theme. * Sphinx stylesheet -- nature theme.
* *
* :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details. * :license: BSD, see LICENSE for details.
* *
*/ */
@ -16,7 +16,7 @@
body { body {
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 100%; font-size: 100%;
background-color: #fff; background-color: #111;
color: #555; color: #555;
margin: 0; margin: 0;
padding: 0; padding: 0;
@ -125,9 +125,12 @@ div.sphinxsidebar input {
font-size: 1em; font-size: 1em;
} }
div.sphinxsidebar .searchformwrapper { div.sphinxsidebar input[type=text]{
margin-left: 20px;
}
div.sphinxsidebar input[type=submit]{
margin-left: 20px; margin-left: 20px;
margin-right: 20px;
} }
/* -- body styles ----------------------------------------------------------- */ /* -- body styles ----------------------------------------------------------- */

View File

@ -1,54 +1,331 @@
/* /*
* searchtools.js * searchtools.js_t
* ~~~~~~~~~~~~~~~~ * ~~~~~~~~~~~~~~~~
* *
* Sphinx JavaScript utilities for the full-text search. * Sphinx JavaScript utilities for the full-text search.
* *
* :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details. * :license: BSD, see LICENSE for details.
* *
*/ */
if (!Scorer) {
/**
* Simple result scoring code.
*/
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [filename, title, anchor, descr, score]
// and returns the new score.
/*
score: function(result) {
return result[4];
},
*/
// query matches the full name of an object /* Non-minified version JS is _stemmer.js if file is provided */
objNameMatch: 11, /**
// or matches in the last dotted part of the object name * Porter Stemmer
objPartialMatch: 6, */
// Additive scores depending on the priority of the object var Stemmer = function() {
objPrio: {0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5}, // used to be unimportantResults
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title var step2list = {
title: 15, ational: 'ate',
partialTitle: 7, tional: 'tion',
// query found in terms enci: 'ence',
term: 5, anci: 'ance',
partialTerm: 2 izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
}; };
}
if (!splitQuery) { var step3list = {
function splitQuery(query) { icate: 'ic',
return query.split(/\s+/); ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
} }
} }
/**
* Simple result scoring code.
*/
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [filename, title, anchor, descr, score]
// and returns the new score.
/*
score: function(result) {
return result[4];
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5}, // used to be unimportantResults
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
// query found in terms
term: 5
};
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
}
/** /**
* Search Module * Search Module
*/ */
@ -58,14 +335,6 @@ var Search = {
_queued_query : null, _queued_query : null,
_pulse_status : -1, _pulse_status : -1,
htmlToText : function(htmlString) {
var htmlElement = document.createElement('span');
htmlElement.innerHTML = htmlString;
$(htmlElement).find('.headerlink').remove();
docContent = $(htmlElement).find('[role=main]')[0];
return docContent.textContent || docContent.innerText;
},
init : function() { init : function() {
var params = $.getQueryParameters(); var params = $.getQueryParameters();
if (params.q) { if (params.q) {
@ -130,7 +399,7 @@ var Search = {
this.out = $('#search-results'); this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out); this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title); this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p class="search-summary">&nbsp;</p>').appendTo(this.out); this.status = $('<p style="display: none"></p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out); this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...')); $('#search-progress').text(_('Preparing search...'));
@ -148,6 +417,7 @@ var Search = {
*/ */
query : function(query) { query : function(query) {
var i; var i;
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
// stem the searchterms and add them to the correct list // stem the searchterms and add them to the correct list
var stemmer = new Stemmer(); var stemmer = new Stemmer();
@ -269,7 +539,8 @@ var Search = {
displayNextItem(); displayNextItem();
}); });
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) { } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
$.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX, var suffix = DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX;
$.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[5] + (item[5].slice(-suffix.length) === suffix ? '' : suffix),
dataType: "text", dataType: "text",
complete: function(jqxhr, textstatus) { complete: function(jqxhr, textstatus) {
var data = jqxhr.responseText; var data = jqxhr.responseText;
@ -319,13 +590,12 @@ var Search = {
for (var prefix in objects) { for (var prefix in objects) {
for (var name in objects[prefix]) { for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name; var fullname = (prefix ? prefix + '.' : '') + name;
var fullnameLower = fullname.toLowerCase() if (fullname.toLowerCase().indexOf(object) > -1) {
if (fullnameLower.indexOf(object) > -1) {
var score = 0; var score = 0;
var parts = fullnameLower.split('.'); var parts = fullname.split('.');
// check for different match types: exact matches of full name or // check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part) // "last name" (i.e. last dotted part)
if (fullnameLower == object || parts[parts.length - 1] == object) { if (fullname == object || parts[parts.length - 1] == object) {
score += Scorer.objNameMatch; score += Scorer.objNameMatch;
// matches in last name // matches in last name
} else if (parts[parts.length - 1].indexOf(object) > -1) { } else if (parts[parts.length - 1].indexOf(object) > -1) {
@ -392,19 +662,6 @@ var Search = {
{files: terms[word], score: Scorer.term}, {files: terms[word], score: Scorer.term},
{files: titleterms[word], score: Scorer.title} {files: titleterms[word], score: Scorer.title}
]; ];
// add support for partial matches
if (word.length > 2) {
for (var w in terms) {
if (w.match(word) && !terms[word]) {
_o.push({files: terms[w], score: Scorer.partialTerm})
}
}
for (var w in titleterms) {
if (w.match(word) && !titleterms[word]) {
_o.push({files: titleterms[w], score: Scorer.partialTitle})
}
}
}
// no match but word was a required one // no match but word was a required one
if ($u.every(_o, function(o){return o.files === undefined;})) { if ($u.every(_o, function(o){return o.files === undefined;})) {
@ -444,12 +701,8 @@ var Search = {
var valid = true; var valid = true;
// check if all requirements are matched // check if all requirements are matched
var filteredTermCount = // as search terms with length < 3 are discarded: ignore if (fileMap[file].length != searchterms.length)
searchterms.filter(function(term){return term.length > 2}).length continue;
if (
fileMap[file].length != searchterms.length &&
fileMap[file].length != filteredTermCount
) continue;
// ensure that none of the excluded terms is in the search result // ensure that none of the excluded terms is in the search result
for (i = 0; i < excluded.length; i++) { for (i = 0; i < excluded.length; i++) {
@ -480,8 +733,7 @@ var Search = {
* words. the first one is used to find the occurrence, the * words. the first one is used to find the occurrence, the
* latter for highlighting it. * latter for highlighting it.
*/ */
makeSearchSummary : function(htmlText, keywords, hlwords) { makeSearchSummary : function(text, keywords, hlwords) {
var text = Search.htmlToText(htmlText);
var textLower = text.toLowerCase(); var textLower = text.toLowerCase();
var start = 0; var start = 0;
$.each(keywords, function() { $.each(keywords, function() {

Binary file not shown.

After

Width:  |  Height:  |  Size: 214 B

BIN
doc/html/_static/up.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 203 B

View File

@ -0,0 +1,808 @@
/*
* websupport.js
* ~~~~~~~~~~~~~
*
* sphinx.websupport utilities for all documentation.
*
* :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
// jQuery plugin: auto-grow a <textarea> to fit its contents.
// While a textarea has focus, its height is re-measured every 500ms;
// the polling interval is cleared on blur.
(function($) {
  $.fn.autogrow = function() {
    return this.each(function() {
      var textarea = this;
      // Size once immediately so the initial content fits.
      $.fn.autogrow.resize(textarea);
      $(textarea)
      .focus(function() {
        // Poll while focused; the interval id is stashed on the DOM
        // node itself so blur() can find and clear it.
        textarea.interval = setInterval(function() {
          $.fn.autogrow.resize(textarea);
        }, 500);
      })
      .blur(function() {
        clearInterval(textarea.interval);
      });
    });
  };
  // Recompute the textarea's pixel height from its current value:
  // each '\n'-separated line contributes ceil(length / cols) visual
  // rows (minimum 1, via the `|| 1` fallback for empty lines), plus
  // one spare row.
  $.fn.autogrow.resize = function(textarea) {
    var lineHeight = parseInt($(textarea).css('line-height'), 10);
    var lines = textarea.value.split('\n');
    var columns = textarea.cols;
    var lineCount = 0;
    $.each(lines, function() {
      lineCount += Math.ceil(this.length / columns) || 1;
    });
    var height = lineHeight * (lineCount + 1);
    $(textarea).css('height', height);
  };
})(jQuery);
(function($) {
var comp, by;
function init() {
initEvents();
initComparator();
}
function initEvents() {
$(document).on("click", 'a.comment-close', function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
});
$(document).on("click", 'a.vote', function(event) {
event.preventDefault();
handleVote($(this));
});
$(document).on("click", 'a.reply', function(event) {
event.preventDefault();
openReply($(this).attr('id').substring(2));
});
$(document).on("click", 'a.close-reply', function(event) {
event.preventDefault();
closeReply($(this).attr('id').substring(2));
});
$(document).on("click", 'a.sort-option', function(event) {
event.preventDefault();
handleReSort($(this));
});
$(document).on("click", 'a.show-proposal', function(event) {
event.preventDefault();
showProposal($(this).attr('id').substring(2));
});
$(document).on("click", 'a.hide-proposal', function(event) {
event.preventDefault();
hideProposal($(this).attr('id').substring(2));
});
$(document).on("click", 'a.show-propose-change', function(event) {
event.preventDefault();
showProposeChange($(this).attr('id').substring(2));
});
$(document).on("click", 'a.hide-propose-change', function(event) {
event.preventDefault();
hideProposeChange($(this).attr('id').substring(2));
});
$(document).on("click", 'a.accept-comment', function(event) {
event.preventDefault();
acceptComment($(this).attr('id').substring(2));
});
$(document).on("click", 'a.delete-comment', function(event) {
event.preventDefault();
deleteComment($(this).attr('id').substring(2));
});
$(document).on("click", 'a.comment-markup', function(event) {
event.preventDefault();
toggleCommentMarkupBox($(this).attr('id').substring(2));
});
}
/**
 * Rebuild `comp`, the comparator used for sorting and inserting
 * comments, from the current `by` sort key, and refresh which
 * sort-option link is highlighted.
 *
 * A key of the form "asc<field>" sorts ascending on <field>; any
 * other key sorts descending on the key itself.
 */
function setComparator() {
  var ascending = by.substring(0, 3) == 'asc';
  if (ascending) {
    var field = by.substring(3);
    comp = function(left, right) { return left[field] - right[field]; };
  } else {
    comp = function(left, right) { return right[by] - left[by]; };
  }
  // Reset link styles, then mark the selected sort option as inactive.
  $('a.sel').attr('href', '#').removeClass('sel');
  $('a.by' + by).removeAttr('href').addClass('sel');
}
/**
 * Initialise the `by` sort key and, via setComparator(), the `comp`
 * comparator. Defaults to sorting by rating; if a `sortBy` cookie is
 * present, its value overrides the default.
 */
function initComparator() {
  by = 'rating'; // Default to sort by rating.
  // If the sortBy cookie is set, use that instead.
  if (document.cookie.length > 0) {
    var start = document.cookie.indexOf('sortBy=');
    if (start != -1) {
      start = start + 7;
      var end = document.cookie.indexOf(";", start);
      if (end == -1) {
        end = document.cookie.length;
      }
      // BUG FIX: this assignment used to sit inside the `end == -1`
      // branch, so the cookie value was only read when sortBy happened
      // to be the *last* cookie. Read it regardless of position.
      by = unescape(document.cookie.substring(start, end));
    }
  }
  setComparator();
}
/**
* Show a comment div.
*/
function show(id) {
$('#ao' + id).hide();
$('#ah' + id).show();
var context = $.extend({id: id}, opts);
var popup = $(renderTemplate(popupTemplate, context)).hide();
popup.find('textarea[name="proposal"]').hide();
popup.find('a.by' + by).addClass('sel');
var form = popup.find('#cf' + id);
form.submit(function(event) {
event.preventDefault();
addComment(form);
});
$('#s' + id).after(popup);
popup.slideDown('fast', function() {
getComments(id);
});
}
/**
 * Hide the comment popup for a node: swap the open/close affordance
 * links back, then slide the popup closed and remove it from the DOM
 * once the animation finishes.
 */
function hide(id) {
  var popup = $('#sc' + id);
  $('#ah' + id).hide();
  $('#ao' + id).show();
  popup.slideUp('fast', function() {
    popup.remove();
  });
}
/**
* Perform an ajax request to get comments for a node
* and insert the comments into the comments tree.
*/
function getComments(id) {
$.ajax({
type: 'GET',
url: opts.getCommentsURL,
data: {node: id},
success: function(data, textStatus, request) {
var ul = $('#cl' + id);
var speed = 100;
$('#cf' + id)
.find('textarea[name="proposal"]')
.data('source', data.source);
if (data.comments.length === 0) {
ul.html('<li>No comments yet.</li>');
ul.data('empty', true);
} else {
// If there are comments, sort them and put them in the list.
var comments = sortComments(data.comments);
speed = data.comments.length * 100;
appendComments(comments, ul);
ul.data('empty', false);
}
$('#cn' + id).slideUp(speed + 200);
ul.slideDown(speed);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem retrieving the comments.');
},
dataType: 'json'
});
}
/**
* Add a comment via ajax and insert the comment into the comment tree.
*/
function addComment(form) {
var node_id = form.find('input[name="node"]').val();
var parent_id = form.find('input[name="parent"]').val();
var text = form.find('textarea[name="comment"]').val();
var proposal = form.find('textarea[name="proposal"]').val();
if (text == '') {
showError('Please enter a comment.');
return;
}
// Disable the form that is being submitted.
form.find('textarea,input').attr('disabled', 'disabled');
// Send the comment to the server.
$.ajax({
type: "POST",
url: opts.addCommentURL,
dataType: 'json',
data: {
node: node_id,
parent: parent_id,
text: text,
proposal: proposal
},
success: function(data, textStatus, error) {
// Reset the form.
if (node_id) {
hideProposeChange(node_id);
}
form.find('textarea')
.val('')
.add(form.find('input'))
.removeAttr('disabled');
var ul = $('#cl' + (node_id || parent_id));
if (ul.data('empty')) {
$(ul).empty();
ul.data('empty', false);
}
insertComment(data.comment);
var ao = $('#ao' + node_id);
ao.find('img').attr({'src': opts.commentBrightImage});
if (node_id) {
// if this was a "root" comment, remove the commenting box
// (the user can get it back by reopening the comment popup)
$('#ca' + node_id).slideUp();
}
},
error: function(request, textStatus, error) {
form.find('textarea,input').removeAttr('disabled');
showError('Oops, there was a problem adding the comment.');
}
});
}
/**
* Recursively append comments to the main comment list and children
* lists, creating the comment tree.
*/
function appendComments(comments, ul) {
$.each(comments, function() {
var div = createCommentDiv(this);
ul.append($(document.createElement('li')).html(div));
appendComments(this.children, div.find('ul.comment-children'));
// To avoid stagnating data, don't store the comments children in data.
this.children = null;
div.data('comment', this);
});
}
/**
 * After adding a new comment, insert it at the correct location in the
 * comment tree, keeping the sibling list ordered by the current `comp`
 * comparator. If it rates lower than every sibling (or the list is
 * empty), it is appended at the end.
 *
 * @param comment - comment object returned by the server; its node or
 *                  parent id selects the target <ul> ('#cl<id>').
 */
function insertComment(comment) {
  var div = createCommentDiv(comment);
  // To avoid stagnating data, don't store the comment's children in data.
  comment.children = null;
  div.data('comment', comment);
  var ul = $('#cl' + (comment.node || comment.parent));
  var siblings = getChildren(ul);
  var li = $(document.createElement('li'));
  li.hide();
  // Determine where in the parent's children list to insert this comment.
  // BUG FIX: the loop index was assigned without `var`, leaking an
  // implicit global `i`; it is now function-scoped.
  for (var i = 0; i < siblings.length; i++) {
    if (comp(comment, siblings[i]) <= 0) {
      $('#cd' + siblings[i].id)
        .parent()
        .before(li.html(div));
      li.slideDown('fast');
      return;
    }
  }
  // If we get here, this comment rates lower than all the others,
  // or it is the only comment in the list.
  ul.append(li.html(div));
  li.slideDown('fast');
}
function acceptComment(id) {
$.ajax({
type: 'POST',
url: opts.acceptCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
$('#cm' + id).fadeOut('fast');
$('#cd' + id).removeClass('moderate');
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem accepting the comment.');
}
});
}
function deleteComment(id) {
$.ajax({
type: 'POST',
url: opts.deleteCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
var div = $('#cd' + id);
if (data == 'delete') {
// Moderator mode: remove the comment and all children immediately
div.slideUp('fast', function() {
div.remove();
});
return;
}
// User mode: only mark the comment as deleted
div
.find('span.user-id:first')
.text('[deleted]').end()
.find('div.comment-text:first')
.text('[deleted]').end()
.find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
.remove();
var comment = div.data('comment');
comment.username = '[deleted]';
comment.text = '[deleted]';
div.data('comment', comment);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem deleting the comment.');
}
});
}
function showProposal(id) {
$('#sp' + id).hide();
$('#hp' + id).show();
$('#pr' + id).slideDown('fast');
}
function hideProposal(id) {
$('#hp' + id).hide();
$('#sp' + id).show();
$('#pr' + id).slideUp('fast');
}
function showProposeChange(id) {
$('#pc' + id).hide();
$('#hc' + id).show();
var textarea = $('#pt' + id);
textarea.val(textarea.data('source'));
$.fn.autogrow.resize(textarea[0]);
textarea.slideDown('fast');
}
function hideProposeChange(id) {
$('#hc' + id).hide();
$('#pc' + id).show();
var textarea = $('#pt' + id);
textarea.val('').removeAttr('disabled');
textarea.slideUp('fast');
}
function toggleCommentMarkupBox(id) {
$('#mb' + id).toggle();
}
/** Handle when the user clicks on a sort by link. */
function handleReSort(link) {
var classes = link.attr('class').split(/\s+/);
for (var i=0; i<classes.length; i++) {
if (classes[i] != 'sort-option') {
by = classes[i].substring(2);
}
}
setComparator();
// Save/update the sortBy cookie.
var expiration = new Date();
expiration.setDate(expiration.getDate() + 365);
document.cookie= 'sortBy=' + escape(by) +
';expires=' + expiration.toUTCString();
$('ul.comment-ul').each(function(index, ul) {
var comments = getChildren($(ul), true);
comments = sortComments(comments);
appendComments(comments, $(ul).empty());
});
}
/**
* Function to process a vote when a user clicks an arrow.
*/
function handleVote(link) {
if (!opts.voting) {
showError("You'll need to login to vote.");
return;
}
var id = link.attr('id');
if (!id) {
// Didn't click on one of the voting arrows.
return;
}
// If it is an unvote, the new vote value is 0,
// Otherwise it's 1 for an upvote, or -1 for a downvote.
var value = 0;
if (id.charAt(1) != 'u') {
value = id.charAt(0) == 'u' ? 1 : -1;
}
// The data to be sent to the server.
var d = {
comment_id: id.substring(2),
value: value
};
// Swap the vote and unvote links.
link.hide();
$('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
.show();
// The div the comment is displayed in.
var div = $('div#cd' + d.comment_id);
var data = div.data('comment');
// If this is not an unvote, and the other vote arrow has
// already been pressed, unpress it.
if ((d.value !== 0) && (data.vote === d.value * -1)) {
$('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
$('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
}
// Update the comments rating in the local data.
data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
data.vote = d.value;
div.data('comment', data);
// Change the rating text.
div.find('.rating:first')
.text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
// Send the vote information to the server.
$.ajax({
type: "POST",
url: opts.processVoteURL,
data: d,
error: function(request, textStatus, error) {
showError('Oops, there was a problem casting that vote.');
}
});
}
/**
* Open a reply form used to reply to an existing comment.
*/
function openReply(id) {
// Swap out the reply link for the hide link
$('#rl' + id).hide();
$('#cr' + id).show();
// Add the reply li to the children ul.
var div = $(renderTemplate(replyTemplate, {id: id})).hide();
$('#cl' + id)
.prepend(div)
// Setup the submit handler for the reply form.
.find('#rf' + id)
.submit(function(event) {
event.preventDefault();
addComment($('#rf' + id));
closeReply(id);
})
.find('input[type=button]')
.click(function() {
closeReply(id);
});
div.slideDown('fast', function() {
$('#rf' + id).find('textarea').focus();
});
}
/**
* Close the reply form opened with openReply.
*/
function closeReply(id) {
// Remove the reply div from the DOM.
$('#rd' + id).slideUp('fast', function() {
$(this).remove();
});
// Swap out the hide link for the reply link
$('#cr' + id).hide();
$('#rl' + id).show();
}
/**
 * Recursively sort a tree of comments in place with the current
 * `comp` comparator; each comment's `children` list is sorted the
 * same way.
 *
 * @param comments - array of comment objects, each with a `children`
 *                   array of the same shape
 * @returns the same (now sorted) array
 */
function sortComments(comments) {
  comments.sort(comp);
  for (var idx = 0; idx < comments.length; idx++) {
    comments[idx].children = sortComments(comments[idx].children);
  }
  return comments;
}
/**
 * Get the children comments from a ul. If recursive is true,
 * recursively include childrens' children.
 *
 * Each direct child comment div (id starting with 'cd') carries its
 * comment object in jQuery data('comment'); that object is collected.
 * When `recursive` is set, the comment's `children` field is rebuilt
 * from the nested '#cl<id>' list.
 *
 * @param ul - jQuery-wrapped comment <ul>
 * @param recursive - whether to descend into nested comment lists
 * @returns array of comment objects
 */
function getChildren(ul, recursive) {
  var children = [];
  ul.children().children("[id^='cd']")
    .each(function() {
      var comment = $(this).data('comment');
      if (recursive)
        comment.children = getChildren($(this).find('#cl' + comment.id), true);
      children.push(comment);
    });
  return children;
}
/** Create a div to display a comment in. */
function createCommentDiv(comment) {
if (!comment.displayed && !opts.moderator) {
return $('<div class="moderate">Thank you! Your comment will show up '
+ 'once it is has been approved by a moderator.</div>');
}
// Prettify the comment rating.
comment.pretty_rating = comment.rating + ' point' +
(comment.rating == 1 ? '' : 's');
// Make a class (for displaying not yet moderated comments differently)
comment.css_class = comment.displayed ? '' : ' moderate';
// Create a div for this comment.
var context = $.extend({}, opts, comment);
var div = $(renderTemplate(commentTemplate, context));
// If the user has voted on this comment, highlight the correct arrow.
if (comment.vote) {
var direction = (comment.vote == 1) ? 'u' : 'd';
div.find('#' + direction + 'v' + comment.id).hide();
div.find('#' + direction + 'u' + comment.id).show();
}
if (opts.moderator || comment.text != '[deleted]') {
div.find('a.reply').show();
if (comment.proposal_diff)
div.find('#sp' + comment.id).show();
if (opts.moderator && !comment.displayed)
div.find('#cm' + comment.id).show();
if (opts.moderator || (opts.username == comment.username))
div.find('#dc' + comment.id).show();
}
return div;
}
/**
 * A simple template renderer. Placeholders such as <%id%> are replaced
 * by context['id'] with items being escaped. Placeholders such as <#id#>
 * are not escaped.
 *
 * Dotted placeholder names (e.g. <%time.delta%>) are resolved by
 * walking nested properties of `context`. Escaping round-trips the
 * value through a detached div's text()/html(), so falsy values render
 * as the empty string in the escaped case.
 */
function renderTemplate(template, context) {
  var esc = $(document.createElement('div'));
  function handle(ph, escape) {
    var cur = context;
    // Walk the dotted path one segment at a time.
    $.each(ph.split('.'), function() {
      cur = cur[this];
    });
    return escape ? esc.text(cur || "").html() : cur;
  }
  // <%name%> escapes, <#name#> is raw; the closing sigil must match
  // the opening one (backreference \1).
  return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
    return handle(arguments[2], arguments[1] == '%' ? true : false);
  });
}
/**
 * Flash an error message briefly: build a popup-error div containing
 * the message, append it to the body, fade it in, hold for two
 * seconds, then fade it back out.
 */
function showError(message) {
  var inner = $(document.createElement('div'))
    .attr({'class': 'error-message'})
    .text(message);
  var popup = $(document.createElement('div'))
    .attr({'class': 'popup-error'})
    .append(inner);
  popup
    .appendTo('body')
    .fadeIn("slow")
    .delay(2000)
    .fadeOut("slow");
}
/** Add a link the user uses to open the comments popup. */
$.fn.comment = function() {
return this.each(function() {
var id = $(this).attr('id').substring(1);
var count = COMMENT_METADATA[id];
var title = count + ' comment' + (count == 1 ? '' : 's');
var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
var addcls = count == 0 ? ' nocomment' : '';
$(this)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-open' + addcls,
id: 'ao' + id
})
.append($(document.createElement('img')).attr({
src: image,
alt: 'comment',
title: title
}))
.click(function(event) {
event.preventDefault();
show($(this).attr('id').substring(2));
})
)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-close hidden',
id: 'ah' + id
})
.append($(document.createElement('img')).attr({
src: opts.closeCommentImage,
alt: 'close',
title: 'close'
}))
.click(function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
})
);
});
};
var opts = {
processVoteURL: '/_process_vote',
addCommentURL: '/_add_comment',
getCommentsURL: '/_get_comments',
acceptCommentURL: '/_accept_comment',
deleteCommentURL: '/_delete_comment',
commentImage: '/static/_static/comment.png',
closeCommentImage: '/static/_static/comment-close.png',
loadingImage: '/static/_static/ajax-loader.gif',
commentBrightImage: '/static/_static/comment-bright.png',
upArrow: '/static/_static/up.png',
downArrow: '/static/_static/down.png',
upArrowPressed: '/static/_static/up-pressed.png',
downArrowPressed: '/static/_static/down-pressed.png',
voting: false,
moderator: false
};
if (typeof COMMENT_OPTIONS != "undefined") {
opts = jQuery.extend(opts, COMMENT_OPTIONS);
}
var popupTemplate = '\
<div class="sphinx-comments" id="sc<%id%>">\
<p class="sort-options">\
Sort by:\
<a href="#" class="sort-option byrating">best rated</a>\
<a href="#" class="sort-option byascage">newest</a>\
<a href="#" class="sort-option byage">oldest</a>\
</p>\
<div class="comment-header">Comments</div>\
<div class="comment-loading" id="cn<%id%>">\
loading comments... <img src="<%loadingImage%>" alt="" /></div>\
<ul id="cl<%id%>" class="comment-ul"></ul>\
<div id="ca<%id%>">\
<p class="add-a-comment">Add a comment\
(<a href="#" class="comment-markup" id="ab<%id%>">markup</a>):</p>\
<div class="comment-markup-box" id="mb<%id%>">\
reStructured text markup: <i>*emph*</i>, <b>**strong**</b>, \
<code>``code``</code>, \
code blocks: <code>::</code> and an indented block after blank line</div>\
<form method="post" id="cf<%id%>" class="comment-form" action="">\
<textarea name="comment" cols="80"></textarea>\
<p class="propose-button">\
<a href="#" id="pc<%id%>" class="show-propose-change">\
Propose a change &#9657;\
</a>\
<a href="#" id="hc<%id%>" class="hide-propose-change">\
Propose a change &#9663;\
</a>\
</p>\
<textarea name="proposal" id="pt<%id%>" cols="80"\
spellcheck="false"></textarea>\
<input type="submit" value="Add comment" />\
<input type="hidden" name="node" value="<%id%>" />\
<input type="hidden" name="parent" value="" />\
</form>\
</div>\
</div>';
var commentTemplate = '\
<div id="cd<%id%>" class="sphinx-comment<%css_class%>">\
<div class="vote">\
<div class="arrow">\
<a href="#" id="uv<%id%>" class="vote" title="vote up">\
<img src="<%upArrow%>" />\
</a>\
<a href="#" id="uu<%id%>" class="un vote" title="vote up">\
<img src="<%upArrowPressed%>" />\
</a>\
</div>\
<div class="arrow">\
<a href="#" id="dv<%id%>" class="vote" title="vote down">\
<img src="<%downArrow%>" id="da<%id%>" />\
</a>\
<a href="#" id="du<%id%>" class="un vote" title="vote down">\
<img src="<%downArrowPressed%>" />\
</a>\
</div>\
</div>\
<div class="comment-content">\
<p class="tagline comment">\
<span class="user-id"><%username%></span>\
<span class="rating"><%pretty_rating%></span>\
<span class="delta"><%time.delta%></span>\
</p>\
<div class="comment-text comment"><#text#></div>\
<p class="comment-opts comment">\
<a href="#" class="reply hidden" id="rl<%id%>">reply &#9657;</a>\
<a href="#" class="close-reply" id="cr<%id%>">reply &#9663;</a>\
<a href="#" id="sp<%id%>" class="show-proposal">proposal &#9657;</a>\
<a href="#" id="hp<%id%>" class="hide-proposal">proposal &#9663;</a>\
<a href="#" id="dc<%id%>" class="delete-comment hidden">delete</a>\
<span id="cm<%id%>" class="moderation hidden">\
<a href="#" id="ac<%id%>" class="accept-comment">accept</a>\
</span>\
</p>\
<pre class="proposal" id="pr<%id%>">\
<#proposal_diff#>\
</pre>\
<ul class="comment-children" id="cl<%id%>"></ul>\
</div>\
<div class="clearleft"></div>\
</div>\
</div>';
var replyTemplate = '\
<li>\
<div class="reply-div" id="rd<%id%>">\
<form id="rf<%id%>">\
<textarea name="comment" cols="80"></textarea>\
<input type="submit" value="Add reply" />\
<input type="button" value="Cancel" />\
<input type="hidden" name="parent" value="<%id%>" />\
<input type="hidden" name="node" value="" />\
</form>\
</div>\
</li>';
$(document).ready(function() {
init();
});
})(jQuery);
$(document).ready(function() {
// add comment anchors for all paragraphs that are commentable
$('.sphinx-has-comment').comment();
// highlight search words in search results
$("div.context").each(function() {
var params = $.getQueryParameters();
var terms = (params.q) ? params.q[0].split(/\s+/) : [];
var result = $(this);
$.each(terms, function() {
result.highlightText(this.toLowerCase(), 'highlighted');
});
});
// directly open comment window if requested
var anchor = document.location.hash;
if (anchor.substring(0, 9) == '#comment-') {
$('#ao' + anchor.substring(9)).click();
document.location.hash = '#s' + anchor.substring(9);
}
});

View File

@ -1,21 +1,34 @@
<!DOCTYPE html> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"> <html xmlns="http://www.w3.org/1999/xhtml">
<head> <head>
<meta charset="utf-8" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Index &#8212; ffmpeg-python documentation</title> <title>Index &#8212; ffmpeg-python documentation</title>
<link rel="stylesheet" href="_static/nature.css" type="text/css" /> <link rel="stylesheet" href="_static/nature.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt'
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script> <script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<link rel="index" title="Index" href="#" /> <link rel="index" title="Index" href="#" />
<link rel="search" title="Search" href="search.html" /> <link rel="search" title="Search" href="search.html" />
</head><body> </head>
<body>
<div class="related" role="navigation" aria-label="related navigation"> <div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3> <h3>Navigation</h3>
<ul> <ul>
@ -38,17 +51,14 @@
<h1 id="index">Index</h1> <h1 id="index">Index</h1>
<div class="genindex-jumpbox"> <div class="genindex-jumpbox">
<a href="#A"><strong>A</strong></a> <a href="#C"><strong>C</strong></a>
| <a href="#C"><strong>C</strong></a>
| <a href="#D"><strong>D</strong></a> | <a href="#D"><strong>D</strong></a>
| <a href="#E"><strong>E</strong></a>
| <a href="#F"><strong>F</strong></a> | <a href="#F"><strong>F</strong></a>
| <a href="#G"><strong>G</strong></a> | <a href="#G"><strong>G</strong></a>
| <a href="#H"><strong>H</strong></a> | <a href="#H"><strong>H</strong></a>
| <a href="#I"><strong>I</strong></a> | <a href="#I"><strong>I</strong></a>
| <a href="#M"><strong>M</strong></a> | <a href="#M"><strong>M</strong></a>
| <a href="#O"><strong>O</strong></a> | <a href="#O"><strong>O</strong></a>
| <a href="#P"><strong>P</strong></a>
| <a href="#R"><strong>R</strong></a> | <a href="#R"><strong>R</strong></a>
| <a href="#S"><strong>S</strong></a> | <a href="#S"><strong>S</strong></a>
| <a href="#T"><strong>T</strong></a> | <a href="#T"><strong>T</strong></a>
@ -56,26 +66,14 @@
| <a href="#Z"><strong>Z</strong></a> | <a href="#Z"><strong>Z</strong></a>
</div> </div>
<h2 id="A">A</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.Stream.audio">audio() (ffmpeg.Stream property)</a>
</li>
</ul></td>
</tr></table>
<h2 id="C">C</h2> <h2 id="C">C</h2>
<table style="width: 100%" class="indextable genindextable"><tr> <table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.colorchannelmixer">colorchannelmixer() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.colorchannelmixer">colorchannelmixer() (in module ffmpeg)</a>
</li>
<li><a href="index.html#ffmpeg.compile">compile() (in module ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.concat">concat() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.concat">concat() (in module ffmpeg)</a>
</li>
<li><a href="index.html#ffmpeg.crop">crop() (in module ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -84,18 +82,6 @@
<table style="width: 100%" class="indextable genindextable"><tr> <table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.drawbox">drawbox() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.drawbox">drawbox() (in module ffmpeg)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.drawtext">drawtext() (in module ffmpeg)</a>
</li>
</ul></td>
</tr></table>
<h2 id="E">E</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.Error">Error</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -104,14 +90,12 @@
<table style="width: 100%" class="indextable genindextable"><tr> <table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#module-ffmpeg">ffmpeg (module)</a> <li><a href="index.html#module-ffmpeg">ffmpeg (module)</a>
</li>
<li><a href="index.html#ffmpeg.filter">filter() (in module ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.filter_">filter_() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.filter_">filter_() (in module ffmpeg)</a>
</li> </li>
<li><a href="index.html#ffmpeg.filter_multi_output">filter_multi_output() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.filter_multi">filter_multi() (in module ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -166,22 +150,10 @@
</ul></td> </ul></td>
</tr></table> </tr></table>
<h2 id="P">P</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.probe">probe() (in module ffmpeg)</a>
</li>
</ul></td>
</tr></table>
<h2 id="R">R</h2> <h2 id="R">R</h2>
<table style="width: 100%" class="indextable genindextable"><tr> <table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.run">run() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.run">run() (in module ffmpeg)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.run_async">run_async() (in module ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -190,10 +162,6 @@
<table style="width: 100%" class="indextable genindextable"><tr> <table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.setpts">setpts() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.setpts">setpts() (in module ffmpeg)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.Stream">Stream (class in ffmpeg)</a>
</li> </li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -211,16 +179,6 @@
<td style="width: 33%; vertical-align: top;"><ul> <td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.vflip">vflip() (in module ffmpeg)</a> <li><a href="index.html#ffmpeg.vflip">vflip() (in module ffmpeg)</a>
</li> </li>
<li><a href="index.html#ffmpeg.Stream.video">video() (ffmpeg.Stream property)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="index.html#ffmpeg.Stream.view">view() (ffmpeg.Stream method)</a>
<ul>
<li><a href="index.html#ffmpeg.view">(in module ffmpeg)</a>
</li>
</ul></li>
</ul></td> </ul></td>
</tr></table> </tr></table>
@ -239,14 +197,17 @@
</div> </div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation"> <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper"> <div class="sphinxsidebarwrapper">
<div id="searchbox" style="display: none" role="search"> <div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3> <h3>Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get"> <form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" /> <div><input type="text" name="q" /></div>
<input type="submit" value="Go" /> <div><input type="submit" value="Go" /></div>
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form> </form>
</div>
</div> </div>
<script type="text/javascript">$('#searchbox').show(0);</script> <script type="text/javascript">$('#searchbox').show(0);</script>
</div> </div>
@ -267,7 +228,7 @@
</div> </div>
<div class="footer" role="contentinfo"> <div class="footer" role="contentinfo">
&#169; Copyright 2017, Karl Kroening. &#169; Copyright 2017, Karl Kroening.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 2.1.0. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.2.
</div> </div>
</body> </body>
</html> </html>

View File

@ -1,20 +1,33 @@
<!DOCTYPE html> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"> <html xmlns="http://www.w3.org/1999/xhtml">
<head> <head>
<meta charset="utf-8" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>ffmpeg-python: Python bindings for FFmpeg &#8212; ffmpeg-python documentation</title> <title>ffmpeg-python: Python bindings for FFmpeg &#8212; ffmpeg-python documentation</title>
<link rel="stylesheet" href="_static/nature.css" type="text/css" /> <link rel="stylesheet" href="_static/nature.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt'
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script> <script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<link rel="index" title="Index" href="genindex.html" /> <link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" /> <link rel="search" title="Search" href="search.html" />
</head><body> </head>
<body>
<div class="related" role="navigation" aria-label="related navigation"> <div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3> <h3>Navigation</h3>
<ul> <ul>
@ -35,286 +48,37 @@
<div class="section" id="ffmpeg-python-python-bindings-for-ffmpeg"> <div class="section" id="ffmpeg-python-python-bindings-for-ffmpeg">
<h1>ffmpeg-python: Python bindings for FFmpeg<a class="headerlink" href="#ffmpeg-python-python-bindings-for-ffmpeg" title="Permalink to this headline"></a></h1> <h1>ffmpeg-python: Python bindings for FFmpeg<a class="headerlink" href="#ffmpeg-python-python-bindings-for-ffmpeg" title="Permalink to this headline"></a></h1>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Github</dt> <col class="field-name" />
<dd class="field-odd"><p><a class="reference external" href="https://github.com/kkroening/ffmpeg-python">https://github.com/kkroening/ffmpeg-python</a></p> <col class="field-body" />
</dd> <tbody valign="top">
</dl> <tr class="field-odd field"><th class="field-name">Github:</th><td class="field-body"><a class="reference external" href="https://github.com/kkroening/ffmpeg-python">https://github.com/kkroening/ffmpeg-python</a></td>
</tr>
</tbody>
</table>
<div class="toctree-wrapper compound"> <div class="toctree-wrapper compound">
</div> </div>
<span class="target" id="module-ffmpeg"></span><dl class="class"> <span class="target" id="module-ffmpeg"></span><dl class="function">
<dt id="ffmpeg.Stream">
<em class="property">class </em><code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">Stream</code><span class="sig-paren">(</span><em class="sig-param">upstream_node</em>, <em class="sig-param">upstream_label</em>, <em class="sig-param">node_types</em>, <em class="sig-param">upstream_selector=None</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.Stream" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></p>
<p>Represents the outgoing edge of an upstream node; may be used to create more downstream nodes.</p>
<dl class="method">
<dt id="ffmpeg.Stream.audio">
<em class="property">property </em><code class="sig-name descname">audio</code><a class="headerlink" href="#ffmpeg.Stream.audio" title="Permalink to this definition"></a></dt>
<dd><p>Select the audio-portion of a stream.</p>
<p>Some ffmpeg filters drop audio streams, and care must be taken
to preserve the audio in the final output. The <code class="docutils literal notranslate"><span class="pre">.audio</span></code> and
<code class="docutils literal notranslate"><span class="pre">.video</span></code> operators can be used to reference the audio/video
portions of a stream so that they can be processed separately
and then re-combined later in the pipeline. This dilemma is
intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
way while users may refer to the official ffmpeg documentation
as to why certain filters drop audio.</p>
<p><code class="docutils literal notranslate"><span class="pre">stream.audio</span></code> is a shorthand for <code class="docutils literal notranslate"><span class="pre">stream['a']</span></code>.</p>
<p class="rubric">Example</p>
<p>Process the audio and video portions of a stream independently:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="nb">input</span> <span class="o">=</span> <span class="n">ffmpeg</span><span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="s1">&#39;in.mp4&#39;</span><span class="p">)</span>
<span class="n">audio</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">audio</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="s2">&quot;aecho&quot;</span><span class="p">,</span> <span class="mf">0.8</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">,</span> <span class="mi">1000</span><span class="p">,</span> <span class="mf">0.3</span><span class="p">)</span>
<span class="n">video</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">video</span><span class="o">.</span><span class="n">hflip</span><span class="p">()</span>
<span class="n">out</span> <span class="o">=</span> <span class="n">ffmpeg</span><span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="n">audio</span><span class="p">,</span> <span class="n">video</span><span class="p">,</span> <span class="s1">&#39;out.mp4&#39;</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>
<dl class="method">
<dt id="ffmpeg.Stream.video">
<em class="property">property </em><code class="sig-name descname">video</code><a class="headerlink" href="#ffmpeg.Stream.video" title="Permalink to this definition"></a></dt>
<dd><p>Select the video-portion of a stream.</p>
<p>Some ffmpeg filters drop audio streams, and care must be taken
to preserve the audio in the final output. The <code class="docutils literal notranslate"><span class="pre">.audio</span></code> and
<code class="docutils literal notranslate"><span class="pre">.video</span></code> operators can be used to reference the audio/video
portions of a stream so that they can be processed separately
and then re-combined later in the pipeline. This dilemma is
intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
way while users may refer to the official ffmpeg documentation
as to why certain filters drop audio.</p>
<p><code class="docutils literal notranslate"><span class="pre">stream.video</span></code> is a shorthand for <code class="docutils literal notranslate"><span class="pre">stream['v']</span></code>.</p>
<p class="rubric">Example</p>
<p>Process the audio and video portions of a stream independently:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="nb">input</span> <span class="o">=</span> <span class="n">ffmpeg</span><span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="s1">&#39;in.mp4&#39;</span><span class="p">)</span>
<span class="n">audio</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">audio</span><span class="o">.</span><span class="n">filter</span><span class="p">(</span><span class="s2">&quot;aecho&quot;</span><span class="p">,</span> <span class="mf">0.8</span><span class="p">,</span> <span class="mf">0.9</span><span class="p">,</span> <span class="mi">1000</span><span class="p">,</span> <span class="mf">0.3</span><span class="p">)</span>
<span class="n">video</span> <span class="o">=</span> <span class="nb">input</span><span class="o">.</span><span class="n">video</span><span class="o">.</span><span class="n">hflip</span><span class="p">()</span>
<span class="n">out</span> <span class="o">=</span> <span class="n">ffmpeg</span><span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="n">audio</span><span class="p">,</span> <span class="n">video</span><span class="p">,</span> <span class="s1">&#39;out.mp4&#39;</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>
<dl class="method">
<dt id="ffmpeg.Stream.view">
<code class="sig-name descname">view</code><span class="sig-paren">(</span><em class="sig-param">detail=False</em>, <em class="sig-param">filename=None</em>, <em class="sig-param">pipe=False</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.Stream.view" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.input">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">input</code><span class="sig-paren">(</span><em class="sig-param">filename</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.input" title="Permalink to this definition"></a></dt>
<dd><p>Input file URL (ffmpeg <code class="docutils literal notranslate"><span class="pre">-i</span></code> option)</p>
<p>Any supplied kwargs are passed to ffmpeg verbatim (e.g. <code class="docutils literal notranslate"><span class="pre">t=20</span></code>,
<code class="docutils literal notranslate"><span class="pre">f='mp4'</span></code>, <code class="docutils literal notranslate"><span class="pre">acodec='pcm'</span></code>, etc.).</p>
<p>To tell ffmpeg to read from stdin, use <code class="docutils literal notranslate"><span class="pre">pipe:</span></code> as the filename.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Main-options">Main options</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.merge_outputs">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">merge_outputs</code><span class="sig-paren">(</span><em class="sig-param">*streams</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.merge_outputs" title="Permalink to this definition"></a></dt>
<dd><p>Include all given outputs in one ffmpeg command line</p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.output">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">output</code><span class="sig-paren">(</span><em class="sig-param">*streams_and_filename</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.output" title="Permalink to this definition"></a></dt>
<dd><p>Output file URL</p>
<dl class="simple">
<dt>Syntax:</dt><dd><p><cite>ffmpeg.output(stream1[, stream2, stream3…], filename, **ffmpeg_args)</cite></p>
</dd>
</dl>
<p>Any supplied keyword arguments are passed to ffmpeg verbatim (e.g.
<code class="docutils literal notranslate"><span class="pre">t=20</span></code>, <code class="docutils literal notranslate"><span class="pre">f='mp4'</span></code>, <code class="docutils literal notranslate"><span class="pre">acodec='pcm'</span></code>, <code class="docutils literal notranslate"><span class="pre">vcodec='rawvideo'</span></code>,
etc.). Some keyword-arguments are handled specially, as shown below.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>video_bitrate</strong> parameter for <code class="docutils literal notranslate"><span class="pre">-b:v</span></code>, e.g. <code class="docutils literal notranslate"><span class="pre">video_bitrate=1000</span></code>.</p></li>
<li><p><strong>audio_bitrate</strong> parameter for <code class="docutils literal notranslate"><span class="pre">-b:a</span></code>, e.g. <code class="docutils literal notranslate"><span class="pre">audio_bitrate=200</span></code>.</p></li>
<li><p><strong>format</strong> alias for <code class="docutils literal notranslate"><span class="pre">-f</span></code> parameter, e.g. <code class="docutils literal notranslate"><span class="pre">format='mp4'</span></code>
(equivalent to <code class="docutils literal notranslate"><span class="pre">f='mp4'</span></code>).</p></li>
</ul>
</dd>
</dl>
<p>If multiple streams are provided, they are mapped to the same
output.</p>
<p>To tell ffmpeg to write to stdout, use <code class="docutils literal notranslate"><span class="pre">pipe:</span></code> as the filename.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Synopsis">Synopsis</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.overwrite_output">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">overwrite_output</code><span class="sig-paren">(</span><em class="sig-param">stream</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.overwrite_output" title="Permalink to this definition"></a></dt>
<dd><p>Overwrite output files without asking (ffmpeg <code class="docutils literal notranslate"><span class="pre">-y</span></code> option)</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Main-options">Main options</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.probe">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">probe</code><span class="sig-paren">(</span><em class="sig-param">filename</em>, <em class="sig-param">cmd='ffprobe'</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.probe" title="Permalink to this definition"></a></dt>
<dd><p>Run ffprobe on the specified file and return a JSON representation of the output.</p>
<dl class="field-list simple">
<dt class="field-odd">Raises</dt>
<dd class="field-odd"><p><a class="reference internal" href="#ffmpeg.Error" title="ffmpeg.Error"><strong>ffmpeg.Error</strong></a> if ffprobe returns a non-zero exit code,
an <a class="reference internal" href="#ffmpeg.Error" title="ffmpeg.Error"><code class="xref py py-class docutils literal notranslate"><span class="pre">Error</span></code></a> is returned with a generic error message.
The stderr output can be retrieved by accessing the
<code class="docutils literal notranslate"><span class="pre">stderr</span></code> property of the exception.</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.compile">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">compile</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">cmd='ffmpeg'</em>, <em class="sig-param">overwrite_output=False</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.compile" title="Permalink to this definition"></a></dt>
<dd><p>Build command-line for invoking ffmpeg.</p>
<p>The <a class="reference internal" href="#ffmpeg.run" title="ffmpeg.run"><code class="xref py py-meth docutils literal notranslate"><span class="pre">run()</span></code></a> function uses this to build the command line
arguments and should work in most cases, but calling this function
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.</p>
<p>This is the same as calling <a class="reference internal" href="#ffmpeg.get_args" title="ffmpeg.get_args"><code class="xref py py-meth docutils literal notranslate"><span class="pre">get_args()</span></code></a> except that it also
includes the <code class="docutils literal notranslate"><span class="pre">ffmpeg</span></code> command as the first argument.</p>
</dd></dl>
<dl class="exception">
<dt id="ffmpeg.Error">
<em class="property">exception </em><code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">Error</code><span class="sig-paren">(</span><em class="sig-param">cmd</em>, <em class="sig-param">stdout</em>, <em class="sig-param">stderr</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.Error" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <code class="xref py py-class docutils literal notranslate"><span class="pre">Exception</span></code></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.get_args">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">get_args</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">overwrite_output=False</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.get_args" title="Permalink to this definition"></a></dt>
<dd><p>Build command-line arguments to be passed to ffmpeg.</p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.run">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">run</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">cmd='ffmpeg'</em>, <em class="sig-param">capture_stdout=False</em>, <em class="sig-param">capture_stderr=False</em>, <em class="sig-param">input=None</em>, <em class="sig-param">quiet=False</em>, <em class="sig-param">overwrite_output=False</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.run" title="Permalink to this definition"></a></dt>
<dd><p>Invoke ffmpeg for the supplied node graph.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>capture_stdout</strong> if True, capture stdout (to be used with
<code class="docutils literal notranslate"><span class="pre">pipe:</span></code> ffmpeg outputs).</p></li>
<li><p><strong>capture_stderr</strong> if True, capture stderr.</p></li>
<li><p><strong>quiet</strong> shorthand for setting <code class="docutils literal notranslate"><span class="pre">capture_stdout</span></code> and <code class="docutils literal notranslate"><span class="pre">capture_stderr</span></code>.</p></li>
<li><p><strong>input</strong> text to be sent to stdin (to be used with <code class="docutils literal notranslate"><span class="pre">pipe:</span></code>
ffmpeg inputs)</p></li>
<li><p><strong>**kwargs</strong> keyword-arguments passed to <code class="docutils literal notranslate"><span class="pre">get_args()</span></code> (e.g.
<code class="docutils literal notranslate"><span class="pre">overwrite_output=True</span></code>).</p></li>
</ul>
</dd>
</dl>
<p>Returns: (out, err) tuple containing captured stdout and stderr data.</p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.run_async">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">run_async</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">cmd='ffmpeg'</em>, <em class="sig-param">pipe_stdin=False</em>, <em class="sig-param">pipe_stdout=False</em>, <em class="sig-param">pipe_stderr=False</em>, <em class="sig-param">quiet=False</em>, <em class="sig-param">overwrite_output=False</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.run_async" title="Permalink to this definition"></a></dt>
<dd><p>Asynchronously invoke ffmpeg for the supplied node graph.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>pipe_stdin</strong> if True, connect pipe to subprocess stdin (to be
used with <code class="docutils literal notranslate"><span class="pre">pipe:</span></code> ffmpeg inputs).</p></li>
<li><p><strong>pipe_stdout</strong> if True, connect pipe to subprocess stdout (to be
used with <code class="docutils literal notranslate"><span class="pre">pipe:</span></code> ffmpeg outputs).</p></li>
<li><p><strong>pipe_stderr</strong> if True, connect pipe to subprocess stderr.</p></li>
<li><p><strong>quiet</strong> shorthand for setting <code class="docutils literal notranslate"><span class="pre">capture_stdout</span></code> and
<code class="docutils literal notranslate"><span class="pre">capture_stderr</span></code>.</p></li>
<li><p><strong>**kwargs</strong> keyword-arguments passed to <code class="docutils literal notranslate"><span class="pre">get_args()</span></code> (e.g.
<code class="docutils literal notranslate"><span class="pre">overwrite_output=True</span></code>).</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>A <a class="reference external" href="https://docs.python.org/3/library/subprocess.html#popen-objects">subprocess Popen</a> object representing the child process.</p>
</dd>
</dl>
<p class="rubric">Examples</p>
<p>Run and stream input:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">process</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">ffmpeg</span>
<span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="s1">&#39;pipe:&#39;</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">&#39;rawvideo&#39;</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;rgb24&#39;</span><span class="p">,</span> <span class="n">s</span><span class="o">=</span><span class="s1">&#39;</span><span class="si">{}</span><span class="s1">x</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">width</span><span class="p">,</span> <span class="n">height</span><span class="p">))</span>
<span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="n">out_filename</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;yuv420p&#39;</span><span class="p">)</span>
<span class="o">.</span><span class="n">overwrite_output</span><span class="p">()</span>
<span class="o">.</span><span class="n">run_async</span><span class="p">(</span><span class="n">pipe_stdin</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">process</span><span class="o">.</span><span class="n">communicate</span><span class="p">(</span><span class="nb">input</span><span class="o">=</span><span class="n">input_data</span><span class="p">)</span>
</pre></div>
</div>
<p>Run and capture output:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">process</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">ffmpeg</span>
<span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="n">in_filename</span><span class="p">)</span>
<span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="s1">&#39;pipe&#39;</span><span class="p">:,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">&#39;rawvideo&#39;</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;rgb24&#39;</span><span class="p">)</span>
<span class="o">.</span><span class="n">run_async</span><span class="p">(</span><span class="n">pipe_stdout</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">pipe_stderr</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">out</span><span class="p">,</span> <span class="n">err</span> <span class="o">=</span> <span class="n">process</span><span class="o">.</span><span class="n">communicate</span><span class="p">()</span>
</pre></div>
</div>
<p>Process video frame-by-frame using numpy:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">process1</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">ffmpeg</span>
<span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="n">in_filename</span><span class="p">)</span>
<span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="s1">&#39;pipe:&#39;</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">&#39;rawvideo&#39;</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;rgb24&#39;</span><span class="p">)</span>
<span class="o">.</span><span class="n">run_async</span><span class="p">(</span><span class="n">pipe_stdout</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">process2</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">ffmpeg</span>
<span class="o">.</span><span class="n">input</span><span class="p">(</span><span class="s1">&#39;pipe:&#39;</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">&#39;rawvideo&#39;</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;rgb24&#39;</span><span class="p">,</span> <span class="n">s</span><span class="o">=</span><span class="s1">&#39;</span><span class="si">{}</span><span class="s1">x</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">width</span><span class="p">,</span> <span class="n">height</span><span class="p">))</span>
<span class="o">.</span><span class="n">output</span><span class="p">(</span><span class="n">out_filename</span><span class="p">,</span> <span class="n">pix_fmt</span><span class="o">=</span><span class="s1">&#39;yuv420p&#39;</span><span class="p">)</span>
<span class="o">.</span><span class="n">overwrite_output</span><span class="p">()</span>
<span class="o">.</span><span class="n">run_async</span><span class="p">(</span><span class="n">pipe_stdin</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="p">)</span>
<span class="k">while</span> <span class="kc">True</span><span class="p">:</span>
<span class="n">in_bytes</span> <span class="o">=</span> <span class="n">process1</span><span class="o">.</span><span class="n">stdout</span><span class="o">.</span><span class="n">read</span><span class="p">(</span><span class="n">width</span> <span class="o">*</span> <span class="n">height</span> <span class="o">*</span> <span class="mi">3</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">in_bytes</span><span class="p">:</span>
<span class="k">break</span>
<span class="n">in_frame</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">np</span>
<span class="o">.</span><span class="n">frombuffer</span><span class="p">(</span><span class="n">in_bytes</span><span class="p">,</span> <span class="n">np</span><span class="o">.</span><span class="n">uint8</span><span class="p">)</span>
<span class="o">.</span><span class="n">reshape</span><span class="p">([</span><span class="n">height</span><span class="p">,</span> <span class="n">width</span><span class="p">,</span> <span class="mi">3</span><span class="p">])</span>
<span class="p">)</span>
<span class="n">out_frame</span> <span class="o">=</span> <span class="n">in_frame</span> <span class="o">*</span> <span class="mf">0.3</span>
<span class="n">process2</span><span class="o">.</span><span class="n">stdin</span><span class="o">.</span><span class="n">write</span><span class="p">(</span>
<span class="n">frame</span>
<span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">uint8</span><span class="p">)</span>
<span class="o">.</span><span class="n">tobytes</span><span class="p">()</span>
<span class="p">)</span>
<span class="n">process2</span><span class="o">.</span><span class="n">stdin</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<span class="n">process1</span><span class="o">.</span><span class="n">wait</span><span class="p">()</span>
<span class="n">process2</span><span class="o">.</span><span class="n">wait</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.view">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">view</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">detail=False</em>, <em class="sig-param">filename=None</em>, <em class="sig-param">pipe=False</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.view" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="function">
<dt id="ffmpeg.colorchannelmixer"> <dt id="ffmpeg.colorchannelmixer">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">colorchannelmixer</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">*args</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.colorchannelmixer" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">colorchannelmixer</code><span class="sig-paren">(</span><em>parent_node</em>, <em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.colorchannelmixer" title="Permalink to this definition"></a></dt>
<dd><p>Adjust video input frames by re-mixing color channels.</p> <dd><p>Adjust video input frames by re-mixing color channels.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#colorchannelmixer">colorchannelmixer</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#colorchannelmixer">colorchannelmixer</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.concat"> <dt id="ffmpeg.concat">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">concat</code><span class="sig-paren">(</span><em class="sig-param">*streams</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.concat" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">concat</code><span class="sig-paren">(</span><em>*parent_nodes</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.concat" title="Permalink to this definition"></a></dt>
<dd><p>Concatenate audio and video streams, joining them together one after the other.</p> <dd><p>Concatenate audio and video streams, joining them together one after the other.</p>
<p>The filter works on segments of synchronized video and audio streams. All segments must have the same number of <p>The filter works on segments of synchronized video and audio streams. All segments must have the same number of
streams of each type, and that will also be the number of streams at output.</p> streams of each type, and that will also be the number of streams at output.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><p><strong>unsafe</strong> Activate unsafe mode: do not fail if segments have a different format.</p> <col class="field-body" />
</dd> <tbody valign="top">
</dl> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>unsafe</strong> Activate unsafe mode: do not fail if segments have a different format.</td>
</tr>
</tbody>
</table>
<p>Related streams do not always have exactly the same duration, for various reasons including codec frame size or <p>Related streams do not always have exactly the same duration, for various reasons including codec frame size or
sloppy authoring. For that reason, related synchronized streams (e.g. a video and its audio track) should be sloppy authoring. For that reason, related synchronized streams (e.g. a video and its audio track) should be
concatenated at once. The concat filter will use the duration of the longest stream in each segment (except the concatenated at once. The concat filter will use the duration of the longest stream in each segment (except the
@ -328,384 +92,299 @@ output file to handle it.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#concat">concat</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#concat">concat</a></p>
</dd></dl> </dd></dl>
<dl class="function">
<dt id="ffmpeg.crop">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">crop</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">x</em>, <em class="sig-param">y</em>, <em class="sig-param">width</em>, <em class="sig-param">height</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.crop" title="Permalink to this definition"></a></dt>
<dd><p>Crop the input video.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>x</strong> The horizontal position, in the input video, of the left edge of
the output video.</p></li>
<li><p><strong>y</strong> The vertical position, in the input video, of the top edge of the
output video.</p></li>
<li><p><strong>width</strong> The width of the output video. Must be greater than 0.</p></li>
<li><p><strong>height</strong> The height of the output video. Must be greater than 0.</p></li>
</ul>
</dd>
</dl>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#crop">crop</a></p>
</dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.drawbox"> <dt id="ffmpeg.drawbox">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">drawbox</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">x</em>, <em class="sig-param">y</em>, <em class="sig-param">width</em>, <em class="sig-param">height</em>, <em class="sig-param">color</em>, <em class="sig-param">thickness=None</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.drawbox" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">drawbox</code><span class="sig-paren">(</span><em>parent_node</em>, <em>x</em>, <em>y</em>, <em>width</em>, <em>height</em>, <em>color</em>, <em>thickness=None</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.drawbox" title="Permalink to this definition"></a></dt>
<dd><p>Draw a colored box on the input image.</p> <dd><p>Draw a colored box on the input image.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><ul class="simple"> <col class="field-body" />
<li><p><strong>x</strong> The expression which specifies the top left corner x coordinate of the box. It defaults to 0.</p></li> <tbody valign="top">
<li><p><strong>y</strong> The expression which specifies the top left corner y coordinate of the box. It defaults to 0.</p></li> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><p><strong>width</strong> Specify the width of the box; if 0 interpreted as the input width. It defaults to 0.</p></li> <li><strong>x</strong> The expression which specifies the top left corner x coordinate of the box. It defaults to 0.</li>
<li><p><strong>height</strong> Specify the height of the box; if 0 interpreted as the input height. It defaults to 0.</p></li> <li><strong>y</strong> The expression which specifies the top left corner y coordinate of the box. It defaults to 0.</li>
<li><p><strong>color</strong> Specify the color of the box to write. For the general syntax of this option, check the “Color” section <li><strong>width</strong> Specify the width of the box; if 0 interpreted as the input width. It defaults to 0.</li>
<li><strong>height</strong> Specify the height of the box; if 0 interpreted as the input height. It defaults to 0.</li>
<li><strong>color</strong> Specify the color of the box to write. For the general syntax of this option, check the “Color” section
in the ffmpeg-utils manual. If the special value invert is used, the box edge color is the same as the in the ffmpeg-utils manual. If the special value invert is used, the box edge color is the same as the
video with inverted luma.</p></li> video with inverted luma.</li>
<li><p><strong>thickness</strong> The expression which sets the thickness of the box edge. Default value is 3.</p></li> <li><strong>thickness</strong> The expression which sets the thickness of the box edge. Default value is 3.</li>
<li><p><strong>w</strong> Alias for <code class="docutils literal notranslate"><span class="pre">width</span></code>.</p></li> <li><strong>w</strong> Alias for <code class="docutils literal"><span class="pre">width</span></code>.</li>
<li><p><strong>h</strong> Alias for <code class="docutils literal notranslate"><span class="pre">height</span></code>.</p></li> <li><strong>h</strong> Alias for <code class="docutils literal"><span class="pre">height</span></code>.</li>
<li><p><strong>c</strong> Alias for <code class="docutils literal notranslate"><span class="pre">color</span></code>.</p></li> <li><strong>c</strong> Alias for <code class="docutils literal"><span class="pre">color</span></code>.</li>
<li><p><strong>t</strong> Alias for <code class="docutils literal notranslate"><span class="pre">thickness</span></code>.</p></li> <li><strong>t</strong> Alias for <code class="docutils literal"><span class="pre">thickness</span></code>.</li>
</ul> </ul>
</dd> </td>
</dl> </tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#drawbox">drawbox</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#drawbox">drawbox</a></p>
</dd></dl> </dd></dl>
<dl class="function">
<dt id="ffmpeg.drawtext">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">drawtext</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">text=None</em>, <em class="sig-param">x=0</em>, <em class="sig-param">y=0</em>, <em class="sig-param">escape_text=True</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.drawtext" title="Permalink to this definition"></a></dt>
<dd><p>Draw a text string or text from a specified file on top of a video, using the libfreetype library.</p>
<p>To enable compilation of this filter, you need to configure FFmpeg with <code class="docutils literal notranslate"><span class="pre">--enable-libfreetype</span></code>. To enable default
font fallback and the font option you need to configure FFmpeg with <code class="docutils literal notranslate"><span class="pre">--enable-libfontconfig</span></code>. To enable the
text_shaping option, you need to configure FFmpeg with <code class="docutils literal notranslate"><span class="pre">--enable-libfribidi</span></code>.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>box</strong> Used to draw a box around text using the background color. The value must be either 1 (enable) or 0
(disable). The default value of box is 0.</p></li>
<li><p><strong>boxborderw</strong> Set the width of the border to be drawn around the box using boxcolor. The default value of
boxborderw is 0.</p></li>
<li><p><strong>boxcolor</strong> The color to be used for drawing box around text. For the syntax of this option, check the “Color”
section in the ffmpeg-utils manual. The default value of boxcolor is “white”.</p></li>
<li><p><strong>line_spacing</strong> Set the line spacing in pixels of the border to be drawn around the box using box. The default
value of line_spacing is 0.</p></li>
<li><p><strong>borderw</strong> Set the width of the border to be drawn around the text using bordercolor. The default value of
borderw is 0.</p></li>
<li><p><strong>bordercolor</strong> Set the color to be used for drawing border around text. For the syntax of this option, check the
“Color” section in the ffmpeg-utils manual. The default value of bordercolor is “black”.</p></li>
<li><p><strong>expansion</strong> Select how the text is expanded. Can be either none, strftime (deprecated) or normal (default). See
the Text expansion section below for details.</p></li>
<li><p><strong>basetime</strong> Set a start time for the count. Value is in microseconds. Only applied in the deprecated strftime
expansion mode. To emulate in normal expansion mode use the pts function, supplying the start time (in
seconds) as the second argument.</p></li>
<li><p><strong>fix_bounds</strong> If true, check and fix text coords to avoid clipping.</p></li>
<li><p><strong>fontcolor</strong> The color to be used for drawing fonts. For the syntax of this option, check the “Color” section in
the ffmpeg-utils manual. The default value of fontcolor is “black”.</p></li>
<li><p><strong>fontcolor_expr</strong> String which is expanded the same way as text to obtain dynamic fontcolor value. By default
this option has empty value and is not processed. When this option is set, it overrides fontcolor option.</p></li>
<li><p><strong>font</strong> The font family to be used for drawing text. By default Sans.</p></li>
<li><p><strong>fontfile</strong> The font file to be used for drawing text. The path must be included. This parameter is mandatory if
the fontconfig support is disabled.</p></li>
<li><p><strong>alpha</strong> Draw the text applying alpha blending. The value can be a number between 0.0 and 1.0. The expression
accepts the same variables x, y as well. The default value is 1. Please see fontcolor_expr.</p></li>
<li><p><strong>fontsize</strong> The font size to be used for drawing text. The default value of fontsize is 16.</p></li>
<li><p><strong>text_shaping</strong> If set to 1, attempt to shape the text (for example, reverse the order of right-to-left text and
join Arabic characters) before drawing it. Otherwise, just draw the text exactly as given. By default 1 (if
supported).</p></li>
<li><p><strong>ft_load_flags</strong> <p>The flags to be used for loading the fonts. The flags map the corresponding flags supported by
libfreetype, and are a combination of the following values:</p>
<ul>
<li><p><code class="docutils literal notranslate"><span class="pre">default</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">no_scale</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">no_hinting</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">render</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">no_bitmap</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">vertical_layout</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">force_autohint</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">crop_bitmap</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">pedantic</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">ignore_global_advance_width</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">no_recurse</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">ignore_transform</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">monochrome</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">linear_design</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">no_autohint</span></code></p></li>
</ul>
<p>Default value is “default”. For more information consult the documentation for the FT_LOAD_* libfreetype
flags.</p>
</p></li>
<li><p><strong>shadowcolor</strong> The color to be used for drawing a shadow behind the drawn text. For the syntax of this option,
check the “Color” section in the ffmpeg-utils manual. The default value of shadowcolor is “black”.</p></li>
<li><p><strong>shadowx</strong> The x offset for the text shadow position with respect to the position of the text. It can be either
positive or negative values. The default value is “0”.</p></li>
<li><p><strong>shadowy</strong> The y offset for the text shadow position with respect to the position of the text. It can be either
positive or negative values. The default value is “0”.</p></li>
<li><p><strong>start_number</strong> The starting frame number for the n/frame_num variable. The default value is “0”.</p></li>
<li><p><strong>tabsize</strong> The size in number of spaces to use for rendering the tab. Default value is 4.</p></li>
<li><p><strong>timecode</strong> Set the initial timecode representation in “hh:mm:ss[:;.]ff” format. It can be used with or without
text parameter. timecode_rate option must be specified.</p></li>
<li><p><strong>rate</strong> Set the timecode frame rate (timecode only).</p></li>
<li><p><strong>timecode_rate</strong> Alias for <code class="docutils literal notranslate"><span class="pre">rate</span></code>.</p></li>
<li><p><strong>r</strong> Alias for <code class="docutils literal notranslate"><span class="pre">rate</span></code>.</p></li>
<li><p><strong>tc24hmax</strong> If set to 1, the output of the timecode option will wrap around at 24 hours. Default is 0 (disabled).</p></li>
<li><p><strong>text</strong> The text string to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is
mandatory if no file is specified with the parameter textfile.</p></li>
<li><p><strong>textfile</strong> A text file containing text to be drawn. The text must be a sequence of UTF-8 encoded characters.
This parameter is mandatory if no text string is specified with the parameter text. If both text and
textfile are specified, an error is thrown.</p></li>
<li><p><strong>reload</strong> If set to 1, the textfile will be reloaded before each frame. Be sure to update it atomically, or it
may be read partially, or even fail.</p></li>
<li><p><strong>x</strong> The expression which specifies the offset where text will be drawn within the video frame. It is relative to
the left border of the output image. The default value is “0”.</p></li>
<li><p><strong>y</strong> The expression which specifies the offset where text will be drawn within the video frame. It is relative to
the top border of the output image. The default value is “0”. See below for the list of accepted constants
and functions.</p></li>
</ul>
</dd>
</dl>
<dl>
<dt>Expression constants:</dt><dd><dl class="simple">
<dt>The parameters for x and y are expressions containing the following constants and functions:</dt><dd><ul class="simple">
<li><p>dar: input display aspect ratio, it is the same as <code class="docutils literal notranslate"><span class="pre">(w</span> <span class="pre">/</span> <span class="pre">h)</span> <span class="pre">*</span> <span class="pre">sar</span></code></p></li>
<li><p>hsub: horizontal chroma subsample values. For example for the pixel format “yuv422p” hsub is 2 and vsub
is 1.</p></li>
<li><p>vsub: vertical chroma subsample values. For example for the pixel format “yuv422p” hsub is 2 and vsub
is 1.</p></li>
<li><p>line_h: the height of each text line</p></li>
<li><p>lh: Alias for <code class="docutils literal notranslate"><span class="pre">line_h</span></code>.</p></li>
<li><p>main_h: the input height</p></li>
<li><p>h: Alias for <code class="docutils literal notranslate"><span class="pre">main_h</span></code>.</p></li>
<li><p>H: Alias for <code class="docutils literal notranslate"><span class="pre">main_h</span></code>.</p></li>
<li><p>main_w: the input width</p></li>
<li><p>w: Alias for <code class="docutils literal notranslate"><span class="pre">main_w</span></code>.</p></li>
<li><p>W: Alias for <code class="docutils literal notranslate"><span class="pre">main_w</span></code>.</p></li>
<li><p>ascent: the maximum distance from the baseline to the highest/upper grid coordinate used to place a glyph
outline point, for all the rendered glyphs. It is a positive value, due to the grids orientation with the Y
axis upwards.</p></li>
<li><p>max_glyph_a: Alias for <code class="docutils literal notranslate"><span class="pre">ascent</span></code>.</p></li>
<li><p>descent: the maximum distance from the baseline to the lowest grid coordinate used to place a glyph outline
point, for all the rendered glyphs. This is a negative value, due to the grids orientation, with the Y axis
upwards.</p></li>
<li><p>max_glyph_d: Alias for <code class="docutils literal notranslate"><span class="pre">descent</span></code>.</p></li>
<li><p>max_glyph_h: maximum glyph height, that is the maximum height for all the glyphs contained in the rendered
text, it is equivalent to ascent - descent.</p></li>
<li><p>max_glyph_w: maximum glyph width, that is the maximum width for all the glyphs contained in the rendered
text.</p></li>
<li><p>n: the number of input frame, starting from 0</p></li>
<li><p>rand(min, max): return a random number included between min and max</p></li>
<li><p>sar: The input sample aspect ratio.</p></li>
<li><p>t: timestamp expressed in seconds, NAN if the input timestamp is unknown</p></li>
<li><p>text_h: the height of the rendered text</p></li>
<li><p>th: Alias for <code class="docutils literal notranslate"><span class="pre">text_h</span></code>.</p></li>
<li><p>text_w: the width of the rendered text</p></li>
<li><p>tw: Alias for <code class="docutils literal notranslate"><span class="pre">text_w</span></code>.</p></li>
<li><p>x: the x offset coordinates where the text is drawn.</p></li>
<li><p>y: the y offset coordinates where the text is drawn.</p></li>
</ul>
</dd>
</dl>
<p>These parameters allow the x and y expressions to refer each other, so you can for example specify
<code class="docutils literal notranslate"><span class="pre">y=x/dar</span></code>.</p>
</dd>
</dl>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#drawtext">drawtext</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.filter">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">filter</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">filter_name</em>, <em class="sig-param">*args</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.filter" title="Permalink to this definition"></a></dt>
<dd><p>Apply custom filter.</p>
<p><code class="docutils literal notranslate"><span class="pre">filter_</span></code> is normally used by higher-level filter functions such as <code class="docutils literal notranslate"><span class="pre">hflip</span></code>, but if a filter implementation
is missing from <code class="docutils literal notranslate"><span class="pre">ffmpeg-python</span></code>, you can call <code class="docutils literal notranslate"><span class="pre">filter_</span></code> directly to have <code class="docutils literal notranslate"><span class="pre">ffmpeg-python</span></code> pass the filter name
and arguments to ffmpeg verbatim.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>stream_spec</strong> a Stream, list of Streams, or label-to-Stream dictionary mapping</p></li>
<li><p><strong>filter_name</strong> ffmpeg filter name, e.g. <cite>colorchannelmixer</cite></p></li>
<li><p><strong>*args</strong> list of args to pass to ffmpeg verbatim</p></li>
<li><p><strong>**kwargs</strong> list of keyword-args to pass to ffmpeg verbatim</p></li>
</ul>
</dd>
</dl>
<p>The function name is suffixed with <code class="docutils literal notranslate"><span class="pre">_</span></code> in order to avoid confusion with the standard python <code class="docutils literal notranslate"><span class="pre">filter</span></code> function.</p>
<p class="rubric">Example</p>
<p><code class="docutils literal notranslate"><span class="pre">ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()</span></code></p>
</dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.filter_"> <dt id="ffmpeg.filter_">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">filter_</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">filter_name</em>, <em class="sig-param">*args</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.filter_" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">filter_</code><span class="sig-paren">(</span><em>parent_node</em>, <em>filter_name</em>, <em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.filter_" title="Permalink to this definition"></a></dt>
<dd><p>Alternate name for <code class="docutils literal notranslate"><span class="pre">filter</span></code>, so as to not collide with the <dd><p>Apply custom single-source filter.</p>
built-in python <code class="docutils literal notranslate"><span class="pre">filter</span></code> operator.</p> <p><code class="docutils literal"><span class="pre">filter_</span></code> is normally used by higher-level filter functions such as <code class="docutils literal"><span class="pre">hflip</span></code>, but if a filter implementation
is missing from <code class="docutils literal"><span class="pre">ffmpeg-python</span></code>, you can call <code class="docutils literal"><span class="pre">filter_</span></code> directly to have <code class="docutils literal"><span class="pre">ffmpeg-python</span></code> pass the filter name
and arguments to ffmpeg verbatim.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>parent_node</strong> Source stream to apply filter to.</li>
<li><strong>filter_name</strong> ffmpeg filter name, e.g. <cite>colorchannelmixer</cite></li>
<li><strong>*args</strong> list of args to pass to ffmpeg verbatim</li>
<li><strong>**kwargs</strong> list of keyword-args to pass to ffmpeg verbatim</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>This function is used internally by all of the other single-source filters (e.g. <code class="docutils literal"><span class="pre">hflip</span></code>, <code class="docutils literal"><span class="pre">crop</span></code>, etc.).
For custom multi-source filters, see <code class="docutils literal"><span class="pre">filter_multi</span></code> instead.</p>
<p>The function name is suffixed with <code class="docutils literal"><span class="pre">_</span></code> in order to avoid confusion with the standard python <code class="docutils literal"><span class="pre">filter</span></code> function.</p>
<p class="rubric">Example</p>
<p><code class="docutils literal"><span class="pre">ffmpeg.input('in.mp4').filter_('hflip').output('out.mp4').run()</span></code></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.filter_multi_output"> <dt id="ffmpeg.filter_multi">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">filter_multi_output</code><span class="sig-paren">(</span><em class="sig-param">stream_spec</em>, <em class="sig-param">filter_name</em>, <em class="sig-param">*args</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.filter_multi_output" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">filter_multi</code><span class="sig-paren">(</span><em>parent_nodes</em>, <em>filter_name</em>, <em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.filter_multi" title="Permalink to this definition"></a></dt>
<dd><p>Apply custom filter with one or more outputs.</p> <dd><p>Apply custom multi-source filter.</p>
<p>This is the same as <code class="docutils literal notranslate"><span class="pre">filter</span></code> except that the filter can produce more than one output.</p> <p>This is nearly identical to the <code class="docutils literal"><span class="pre">filter</span></code> function except that it allows filters to be applied to multiple
<p>To reference an output stream, use either the <code class="docutils literal notranslate"><span class="pre">.stream</span></code> operator or bracket shorthand:</p> streams. Its normally used by higher-level filter functions such as <code class="docutils literal"><span class="pre">concat</span></code>, but if a filter implementation
is missing from <code class="docutils literal"><span class="pre">ffmpeg-python</span></code>, you can call <code class="docutils literal"><span class="pre">filter_multi</span></code> directly.</p>
<p>Note that because it applies to multiple streams, it cant be used as an operator, unlike the <code class="docutils literal"><span class="pre">filter</span></code> function
(e.g. <code class="docutils literal"><span class="pre">ffmpeg.input('in.mp4').filter_('hflip')</span></code>)</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>parent_nodes</strong> List of source streams to apply filter to.</li>
<li><strong>filter_name</strong> ffmpeg filter name, e.g. <cite>concat</cite></li>
<li><strong>*args</strong> list of args to pass to ffmpeg verbatim</li>
<li><strong>**kwargs</strong> list of keyword-args to pass to ffmpeg verbatim</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>For custom single-source filters, see <code class="docutils literal"><span class="pre">filter_</span></code> instead.</p>
<p class="rubric">Example</p> <p class="rubric">Example</p>
<p><code class="docutils literal notranslate"><span class="pre">`</span> <p><code class="docutils literal"><span class="pre">ffmpeg.filter_multi(ffmpeg.input('in1.mp4'),</span> <span class="pre">ffmpeg.input('in2.mp4'),</span> <span class="pre">'concat',</span> <span class="pre">n=2).output('out.mp4').run()</span></code></p>
<span class="pre">split</span> <span class="pre">=</span> <span class="pre">ffmpeg.input('in.mp4').filter_multi_output('split')</span>
<span class="pre">split0</span> <span class="pre">=</span> <span class="pre">split.stream(0)</span>
<span class="pre">split1</span> <span class="pre">=</span> <span class="pre">split[1]</span>
<span class="pre">ffmpeg.concat(split0,</span> <span class="pre">split1).output('out.mp4').run()</span>
<span class="pre">`</span></code></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.hflip"> <dt id="ffmpeg.hflip">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">hflip</code><span class="sig-paren">(</span><em class="sig-param">stream</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.hflip" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">hflip</code><span class="sig-paren">(</span><em>parent_node</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.hflip" title="Permalink to this definition"></a></dt>
<dd><p>Flip the input video horizontally.</p> <dd><p>Flip the input video horizontally.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#hflip">hflip</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#hflip">hflip</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.hue"> <dt id="ffmpeg.hue">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">hue</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.hue" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">hue</code><span class="sig-paren">(</span><em>parent_node</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.hue" title="Permalink to this definition"></a></dt>
<dd><p>Modify the hue and/or the saturation of the input.</p> <dd><p>Modify the hue and/or the saturation of the input.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><ul class="simple"> <col class="field-body" />
<li><p><strong>h</strong> Specify the hue angle as a number of degrees. It accepts an expression, and defaults to “0”.</p></li> <tbody valign="top">
<li><p><strong>s</strong> Specify the saturation in the [-10,10] range. It accepts an expression and defaults to “1”.</p></li> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><p><strong>H</strong> Specify the hue angle as a number of radians. It accepts an expression, and defaults to “0”.</p></li> <li><strong>h</strong> Specify the hue angle as a number of degrees. It accepts an expression, and defaults to “0”.</li>
<li><p><strong>b</strong> Specify the brightness in the [-10,10] range. It accepts an expression and defaults to “0”.</p></li> <li><strong>s</strong> Specify the saturation in the [-10,10] range. It accepts an expression and defaults to “1”.</li>
<li><strong>H</strong> Specify the hue angle as a number of radians. It accepts an expression, and defaults to “0”.</li>
<li><strong>b</strong> Specify the brightness in the [-10,10] range. It accepts an expression and defaults to “0”.</li>
</ul> </ul>
</dd> </td>
</dl> </tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#hue">hue</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#hue">hue</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.overlay"> <dt id="ffmpeg.overlay">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">overlay</code><span class="sig-paren">(</span><em class="sig-param">main_parent_node</em>, <em class="sig-param">overlay_parent_node</em>, <em class="sig-param">eof_action='repeat'</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.overlay" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">overlay</code><span class="sig-paren">(</span><em>main_parent_node</em>, <em>overlay_parent_node</em>, <em>eof_action=urepeat</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.overlay" title="Permalink to this definition"></a></dt>
<dd><p>Overlay one video on top of another.</p> <dd><p>Overlay one video on top of another.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><ul class="simple"> <col class="field-body" />
<li><p><strong>x</strong> Set the expression for the x coordinates of the overlaid video on the main video. Default value is 0. In <tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>x</strong> Set the expression for the x coordinates of the overlaid video on the main video. Default value is 0. In
case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed
within the output visible area).</p></li> within the output visible area).</li>
<li><p><strong>y</strong> Set the expression for the y coordinates of the overlaid video on the main video. Default value is 0. In <li><strong>y</strong> Set the expression for the y coordinates of the overlaid video on the main video. Default value is 0. In
case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed
within the output visible area).</p></li> within the output visible area).</li>
<li><p><strong>eof_action</strong> <p>The action to take when EOF is encountered on the secondary input; it accepts one of the following <li><strong>eof_action</strong> <p>The action to take when EOF is encountered on the secondary input; it accepts one of the following
values:</p> values:</p>
<ul> <ul>
<li><p><code class="docutils literal notranslate"><span class="pre">repeat</span></code>: Repeat the last frame (the default).</p></li> <li><code class="docutils literal"><span class="pre">repeat</span></code>: Repeat the last frame (the default).</li>
<li><p><code class="docutils literal notranslate"><span class="pre">endall</span></code>: End both streams.</p></li> <li><code class="docutils literal"><span class="pre">endall</span></code>: End both streams.</li>
<li><p><code class="docutils literal notranslate"><span class="pre">pass</span></code>: Pass the main input through.</p></li> <li><code class="docutils literal"><span class="pre">pass</span></code>: Pass the main input through.</li>
</ul> </ul>
</p></li> </li>
<li><p><strong>eval</strong> <p>Set when the expressions for x, and y are evaluated. <li><strong>eval</strong> <p>Set when the expressions for x, and y are evaluated.
It accepts the following values:</p> It accepts the following values:</p>
<ul> <ul>
<li><dl class="simple"> <li><dl class="first docutils">
<dt><code class="docutils literal notranslate"><span class="pre">init</span></code>: only evaluate expressions once during the filter initialization or when a command is</dt><dd><p>processed</p> <dt><code class="docutils literal"><span class="pre">init</span></code>: only evaluate expressions once during the filter initialization or when a command is</dt>
</dd> <dd>processed</dd>
</dl> </dl>
</li> </li>
<li><p><code class="docutils literal notranslate"><span class="pre">frame</span></code>: evaluate expressions for each incoming frame</p></li> <li><code class="docutils literal"><span class="pre">frame</span></code>: evaluate expressions for each incoming frame</li>
</ul> </ul>
<p>Default value is <code class="docutils literal notranslate"><span class="pre">frame</span></code>.</p> <p>Default value is <code class="docutils literal"><span class="pre">frame</span></code>.</p>
</p></li> </li>
<li><p><strong>shortest</strong> If set to 1, force the output to terminate when the shortest input terminates. Default value is 0.</p></li> <li><strong>shortest</strong> If set to 1, force the output to terminate when the shortest input terminates. Default value is 0.</li>
<li><p><strong>format</strong> <p>Set the format for the output video. <li><strong>format</strong> <p>Set the format for the output video.
It accepts the following values:</p> It accepts the following values:</p>
<ul> <ul>
<li><p><code class="docutils literal notranslate"><span class="pre">yuv420</span></code>: force YUV420 output</p></li> <li><code class="docutils literal"><span class="pre">yuv420</span></code>: force YUV420 output</li>
<li><p><code class="docutils literal notranslate"><span class="pre">yuv422</span></code>: force YUV422 output</p></li> <li><code class="docutils literal"><span class="pre">yuv422</span></code>: force YUV422 output</li>
<li><p><code class="docutils literal notranslate"><span class="pre">yuv444</span></code>: force YUV444 output</p></li> <li><code class="docutils literal"><span class="pre">yuv444</span></code>: force YUV444 output</li>
<li><p><code class="docutils literal notranslate"><span class="pre">rgb</span></code>: force packed RGB output</p></li> <li><code class="docutils literal"><span class="pre">rgb</span></code>: force packed RGB output</li>
<li><p><code class="docutils literal notranslate"><span class="pre">gbrp</span></code>: force planar RGB output</p></li> <li><code class="docutils literal"><span class="pre">gbrp</span></code>: force planar RGB output</li>
</ul> </ul>
<p>Default value is <code class="docutils literal notranslate"><span class="pre">yuv420</span></code>.</p> <p>Default value is <code class="docutils literal"><span class="pre">yuv420</span></code>.</p>
</p></li> </li>
<li><p><strong>rgb</strong> (<em>deprecated</em>) If set to 1, force the filter to accept inputs in the RGB color space. Default value is 0. <li><strong>rgb</strong> (<em>deprecated</em>) If set to 1, force the filter to accept inputs in the RGB color space. Default value is 0.
This option is deprecated, use format instead.</p></li> This option is deprecated, use format instead.</li>
<li><p><strong>repeatlast</strong> If set to 1, force the filter to draw the last overlay frame over the main input until the end of <li><strong>repeatlast</strong> If set to 1, force the filter to draw the last overlay frame over the main input until the end of
the stream. A value of 0 disables this behavior. Default value is 1.</p></li> the stream. A value of 0 disables this behavior. Default value is 1.</li>
</ul> </ul>
</dd> </td>
</dl> </tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#overlay-1">overlay</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#overlay-1">overlay</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.setpts"> <dt id="ffmpeg.setpts">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">setpts</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">expr</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.setpts" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">setpts</code><span class="sig-paren">(</span><em>parent_node</em>, <em>expr</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.setpts" title="Permalink to this definition"></a></dt>
<dd><p>Change the PTS (presentation timestamp) of the input frames.</p> <dd><p>Change the PTS (presentation timestamp) of the input frames.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><p><strong>expr</strong> The expression which is evaluated for each frame to construct its timestamp.</p> <col class="field-body" />
</dd> <tbody valign="top">
</dl> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>expr</strong> The expression which is evaluated for each frame to construct its timestamp.</td>
</tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts">setpts, asetpts</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts">setpts, asetpts</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.trim"> <dt id="ffmpeg.trim">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">trim</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.trim" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">trim</code><span class="sig-paren">(</span><em>parent_node</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.trim" title="Permalink to this definition"></a></dt>
<dd><p>Trim the input so that the output contains one continuous subpart of the input.</p> <dd><p>Trim the input so that the output contains one continuous subpart of the input.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><ul class="simple"> <col class="field-body" />
<li><p><strong>start</strong> Specify the time of the start of the kept section, i.e. the frame with the timestamp start will be the <tbody valign="top">
first frame in the output.</p></li> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><p><strong>end</strong> Specify the time of the first frame that will be dropped, i.e. the frame immediately preceding the one <li><strong>start</strong> Specify the time of the start of the kept section, i.e. the frame with the timestamp start will be the
with the timestamp end will be the last frame in the output.</p></li> first frame in the output.</li>
<li><p><strong>start_pts</strong> This is the same as start, except this option sets the start timestamp in timebase units instead of <li><strong>end</strong> Specify the time of the first frame that will be dropped, i.e. the frame immediately preceding the one
seconds.</p></li> with the timestamp end will be the last frame in the output.</li>
<li><p><strong>end_pts</strong> This is the same as end, except this option sets the end timestamp in timebase units instead of <li><strong>start_pts</strong> This is the same as start, except this option sets the start timestamp in timebase units instead of
seconds.</p></li> seconds.</li>
<li><p><strong>duration</strong> The maximum duration of the output in seconds.</p></li> <li><strong>end_pts</strong> This is the same as end, except this option sets the end timestamp in timebase units instead of
<li><p><strong>start_frame</strong> The number of the first frame that should be passed to the output.</p></li> seconds.</li>
<li><p><strong>end_frame</strong> The number of the first frame that should be dropped.</p></li> <li><strong>duration</strong> The maximum duration of the output in seconds.</li>
<li><strong>start_frame</strong> The number of the first frame that should be passed to the output.</li>
<li><strong>end_frame</strong> The number of the first frame that should be dropped.</li>
</ul> </ul>
</dd> </td>
</dl> </tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#trim">trim</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#trim">trim</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.vflip"> <dt id="ffmpeg.vflip">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">vflip</code><span class="sig-paren">(</span><em class="sig-param">stream</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.vflip" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">vflip</code><span class="sig-paren">(</span><em>parent_node</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.vflip" title="Permalink to this definition"></a></dt>
<dd><p>Flip the input video vertically.</p> <dd><p>Flip the input video vertically.</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#vflip">vflip</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#vflip">vflip</a></p>
</dd></dl> </dd></dl>
<dl class="function"> <dl class="function">
<dt id="ffmpeg.zoompan"> <dt id="ffmpeg.zoompan">
<code class="sig-prename descclassname">ffmpeg.</code><code class="sig-name descname">zoompan</code><span class="sig-paren">(</span><em class="sig-param">stream</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.zoompan" title="Permalink to this definition"></a></dt> <code class="descclassname">ffmpeg.</code><code class="descname">zoompan</code><span class="sig-paren">(</span><em>parent_node</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.zoompan" title="Permalink to this definition"></a></dt>
<dd><p>Apply Zoom &amp; Pan effect.</p> <dd><p>Apply Zoom &amp; Pan effect.</p>
<dl class="field-list simple"> <table class="docutils field-list" frame="void" rules="none">
<dt class="field-odd">Parameters</dt> <col class="field-name" />
<dd class="field-odd"><ul class="simple"> <col class="field-body" />
<li><p><strong>zoom</strong> Set the zoom expression. Default is 1.</p></li> <tbody valign="top">
<li><p><strong>x</strong> Set the x expression. Default is 0.</p></li> <tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><p><strong>y</strong> Set the y expression. Default is 0.</p></li> <li><strong>zoom</strong> Set the zoom expression. Default is 1.</li>
<li><p><strong>d</strong> Set the duration expression in number of frames. This sets for how many number of frames effect will last <li><strong>x</strong> Set the x expression. Default is 0.</li>
for single input image.</p></li> <li><strong>y</strong> Set the y expression. Default is 0.</li>
<li><p><strong>s</strong> Set the output image size, default is <code class="docutils literal notranslate"><span class="pre">hd720</span></code>.</p></li> <li><strong>d</strong> Set the duration expression in number of frames. This sets for how many number of frames effect will last
<li><p><strong>fps</strong> Set the output frame rate, default is 25.</p></li> for single input image.</li>
<li><p><strong>z</strong> Alias for <code class="docutils literal notranslate"><span class="pre">zoom</span></code>.</p></li> <li><strong>s</strong> Set the output image size, default is <code class="docutils literal"><span class="pre">hd720</span></code>.</li>
<li><strong>fps</strong> Set the output frame rate, default is 25.</li>
<li><strong>z</strong> Alias for <code class="docutils literal"><span class="pre">zoom</span></code>.</li>
</ul> </ul>
</dd> </td>
</dl> </tr>
</tbody>
</table>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#zoompan">zoompan</a></p> <p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg-filters.html#zoompan">zoompan</a></p>
</dd></dl> </dd></dl>
<dl class="function">
<dt id="ffmpeg.input">
<code class="descclassname">ffmpeg.</code><code class="descname">input</code><span class="sig-paren">(</span><em>filename</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.input" title="Permalink to this definition"></a></dt>
<dd><p>Input file URL (ffmpeg <code class="docutils literal"><span class="pre">-i</span></code> option)</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Main-options">Main options</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.merge_outputs">
<code class="descclassname">ffmpeg.</code><code class="descname">merge_outputs</code><span class="sig-paren">(</span><em>*parent_nodes</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.merge_outputs" title="Permalink to this definition"></a></dt>
<dd><p>Include all given outputs in one ffmpeg command line</p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.output">
<code class="descclassname">ffmpeg.</code><code class="descname">output</code><span class="sig-paren">(</span><em>parent_node</em>, <em>filename</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.output" title="Permalink to this definition"></a></dt>
<dd><p>Output file URL</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Synopsis">Synopsis</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.overwrite_output">
<code class="descclassname">ffmpeg.</code><code class="descname">overwrite_output</code><span class="sig-paren">(</span><em>parent_node</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.overwrite_output" title="Permalink to this definition"></a></dt>
<dd><p>Overwrite output files without asking (ffmpeg <code class="docutils literal"><span class="pre">-y</span></code> option)</p>
<p>Official documentation: <a class="reference external" href="https://ffmpeg.org/ffmpeg.html#Main-options">Main options</a></p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.get_args">
<code class="descclassname">ffmpeg.</code><code class="descname">get_args</code><span class="sig-paren">(</span><em>node</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.get_args" title="Permalink to this definition"></a></dt>
<dd><p>Get command-line arguments for ffmpeg.</p>
</dd></dl>
<dl class="function">
<dt id="ffmpeg.run">
<code class="descclassname">ffmpeg.</code><code class="descname">run</code><span class="sig-paren">(</span><em>node</em>, <em>cmd=uffmpeg</em><span class="sig-paren">)</span><a class="headerlink" href="#ffmpeg.run" title="Permalink to this definition"></a></dt>
<dd><p>Run ffmpeg on node graph.</p>
</dd></dl>
</div> </div>
<div class="section" id="indices-and-tables"> <div class="section" id="indices-and-tables">
<h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline"></a></h1> <h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline"></a></h1>
<ul class="simple"> <ul class="simple">
<li><p><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></p></li> <li><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></li>
<li><p><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></p></li> <li><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></li>
<li><p><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></p></li> <li><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></li>
</ul> </ul>
</div> </div>
@ -715,7 +394,7 @@ for single input image.</p></li>
</div> </div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation"> <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper"> <div class="sphinxsidebarwrapper">
<h3><a href="#">Table of Contents</a></h3> <h3><a href="#">Table Of Contents</a></h3>
<ul> <ul>
<li><a class="reference internal" href="#">ffmpeg-python: Python bindings for FFmpeg</a></li> <li><a class="reference internal" href="#">ffmpeg-python: Python bindings for FFmpeg</a></li>
<li><a class="reference internal" href="#indices-and-tables">Indices and tables</a></li> <li><a class="reference internal" href="#indices-and-tables">Indices and tables</a></li>
@ -729,13 +408,13 @@ for single input image.</p></li>
</ul> </ul>
</div> </div>
<div id="searchbox" style="display: none" role="search"> <div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3> <h3>Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get"> <form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" /> <div><input type="text" name="q" /></div>
<input type="submit" value="Go" /> <div><input type="submit" value="Go" /></div>
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form> </form>
</div>
</div> </div>
<script type="text/javascript">$('#searchbox').show(0);</script> <script type="text/javascript">$('#searchbox').show(0);</script>
</div> </div>
@ -756,7 +435,7 @@ for single input image.</p></li>
</div> </div>
<div class="footer" role="contentinfo"> <div class="footer" role="contentinfo">
&#169; Copyright 2017, Karl Kroening. &#169; Copyright 2017, Karl Kroening.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 2.1.0. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.2.
</div> </div>
</body> </body>
</html> </html>

Binary file not shown.

View File

@ -1,17 +1,29 @@
<!DOCTYPE html> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"> <html xmlns="http://www.w3.org/1999/xhtml">
<head> <head>
<meta charset="utf-8" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Python Module Index &#8212; ffmpeg-python documentation</title> <title>Python Module Index &#8212; ffmpeg-python documentation</title>
<link rel="stylesheet" href="_static/nature.css" type="text/css" /> <link rel="stylesheet" href="_static/nature.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt'
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script> <script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<link rel="index" title="Index" href="genindex.html" /> <link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" /> <link rel="search" title="Search" href="search.html" />
@ -21,7 +33,8 @@
</script> </script>
</head><body> </head>
<body>
<div class="related" role="navigation" aria-label="related navigation"> <div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3> <h3>Navigation</h3>
<ul> <ul>
@ -65,13 +78,13 @@
<div class="sphinxsidebar" role="navigation" aria-label="main navigation"> <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper"> <div class="sphinxsidebarwrapper">
<div id="searchbox" style="display: none" role="search"> <div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3> <h3>Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get"> <form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" /> <div><input type="text" name="q" /></div>
<input type="submit" value="Go" /> <div><input type="submit" value="Go" /></div>
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form> </form>
</div>
</div> </div>
<script type="text/javascript">$('#searchbox').show(0);</script> <script type="text/javascript">$('#searchbox').show(0);</script>
</div> </div>
@ -92,7 +105,7 @@
</div> </div>
<div class="footer" role="contentinfo"> <div class="footer" role="contentinfo">
&#169; Copyright 2017, Karl Kroening. &#169; Copyright 2017, Karl Kroening.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 2.1.0. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.2.
</div> </div>
</body> </body>
</html> </html>

View File

@ -1,25 +1,41 @@
<!DOCTYPE html> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"> <html xmlns="http://www.w3.org/1999/xhtml">
<head> <head>
<meta charset="utf-8" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Search &#8212; ffmpeg-python documentation</title> <title>Search &#8212; ffmpeg-python documentation</title>
<link rel="stylesheet" href="_static/nature.css" type="text/css" /> <link rel="stylesheet" href="_static/nature.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script> <script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt'
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script> <script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<script type="text/javascript" src="_static/searchtools.js"></script> <script type="text/javascript" src="_static/searchtools.js"></script>
<link rel="index" title="Index" href="genindex.html" /> <link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="#" /> <link rel="search" title="Search" href="#" />
<script type="text/javascript" src="searchindex.js" defer></script> <script type="text/javascript">
jQuery(function() { Search.loadIndex("searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
</head><body> </head>
<body>
<div class="related" role="navigation" aria-label="related navigation"> <div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3> <h3>Navigation</h3>
<ul> <ul>
@ -85,7 +101,7 @@
</div> </div>
<div class="footer" role="contentinfo"> <div class="footer" role="contentinfo">
&#169; Copyright 2017, Karl Kroening. &#169; Copyright 2017, Karl Kroening.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 2.1.0. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.2.
</div> </div>
</body> </body>
</html> </html>

File diff suppressed because one or more lines are too long

Binary file not shown.

Before

Width:  |  Height:  |  Size: 912 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 461 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

Binary file not shown.

View File

@ -1,263 +0,0 @@
# Examples
## [Get video info (ffprobe)](https://github.com/kkroening/ffmpeg-python/blob/master/examples/video_info.py#L15)
```python
probe = ffmpeg.probe(args.in_filename)
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
width = int(video_stream['width'])
height = int(video_stream['height'])
```
## [Generate thumbnail for video](https://github.com/kkroening/ffmpeg-python/blob/master/examples/get_video_thumbnail.py#L21)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/get_video_thumbnail.png" alt="get-video-thumbnail graph" width="30%" />
```python
(
ffmpeg
.input(in_filename, ss=time)
.filter('scale', width, -1)
.output(out_filename, vframes=1)
.run()
)
```
## [Convert video to numpy array](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/ffmpeg-numpy.png" alt="ffmpeg-numpy graph" width="20%" />
```python
out, _ = (
ffmpeg
.input('in.mp4')
.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True)
)
video = (
np
.frombuffer(out, np.uint8)
.reshape([-1, height, width, 3])
)
```
## [Read single video frame as jpeg through pipe](https://github.com/kkroening/ffmpeg-python/blob/master/examples/read_frame_as_jpeg.py#L16)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/read_frame_as_jpeg.png" alt="read-frame-as-jpeg graph" width="30%" />
```python
out, _ = (
ffmpeg
.input(in_filename)
.filter('select', 'gte(n,{})'.format(frame_num))
.output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
.run(capture_stdout=True)
)
```
## [Convert sound to raw PCM audio](https://github.com/kkroening/ffmpeg-python/blob/master/examples/transcribe.py#L23)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/transcribe.png" alt="transcribe graph" width="30%" />
```python
out, _ = (ffmpeg
.input(in_filename, **input_kwargs)
.output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run(capture_stdout=True)
)
```
## Assemble video from sequence of frames
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/glob.png" alt="glob" width="25%" />
```python
(
ffmpeg
.input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25)
.output('movie.mp4')
.run()
)
```
With additional filtering:
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/glob-filter.png" alt="glob-filter" width="50%" />
```python
(
ffmpeg
.input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25)
.filter('deflicker', mode='pm', size=10)
.filter('scale', size='hd1080', force_original_aspect_ratio='increase')
.output('movie.mp4', crf=20, preset='slower', movflags='faststart', pix_fmt='yuv420p')
.view(filename='filter_graph')
.run()
)
```
## Audio/video pipeline
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/av-pipeline.png" alt="av-pipeline graph" width="80%" />
```python
in1 = ffmpeg.input('in1.mp4')
in2 = ffmpeg.input('in2.mp4')
v1 = in1.video.hflip()
a1 = in1.audio
v2 = in2.video.filter('reverse').filter('hue', s=0)
a2 = in2.audio.filter('areverse').filter('aphaser')
joined = ffmpeg.concat(v1, a1, v2, a2, v=1, a=1).node
v3 = joined[0]
a3 = joined[1].filter('volume', 0.8)
out = ffmpeg.output(v3, a3, 'out.mp4')
out.run()
```
## Mono to stereo with offsets and video
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/mono-to-stereo.png" alt="mono-to-stereo graph" width="80%" />
```python
audio_left = (
ffmpeg
.input('audio-left.wav')
.filter('atrim', start=5)
.filter('asetpts', 'PTS-STARTPTS')
)
audio_right = (
ffmpeg
.input('audio-right.wav')
.filter('atrim', start=10)
.filter('asetpts', 'PTS-STARTPTS')
)
input_video = ffmpeg.input('input-video.mp4')
(
ffmpeg
.filter((audio_left, audio_right), 'join', inputs=2, channel_layout='stereo')
.output(input_video.video, 'output-video.mp4', shortest=None, vcodec='copy')
.overwrite_output()
.run()
)
```
## [Jupyter Frame Viewer](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-screenshot.png" alt="jupyter screenshot" width="75%" />
## [Jupyter Stream Editor](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-demo.gif" alt="jupyter demo" width="75%" />
## [Tensorflow Streaming](https://github.com/kkroening/ffmpeg-python/blob/master/examples/tensorflow_stream.py)
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/tensorflow-stream.png" alt="tensorflow streaming; challenge mode: combine this with the webcam example below" width="55%" />
- Decode input video with ffmpeg
- Process video with tensorflow using "deep dream" example
- Encode output video with ffmpeg
```python
process1 = (
ffmpeg
.input(in_filename)
.output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=8)
.run_async(pipe_stdout=True)
)
process2 = (
ffmpeg
.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
)
while True:
in_bytes = process1.stdout.read(width * height * 3)
if not in_bytes:
break
in_frame = (
np
.frombuffer(in_bytes, np.uint8)
.reshape([height, width, 3])
)
# See examples/tensorflow_stream.py:
out_frame = deep_dream.process_frame(in_frame)
process2.stdin.write(
out_frame
.astype(np.uint8)
.tobytes()
)
process2.stdin.close()
process1.wait()
process2.wait()
```
<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/examples/graphs/dream.png" alt="deep dream streaming" width="40%" />
## [FaceTime webcam input (OS X)](https://github.com/kkroening/ffmpeg-python/blob/master/examples/facetime.py)
```python
(
ffmpeg
.input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)
.output('out.mp4', pix_fmt='yuv420p', vframes=100)
.run()
)
```
## Stream from a local video to HTTP server
```python
video_format = "flv"
server_url = "http://127.0.0.1:8080"
process = (
ffmpeg
.input("input.mp4")
.output(
server_url,
        codec="copy",    # use the same codecs as the original video
listen=1, # enables HTTP server
f=video_format)
.global_args("-re") # argument to act as a live stream
.run()
)
```
To receive the video stream, use ffplay in a terminal:
```
$ ffplay -f flv http://localhost:8080
```
## Stream from RTSP server to TCP socket
```python
packet_size = 4096
process = (
ffmpeg
    .input('rtsp://127.0.0.1:8554/default')
.output('-', format='h264')
.run_async(pipe_stdout=True)
)
while process.poll() is None:
packet = process.stdout.read(packet_size)
try:
tcp_socket.send(packet)
except socket.error:
process.stdout.close()
process.wait()
break
```

View File

@ -1,8 +0,0 @@
import ffmpeg
# Record from the macOS FaceTime camera via the `avfoundation` input device
# (uyvy422 is the camera's native pixel format) and write the first 100
# frames to out.mp4, using yuv420p for broad player compatibility.
(
    ffmpeg
    .input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)
    .output('out.mp4', pix_fmt='yuv420p', vframes=100)
    .run()
)

View File

@ -1,216 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from ipywidgets import interact\n",
"from matplotlib import pyplot as plt\n",
"import ffmpeg\n",
"import ipywidgets as widgets\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"probe = ffmpeg.probe('in.mp4')\n",
"video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')\n",
"width = int(video_info['width'])\n",
"height = int(video_info['height'])\n",
"num_frames = int(video_info['nb_frames'])"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5f63dc164956464c994ec58d86ee7cd9",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"interactive(children=(IntSlider(value=0, description='frame', max=209), Output()), _dom_classes=('widget-inter…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"out, err = (\n",
" ffmpeg\n",
" .input('in.mp4')\n",
" .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
" .run(capture_stdout=True)\n",
")\n",
"video = (\n",
" np\n",
" .frombuffer(out, np.uint8)\n",
" .reshape([-1, height, width, 3])\n",
")\n",
"\n",
"@interact(frame=(0, num_frames))\n",
"def show_frame(frame=0):\n",
" plt.imshow(video[frame,:,:,:])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "84bcac52195f47f8854f09acd7666b84",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"interactive(children=(Checkbox(value=True, description='enable_overlay'), Checkbox(value=True, description='en…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from io import BytesIO\n",
"from PIL import Image\n",
"\n",
"\n",
"def extract_frame(stream, frame_num):\n",
" while isinstance(stream, ffmpeg.nodes.OutputStream):\n",
" stream = stream.node.incoming_edges[0].upstream_node.stream()\n",
" out, _ = (\n",
" stream\n",
" .filter_('select', 'gte(n,{})'.format(frame_num))\n",
" .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=1)\n",
" .run(capture_stdout=True, capture_stderr=True)\n",
" )\n",
" return np.frombuffer(out, np.uint8).reshape([height, width, 3])\n",
"\n",
"\n",
"def png_to_np(png_bytes):\n",
" buffer = BytesIO(png_bytes)\n",
" pil_image = Image.open(buffer)\n",
" return np.array(pil_image)\n",
" \n",
"\n",
"def build_graph(\n",
" enable_overlay, flip_overlay, enable_box, box_x, box_y,\n",
" thickness, color):\n",
"\n",
" stream = ffmpeg.input('in.mp4')\n",
"\n",
" if enable_overlay:\n",
" overlay = ffmpeg.input('overlay.png')\n",
" if flip_overlay:\n",
" overlay = overlay.hflip()\n",
" stream = stream.overlay(overlay)\n",
"\n",
" if enable_box:\n",
" stream = stream.drawbox(\n",
" box_x, box_y, 120, 120, color=color, t=thickness)\n",
"\n",
" return stream.output('out.mp4')\n",
"\n",
"\n",
"def show_image(ax, stream, frame_num):\n",
" try:\n",
" image = extract_frame(stream, frame_num)\n",
" ax.imshow(image)\n",
" ax.axis('off')\n",
" except ffmpeg.Error as e:\n",
" print(e.stderr.decode())\n",
"\n",
"\n",
"def show_graph(ax, stream, detail):\n",
" data = ffmpeg.view(stream, detail=detail, pipe=True)\n",
" image = png_to_np(data)\n",
" ax.imshow(image, aspect='equal', interpolation='hanning')\n",
" ax.set_xlim(0, 1100)\n",
" ax.axis('off')\n",
"\n",
"\n",
"@interact(\n",
" frame_num=(0, num_frames),\n",
" box_x=(0, 200),\n",
" box_y=(0, 200),\n",
" thickness=(1, 40),\n",
" color=['red', 'green', 'magenta', 'blue'],\n",
")\n",
"def f(\n",
" enable_overlay=True,\n",
" enable_box=True,\n",
" flip_overlay=True,\n",
" graph_detail=False,\n",
" frame_num=0,\n",
" box_x=50,\n",
" box_y=50,\n",
" thickness=5,\n",
" color='red'):\n",
"\n",
" stream = build_graph(\n",
" enable_overlay,\n",
" flip_overlay,\n",
" enable_box,\n",
" box_x,\n",
" box_y,\n",
" thickness,\n",
" color\n",
" )\n",
"\n",
" fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15,4))\n",
" plt.tight_layout()\n",
" show_image(ax0, stream, frame_num)\n",
" show_graph(ax1, stream, graph_detail)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -1,35 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import argparse
import ffmpeg
import sys
# Command-line interface for the thumbnail generator.
parser = argparse.ArgumentParser(description='Generate video thumbnail')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
parser.add_argument(
    # Fixed: was `type=int`, which contradicted the float default (0.1) and
    # rejected fractional offsets such as `--time 1.5`.
    '--time', type=float, default=0.1, help='Time offset')
parser.add_argument(
    '--width', type=int, default=120,
    help='Width of output thumbnail (height automatically determined by aspect ratio)')
def generate_thumbnail(in_filename, out_filename, time, width):
    """Extract one frame of ``in_filename`` at offset ``time`` as a thumbnail.

    The frame is scaled to ``width`` pixels wide; height is chosen by ffmpeg
    to preserve the aspect ratio (scale=width:-1).  On ffmpeg failure the
    captured stderr is printed and the process exits with status 1.
    """
    pipeline = ffmpeg.input(in_filename, ss=time)
    pipeline = pipeline.filter('scale', width, -1)
    pipeline = pipeline.output(out_filename, vframes=1).overwrite_output()
    try:
        pipeline.run(capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as e:
        print(e.stderr.decode(), file=sys.stderr)
        sys.exit(1)
if __name__ == '__main__':
    # CLI entry point: parse arguments (parser defined at module level)
    # and write the thumbnail.
    args = parser.parse_args()
    generate_thumbnail(args.in_filename, args.out_filename, args.time, args.width)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 700 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.0 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.2 KiB

View File

@ -1,28 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import argparse
import ffmpeg
import sys
parser = argparse.ArgumentParser(
description='Read individual video frame into memory as jpeg and write to stdout')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('frame_num', help='Frame number')
def read_frame_as_jpeg(in_filename, frame_num):
    """Return frame number ``frame_num`` of ``in_filename`` as JPEG bytes.

    Uses the ffmpeg `select` filter to pick the first frame whose index is
    >= ``frame_num``, encodes it as mjpeg, and captures it from stdout.
    """
    # Fixed: the second tuple element was bound to an unused `err` name;
    # it is always None here because stderr is not captured.
    out, _ = (
        ffmpeg
        .input(in_filename)
        .filter('select', 'gte(n,{})'.format(frame_num))
        .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
        .run(capture_stdout=True)
    )
    return out
if __name__ == '__main__':
    args = parser.parse_args()
    out = read_frame_as_jpeg(args.in_filename, args.frame_num)
    # Write the raw JPEG bytes to stdout (binary-safe via .buffer).
    sys.stdout.buffer.write(out)

View File

@ -1,9 +0,0 @@
ffmpeg-python
gevent
google-cloud-speech
graphviz
ipywidgets
jupyter
matplotlib
Pillow
tqdm

View File

@ -1,130 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
from tqdm import tqdm
import argparse
import contextlib
import ffmpeg
import gevent
import gevent.monkey; gevent.monkey.patch_all(thread=False)
import os
import shutil
import socket
import sys
import tempfile
import textwrap
parser = argparse.ArgumentParser(description=textwrap.dedent('''\
Process video and report and show progress bar.
This is an example of using the ffmpeg `-progress` option with a
unix-domain socket to report progress in the form of a progress
bar.
The video processing simply consists of converting the video to
sepia colors, but the same pattern can be applied to other use
cases.
'''))
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
@contextlib.contextmanager
def _tmpdir_scope():
tmpdir = tempfile.mkdtemp()
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def _do_watch_progress(filename, sock, handler):
    """Function to run in a separate gevent greenlet to read progress
    events from a unix-domain socket.

    NOTE(review): ``filename`` is unused in the body; the caller passes the
    socket path, apparently for context only.
    """
    # Block until ffmpeg connects to the progress socket.
    connection, client_address = sock.accept()
    data = b''
    try:
        while True:
            more_data = connection.recv(16)
            if not more_data:
                break
            data += more_data
            # ffmpeg emits newline-delimited `key=value` lines; process every
            # complete line and keep the trailing partial line in `data` for
            # the next recv().
            lines = data.split(b'\n')
            for line in lines[:-1]:
                line = line.decode()
                parts = line.split('=')
                key = parts[0] if len(parts) > 0 else None
                value = parts[1] if len(parts) > 1 else None
                handler(key, value)
            data = lines[-1]
    finally:
        connection.close()
@contextlib.contextmanager
def _watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket filename is yielded from the context manager and the
    socket is closed when the context manager is exited.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value``
            argument. (The example ``show_progress`` below uses tqdm)

    Yields:
        socket_filename: the name of the socket file.
    """
    with _tmpdir_scope() as tmpdir:
        socket_filename = os.path.join(tmpdir, 'sock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with contextlib.closing(sock):
            sock.bind(socket_filename)
            sock.listen(1)
            # Read progress events concurrently while the caller runs ffmpeg.
            child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
            try:
                yield socket_filename
            except:
                # Stop the reader greenlet before propagating the error.
                gevent.kill(child)
                raise
@contextlib.contextmanager
def show_progress(total_duration):
    """Create a unix-domain socket to watch progress and render tqdm
    progress bar."""
    with tqdm(total=round(total_duration, 2)) as bar:
        def handler(key, value):
            if key == 'out_time_ms':
                # The value is treated as microseconds (divided by 1e6 to get
                # seconds) despite the `_ms` suffix in the key name.
                time = round(float(value) / 1000000., 2)
                bar.update(time - bar.n)
            elif key == 'progress' and value == 'end':
                # Snap the bar to 100% when ffmpeg reports completion.
                bar.update(bar.total - bar.n)
        with _watch_progress(handler) as socket_filename:
            yield socket_filename
if __name__ == '__main__':
    args = parser.parse_args()
    # Total duration (seconds) from ffprobe, used to size the progress bar.
    total_duration = float(ffmpeg.probe(args.in_filename)['format']['duration'])
    with show_progress(total_duration) as socket_filename:
        # See https://ffmpeg.org/ffmpeg-filters.html#Examples-44
        sepia_values = [.393, .769, .189, 0, .349, .686, .168, 0, .272, .534, .131]
        try:
            (ffmpeg
                .input(args.in_filename)
                .colorchannelmixer(*sepia_values)
                .output(args.out_filename)
                # Stream `key=value` progress events to our unix-domain socket.
                .global_args('-progress', 'unix://{}'.format(socket_filename))
                .overwrite_output()
                .run(capture_stdout=True, capture_stderr=True)
            )
        except ffmpeg.Error as e:
            # NOTE(review): e.stderr is bytes; printing it shows the b'...' repr.
            print(e.stderr, file=sys.stderr)
            sys.exit(1)

View File

@ -1,141 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import argparse
import errno
import ffmpeg
import logging
import os
import re
import subprocess
import sys
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
DEFAULT_DURATION = 0.3
DEFAULT_THRESHOLD = -60
parser = argparse.ArgumentParser(description='Split media into separate chunks wherever silence occurs')
parser.add_argument('in_filename', help='Input filename (`-` for stdin)')
parser.add_argument('out_pattern', help='Output filename pattern (e.g. `out/chunk_{:04d}.wav`)')
parser.add_argument('--silence-threshold', default=DEFAULT_THRESHOLD, type=int, help='Silence threshold (in dB)')
parser.add_argument('--silence-duration', default=DEFAULT_DURATION, type=float, help='Silence duration')
parser.add_argument('--start-time', type=float, help='Start time (seconds)')
parser.add_argument('--end-time', type=float, help='End time (seconds)')
parser.add_argument('-v', dest='verbose', action='store_true', help='Verbose mode')
# Matches ffmpeg silencedetect stderr lines, e.g. ` silence_start: 1.234`.
silence_start_re = re.compile(r' silence_start: (?P<start>[0-9]+(\.?[0-9]*))$')
silence_end_re = re.compile(r' silence_end: (?P<end>[0-9]+(\.?[0-9]*)) ')
# Matches ffmpeg's periodic stats line (`size=... time=HH:MM:SS.ss bitrate=`),
# used to recover the stream's total duration.
total_duration_re = re.compile(
    r'size=[^ ]+ time=(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9\.]{5}) bitrate=')
def _logged_popen(cmd_line, *args, **kwargs):
    """subprocess.Popen wrapper that debug-logs the rendered command line."""
    rendered = subprocess.list2cmdline(cmd_line)
    logger.debug('Running command: {}'.format(rendered))
    return subprocess.Popen(cmd_line, *args, **kwargs)
def get_chunk_times(in_filename, silence_threshold, silence_duration, start_time=None, end_time=None):
    """Run ffmpeg `silencedetect` and return a list of (start, end) times.

    Chunks are the non-silent spans of the input: a chunk starts where
    silence ends and ends where silence starts.  Boundaries are recovered by
    parsing ffmpeg's stderr output with the module-level regexes.
    """
    input_kwargs = {}
    if start_time is not None:
        input_kwargs['ss'] = start_time
    else:
        start_time = 0.
    if end_time is not None:
        input_kwargs['t'] = end_time - start_time
    p = _logged_popen(
        (ffmpeg
            .input(in_filename, **input_kwargs)
            .filter('silencedetect', n='{}dB'.format(silence_threshold), d=silence_duration)
            .output('-', format='null')
            .compile()
        ) + ['-nostats'],  # FIXME: use .nostats() once it's implemented in ffmpeg-python.
        stderr=subprocess.PIPE
    )
    # silencedetect reports on stderr; the null muxer discards actual output.
    output = p.communicate()[1].decode('utf-8')
    if p.returncode != 0:
        sys.stderr.write(output)
        sys.exit(1)
    logger.debug(output)
    lines = output.splitlines()
    # Chunks start when silence ends, and chunks end when silence starts.
    chunk_starts = []
    chunk_ends = []
    for line in lines:
        silence_start_match = silence_start_re.search(line)
        silence_end_match = silence_end_re.search(line)
        total_duration_match = total_duration_re.search(line)
        if silence_start_match:
            chunk_ends.append(float(silence_start_match.group('start')))
            if len(chunk_starts) == 0:
                # Started with non-silence.
                chunk_starts.append(start_time or 0.)
        elif silence_end_match:
            chunk_starts.append(float(silence_end_match.group('end')))
        elif total_duration_match:
            # Recover the total duration from ffmpeg's final stats line so the
            # last chunk can be closed if the input ends with non-silence.
            hours = int(total_duration_match.group('hours'))
            minutes = int(total_duration_match.group('minutes'))
            seconds = float(total_duration_match.group('seconds'))
            end_time = hours * 3600 + minutes * 60 + seconds
    if len(chunk_starts) == 0:
        # No silence found.
        chunk_starts.append(start_time)
    if len(chunk_starts) > len(chunk_ends):
        # Finished with non-silence.
        chunk_ends.append(end_time or 10000000.)
    return list(zip(chunk_starts, chunk_ends))
def _makedirs(path):
"""Python2-compatible version of ``os.makedirs(path, exist_ok=True)``."""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
def split_audio(
    in_filename,
    out_pattern,
    silence_threshold=DEFAULT_THRESHOLD,
    silence_duration=DEFAULT_DURATION,
    start_time=None,
    end_time=None,
    verbose=False,
):
    """Split ``in_filename`` into one output file per non-silent chunk.

    Chunk boundaries come from ``get_chunk_times``; each chunk is written by
    a separate ffmpeg invocation using `-ss`/`-t` seek/duration options.
    """
    chunk_times = get_chunk_times(in_filename, silence_threshold, silence_duration, start_time, end_time)
    for i, (start_time, end_time) in enumerate(chunk_times):
        time = end_time - start_time
        # The pattern may use positional ({0}/{}) or named ({i}) interpolation.
        out_filename = out_pattern.format(i, i=i)
        _makedirs(os.path.dirname(out_filename))
        logger.info('{}: start={:.02f}, end={:.02f}, duration={:.02f}'.format(out_filename, start_time, end_time,
                                                                              time))
        _logged_popen(
            (ffmpeg
                .input(in_filename, ss=start_time, t=time)
                .output(out_filename)
                .overwrite_output()
                .compile()
            ),
            # Suppress ffmpeg's own output unless verbose mode is enabled.
            stdout=subprocess.PIPE if not verbose else None,
            stderr=subprocess.PIPE if not verbose else None,
        ).communicate()
if __name__ == '__main__':
    kwargs = vars(parser.parse_args())
    if kwargs['verbose']:
        # Fixed: the format was '%(levels): %(message)s' — `levels` is not a
        # LogRecord attribute and the placeholder lacked its `s` conversion,
        # so emitting any record would raise. Use '%(levelname)s' instead.
        logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
        logger.setLevel(logging.DEBUG)
    split_audio(**kwargs)

View File

@ -1,248 +0,0 @@
'''Example streaming ffmpeg numpy processing.
Demonstrates using ffmpeg to decode video input, process the frames in
python, and then encode video output using ffmpeg.
This example uses two ffmpeg processes - one to decode the input video
and one to encode an output video - while the raw frame processing is
done in python with numpy.
At a high level, the signal graph looks like this:
(input video) -> [ffmpeg process 1] -> [python] -> [ffmpeg process 2] -> (output video)
This example reads/writes video files on the local filesystem, but the
same pattern can be used for other kinds of input/output (e.g. webcam,
rtmp, etc.).
The simplest processing example simply darkens each frame by
multiplying the frame's numpy array by a constant value; see
``process_frame_simple``.
A more sophisticated example processes each frame with tensorflow using
the "deep dream" tensorflow tutorial; activate this mode by calling
the script with the optional `--dream` argument. (Make sure tensorflow
is installed before running)
'''
from __future__ import print_function
import argparse
import ffmpeg
import logging
import numpy as np
import os
import subprocess
import zipfile
parser = argparse.ArgumentParser(description='Example streaming ffmpeg numpy processing')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
parser.add_argument(
'--dream', action='store_true', help='Use DeepDream frame processing (requires tensorflow)')
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_video_size(filename):
    """Probe ``filename`` with ffprobe and return its (width, height) in pixels."""
    logger.info('Getting video size for {!r}'.format(filename))
    probe = ffmpeg.probe(filename)
    video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
    return int(video_info['width']), int(video_info['height'])
def start_ffmpeg_process1(in_filename):
    """Spawn the decoder process: ffmpeg reads ``in_filename`` and emits raw
    rgb24 frames on its stdout pipe."""
    logger.info('Starting ffmpeg process1')
    cmd = (
        ffmpeg
        .input(in_filename)
        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
        .compile()
    )
    return subprocess.Popen(cmd, stdout=subprocess.PIPE)
def start_ffmpeg_process2(out_filename, width, height):
    """Spawn the encoder process: ffmpeg consumes raw rgb24 frames of the
    given size on stdin and writes ``out_filename`` as yuv420p."""
    logger.info('Starting ffmpeg process2')
    frame_geometry = '{}x{}'.format(width, height)
    cmd = (
        ffmpeg
        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s=frame_geometry)
        .output(out_filename, pix_fmt='yuv420p')
        .overwrite_output()
        .compile()
    )
    return subprocess.Popen(cmd, stdin=subprocess.PIPE)
def read_frame(process1, width, height):
    """Read one rgb24 frame from the decoder's stdout.

    Returns a (height, width, 3) uint8 array, or None at end of stream.
    """
    logger.debug('Reading frame')
    frame_size = width * height * 3  # RGB24 == 3 bytes per pixel.
    in_bytes = process1.stdout.read(frame_size)
    if not in_bytes:
        return None
    assert len(in_bytes) == frame_size
    return np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
def process_frame_simple(frame):
    """Simple processing example: darken the frame to 30% brightness.

    The multiplication promotes a uint8 input to a float array; the caller
    (``write_frame``) casts back to uint8 before writing.
    """
    return 0.3 * frame
def write_frame(process2, frame):
    """Send one frame to the encoder's stdin as raw uint8 rgb24 bytes."""
    logger.debug('Writing frame')
    payload = frame.astype(np.uint8).tobytes()
    process2.stdin.write(payload)
def run(in_filename, out_filename, process_frame):
    """Pump frames: decode with ffmpeg, transform in Python, re-encode.

    ``process_frame`` maps one numpy frame to one output frame.
    """
    width, height = get_video_size(in_filename)
    process1 = start_ffmpeg_process1(in_filename)
    process2 = start_ffmpeg_process2(out_filename, width, height)
    while True:
        in_frame = read_frame(process1, width, height)
        if in_frame is None:
            logger.info('End of input stream')
            break
        logger.debug('Processing frame')
        out_frame = process_frame(in_frame)
        write_frame(process2, out_frame)
    logger.info('Waiting for ffmpeg process1')
    process1.wait()
    logger.info('Waiting for ffmpeg process2')
    # Closing stdin signals EOF so the encoder can flush and exit.
    process2.stdin.close()
    process2.wait()
    logger.info('Done')
class DeepDream(object):
    '''DeepDream implementation, adapted from official tensorflow deepdream tutorial:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/deepdream
    Credit: Alexander Mordvintsev

    NOTE(review): `tf` is not imported at module scope; `__main__` imports
    tensorflow only when --dream is passed, so this class must not be used
    unless that import has run.
    '''
    _DOWNLOAD_URL = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
    _ZIP_FILENAME = 'deepdream_model.zip'
    _MODEL_FILENAME = 'tensorflow_inception_graph.pb'
    @staticmethod
    def _download_model():
        # Fetch and unpack the pretrained Inception5h graph into the CWD.
        logger.info('Downloading deepdream model...')
        try:
            from urllib.request import urlretrieve  # python 3
        except ImportError:
            from urllib import urlretrieve  # python 2
        urlretrieve(DeepDream._DOWNLOAD_URL, DeepDream._ZIP_FILENAME)
        logger.info('Extracting deepdream model...')
        zipfile.ZipFile(DeepDream._ZIP_FILENAME, 'r').extractall('.')
    @staticmethod
    def _tffunc(*argtypes):
        '''Helper that transforms TF-graph generating function into a regular one.
        See `_resize` function below.
        '''
        placeholders = list(map(tf.placeholder, argtypes))
        def wrap(f):
            out = f(*placeholders)
            def wrapper(*args, **kw):
                # Evaluate the prebuilt graph output, feeding args into the
                # placeholders created above.
                return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
            return wrapper
        return wrap
    @staticmethod
    def _base_resize(img, size):
        '''Helper function that uses TF to resize an image'''
        img = tf.expand_dims(img, 0)
        return tf.image.resize_bilinear(img, size)[0,:,:,:]
    def __init__(self):
        # Download the model on first use, then build the TF graph/session.
        if not os.path.exists(DeepDream._MODEL_FILENAME):
            self._download_model()
        self._graph = tf.Graph()
        self._session = tf.InteractiveSession(graph=self._graph)
        self._resize = self._tffunc(np.float32, np.int32)(self._base_resize)
        with tf.gfile.FastGFile(DeepDream._MODEL_FILENAME, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        self._t_input = tf.placeholder(np.float32, name='input') # define the input tensor
        imagenet_mean = 117.0
        t_preprocessed = tf.expand_dims(self._t_input-imagenet_mean, 0)
        tf.import_graph_def(graph_def, {'input':t_preprocessed})
        # Optimization target: one channel of an intermediate Inception layer.
        self.t_obj = self.T('mixed4d_3x3_bottleneck_pre_relu')[:,:,:,139]
        #self.t_obj = tf.square(self.T('mixed4c'))
    def T(self, layer_name):
        '''Helper for getting layer output tensor'''
        return self._graph.get_tensor_by_name('import/%s:0'%layer_name)
    def _calc_grad_tiled(self, img, t_grad, tile_size=512):
        '''Compute the value of tensor t_grad over the image in a tiled way.
        Random shifts are applied to the image to blur tile boundaries over
        multiple iterations.'''
        sz = tile_size
        h, w = img.shape[:2]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
        grad = np.zeros_like(img)
        for y in range(0, max(h-sz//2, sz),sz):
            for x in range(0, max(w-sz//2, sz),sz):
                sub = img_shift[y:y+sz,x:x+sz]
                g = self._session.run(t_grad, {self._t_input:sub})
                grad[y:y+sz,x:x+sz] = g
        # Undo the random shift so the gradient aligns with the input image.
        return np.roll(np.roll(grad, -sx, 1), -sy, 0)
    def process_frame(self, frame, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
        t_score = tf.reduce_mean(self.t_obj) # defining the optimization objective
        t_grad = tf.gradients(t_score, self._t_input)[0] # behold the power of automatic differentiation!
        # split the image into a number of octaves
        img = frame
        octaves = []
        for i in range(octave_n-1):
            hw = img.shape[:2]
            lo = self._resize(img, np.int32(np.float32(hw)/octave_scale))
            hi = img-self._resize(lo, hw)
            img = lo
            octaves.append(hi)
        # generate details octave by octave
        for octave in range(octave_n):
            if octave>0:
                hi = octaves[-octave]
                img = self._resize(img, hi.shape[:2])+hi
            for i in range(iter_n):
                g = self._calc_grad_tiled(img, t_grad)
                # Normalized gradient ascent step (epsilon avoids divide-by-zero).
                img += g*(step / (np.abs(g).mean()+1e-7))
                #print('.',end = ' ')
        return img
if __name__ == '__main__':
    args = parser.parse_args()
    if args.dream:
        # tensorflow is imported lazily so the simple mode has no TF dependency.
        import tensorflow as tf
        process_frame = DeepDream().process_frame
    else:
        process_frame = process_frame_simple
    run(args.in_filename, args.out_filename, process_frame)

View File

@ -1,56 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import argparse
import ffmpeg
import logging
import sys
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(description='Convert speech audio to text using Google Speech API')
parser.add_argument('in_filename', help='Input filename (`-` for stdin)')
def decode_audio(in_filename, **input_kwargs):
    """Decode ``in_filename`` to raw mono 16 kHz signed 16-bit PCM bytes.

    Extra keyword arguments are forwarded verbatim to ffmpeg's input.
    On ffmpeg failure the captured stderr is printed and the process
    exits with status 1.
    """
    pipeline = (
        ffmpeg
        .input(in_filename, **input_kwargs)
        .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
        .overwrite_output()
    )
    try:
        out, _ = pipeline.run(capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as e:
        print(e.stderr, file=sys.stderr)
        sys.exit(1)
    return out
def get_transcripts(audio_data):
    """Send raw LINEAR16 PCM bytes to Google Cloud Speech; return transcripts.

    Returns the top alternative's transcript for each recognition result.
    """
    client = speech.SpeechClient()
    audio = types.RecognitionAudio(content=audio_data)
    config = types.RecognitionConfig(
        # Must match decode_audio's output: s16le PCM, mono, 16 kHz.
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US'
    )
    response = client.recognize(config, audio)
    return [result.alternatives[0].transcript for result in response.results]
def transcribe(in_filename):
    """Decode ``in_filename`` to PCM and print each recognized transcript."""
    audio_data = decode_audio(in_filename)
    for transcript in get_transcripts(audio_data):
        print(repr(transcript.encode('utf-8')))
if __name__ == '__main__':
    # CLI entry point: transcribe the given media file.
    args = parser.parse_args()
    transcribe(args.in_filename)

View File

@ -1,31 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import argparse
import ffmpeg
import sys
parser = argparse.ArgumentParser(description='Get video information')
parser.add_argument('in_filename', help='Input filename')
if __name__ == '__main__':
    args = parser.parse_args()
    try:
        probe = ffmpeg.probe(args.in_filename)
    except ffmpeg.Error as e:
        print(e.stderr, file=sys.stderr)
        sys.exit(1)
    # Pick the first video stream, if any.
    video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
    if video_stream is None:
        print('No video stream found', file=sys.stderr)
        sys.exit(1)
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    # NOTE(review): some containers/codecs omit 'nb_frames', which would
    # raise KeyError here — confirm inputs are formats that report it.
    num_frames = int(video_stream['nb_frames'])
    print('width: {}'.format(width))
    print('height: {}'.format(height))
    print('num_frames: {}'.format(num_frames))

View File

@ -1,22 +1,9 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from . import nodes
from . import _ffmpeg from . import _filters, _ffmpeg, _run
from . import _filters
from . import _probe
from . import _run
from . import _view
from .nodes import *
from ._ffmpeg import *
from ._filters import * from ._filters import *
from ._probe import * from ._ffmpeg import *
from ._run import * from ._run import *
from ._view import * from ._view import *
__all__ = ( __all__ = _filters.__all__ + _ffmpeg.__all__ + _run.__all__ + _view.__all__
nodes.__all__
+ _ffmpeg.__all__
+ _probe.__all__
+ _run.__all__
+ _view.__all__
+ _filters.__all__
)

View File

@ -1,8 +1,5 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from past.builtins import basestring
from ._utils import basestring
from .nodes import ( from .nodes import (
filter_operator, filter_operator,
GlobalNode, GlobalNode,
@ -16,11 +13,6 @@ from .nodes import (
def input(filename, **kwargs): def input(filename, **kwargs):
"""Input file URL (ffmpeg ``-i`` option) """Input file URL (ffmpeg ``-i`` option)
Any supplied kwargs are passed to ffmpeg verbatim (e.g. ``t=20``,
``f='mp4'``, ``acodec='pcm'``, etc.).
To tell ffmpeg to read from stdin, use ``pipe:`` as the filename.
Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__ Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__
""" """
kwargs['filename'] = filename kwargs['filename'] = filename
@ -32,64 +24,41 @@ def input(filename, **kwargs):
return InputNode(input.__name__, kwargs=kwargs).stream() return InputNode(input.__name__, kwargs=kwargs).stream()
@output_operator()
def global_args(stream, *args):
"""Add extra global command-line argument(s), e.g. ``-progress``."""
return GlobalNode(stream, global_args.__name__, args).stream()
@output_operator() @output_operator()
def overwrite_output(stream): def overwrite_output(stream):
"""Overwrite output files without asking (ffmpeg ``-y`` option) """Overwrite output files without asking (ffmpeg ``-y`` option)
Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__ Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__
""" """
return GlobalNode(stream, overwrite_output.__name__, ['-y']).stream() return GlobalNode(stream, overwrite_output.__name__).stream()
@output_operator() @output_operator()
def merge_outputs(*streams): def merge_outputs(*streams):
"""Include all given outputs in one ffmpeg command line""" """Include all given outputs in one ffmpeg command line
"""
return MergeOutputsNode(streams, merge_outputs.__name__).stream() return MergeOutputsNode(streams, merge_outputs.__name__).stream()
@filter_operator() @filter_operator()
def output(*streams_and_filename, **kwargs): def output(stream, filename, **kwargs):
"""Output file URL """Output file URL
Syntax:
`ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)`
Any supplied keyword arguments are passed to ffmpeg verbatim (e.g.
``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``,
etc.). Some keyword-arguments are handled specially, as shown below.
Args:
video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``.
audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``.
format: alias for ``-f`` parameter, e.g. ``format='mp4'``
(equivalent to ``f='mp4'``).
If multiple streams are provided, they are mapped to the same
output.
To tell ffmpeg to write to stdout, use ``pipe:`` as the filename.
Official documentation: `Synopsis <https://ffmpeg.org/ffmpeg.html#Synopsis>`__ Official documentation: `Synopsis <https://ffmpeg.org/ffmpeg.html#Synopsis>`__
""" """
streams_and_filename = list(streams_and_filename) kwargs['filename'] = filename
if 'filename' not in kwargs:
if not isinstance(streams_and_filename[-1], basestring):
raise ValueError('A filename must be provided')
kwargs['filename'] = streams_and_filename.pop(-1)
streams = streams_and_filename
fmt = kwargs.pop('f', None) fmt = kwargs.pop('f', None)
if fmt: if fmt:
if 'format' in kwargs: if 'format' in kwargs:
raise ValueError("Can't specify both `format` and `f` kwargs") raise ValueError("Can't specify both `format` and `f` kwargs")
kwargs['format'] = fmt kwargs['format'] = fmt
return OutputNode(streams, output.__name__, kwargs=kwargs).stream() return OutputNode(stream, output.__name__, kwargs=kwargs).stream()
__all__ = ['input', 'merge_outputs', 'output', 'overwrite_output']
__all__ = [
'input',
'merge_outputs',
'output',
'overwrite_output',
]

View File

@ -8,11 +8,9 @@ from ._utils import escape_chars
def filter_multi_output(stream_spec, filter_name, *args, **kwargs): def filter_multi_output(stream_spec, filter_name, *args, **kwargs):
"""Apply custom filter with one or more outputs. """Apply custom filter with one or more outputs.
This is the same as ``filter`` except that the filter can produce more than one This is the same as ``filter_`` except that the filter can produce more than one output.
output.
To reference an output stream, use either the ``.stream`` operator or bracket To reference an output stream, use either the ``.stream`` operator or bracket shorthand:
shorthand:
Example: Example:
@ -23,19 +21,16 @@ def filter_multi_output(stream_spec, filter_name, *args, **kwargs):
ffmpeg.concat(split0, split1).output('out.mp4').run() ffmpeg.concat(split0, split1).output('out.mp4').run()
``` ```
""" """
return FilterNode( return FilterNode(stream_spec, filter_name, args=args, kwargs=kwargs, max_inputs=None)
stream_spec, filter_name, args=args, kwargs=kwargs, max_inputs=None
)
@filter_operator() @filter_operator()
def filter(stream_spec, filter_name, *args, **kwargs): def filter_(stream_spec, filter_name, *args, **kwargs):
"""Apply custom filter. """Apply custom filter.
``filter_`` is normally used by higher-level filter functions such as ``hflip``, ``filter_`` is normally used by higher-level filter functions such as ``hflip``, but if a filter implementation
but if a filter implementation is missing from ``ffmpeg-python``, you can call is missing from ``fmpeg-python``, you can call ``filter_`` directly to have ``fmpeg-python`` pass the filter name
``filter_`` directly to have ``ffmpeg-python`` pass the filter name and arguments and arguments to ffmpeg verbatim.
to ffmpeg verbatim.
Args: Args:
stream_spec: a Stream, list of Streams, or label-to-Stream dictionary mapping stream_spec: a Stream, list of Streams, or label-to-Stream dictionary mapping
@ -43,41 +38,26 @@ def filter(stream_spec, filter_name, *args, **kwargs):
*args: list of args to pass to ffmpeg verbatim *args: list of args to pass to ffmpeg verbatim
**kwargs: list of keyword-args to pass to ffmpeg verbatim **kwargs: list of keyword-args to pass to ffmpeg verbatim
The function name is suffixed with ``_`` in order avoid confusion with the standard The function name is suffixed with ``_`` in order avoid confusion with the standard python ``filter`` function.
python ``filter`` function.
Example: Example:
``ffmpeg.input('in.mp4').filter('hflip').output('out.mp4').run()`` ``ffmpeg.input('in.mp4').filter_('hflip').output('out.mp4').run()``
""" """
return filter_multi_output(stream_spec, filter_name, *args, **kwargs).stream() return filter_multi_output(stream_spec, filter_name, *args, **kwargs).stream()
@filter_operator()
def filter_(stream_spec, filter_name, *args, **kwargs):
"""Alternate name for ``filter``, so as to not collide with the
built-in python ``filter`` operator.
"""
return filter(stream_spec, filter_name, *args, **kwargs)
@filter_operator() @filter_operator()
def split(stream): def split(stream):
return FilterNode(stream, split.__name__) return FilterNode(stream, split.__name__)
@filter_operator()
def asplit(stream):
return FilterNode(stream, asplit.__name__)
@filter_operator() @filter_operator()
def setpts(stream, expr): def setpts(stream, expr):
"""Change the PTS (presentation timestamp) of the input frames. """Change the PTS (presentation timestamp) of the input frames.
Args: Args:
expr: The expression which is evaluated for each frame to construct its expr: The expression which is evaluated for each frame to construct its timestamp.
timestamp.
Official documentation: `setpts, asetpts <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__ Official documentation: `setpts, asetpts <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__
""" """
@ -89,15 +69,14 @@ def trim(stream, **kwargs):
"""Trim the input so that the output contains one continuous subpart of the input. """Trim the input so that the output contains one continuous subpart of the input.
Args: Args:
start: Specify the time of the start of the kept section, i.e. the frame with start: Specify the time of the start of the kept section, i.e. the frame with the timestamp start will be the
the timestamp start will be the first frame in the output. first frame in the output.
end: Specify the time of the first frame that will be dropped, i.e. the frame end: Specify the time of the first frame that will be dropped, i.e. the frame immediately preceding the one
immediately preceding the one with the timestamp end will be the last frame with the timestamp end will be the last frame in the output.
in the output. start_pts: This is the same as start, except this option sets the start timestamp in timebase units instead of
start_pts: This is the same as start, except this option sets the start seconds.
timestamp in timebase units instead of seconds. end_pts: This is the same as end, except this option sets the end timestamp in timebase units instead of
end_pts: This is the same as end, except this option sets the end timestamp in seconds.
timebase units instead of seconds.
duration: The maximum duration of the output in seconds. duration: The maximum duration of the output in seconds.
start_frame: The number of the first frame that should be passed to the output. start_frame: The number of the first frame that should be passed to the output.
end_frame: The number of the first frame that should be dropped. end_frame: The number of the first frame that should be dropped.
@ -112,16 +91,14 @@ def overlay(main_parent_node, overlay_parent_node, eof_action='repeat', **kwargs
"""Overlay one video on top of another. """Overlay one video on top of another.
Args: Args:
x: Set the expression for the x coordinates of the overlaid video on the main x: Set the expression for the x coordinates of the overlaid video on the main video. Default value is 0. In
video. Default value is 0. In case the expression is invalid, it is set to case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed
a huge value (meaning that the overlay will not be displayed within the within the output visible area).
output visible area). y: Set the expression for the y coordinates of the overlaid video on the main video. Default value is 0. In
y: Set the expression for the y coordinates of the overlaid video on the main case the expression is invalid, it is set to a huge value (meaning that the overlay will not be displayed
video. Default value is 0. In case the expression is invalid, it is set to within the output visible area).
a huge value (meaning that the overlay will not be displayed within the eof_action: The action to take when EOF is encountered on the secondary input; it accepts one of the following
output visible area). values:
eof_action: The action to take when EOF is encountered on the secondary input;
it accepts one of the following values:
* ``repeat``: Repeat the last frame (the default). * ``repeat``: Repeat the last frame (the default).
* ``endall``: End both streams. * ``endall``: End both streams.
@ -130,13 +107,12 @@ def overlay(main_parent_node, overlay_parent_node, eof_action='repeat', **kwargs
eval: Set when the expressions for x, and y are evaluated. eval: Set when the expressions for x, and y are evaluated.
It accepts the following values: It accepts the following values:
* ``init``: only evaluate expressions once during the filter initialization * ``init``: only evaluate expressions once during the filter initialization or when a command is
or when a command is processed processed
* ``frame``: evaluate expressions for each incoming frame * ``frame``: evaluate expressions for each incoming frame
Default value is ``frame``. Default value is ``frame``.
shortest: If set to 1, force the output to terminate when the shortest input shortest: If set to 1, force the output to terminate when the shortest input terminates. Default value is 0.
terminates. Default value is 0.
format: Set the format for the output video. format: Set the format for the output video.
It accepts the following values: It accepts the following values:
@ -147,22 +123,15 @@ def overlay(main_parent_node, overlay_parent_node, eof_action='repeat', **kwargs
* ``gbrp``: force planar RGB output * ``gbrp``: force planar RGB output
Default value is ``yuv420``. Default value is ``yuv420``.
rgb (deprecated): If set to 1, force the filter to accept inputs in the RGB rgb (deprecated): If set to 1, force the filter to accept inputs in the RGB color space. Default value is 0.
color space. Default value is 0. This option is deprecated, use format This option is deprecated, use format instead.
instead. repeatlast: If set to 1, force the filter to draw the last overlay frame over the main input until the end of
repeatlast: If set to 1, force the filter to draw the last overlay frame over the stream. A value of 0 disables this behavior. Default value is 1.
the main input until the end of the stream. A value of 0 disables this
behavior. Default value is 1.
Official documentation: `overlay <https://ffmpeg.org/ffmpeg-filters.html#overlay-1>`__ Official documentation: `overlay <https://ffmpeg.org/ffmpeg-filters.html#overlay-1>`__
""" """
kwargs['eof_action'] = eof_action kwargs['eof_action'] = eof_action
return FilterNode( return FilterNode([main_parent_node, overlay_parent_node], overlay.__name__, kwargs=kwargs, max_inputs=2).stream()
[main_parent_node, overlay_parent_node],
overlay.__name__,
kwargs=kwargs,
max_inputs=2,
).stream()
@filter_operator() @filter_operator()
@ -183,44 +152,19 @@ def vflip(stream):
return FilterNode(stream, vflip.__name__).stream() return FilterNode(stream, vflip.__name__).stream()
@filter_operator()
def crop(stream, x, y, width, height, **kwargs):
"""Crop the input video.
Args:
x: The horizontal position, in the input video, of the left edge of
the output video.
y: The vertical position, in the input video, of the top edge of the
output video.
width: The width of the output video. Must be greater than 0.
height: The height of the output video. Must be greater than 0.
Official documentation: `crop <https://ffmpeg.org/ffmpeg-filters.html#crop>`__
"""
return FilterNode(
stream, crop.__name__, args=[width, height, x, y], kwargs=kwargs
).stream()
@filter_operator() @filter_operator()
def drawbox(stream, x, y, width, height, color, thickness=None, **kwargs): def drawbox(stream, x, y, width, height, color, thickness=None, **kwargs):
"""Draw a colored box on the input image. """Draw a colored box on the input image.
Args: Args:
x: The expression which specifies the top left corner x coordinate of the box. x: The expression which specifies the top left corner x coordinate of the box. It defaults to 0.
It defaults to 0. y: The expression which specifies the top left corner y coordinate of the box. It defaults to 0.
y: The expression which specifies the top left corner y coordinate of the box. width: Specify the width of the box; if 0 interpreted as the input width. It defaults to 0.
It defaults to 0. heigth: Specify the height of the box; if 0 interpreted as the input height. It defaults to 0.
width: Specify the width of the box; if 0 interpreted as the input width. It color: Specify the color of the box to write. For the general syntax of this option, check the "Color" section
defaults to 0. in the ffmpeg-utils manual. If the special value invert is used, the box edge color is the same as the
height: Specify the height of the box; if 0 interpreted as the input height. It video with inverted luma.
defaults to 0. thickness: The expression which sets the thickness of the box edge. Default value is 3.
color: Specify the color of the box to write. For the general syntax of this
option, check the "Color" section in the ffmpeg-utils manual. If the
special value invert is used, the box edge color is the same as the video
with inverted luma.
thickness: The expression which sets the thickness of the box edge. Default
value is 3.
w: Alias for ``width``. w: Alias for ``width``.
h: Alias for ``height``. h: Alias for ``height``.
c: Alias for ``color``. c: Alias for ``color``.
@ -230,64 +174,51 @@ def drawbox(stream, x, y, width, height, color, thickness=None, **kwargs):
""" """
if thickness: if thickness:
kwargs['t'] = thickness kwargs['t'] = thickness
return FilterNode( return FilterNode(stream, drawbox.__name__, args=[x, y, width, height, color], kwargs=kwargs).stream()
stream, drawbox.__name__, args=[x, y, width, height, color], kwargs=kwargs
).stream()
@filter_operator() @filter_operator()
def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs): def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs):
"""Draw a text string or text from a specified file on top of a video, using the """Draw a text string or text from a specified file on top of a video, using the libfreetype library.
libfreetype library.
To enable compilation of this filter, you need to configure FFmpeg with To enable compilation of this filter, you need to configure FFmpeg with ``--enable-libfreetype``. To enable default
``--enable-libfreetype``. To enable default font fallback and the font option you font fallback and the font option you need to configure FFmpeg with ``--enable-libfontconfig``. To enable the
need to configure FFmpeg with ``--enable-libfontconfig``. To enable the
text_shaping option, you need to configure FFmpeg with ``--enable-libfribidi``. text_shaping option, you need to configure FFmpeg with ``--enable-libfribidi``.
Args: Args:
box: Used to draw a box around text using the background color. The value must box: Used to draw a box around text using the background color. The value must be either 1 (enable) or 0
be either 1 (enable) or 0 (disable). The default value of box is 0. (disable). The default value of box is 0.
boxborderw: Set the width of the border to be drawn around the box using boxborderw: Set the width of the border to be drawn around the box using boxcolor. The default value of
boxcolor. The default value of boxborderw is 0. boxborderw is 0.
boxcolor: The color to be used for drawing box around text. For the syntax of boxcolor: The color to be used for drawing box around text. For the syntax of this option, check the "Color"
this option, check the "Color" section in the ffmpeg-utils manual. The section in the ffmpeg-utils manual. The default value of boxcolor is "white".
default value of boxcolor is "white". line_spacing: Set the line spacing in pixels of the border to be drawn around the box using box. The default
line_spacing: Set the line spacing in pixels of the border to be drawn around value of line_spacing is 0.
the box using box. The default value of line_spacing is 0. borderw: Set the width of the border to be drawn around the text using bordercolor. The default value of
borderw: Set the width of the border to be drawn around the text using borderw is 0.
bordercolor. The default value of borderw is 0. bordercolor: Set the color to be used for drawing border around text. For the syntax of this option, check the
bordercolor: Set the color to be used for drawing border around text. For the "Color" section in the ffmpeg-utils manual. The default value of bordercolor is "black".
syntax of this option, check the "Color" section in the ffmpeg-utils expansion: Select how the text is expanded. Can be either none, strftime (deprecated) or normal (default). See
manual. The default value of bordercolor is "black". the Text expansion section below for details.
expansion: Select how the text is expanded. Can be either none, strftime basetime: Set a start time for the count. Value is in microseconds. Only applied in the deprecated strftime
(deprecated) or normal (default). See the Text expansion section below for expansion mode. To emulate in normal expansion mode use the pts function, supplying the start time (in
details. seconds) as the second argument.
basetime: Set a start time for the count. Value is in microseconds. Only
applied in the deprecated strftime expansion mode. To emulate in normal
expansion mode use the pts function, supplying the start time (in seconds)
as the second argument.
fix_bounds: If true, check and fix text coords to avoid clipping. fix_bounds: If true, check and fix text coords to avoid clipping.
fontcolor: The color to be used for drawing fonts. For the syntax of this fontcolor: The color to be used for drawing fonts. For the syntax of this option, check the "Color" section in
option, check the "Color" section in the ffmpeg-utils manual. The default the ffmpeg-utils manual. The default value of fontcolor is "black".
value of fontcolor is "black". fontcolor_expr: String which is expanded the same way as text to obtain dynamic fontcolor value. By default
fontcolor_expr: String which is expanded the same way as text to obtain dynamic this option has empty value and is not processed. When this option is set, it overrides fontcolor option.
fontcolor value. By default this option has empty value and is not
processed. When this option is set, it overrides fontcolor option.
font: The font family to be used for drawing text. By default Sans. font: The font family to be used for drawing text. By default Sans.
fontfile: The font file to be used for drawing text. The path must be included. fontfile: The font file to be used for drawing text. The path must be included. This parameter is mandatory if
This parameter is mandatory if the fontconfig support is disabled. the fontconfig support is disabled.
alpha: Draw the text applying alpha blending. The value can be a number between alpha: Draw the text applying alpha blending. The value can be a number between 0.0 and 1.0. The expression
0.0 and 1.0. The expression accepts the same variables x, y as well. The accepts the same variables x, y as well. The default value is 1. Please see fontcolor_expr.
default value is 1. Please see fontcolor_expr. fontsize: The font size to be used for drawing text. The default value of fontsize is 16.
fontsize: The font size to be used for drawing text. The default value of text_shaping: If set to 1, attempt to shape the text (for example, reverse the order of right-to-left text and
fontsize is 16. join Arabic characters) before drawing it. Otherwise, just draw the text exactly as given. By default 1 (if
text_shaping: If set to 1, attempt to shape the text (for example, reverse the supported).
order of right-to-left text and join Arabic characters) before drawing it. ft_load_flags: The flags to be used for loading the fonts. The flags map the corresponding flags supported by
Otherwise, just draw the text exactly as given. By default 1 (if supported). libfreetype, and are a combination of the following values:
ft_load_flags: The flags to be used for loading the fonts. The flags map the
corresponding flags supported by libfreetype, and are a combination of the
following values:
* ``default`` * ``default``
* ``no_scale`` * ``no_scale``
@ -305,89 +236,75 @@ def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs):
* ``linear_design`` * ``linear_design``
* ``no_autohint`` * ``no_autohint``
Default value is "default". For more information consult the documentation Default value is "default". For more information consult the documentation for the FT_LOAD_* libfreetype
for the FT_LOAD_* libfreetype flags. flags.
shadowcolor: The color to be used for drawing a shadow behind the drawn text. shadowcolor: The color to be used for drawing a shadow behind the drawn text. For the syntax of this option,
For the syntax of this option, check the "Color" section in the ffmpeg-utils check the "Color" section in the ffmpeg-utils manual. The default value of shadowcolor is "black".
manual. The default value of shadowcolor is "black". shadowx: The x offset for the text shadow position with respect to the position of the text. It can be either
shadowx: The x offset for the text shadow position with respect to the position positive or negative values. The default value is "0".
of the text. It can be either positive or negative values. The default value shadowy: The y offset for the text shadow position with respect to the position of the text. It can be either
is "0". positive or negative values. The default value is "0".
shadowy: The y offset for the text shadow position with respect to the position start_number: The starting frame number for the n/frame_num variable. The default value is "0".
of the text. It can be either positive or negative values. The default value tabsize: The size in number of spaces to use for rendering the tab. Default value is 4.
is "0". timecode: Set the initial timecode representation in "hh:mm:ss[:;.]ff" format. It can be used with or without
start_number: The starting frame number for the n/frame_num variable. The text parameter. timecode_rate option must be specified.
default value is "0".
tabsize: The size in number of spaces to use for rendering the tab. Default
value is 4.
timecode: Set the initial timecode representation in "hh:mm:ss[:;.]ff" format.
It can be used with or without text parameter. timecode_rate option must be
specified.
rate: Set the timecode frame rate (timecode only). rate: Set the timecode frame rate (timecode only).
timecode_rate: Alias for ``rate``. timecode_rate: Alias for ``rate``.
r: Alias for ``rate``. r: Alias for ``rate``.
tc24hmax: If set to 1, the output of the timecode option will wrap around at 24 tc24hmax: If set to 1, the output of the timecode option will wrap around at 24 hours. Default is 0 (disabled).
hours. Default is 0 (disabled). text: The text string to be drawn. The text must be a sequence of UTF-8 encoded characters. This parameter is
text: The text string to be drawn. The text must be a sequence of UTF-8 encoded mandatory if no file is specified with the parameter textfile.
characters. This parameter is mandatory if no file is specified with the textfile: A text file containing text to be drawn. The text must be a sequence of UTF-8 encoded characters.
parameter textfile. This parameter is mandatory if no text string is specified with the parameter text. If both text and
textfile: A text file containing text to be drawn. The text must be a sequence textfile are specified, an error is thrown.
of UTF-8 encoded characters. This parameter is mandatory if no text string reload: If set to 1, the textfile will be reloaded before each frame. Be sure to update it atomically, or it
is specified with the parameter text. If both text and textfile are may be read partially, or even fail.
specified, an error is thrown. x: The expression which specifies the offset where text will be drawn within the video frame. It is relative to
reload: If set to 1, the textfile will be reloaded before each frame. Be sure the left border of the output image. The default value is "0".
to update it atomically, or it may be read partially, or even fail. y: The expression which specifies the offset where text will be drawn within the video frame. It is relative to
x: The expression which specifies the offset where text will be drawn within the top border of the output image. The default value is "0". See below for the list of accepted constants
the video frame. It is relative to the left border of the output image. The and functions.
default value is "0".
y: The expression which specifies the offset where text will be drawn within
the video frame. It is relative to the top border of the output image. The
default value is "0". See below for the list of accepted constants and
functions.
Expression constants: Expression constants:
The parameters for x and y are expressions containing the following constants The parameters for x and y are expressions containing the following constants and functions:
and functions: dar: input display aspect ratio, it is the same as ``(w / h) * sar``
- dar: input display aspect ratio, it is the same as ``(w / h) * sar`` hsub: horizontal chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub
- hsub: horizontal chroma subsample values. For example for the pixel format is 1.
"yuv422p" hsub is 2 and vsub is 1. vsub: vertical chroma subsample values. For example for the pixel format "yuv422p" hsub is 2 and vsub
- vsub: vertical chroma subsample values. For example for the pixel format is 1.
"yuv422p" hsub is 2 and vsub is 1. line_h: the height of each text line
- line_h: the height of each text line lh: Alias for ``line_h``.
- lh: Alias for ``line_h``. main_h: the input height
- main_h: the input height h: Alias for ``main_h``.
- h: Alias for ``main_h``. H: Alias for ``main_h``.
- H: Alias for ``main_h``. main_w: the input width
- main_w: the input width w: Alias for ``main_w``.
- w: Alias for ``main_w``. W: Alias for ``main_w``.
- W: Alias for ``main_w``. ascent: the maximum distance from the baseline to the highest/upper grid coordinate used to place a
- ascent: the maximum distance from the baseline to the highest/upper grid glyph outline point, for all the rendered glyphs. It is a positive value, due to the grid's
coordinate used to place a glyph outline point, for all the rendered glyphs. orientation with the Y axis upwards.
It is a positive value, due to the grid's orientation with the Y axis max_glyph_a: Alias for ``ascent``.
upwards. descent: the maximum distance from the baseline to the lowest grid coordinate used to place a glyph
- max_glyph_a: Alias for ``ascent``. outline point, for all the rendered glyphs. This is a negative value, due to the grid's
- descent: the maximum distance from the baseline to the lowest grid orientation, with the Y axis upwards.
coordinate used to place a glyph outline max_glyph_d: Alias for ``descent``.
point, for all the rendered glyphs. This is a negative value, due to the max_glyph_h: maximum glyph height, that is the maximum height for all the glyphs contained in the
grid's orientation, with the Y axis upwards. rendered text, it is equivalent to ascent - descent.
- max_glyph_d: Alias for ``descent``. max_glyph_w: maximum glyph width, that is the maximum width for all the glyphs contained in the
- max_glyph_h: maximum glyph height, that is the maximum height for all the rendered text
glyphs contained in the rendered text, it is equivalent to ascent - descent. n: the number of input frame, starting from 0
- max_glyph_w: maximum glyph width, that is the maximum width for all the rand(min, max): return a random number included between min and max
glyphs contained in the rendered text. sar: The input sample aspect ratio.
- n: the number of input frame, starting from 0 t: timestamp expressed in seconds, NAN if the input timestamp is unknown
- rand(min, max): return a random number included between min and max text_h: the height of the rendered text
- sar: The input sample aspect ratio. th: Alias for ``text_h``.
- t: timestamp expressed in seconds, NAN if the input timestamp is unknown text_w: the width of the rendered text
- text_h: the height of the rendered text tw: Alias for ``text_w``.
- th: Alias for ``text_h``. x: the x offset coordinates where the text is drawn.
- text_w: the width of the rendered text y: the y offset coordinates where the text is drawn.
- tw: Alias for ``text_w``.
- x: the x offset coordinates where the text is drawn.
- y: the y offset coordinates where the text is drawn.
These parameters allow the x and y expressions to refer each other, so you can These parameters allow the x and y expressions to refer each other, so you can for example specify
for example specify ``y=x/dar``. ``y=x/dar``.
Official documentation: `drawtext <https://ffmpeg.org/ffmpeg-filters.html#drawtext>`__ Official documentation: `drawtext <https://ffmpeg.org/ffmpeg-filters.html#drawtext>`__
""" """
@ -399,48 +316,36 @@ def drawtext(stream, text=None, x=0, y=0, escape_text=True, **kwargs):
kwargs['x'] = x kwargs['x'] = x
if y != 0: if y != 0:
kwargs['y'] = y kwargs['y'] = y
return filter(stream, drawtext.__name__, **kwargs) return filter_(stream, drawtext.__name__, **kwargs)
@filter_operator() @filter_operator()
def concat(*streams, **kwargs): def concat(*streams, **kwargs):
"""Concatenate audio and video streams, joining them together one after the other. """Concatenate audio and video streams, joining them together one after the other.
The filter works on segments of synchronized video and audio streams. All segments The filter works on segments of synchronized video and audio streams. All segments must have the same number of
must have the same number of streams of each type, and that will also be the number streams of each type, and that will also be the number of streams at output.
of streams at output.
Args: Args:
unsafe: Activate unsafe mode: do not fail if segments have a different format. unsafe: Activate unsafe mode: do not fail if segments have a different format.
Related streams do not always have exactly the same duration, for various reasons Related streams do not always have exactly the same duration, for various reasons including codec frame size or
including codec frame size or sloppy authoring. For that reason, related sloppy authoring. For that reason, related synchronized streams (e.g. a video and its audio track) should be
synchronized streams (e.g. a video and its audio track) should be concatenated at concatenated at once. The concat filter will use the duration of the longest stream in each segment (except the
once. The concat filter will use the duration of the longest stream in each segment last one), and if necessary pad shorter audio streams with silence.
(except the last one), and if necessary pad shorter audio streams with silence.
For this filter to work correctly, all segments must start at timestamp 0. For this filter to work correctly, all segments must start at timestamp 0.
All corresponding streams must have the same parameters in all segments; the All corresponding streams must have the same parameters in all segments; the filtering system will automatically
filtering system will automatically select a common pixel format for video streams, select a common pixel format for video streams, and a common sample format, sample rate and channel layout for
and a common sample format, sample rate and channel layout for audio streams, but audio streams, but other settings, such as resolution, must be converted explicitly by the user.
other settings, such as resolution, must be converted explicitly by the user.
Different frame rates are acceptable but will result in variable frame rate at Different frame rates are acceptable but will result in variable frame rate at output; be sure to configure the
output; be sure to configure the output file to handle it. output file to handle it.
Official documentation: `concat <https://ffmpeg.org/ffmpeg-filters.html#concat>`__ Official documentation: `concat <https://ffmpeg.org/ffmpeg-filters.html#concat>`__
""" """
video_stream_count = kwargs.get('v', 1) kwargs['n'] = len(streams)
audio_stream_count = kwargs.get('a', 0)
stream_count = video_stream_count + audio_stream_count
if len(streams) % stream_count != 0:
raise ValueError(
'Expected concat input streams to have length multiple of {} (v={}, a={}); got {}'.format(
stream_count, video_stream_count, audio_stream_count, len(streams)
)
)
kwargs['n'] = int(len(streams) / stream_count)
return FilterNode(streams, concat.__name__, kwargs=kwargs, max_inputs=None).stream() return FilterNode(streams, concat.__name__, kwargs=kwargs, max_inputs=None).stream()
@ -452,8 +357,8 @@ def zoompan(stream, **kwargs):
zoom: Set the zoom expression. Default is 1. zoom: Set the zoom expression. Default is 1.
x: Set the x expression. Default is 0. x: Set the x expression. Default is 0.
y: Set the y expression. Default is 0. y: Set the y expression. Default is 0.
d: Set the duration expression in number of frames. This sets for how many d: Set the duration expression in number of frames. This sets for how many number of frames effect will last
number of frames effect will last for single input image. for single input image.
s: Set the output image size, default is ``hd720``. s: Set the output image size, default is ``hd720``.
fps: Set the output frame rate, default is 25. fps: Set the output frame rate, default is 25.
z: Alias for ``zoom``. z: Alias for ``zoom``.
@ -468,14 +373,10 @@ def hue(stream, **kwargs):
"""Modify the hue and/or the saturation of the input. """Modify the hue and/or the saturation of the input.
Args: Args:
h: Specify the hue angle as a number of degrees. It accepts an expression, and h: Specify the hue angle as a number of degrees. It accepts an expression, and defaults to "0".
defaults to "0". s: Specify the saturation in the [-10,10] range. It accepts an expression and defaults to "1".
s: Specify the saturation in the [-10,10] range. It accepts an expression and H: Specify the hue angle as a number of radians. It accepts an expression, and defaults to "0".
defaults to "1". b: Specify the brightness in the [-10,10] range. It accepts an expression and defaults to "0".
H: Specify the hue angle as a number of radians. It accepts an expression, and
defaults to "0".
b: Specify the brightness in the [-10,10] range. It accepts an expression and
defaults to "0".
Official documentation: `hue <https://ffmpeg.org/ffmpeg-filters.html#hue>`__ Official documentation: `hue <https://ffmpeg.org/ffmpeg-filters.html#hue>`__
""" """
@ -494,12 +395,8 @@ def colorchannelmixer(stream, *args, **kwargs):
__all__ = [ __all__ = [
'colorchannelmixer', 'colorchannelmixer',
'concat', 'concat',
'crop',
'drawbox', 'drawbox',
'drawtext',
'filter',
'filter_', 'filter_',
'filter_multi_output',
'hflip', 'hflip',
'hue', 'hue',
'overlay', 'overlay',

View File

@ -1,30 +0,0 @@
import json
import subprocess
from ._run import Error
from ._utils import convert_kwargs_to_cmd_line_args
def probe(filename, cmd='ffprobe', timeout=None, **kwargs):
"""Run ffprobe on the specified file and return a JSON representation of the output.
Raises:
:class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code,
an :class:`Error` is returned with a generic error message.
The stderr output can be retrieved by accessing the
``stderr`` property of the exception.
"""
args = [cmd, '-show_format', '-show_streams', '-of', 'json']
args += convert_kwargs_to_cmd_line_args(kwargs)
args += [filename]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
communicate_kwargs = {}
if timeout is not None:
communicate_kwargs['timeout'] = timeout
out, err = p.communicate(**communicate_kwargs)
if p.returncode != 0:
raise Error('ffprobe', out, err)
return json.loads(out.decode('utf-8'))
__all__ = ['probe']

View File

@ -1,13 +1,17 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from .dag import get_outgoing_edges, topo_sort from .dag import get_outgoing_edges, topo_sort
from ._utils import basestring, convert_kwargs_to_cmd_line_args
from builtins import str
from functools import reduce from functools import reduce
from past.builtins import basestring
import copy import copy
import operator import operator
import subprocess import subprocess as _subprocess
from ._ffmpeg import input, output from ._ffmpeg import (
input,
output,
overwrite_output,
)
from .nodes import ( from .nodes import (
get_stream_spec_nodes, get_stream_spec_nodes,
FilterNode, FilterNode,
@ -17,19 +21,19 @@ from .nodes import (
output_operator, output_operator,
) )
try:
from collections.abc import Iterable def _get_stream_name(name):
except ImportError: return '[{}]'.format(name)
from collections import Iterable
class Error(Exception): def _convert_kwargs_to_cmd_line_args(kwargs):
def __init__(self, cmd, stdout, stderr): args = []
super(Error, self).__init__( for k in sorted(kwargs.keys()):
'{} error (see stderr output for detail)'.format(cmd) v = kwargs[k]
) args.append('-{}'.format(k))
self.stdout = stdout if v:
self.stderr = stderr args.append('{}'.format(v))
return args
def _get_input_args(input_node): def _get_input_args(input_node):
@ -43,44 +47,19 @@ def _get_input_args(input_node):
args += ['-f', fmt] args += ['-f', fmt]
if video_size: if video_size:
args += ['-video_size', '{}x{}'.format(video_size[0], video_size[1])] args += ['-video_size', '{}x{}'.format(video_size[0], video_size[1])]
args += convert_kwargs_to_cmd_line_args(kwargs) args += _convert_kwargs_to_cmd_line_args(kwargs)
args += ['-i', filename] args += ['-i', filename]
else: else:
raise ValueError('Unsupported input node: {}'.format(input_node)) raise ValueError('Unsupported input node: {}'.format(input_node))
return args return args
def _format_input_stream_name(stream_name_map, edge, is_final_arg=False):
prefix = stream_name_map[edge.upstream_node, edge.upstream_label]
if not edge.upstream_selector:
suffix = ''
else:
suffix = ':{}'.format(edge.upstream_selector)
if is_final_arg and isinstance(edge.upstream_node, InputNode):
## Special case: `-map` args should not have brackets for input
## nodes.
fmt = '{}{}'
else:
fmt = '[{}{}]'
return fmt.format(prefix, suffix)
def _format_output_stream_name(stream_name_map, edge):
return '[{}]'.format(stream_name_map[edge.upstream_node, edge.upstream_label])
def _get_filter_spec(node, outgoing_edge_map, stream_name_map): def _get_filter_spec(node, outgoing_edge_map, stream_name_map):
incoming_edges = node.incoming_edges incoming_edges = node.incoming_edges
outgoing_edges = get_outgoing_edges(node, outgoing_edge_map) outgoing_edges = get_outgoing_edges(node, outgoing_edge_map)
inputs = [ inputs = [stream_name_map[edge.upstream_node, edge.upstream_label] for edge in incoming_edges]
_format_input_stream_name(stream_name_map, edge) for edge in incoming_edges outputs = [stream_name_map[edge.upstream_node, edge.upstream_label] for edge in outgoing_edges]
] filter_spec = '{}{}{}'.format(''.join(inputs), node._get_filter(outgoing_edges), ''.join(outputs))
outputs = [
_format_output_stream_name(stream_name_map, edge) for edge in outgoing_edges
]
filter_spec = '{}{}{}'.format(
''.join(inputs), node._get_filter(outgoing_edges), ''.join(outputs)
)
return filter_spec return filter_spec
@ -88,69 +67,50 @@ def _allocate_filter_stream_names(filter_nodes, outgoing_edge_maps, stream_name_
stream_count = 0 stream_count = 0
for upstream_node in filter_nodes: for upstream_node in filter_nodes:
outgoing_edge_map = outgoing_edge_maps[upstream_node] outgoing_edge_map = outgoing_edge_maps[upstream_node]
for upstream_label, downstreams in sorted(outgoing_edge_map.items()): for upstream_label, downstreams in list(outgoing_edge_map.items()):
if len(downstreams) > 1: if len(downstreams) > 1:
# TODO: automatically insert `splits` ahead of time via graph transformation. # TODO: automatically insert `splits` ahead of time via graph transformation.
raise ValueError( raise ValueError('Encountered {} with multiple outgoing edges with same upstream label {!r}; a '
'Encountered {} with multiple outgoing edges with same upstream ' '`split` filter is probably required'.format(upstream_node, upstream_label))
'label {!r}; a `split` filter is probably required'.format( stream_name_map[upstream_node, upstream_label] = _get_stream_name('s{}'.format(stream_count))
upstream_node, upstream_label
)
)
stream_name_map[upstream_node, upstream_label] = 's{}'.format(stream_count)
stream_count += 1 stream_count += 1
def _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map): def _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map):
_allocate_filter_stream_names(filter_nodes, outgoing_edge_maps, stream_name_map) _allocate_filter_stream_names(filter_nodes, outgoing_edge_maps, stream_name_map)
filter_specs = [ filter_specs = [_get_filter_spec(node, outgoing_edge_maps[node], stream_name_map) for node in filter_nodes]
_get_filter_spec(node, outgoing_edge_maps[node], stream_name_map)
for node in filter_nodes
]
return ';'.join(filter_specs) return ';'.join(filter_specs)
def _get_global_args(node): def _get_global_args(node):
return list(node.args) if node.name == overwrite_output.__name__:
return ['-y']
else:
raise ValueError('Unsupported global node: {}'.format(node))
def _get_output_args(node, stream_name_map): def _get_output_args(node, stream_name_map):
if node.name != output.__name__: if node.name != output.__name__:
raise ValueError('Unsupported output node: {}'.format(node)) raise ValueError('Unsupported output node: {}'.format(node))
args = [] args = []
assert len(node.incoming_edges) == 1
if len(node.incoming_edges) == 0: edge = node.incoming_edges[0]
raise ValueError('Output node {} has no mapped streams'.format(node)) stream_name = stream_name_map[edge.upstream_node, edge.upstream_label]
if stream_name != '[0]':
for edge in node.incoming_edges: args += ['-map', stream_name]
# edge = node.incoming_edges[0]
stream_name = _format_input_stream_name(
stream_name_map, edge, is_final_arg=True
)
if stream_name != '0' or len(node.incoming_edges) > 1:
args += ['-map', stream_name]
kwargs = copy.copy(node.kwargs) kwargs = copy.copy(node.kwargs)
filename = kwargs.pop('filename') filename = kwargs.pop('filename')
if 'format' in kwargs: fmt = kwargs.pop('format', None)
args += ['-f', kwargs.pop('format')] if fmt:
if 'video_bitrate' in kwargs: args += ['-f', fmt]
args += ['-b:v', str(kwargs.pop('video_bitrate'))] args += _convert_kwargs_to_cmd_line_args(kwargs)
if 'audio_bitrate' in kwargs:
args += ['-b:a', str(kwargs.pop('audio_bitrate'))]
if 'video_size' in kwargs:
video_size = kwargs.pop('video_size')
if not isinstance(video_size, basestring) and isinstance(video_size, Iterable):
video_size = '{}x{}'.format(video_size[0], video_size[1])
args += ['-video_size', video_size]
args += convert_kwargs_to_cmd_line_args(kwargs)
args += [filename] args += [filename]
return args return args
@output_operator() @output_operator()
def get_args(stream_spec, overwrite_output=False): def get_args(stream_spec, overwrite_output=False):
"""Build command-line arguments to be passed to ffmpeg.""" """Get command-line arguments for ffmpeg."""
nodes = get_stream_spec_nodes(stream_spec) nodes = get_stream_spec_nodes(stream_spec)
args = [] args = []
# TODO: group nodes together, e.g. `-i somefile -r somerate`. # TODO: group nodes together, e.g. `-i somefile -r somerate`.
@ -159,14 +119,12 @@ def get_args(stream_spec, overwrite_output=False):
output_nodes = [node for node in sorted_nodes if isinstance(node, OutputNode)] output_nodes = [node for node in sorted_nodes if isinstance(node, OutputNode)]
global_nodes = [node for node in sorted_nodes if isinstance(node, GlobalNode)] global_nodes = [node for node in sorted_nodes if isinstance(node, GlobalNode)]
filter_nodes = [node for node in sorted_nodes if isinstance(node, FilterNode)] filter_nodes = [node for node in sorted_nodes if isinstance(node, FilterNode)]
stream_name_map = {(node, None): str(i) for i, node in enumerate(input_nodes)} stream_name_map = {(node, None): _get_stream_name(i) for i, node in enumerate(input_nodes)}
filter_arg = _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map) filter_arg = _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map)
args += reduce(operator.add, [_get_input_args(node) for node in input_nodes]) args += reduce(operator.add, [_get_input_args(node) for node in input_nodes])
if filter_arg: if filter_arg:
args += ['-filter_complex', filter_arg] args += ['-filter_complex', filter_arg]
args += reduce( args += reduce(operator.add, [_get_output_args(node, stream_name_map) for node in output_nodes])
operator.add, [_get_output_args(node, stream_name_map) for node in output_nodes]
)
args += reduce(operator.add, [_get_global_args(node) for node in global_nodes], []) args += reduce(operator.add, [_get_global_args(node) for node in global_nodes], [])
if overwrite_output: if overwrite_output:
args += ['-y'] args += ['-y']
@ -174,174 +132,21 @@ def get_args(stream_spec, overwrite_output=False):
@output_operator() @output_operator()
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False): def run(stream_spec, cmd='ffmpeg', **kwargs):
"""Build command-line for invoking ffmpeg. """Run ffmpeg on node graph.
The :meth:`run` function uses this to build the command line Args:
arguments and should work in most cases, but calling this function **kwargs: keyword-arguments passed to ``get_args()`` (e.g. ``overwrite_output=True``).
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.
This is the same as calling :meth:`get_args` except that it also
includes the ``ffmpeg`` command as the first argument.
""" """
if isinstance(cmd, basestring): if isinstance(cmd, basestring):
cmd = [cmd] cmd = [cmd]
elif type(cmd) != list: elif type(cmd) != list:
cmd = list(cmd) cmd = list(cmd)
return cmd + get_args(stream_spec, overwrite_output=overwrite_output) args = cmd + get_args(stream_spec, **kwargs)
_subprocess.check_call(args)
@output_operator()
def run_async(
stream_spec,
cmd='ffmpeg',
pipe_stdin=False,
pipe_stdout=False,
pipe_stderr=False,
quiet=False,
overwrite_output=False,
cwd=None,
):
"""Asynchronously invoke ffmpeg for the supplied node graph.
Args:
pipe_stdin: if True, connect pipe to subprocess stdin (to be
used with ``pipe:`` ffmpeg inputs).
pipe_stdout: if True, connect pipe to subprocess stdout (to be
used with ``pipe:`` ffmpeg outputs).
pipe_stderr: if True, connect pipe to subprocess stderr.
quiet: shorthand for setting ``capture_stdout`` and
``capture_stderr``.
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns:
A `subprocess Popen`_ object representing the child process.
Examples:
Run and stream input::
process = (
ffmpeg
.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
)
process.communicate(input=input_data)
Run and capture output::
process = (
ffmpeg
.input(in_filename)
.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = process.communicate()
Process video frame-by-frame using numpy::
process1 = (
ffmpeg
.input(in_filename)
.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run_async(pipe_stdout=True)
)
process2 = (
ffmpeg
.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
.output(out_filename, pix_fmt='yuv420p')
.overwrite_output()
.run_async(pipe_stdin=True)
)
while True:
in_bytes = process1.stdout.read(width * height * 3)
if not in_bytes:
break
in_frame = (
np
.frombuffer(in_bytes, np.uint8)
.reshape([height, width, 3])
)
out_frame = in_frame * 0.3
process2.stdin.write(
frame
.astype(np.uint8)
.tobytes()
)
process2.stdin.close()
process1.wait()
process2.wait()
.. _subprocess Popen: https://docs.python.org/3/library/subprocess.html#popen-objects
"""
args = compile(stream_spec, cmd, overwrite_output=overwrite_output)
stdin_stream = subprocess.PIPE if pipe_stdin else None
stdout_stream = subprocess.PIPE if pipe_stdout else None
stderr_stream = subprocess.PIPE if pipe_stderr else None
if quiet:
stderr_stream = subprocess.STDOUT
stdout_stream = subprocess.DEVNULL
return subprocess.Popen(
args,
stdin=stdin_stream,
stdout=stdout_stream,
stderr=stderr_stream,
cwd=cwd,
)
@output_operator()
def run(
stream_spec,
cmd='ffmpeg',
capture_stdout=False,
capture_stderr=False,
input=None,
quiet=False,
overwrite_output=False,
cwd=None,
):
"""Invoke ffmpeg for the supplied node graph.
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data.
"""
process = run_async(
stream_spec,
cmd,
pipe_stdin=input is not None,
pipe_stdout=capture_stdout,
pipe_stderr=capture_stderr,
quiet=quiet,
overwrite_output=overwrite_output,
cwd=cwd,
)
out, err = process.communicate(input)
retcode = process.poll()
if retcode:
raise Error('ffmpeg', out, err)
return out, err
__all__ = [ __all__ = [
'compile',
'Error',
'get_args', 'get_args',
'run', 'run',
'run_async',
] ]

View File

@ -1,69 +1,22 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from builtins import str from builtins import str
from past.builtins import basestring from past.builtins import basestring
import hashlib import hashlib
import sys
if sys.version_info.major == 2:
# noinspection PyUnresolvedReferences,PyShadowingBuiltins
str = str
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
# `past.builtins.basestring` module can't be imported on Python3 in some environments (Ubuntu).
# This code is copy-pasted from it to avoid crashes.
class BaseBaseString(type):
def __instancecheck__(cls, instance):
return isinstance(instance, (bytes, str))
def __subclasshook__(cls, thing):
# TODO: What should go here?
raise NotImplemented
def with_metaclass(meta, *bases):
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
if sys.version_info.major >= 3:
class basestring(with_metaclass(BaseBaseString)):
pass
else:
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import basestring
def _recursive_repr(item): def _recursive_repr(item):
"""Hack around python `repr` to deterministically represent dictionaries. """Hack around python `repr` to deterministically represent dictionaries.
This is able to represent more things than json.dumps, since it does not require This is able to represent more things than json.dumps, since it does not require things to be JSON serializable
things to be JSON serializable (e.g. datetimes). (e.g. datetimes).
""" """
if isinstance(item, basestring): if isinstance(item, basestring):
result = str(item) result = str(item)
elif isinstance(item, list): elif isinstance(item, list):
result = '[{}]'.format(', '.join([_recursive_repr(x) for x in item])) result = '[{}]'.format(', '.join([_recursive_repr(x) for x in item]))
elif isinstance(item, dict): elif isinstance(item, dict):
kv_pairs = [ kv_pairs = ['{}: {}'.format(_recursive_repr(k), _recursive_repr(item[k])) for k in sorted(item)]
'{}: {}'.format(_recursive_repr(k), _recursive_repr(item[k]))
for k in sorted(item)
]
result = '{' + ', '.join(kv_pairs) + '}' result = '{' + ', '.join(kv_pairs) + '}'
else: else:
result = repr(item) result = repr(item)
@ -74,7 +27,6 @@ def get_hash(item):
repr_ = _recursive_repr(item).encode('utf-8') repr_ = _recursive_repr(item).encode('utf-8')
return hashlib.md5(repr_).hexdigest() return hashlib.md5(repr_).hexdigest()
def get_hash_int(item): def get_hash_int(item):
return int(get_hash(item), base=16) return int(get_hash(item), base=16)
@ -89,20 +41,3 @@ def escape_chars(text, chars):
for ch in chars: for ch in chars:
text = text.replace(ch, '\\' + ch) text = text.replace(ch, '\\' + ch)
return text return text
def convert_kwargs_to_cmd_line_args(kwargs):
"""Helper function to build command line arguments out of dict."""
args = []
for k in sorted(kwargs.keys()):
v = kwargs[k]
if isinstance(v, Iterable) and not isinstance(v, str):
for value in v:
args.append('-{}'.format(k))
if value is not None:
args.append('{}'.format(value))
continue
args.append('-{}'.format(k))
if v is not None:
args.append('{}'.format(v))
return args

View File

@ -3,6 +3,7 @@ from __future__ import unicode_literals
from builtins import str from builtins import str
from .dag import get_outgoing_edges from .dag import get_outgoing_edges
from ._run import topo_sort from ._run import topo_sort
import os
import tempfile import tempfile
from ffmpeg.nodes import ( from ffmpeg.nodes import (
@ -10,6 +11,7 @@ from ffmpeg.nodes import (
get_stream_spec_nodes, get_stream_spec_nodes,
InputNode, InputNode,
OutputNode, OutputNode,
Stream,
stream_operator, stream_operator,
) )
@ -30,63 +32,39 @@ def _get_node_color(node):
@stream_operator() @stream_operator()
def view(stream_spec, detail=False, filename=None, pipe=False, **kwargs): def view(stream_spec, **kwargs):
try: try:
import graphviz import graphviz
except ImportError: except ImportError:
raise ImportError( raise ImportError('failed to import graphviz; please make sure graphviz is installed (e.g. `pip install '
'failed to import graphviz; please make sure graphviz is installed (e.g. ' 'graphviz`)')
'`pip install graphviz`)'
)
filename = kwargs.pop('filename', None)
show_labels = kwargs.pop('show_labels', True) show_labels = kwargs.pop('show_labels', True)
if pipe and filename is not None: if filename is None:
raise ValueError('Can\'t specify both `filename` and `pipe`')
elif not pipe and filename is None:
filename = tempfile.mktemp() filename = tempfile.mktemp()
nodes = get_stream_spec_nodes(stream_spec) nodes = get_stream_spec_nodes(stream_spec)
sorted_nodes, outgoing_edge_maps = topo_sort(nodes) sorted_nodes, outgoing_edge_maps = topo_sort(nodes)
graph = graphviz.Digraph(format='png') graph = graphviz.Digraph()
graph.attr(rankdir='LR') graph.attr(rankdir='LR')
if len(list(kwargs.keys())) != 0: if len(list(kwargs.keys())) != 0:
raise ValueError( raise ValueError('Invalid kwargs key(s): {}'.format(', '.join(list(kwargs.keys()))))
'Invalid kwargs key(s): {}'.format(', '.join(list(kwargs.keys())))
)
for node in sorted_nodes: for node in sorted_nodes:
color = _get_node_color(node) color = _get_node_color(node)
if detail: graph.node(str(hash(node)), node.short_repr, shape='box', style='filled', fillcolor=color)
lines = [node.short_repr]
lines += ['{!r}'.format(arg) for arg in node.args]
lines += [
'{}={!r}'.format(key, node.kwargs[key]) for key in sorted(node.kwargs)
]
node_text = '\n'.join(lines)
else:
node_text = node.short_repr
graph.node(
str(hash(node)), node_text, shape='box', style='filled', fillcolor=color
)
outgoing_edge_map = outgoing_edge_maps.get(node, {}) outgoing_edge_map = outgoing_edge_maps.get(node, {})
for edge in get_outgoing_edges(node, outgoing_edge_map): for edge in get_outgoing_edges(node, outgoing_edge_map):
kwargs = {} kwargs = {}
up_label = edge.upstream_label up_label = edge.upstream_label
down_label = edge.downstream_label down_label = edge.downstream_label
up_selector = edge.upstream_selector if show_labels and (up_label is not None or down_label is not None):
if show_labels and (
up_label is not None
or down_label is not None
or up_selector is not None
):
if up_label is None: if up_label is None:
up_label = '' up_label = ''
if up_selector is not None:
up_label += ":" + up_selector
if down_label is None: if down_label is None:
down_label = '' down_label = ''
if up_label != '' and down_label != '': if up_label != '' and down_label != '':
@ -98,11 +76,11 @@ def view(stream_spec, detail=False, filename=None, pipe=False, **kwargs):
downstream_node_id = str(hash(edge.downstream_node)) downstream_node_id = str(hash(edge.downstream_node))
graph.edge(upstream_node_id, downstream_node_id, **kwargs) graph.edge(upstream_node_id, downstream_node_id, **kwargs)
if pipe: graph.view(filename)
return graph.pipe()
else: return stream_spec
graph.view(filename, cleanup=True)
return stream_spec
__all__ = ['view'] __all__ = [
'view',
]

View File

@ -9,55 +9,45 @@ class DagNode(object):
"""Node in a directed-acyclic graph (DAG). """Node in a directed-acyclic graph (DAG).
Edges: Edges:
DagNodes are connected by edges. An edge connects two nodes with a label for DagNodes are connected by edges. An edge connects two nodes with a label for each side:
each side:
- ``upstream_node``: upstream/parent node - ``upstream_node``: upstream/parent node
- ``upstream_label``: label on the outgoing side of the upstream node - ``upstream_label``: label on the outgoing side of the upstream node
- ``downstream_node``: downstream/child node - ``downstream_node``: downstream/child node
- ``downstream_label``: label on the incoming side of the downstream node - ``downstream_label``: label on the incoming side of the downstream node
For example, DagNode A may be connected to DagNode B with an edge labelled For example, DagNode A may be connected to DagNode B with an edge labelled "foo" on A's side, and "bar" on B's
"foo" on A's side, and "bar" on B's side: side:
_____ _____ _____ _____
| | | | | | | |
| A >[foo]---[bar]> B | | A >[foo]---[bar]> B |
|_____| |_____| |_____| |_____|
Edge labels may be integers or strings, and nodes cannot have more than one Edge labels may be integers or strings, and nodes cannot have more than one incoming edge with the same label.
incoming edge with the same label.
DagNodes may have any number of incoming edges and any number of outgoing DagNodes may have any number of incoming edges and any number of outgoing edges. DagNodes keep track only of
edges. DagNodes keep track only of their incoming edges, but the entire graph their incoming edges, but the entire graph structure can be inferred by looking at the furthest downstream
structure can be inferred by looking at the furthest downstream nodes and nodes and working backwards.
working backwards.
Hashing: Hashing:
DagNodes must be hashable, and two nodes are considered to be equivalent if DagNodes must be hashable, and two nodes are considered to be equivalent if they have the same hash value.
they have the same hash value.
Nodes are immutable, and the hash should remain constant as a result. If a Nodes are immutable, and the hash should remain constant as a result. If a node with new contents is required,
node with new contents is required, create a new node and throw the old one create a new node and throw the old one away.
away.
String representation: String representation:
In order for graph visualization tools to show useful information, nodes must In order for graph visualization tools to show useful information, nodes must be representable as strings. The
be representable as strings. The ``repr`` operator should provide a more or ``repr`` operator should provide a more or less "full" representation of the node, and the ``short_repr``
less "full" representation of the node, and the ``short_repr`` property should property should be a shortened, concise representation.
be a shortened, concise representation.
Again, because nodes are immutable, the string representations should remain Again, because nodes are immutable, the string representations should remain constant.
constant.
""" """
def __hash__(self): def __hash__(self):
"""Return an integer hash of the node.""" """Return an integer hash of the node."""
raise NotImplementedError() raise NotImplementedError()
def __eq__(self, other): def __eq__(self, other):
"""Compare two nodes; implementations should return True if (and only if) """Compare two nodes; implementations should return True if (and only if) hashes match."""
hashes match.
"""
raise NotImplementedError() raise NotImplementedError()
def __repr__(self, other): def __repr__(self, other):
@ -73,75 +63,38 @@ class DagNode(object):
def incoming_edge_map(self): def incoming_edge_map(self):
"""Provides information about all incoming edges that connect to this node. """Provides information about all incoming edges that connect to this node.
The edge map is a dictionary that maps an ``incoming_label`` to The edge map is a dictionary that maps an ``incoming_label`` to ``(outgoing_node, outgoing_label)``. Note that
``(outgoing_node, outgoing_label)``. Note that implicitly, ``incoming_node`` is implicity, ``incoming_node`` is ``self``. See "Edges" section above.
``self``. See "Edges" section above.
""" """
raise NotImplementedError() raise NotImplementedError()
DagEdge = namedtuple( DagEdge = namedtuple('DagEdge', ['downstream_node', 'downstream_label', 'upstream_node', 'upstream_label'])
'DagEdge',
[
'downstream_node',
'downstream_label',
'upstream_node',
'upstream_label',
'upstream_selector',
],
)
def get_incoming_edges(downstream_node, incoming_edge_map): def get_incoming_edges(downstream_node, incoming_edge_map):
edges = [] edges = []
for downstream_label, upstream_info in list(incoming_edge_map.items()): for downstream_label, (upstream_node, upstream_label) in list(incoming_edge_map.items()):
upstream_node, upstream_label, upstream_selector = upstream_info edges += [DagEdge(downstream_node, downstream_label, upstream_node, upstream_label)]
edges += [
DagEdge(
downstream_node,
downstream_label,
upstream_node,
upstream_label,
upstream_selector,
)
]
return edges return edges
def get_outgoing_edges(upstream_node, outgoing_edge_map): def get_outgoing_edges(upstream_node, outgoing_edge_map):
edges = [] edges = []
for upstream_label, downstream_infos in sorted(outgoing_edge_map.items()): for upstream_label, downstream_infos in list(outgoing_edge_map.items()):
for downstream_info in downstream_infos: for (downstream_node, downstream_label) in downstream_infos:
downstream_node, downstream_label, downstream_selector = downstream_info edges += [DagEdge(downstream_node, downstream_label, upstream_node, upstream_label)]
edges += [
DagEdge(
downstream_node,
downstream_label,
upstream_node,
upstream_label,
downstream_selector,
)
]
return edges return edges
class KwargReprNode(DagNode): class KwargReprNode(DagNode):
"""A DagNode that can be represented as a set of args+kwargs.""" """A DagNode that can be represented as a set of args+kwargs.
"""
@property @property
def __upstream_hashes(self): def __upstream_hashes(self):
hashes = [] hashes = []
for downstream_label, upstream_info in list(self.incoming_edge_map.items()): for downstream_label, (upstream_node, upstream_label) in list(self.incoming_edge_map.items()):
upstream_node, upstream_label, upstream_selector = upstream_info hashes += [hash(x) for x in [downstream_label, upstream_node, upstream_label]]
hashes += [
hash(x)
for x in [
downstream_label,
upstream_node,
upstream_label,
upstream_selector,
]
]
return hashes return hashes
@property @property
@ -172,9 +125,7 @@ class KwargReprNode(DagNode):
def long_repr(self, include_hash=True): def long_repr(self, include_hash=True):
formatted_props = ['{!r}'.format(arg) for arg in self.args] formatted_props = ['{!r}'.format(arg) for arg in self.args]
formatted_props += [ formatted_props += ['{}={!r}'.format(key, self.kwargs[key]) for key in sorted(self.kwargs)]
'{}={!r}'.format(key, self.kwargs[key]) for key in sorted(self.kwargs)
]
out = '{}({})'.format(self.name, ', '.join(formatted_props)) out = '{}({})'.format(self.name, ', '.join(formatted_props))
if include_hash: if include_hash:
out += ' <{}>'.format(self.short_hash) out += ' <{}>'.format(self.short_hash)
@ -201,35 +152,21 @@ def topo_sort(downstream_nodes):
sorted_nodes = [] sorted_nodes = []
outgoing_edge_maps = {} outgoing_edge_maps = {}
def visit( def visit(upstream_node, upstream_label, downstream_node, downstream_label):
upstream_node,
upstream_label,
downstream_node,
downstream_label,
downstream_selector=None,
):
if upstream_node in marked_nodes: if upstream_node in marked_nodes:
raise RuntimeError('Graph is not a DAG') raise RuntimeError('Graph is not a DAG')
if downstream_node is not None: if downstream_node is not None:
outgoing_edge_map = outgoing_edge_maps.get(upstream_node, {}) outgoing_edge_map = outgoing_edge_maps.get(upstream_node, {})
outgoing_edge_infos = outgoing_edge_map.get(upstream_label, []) outgoing_edge_infos = outgoing_edge_map.get(upstream_label, [])
outgoing_edge_infos += [ outgoing_edge_infos += [(downstream_node, downstream_label)]
(downstream_node, downstream_label, downstream_selector)
]
outgoing_edge_map[upstream_label] = outgoing_edge_infos outgoing_edge_map[upstream_label] = outgoing_edge_infos
outgoing_edge_maps[upstream_node] = outgoing_edge_map outgoing_edge_maps[upstream_node] = outgoing_edge_map
if upstream_node not in sorted_nodes: if upstream_node not in sorted_nodes:
marked_nodes.append(upstream_node) marked_nodes.append(upstream_node)
for edge in upstream_node.incoming_edges: for edge in upstream_node.incoming_edges:
visit( visit(edge.upstream_node, edge.upstream_label, edge.downstream_node, edge.downstream_label)
edge.upstream_node,
edge.upstream_label,
edge.downstream_node,
edge.downstream_label,
edge.upstream_selector,
)
marked_nodes.remove(upstream_node) marked_nodes.remove(upstream_node)
sorted_nodes.append(upstream_node) sorted_nodes.append(upstream_node)

View File

@ -1,6 +1,5 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from past.builtins import basestring
from .dag import KwargReprNode from .dag import KwargReprNode
from ._utils import escape_chars, get_hash_int from ._utils import escape_chars, get_hash_int
from builtins import object from builtins import object
@ -21,22 +20,13 @@ def _get_types_str(types):
class Stream(object): class Stream(object):
"""Represents the outgoing edge of an upstream node; may be used to create more """Represents the outgoing edge of an upstream node; may be used to create more downstream nodes."""
downstream nodes. def __init__(self, upstream_node, upstream_label, node_types):
"""
def __init__(
self, upstream_node, upstream_label, node_types, upstream_selector=None
):
if not _is_of_types(upstream_node, node_types): if not _is_of_types(upstream_node, node_types):
raise TypeError( raise TypeError('Expected upstream node to be of one of the following type(s): {}; got {}'.format(
'Expected upstream node to be of one of the following type(s): {}; got {}'.format( _get_types_str(node_types), type(upstream_node)))
_get_types_str(node_types), type(upstream_node)
)
)
self.node = upstream_node self.node = upstream_node
self.label = upstream_label self.label = upstream_label
self.selector = upstream_selector
def __hash__(self): def __hash__(self):
return get_hash_int([hash(self.node), hash(self.label)]) return get_hash_int([hash(self.node), hash(self.label)])
@ -46,82 +36,9 @@ class Stream(object):
def __repr__(self): def __repr__(self):
node_repr = self.node.long_repr(include_hash=False) node_repr = self.node.long_repr(include_hash=False)
selector = '' out = '{}[{!r}] <{}>'.format(node_repr, self.label, self.node.short_hash)
if self.selector:
selector = ':{}'.format(self.selector)
out = '{}[{!r}{}] <{}>'.format(
node_repr, self.label, selector, self.node.short_hash
)
return out return out
def __getitem__(self, index):
"""
Select a component (audio, video) of the stream.
Example:
Process the audio and video portions of a stream independently::
input = ffmpeg.input('in.mp4')
audio = input['a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
video = input['v'].hflip()
out = ffmpeg.output(audio, video, 'out.mp4')
"""
if self.selector is not None:
raise ValueError('Stream already has a selector: {}'.format(self))
elif not isinstance(index, basestring):
raise TypeError("Expected string index (e.g. 'a'); got {!r}".format(index))
return self.node.stream(label=self.label, selector=index)
@property
def audio(self):
"""Select the audio-portion of a stream.
Some ffmpeg filters drop audio streams, and care must be taken
to preserve the audio in the final output. The ``.audio`` and
``.video`` operators can be used to reference the audio/video
portions of a stream so that they can be processed separately
and then re-combined later in the pipeline. This dilemma is
intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
way while users may refer to the official ffmpeg documentation
as to why certain filters drop audio.
``stream.audio`` is a shorthand for ``stream['a']``.
Example:
Process the audio and video portions of a stream independently::
input = ffmpeg.input('in.mp4')
audio = input.audio.filter("aecho", 0.8, 0.9, 1000, 0.3)
video = input.video.hflip()
out = ffmpeg.output(audio, video, 'out.mp4')
"""
return self['a']
@property
def video(self):
"""Select the video-portion of a stream.
Some ffmpeg filters drop audio streams, and care must be taken
to preserve the audio in the final output. The ``.audio`` and
``.video`` operators can be used to reference the audio/video
portions of a stream so that they can be processed separately
and then re-combined later in the pipeline. This dilemma is
intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
way while users may refer to the official ffmpeg documentation
as to why certain filters drop audio.
``stream.video`` is a shorthand for ``stream['v']``.
Example:
Process the audio and video portions of a stream independently::
input = ffmpeg.input('in.mp4')
audio = input.audio.filter("aecho", 0.8, 0.9, 1000, 0.3)
video = input.video.hflip()
out = ffmpeg.output(audio, video, 'out.mp4')
"""
return self['v']
def get_stream_map(stream_spec): def get_stream_map(stream_spec):
if stream_spec is None: if stream_spec is None:
@ -151,101 +68,56 @@ def get_stream_spec_nodes(stream_spec):
class Node(KwargReprNode): class Node(KwargReprNode):
"""Node base""" """Node base"""
@classmethod @classmethod
def __check_input_len(cls, stream_map, min_inputs, max_inputs): def __check_input_len(cls, stream_map, min_inputs, max_inputs):
if min_inputs is not None and len(stream_map) < min_inputs: if min_inputs is not None and len(stream_map) < min_inputs:
raise ValueError( raise ValueError('Expected at least {} input stream(s); got {}'.format(min_inputs, len(stream_map)))
'Expected at least {} input stream(s); got {}'.format(
min_inputs, len(stream_map)
)
)
elif max_inputs is not None and len(stream_map) > max_inputs: elif max_inputs is not None and len(stream_map) > max_inputs:
raise ValueError( raise ValueError('Expected at most {} input stream(s); got {}'.format(max_inputs, len(stream_map)))
'Expected at most {} input stream(s); got {}'.format(
max_inputs, len(stream_map)
)
)
@classmethod @classmethod
def __check_input_types(cls, stream_map, incoming_stream_types): def __check_input_types(cls, stream_map, incoming_stream_types):
for stream in list(stream_map.values()): for stream in list(stream_map.values()):
if not _is_of_types(stream, incoming_stream_types): if not _is_of_types(stream, incoming_stream_types):
raise TypeError( raise TypeError('Expected incoming stream(s) to be of one of the following types: {}; got {}'
'Expected incoming stream(s) to be of one of the following types: {}; got {}'.format( .format(_get_types_str(incoming_stream_types), type(stream)))
_get_types_str(incoming_stream_types), type(stream)
)
)
@classmethod @classmethod
def __get_incoming_edge_map(cls, stream_map): def __get_incoming_edge_map(cls, stream_map):
incoming_edge_map = {} incoming_edge_map = {}
for downstream_label, upstream in list(stream_map.items()): for downstream_label, upstream in list(stream_map.items()):
incoming_edge_map[downstream_label] = ( incoming_edge_map[downstream_label] = (upstream.node, upstream.label)
upstream.node,
upstream.label,
upstream.selector,
)
return incoming_edge_map return incoming_edge_map
def __init__( def __init__(self, stream_spec, name, incoming_stream_types, outgoing_stream_type, min_inputs, max_inputs, args=[],
self, kwargs={}):
stream_spec,
name,
incoming_stream_types,
outgoing_stream_type,
min_inputs,
max_inputs,
args=[],
kwargs={},
):
stream_map = get_stream_map(stream_spec) stream_map = get_stream_map(stream_spec)
self.__check_input_len(stream_map, min_inputs, max_inputs) self.__check_input_len(stream_map, min_inputs, max_inputs)
self.__check_input_types(stream_map, incoming_stream_types) self.__check_input_types(stream_map, incoming_stream_types)
incoming_edge_map = self.__get_incoming_edge_map(stream_map) incoming_edge_map = self.__get_incoming_edge_map(stream_map)
super(Node, self).__init__(incoming_edge_map, name, args, kwargs) super(Node, self).__init__(incoming_edge_map, name, args, kwargs)
self.__outgoing_stream_type = outgoing_stream_type self.__outgoing_stream_type = outgoing_stream_type
self.__incoming_stream_types = incoming_stream_types
def stream(self, label=None, selector=None): def stream(self, label=None):
"""Create an outgoing stream originating from this node. """Create an outgoing stream originating from this node.
More nodes may be attached onto the outgoing stream. More nodes may be attached onto the outgoing stream.
""" """
return self.__outgoing_stream_type(self, label, upstream_selector=selector) return self.__outgoing_stream_type(self, label)
def __getitem__(self, item): def __getitem__(self, label):
"""Create an outgoing stream originating from this node; syntactic sugar for """Create an outgoing stream originating from this node; syntactic sugar for ``self.stream(label)``.
``self.stream(label)``. It can also be used to apply a selector: e.g.
``node[0:'a']`` returns a stream with label 0 and selector ``'a'``, which is
the same as ``node.stream(label=0, selector='a')``.
Example:
Process the audio and video portions of a stream independently::
input = ffmpeg.input('in.mp4')
audio = input[:'a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
video = input[:'v'].hflip()
out = ffmpeg.output(audio, video, 'out.mp4')
""" """
if isinstance(item, slice): return self.stream(label)
return self.stream(label=item.start, selector=item.stop)
else:
return self.stream(label=item)
class FilterableStream(Stream): class FilterableStream(Stream):
def __init__(self, upstream_node, upstream_label, upstream_selector=None): def __init__(self, upstream_node, upstream_label):
super(FilterableStream, self).__init__( super(FilterableStream, self).__init__(upstream_node, upstream_label, {InputNode, FilterNode})
upstream_node, upstream_label, {InputNode, FilterNode}, upstream_selector
)
# noinspection PyMethodOverriding
class InputNode(Node): class InputNode(Node):
"""InputNode type""" """InputNode type"""
def __init__(self, name, args=[], kwargs={}): def __init__(self, name, args=[], kwargs={}):
super(InputNode, self).__init__( super(InputNode, self).__init__(
stream_spec=None, stream_spec=None,
@ -255,7 +127,7 @@ class InputNode(Node):
min_inputs=0, min_inputs=0,
max_inputs=0, max_inputs=0,
args=args, args=args,
kwargs=kwargs, kwargs=kwargs
) )
@property @property
@ -263,7 +135,6 @@ class InputNode(Node):
return os.path.basename(self.kwargs['filename']) return os.path.basename(self.kwargs['filename'])
# noinspection PyMethodOverriding
class FilterNode(Node): class FilterNode(Node):
def __init__(self, stream_spec, name, max_inputs=1, args=[], kwargs={}): def __init__(self, stream_spec, name, max_inputs=1, args=[], kwargs={}):
super(FilterNode, self).__init__( super(FilterNode, self).__init__(
@ -274,15 +145,14 @@ class FilterNode(Node):
min_inputs=1, min_inputs=1,
max_inputs=max_inputs, max_inputs=max_inputs,
args=args, args=args,
kwargs=kwargs, kwargs=kwargs
) )
"""FilterNode""" """FilterNode"""
def _get_filter(self, outgoing_edges): def _get_filter(self, outgoing_edges):
args = self.args args = self.args
kwargs = self.kwargs kwargs = self.kwargs
if self.name in ('split', 'asplit'): if self.name == 'split':
args = [len(outgoing_edges)] args = [len(outgoing_edges)]
out_args = [escape_chars(x, '\\\'=:') for x in args] out_args = [escape_chars(x, '\\\'=:') for x in args]
@ -303,7 +173,6 @@ class FilterNode(Node):
return escape_chars(params_text, '\\\'[],;') return escape_chars(params_text, '\\\'[],;')
# noinspection PyMethodOverriding
class OutputNode(Node): class OutputNode(Node):
def __init__(self, stream, name, args=[], kwargs={}): def __init__(self, stream, name, args=[], kwargs={}):
super(OutputNode, self).__init__( super(OutputNode, self).__init__(
@ -312,9 +181,9 @@ class OutputNode(Node):
incoming_stream_types={FilterableStream}, incoming_stream_types={FilterableStream},
outgoing_stream_type=OutputStream, outgoing_stream_type=OutputStream,
min_inputs=1, min_inputs=1,
max_inputs=None, max_inputs=1,
args=args, args=args,
kwargs=kwargs, kwargs=kwargs
) )
@property @property
@ -323,16 +192,10 @@ class OutputNode(Node):
class OutputStream(Stream): class OutputStream(Stream):
def __init__(self, upstream_node, upstream_label, upstream_selector=None): def __init__(self, upstream_node, upstream_label):
super(OutputStream, self).__init__( super(OutputStream, self).__init__(upstream_node, upstream_label, {OutputNode, GlobalNode, MergeOutputsNode})
upstream_node,
upstream_label,
{OutputNode, GlobalNode, MergeOutputsNode},
upstream_selector=upstream_selector,
)
# noinspection PyMethodOverriding
class MergeOutputsNode(Node): class MergeOutputsNode(Node):
def __init__(self, streams, name): def __init__(self, streams, name):
super(MergeOutputsNode, self).__init__( super(MergeOutputsNode, self).__init__(
@ -341,11 +204,10 @@ class MergeOutputsNode(Node):
incoming_stream_types={OutputStream}, incoming_stream_types={OutputStream},
outgoing_stream_type=OutputStream, outgoing_stream_type=OutputStream,
min_inputs=1, min_inputs=1,
max_inputs=None, max_inputs=None
) )
# noinspection PyMethodOverriding
class GlobalNode(Node): class GlobalNode(Node):
def __init__(self, stream, name, args=[], kwargs={}): def __init__(self, stream, name, args=[], kwargs={}):
super(GlobalNode, self).__init__( super(GlobalNode, self).__init__(
@ -356,7 +218,7 @@ class GlobalNode(Node):
min_inputs=1, min_inputs=1,
max_inputs=1, max_inputs=1,
args=args, args=args,
kwargs=kwargs, kwargs=kwargs
) )
@ -365,7 +227,6 @@ def stream_operator(stream_classes={Stream}, name=None):
func_name = name or func.__name__ func_name = name or func.__name__
[setattr(stream_class, func_name, func) for stream_class in stream_classes] [setattr(stream_class, func_name, func) for stream_class in stream_classes]
return func return func
return decorator return decorator
@ -375,6 +236,3 @@ def filter_operator(name=None):
def output_operator(name=None): def output_operator(name=None):
return stream_operator(stream_classes={OutputStream}, name=name) return stream_operator(stream_classes={OutputStream}, name=name)
__all__ = ['Stream']

View File

@ -1,20 +1,13 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from builtins import bytes from builtins import bytes
from builtins import range from builtins import range
from builtins import str
import ffmpeg import ffmpeg
import os import os
import pytest import pytest
import random import random
import re import re
import subprocess import subprocess
import sys
try:
import mock # python 2
except ImportError:
from unittest import mock # python 3
TEST_DIR = os.path.dirname(__file__) TEST_DIR = os.path.dirname(__file__)
@ -23,19 +16,15 @@ TEST_INPUT_FILE1 = os.path.join(SAMPLE_DATA_DIR, 'in1.mp4')
TEST_OVERLAY_FILE = os.path.join(SAMPLE_DATA_DIR, 'overlay.png') TEST_OVERLAY_FILE = os.path.join(SAMPLE_DATA_DIR, 'overlay.png')
TEST_OUTPUT_FILE1 = os.path.join(SAMPLE_DATA_DIR, 'out1.mp4') TEST_OUTPUT_FILE1 = os.path.join(SAMPLE_DATA_DIR, 'out1.mp4')
TEST_OUTPUT_FILE2 = os.path.join(SAMPLE_DATA_DIR, 'out2.mp4') TEST_OUTPUT_FILE2 = os.path.join(SAMPLE_DATA_DIR, 'out2.mp4')
BOGUS_INPUT_FILE = os.path.join(SAMPLE_DATA_DIR, 'bogus')
subprocess.check_call(['ffmpeg', '-version']) subprocess.check_call(['ffmpeg', '-version'])
def test_escape_chars(): def test_escape_chars():
assert ffmpeg._utils.escape_chars('a:b', ':') == r'a\:b' assert ffmpeg._utils.escape_chars('a:b', ':') == 'a\:b'
assert ffmpeg._utils.escape_chars('a\\:b', ':\\') == 'a\\\\\\:b' assert ffmpeg._utils.escape_chars('a\\:b', ':\\') == 'a\\\\\\:b'
assert ( assert ffmpeg._utils.escape_chars('a:b,c[d]e%{}f\'g\'h\\i', '\\\':,[]%') == 'a\\:b\\,c\\[d\\]e\\%{}f\\\'g\\\'h\\\\i'
ffmpeg._utils.escape_chars('a:b,c[d]e%{}f\'g\'h\\i', '\\\':,[]%')
== 'a\\:b\\,c\\[d\\]e\\%{}f\\\'g\\\'h\\\\i'
)
assert ffmpeg._utils.escape_chars(123, ':\\') == '123' assert ffmpeg._utils.escape_chars(123, ':\\') == '123'
@ -67,16 +56,23 @@ def test_fluent_concat():
def test_fluent_output(): def test_fluent_output():
ffmpeg.input('dummy.mp4').trim(start_frame=10, end_frame=20).output('dummy2.mp4') (ffmpeg
.input('dummy.mp4')
.trim(start_frame=10, end_frame=20)
.output('dummy2.mp4')
)
def test_fluent_complex_filter(): def test_fluent_complex_filter():
in_file = ffmpeg.input('dummy.mp4') in_file = ffmpeg.input('dummy.mp4')
return ffmpeg.concat( return (ffmpeg
in_file.trim(start_frame=10, end_frame=20), .concat(
in_file.trim(start_frame=30, end_frame=40), in_file.trim(start_frame=10, end_frame=20),
in_file.trim(start_frame=50, end_frame=60), in_file.trim(start_frame=30, end_frame=40),
).output('dummy2.mp4') in_file.trim(start_frame=50, end_frame=60)
)
.output('dummy2.mp4')
)
def test_node_repr(): def test_node_repr():
@ -86,85 +82,40 @@ def test_node_repr():
trim3 = ffmpeg.trim(in_file, start_frame=50, end_frame=60) trim3 = ffmpeg.trim(in_file, start_frame=50, end_frame=60)
concatted = ffmpeg.concat(trim1, trim2, trim3) concatted = ffmpeg.concat(trim1, trim2, trim3)
output = ffmpeg.output(concatted, 'dummy2.mp4') output = ffmpeg.output(concatted, 'dummy2.mp4')
assert repr(in_file.node) == 'input(filename={!r}) <{}>'.format( assert repr(in_file.node) == "input(filename={!r}) <{}>".format('dummy.mp4', in_file.node.short_hash)
'dummy.mp4', in_file.node.short_hash assert repr(trim1.node) == "trim(end_frame=20, start_frame=10) <{}>".format(trim1.node.short_hash)
) assert repr(trim2.node) == "trim(end_frame=40, start_frame=30) <{}>".format(trim2.node.short_hash)
assert repr(trim1.node) == 'trim(end_frame=20, start_frame=10) <{}>'.format( assert repr(trim3.node) == "trim(end_frame=60, start_frame=50) <{}>".format(trim3.node.short_hash)
trim1.node.short_hash assert repr(concatted.node) == "concat(n=3) <{}>".format(concatted.node.short_hash)
) assert repr(output.node) == "output(filename={!r}) <{}>".format('dummy2.mp4', output.node.short_hash)
assert repr(trim2.node) == 'trim(end_frame=40, start_frame=30) <{}>'.format(
trim2.node.short_hash
)
assert repr(trim3.node) == 'trim(end_frame=60, start_frame=50) <{}>'.format(
trim3.node.short_hash
)
assert repr(concatted.node) == 'concat(n=3) <{}>'.format(concatted.node.short_hash)
assert repr(output.node) == 'output(filename={!r}) <{}>'.format(
'dummy2.mp4', output.node.short_hash
)
def test_stream_repr(): def test_stream_repr():
in_file = ffmpeg.input('dummy.mp4') in_file = ffmpeg.input('dummy.mp4')
assert repr(in_file) == 'input(filename={!r})[None] <{}>'.format( assert repr(in_file) == "input(filename={!r})[None] <{}>".format('dummy.mp4', in_file.node.short_hash)
'dummy.mp4', in_file.node.short_hash
)
split0 = in_file.filter_multi_output('split')[0] split0 = in_file.filter_multi_output('split')[0]
assert repr(split0) == 'split()[0] <{}>'.format(split0.node.short_hash) assert repr(split0) == "split()[0] <{}>".format(split0.node.short_hash)
dummy_out = in_file.filter_multi_output('dummy')['out'] dummy_out = in_file.filter_multi_output('dummy')['out']
assert repr(dummy_out) == 'dummy()[{!r}] <{}>'.format( assert repr(dummy_out) == "dummy()[{!r}] <{}>".format(dummy_out.label, dummy_out.node.short_hash)
dummy_out.label, dummy_out.node.short_hash
)
def test_repeated_args(): def test_get_args_simple():
out_file = ffmpeg.input('dummy.mp4').output(
'dummy2.mp4', streamid=['0:0x101', '1:0x102']
)
assert out_file.get_args() == [
'-i',
'dummy.mp4',
'-streamid',
'0:0x101',
'-streamid',
'1:0x102',
'dummy2.mp4',
]
def test__get_args__simple():
out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4') out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4')
assert out_file.get_args() == ['-i', 'dummy.mp4', 'dummy2.mp4'] assert out_file.get_args() == ['-i', 'dummy.mp4', 'dummy2.mp4']
def test_global_args():
out_file = (
ffmpeg.input('dummy.mp4')
.output('dummy2.mp4')
.global_args('-progress', 'someurl')
)
assert out_file.get_args() == [
'-i',
'dummy.mp4',
'dummy2.mp4',
'-progress',
'someurl',
]
def _get_simple_example():
return ffmpeg.input(TEST_INPUT_FILE1).output(TEST_OUTPUT_FILE1)
def _get_complex_filter_example(): def _get_complex_filter_example():
split = ffmpeg.input(TEST_INPUT_FILE1).vflip().split() split = (ffmpeg
.input(TEST_INPUT_FILE1)
.vflip()
.split()
)
split0 = split[0] split0 = split[0]
split1 = split[1] split1 = split[1]
overlay_file = ffmpeg.input(TEST_OVERLAY_FILE) overlay_file = ffmpeg.input(TEST_OVERLAY_FILE)
overlay_file = ffmpeg.crop(overlay_file, 10, 10, 158, 112) return (ffmpeg
return ( .concat(
ffmpeg.concat(
split0.trim(start_frame=10, end_frame=20), split0.trim(start_frame=10, end_frame=20),
split1.trim(start_frame=30, end_frame=40), split1.trim(start_frame=30, end_frame=40),
) )
@ -175,225 +126,38 @@ def _get_complex_filter_example():
) )
def test__get_args__complex_filter(): def test_get_args_complex_filter():
out = _get_complex_filter_example() out = _get_complex_filter_example()
args = ffmpeg.get_args(out) args = ffmpeg.get_args(out)
assert args == [ assert args == ['-i', TEST_INPUT_FILE1,
'-i', '-i', TEST_OVERLAY_FILE,
TEST_INPUT_FILE1,
'-i',
TEST_OVERLAY_FILE,
'-filter_complex', '-filter_complex',
'[0]vflip[s0];' '[0]vflip[s0];' \
'[s0]split=2[s1][s2];' '[s0]split=2[s1][s2];' \
'[s1]trim=end_frame=20:start_frame=10[s3];' '[s1]trim=end_frame=20:start_frame=10[s3];' \
'[s2]trim=end_frame=40:start_frame=30[s4];' '[s2]trim=end_frame=40:start_frame=30[s4];' \
'[s3][s4]concat=n=2[s5];' '[s3][s4]concat=n=2[s5];' \
'[1]crop=158:112:10:10[s6];' '[1]hflip[s6];' \
'[s6]hflip[s7];' '[s5][s6]overlay=eof_action=repeat[s7];' \
'[s5][s7]overlay=eof_action=repeat[s8];' '[s7]drawbox=50:50:120:120:red:t=5[s8]',
'[s8]drawbox=50:50:120:120:red:t=5[s9]', '-map', '[s8]', TEST_OUTPUT_FILE1,
'-map', '-y'
'[s9]',
TEST_OUTPUT_FILE1,
'-y',
] ]
def test_combined_output():
i1 = ffmpeg.input(TEST_INPUT_FILE1)
i2 = ffmpeg.input(TEST_OVERLAY_FILE)
out = ffmpeg.output(i1, i2, TEST_OUTPUT_FILE1)
assert out.get_args() == [
'-i',
TEST_INPUT_FILE1,
'-i',
TEST_OVERLAY_FILE,
'-map',
'0',
'-map',
'1',
TEST_OUTPUT_FILE1,
]
@pytest.mark.parametrize('use_shorthand', [True, False])
def test_filter_with_selector(use_shorthand):
i = ffmpeg.input(TEST_INPUT_FILE1)
if use_shorthand:
v1 = i.video.hflip()
a1 = i.audio.filter('aecho', 0.8, 0.9, 1000, 0.3)
else:
v1 = i['v'].hflip()
a1 = i['a'].filter('aecho', 0.8, 0.9, 1000, 0.3)
out = ffmpeg.output(a1, v1, TEST_OUTPUT_FILE1)
assert out.get_args() == [
'-i',
TEST_INPUT_FILE1,
'-filter_complex',
'[0:a]aecho=0.8:0.9:1000:0.3[s0];' '[0:v]hflip[s1]',
'-map',
'[s0]',
'-map',
'[s1]',
TEST_OUTPUT_FILE1,
]
def test_get_item_with_bad_selectors():
input = ffmpeg.input(TEST_INPUT_FILE1)
with pytest.raises(ValueError) as excinfo:
input['a']['a']
assert str(excinfo.value).startswith('Stream already has a selector:')
with pytest.raises(TypeError) as excinfo:
input[:'a']
assert str(excinfo.value).startswith("Expected string index (e.g. 'a')")
with pytest.raises(TypeError) as excinfo:
input[5]
assert str(excinfo.value).startswith("Expected string index (e.g. 'a')")
def _get_complex_filter_asplit_example():
split = ffmpeg.input(TEST_INPUT_FILE1).vflip().asplit()
split0 = split[0]
split1 = split[1]
return (
ffmpeg.concat(
split0.filter('atrim', start=10, end=20),
split1.filter('atrim', start=30, end=40),
)
.output(TEST_OUTPUT_FILE1)
.overwrite_output()
)
def test_filter_concat__video_only():
in1 = ffmpeg.input('in1.mp4')
in2 = ffmpeg.input('in2.mp4')
args = ffmpeg.concat(in1, in2).output('out.mp4').get_args()
assert args == [
'-i',
'in1.mp4',
'-i',
'in2.mp4',
'-filter_complex',
'[0][1]concat=n=2[s0]',
'-map',
'[s0]',
'out.mp4',
]
def test_filter_concat__audio_only():
in1 = ffmpeg.input('in1.mp4')
in2 = ffmpeg.input('in2.mp4')
args = ffmpeg.concat(in1, in2, v=0, a=1).output('out.mp4').get_args()
assert args == [
'-i',
'in1.mp4',
'-i',
'in2.mp4',
'-filter_complex',
'[0][1]concat=a=1:n=2:v=0[s0]',
'-map',
'[s0]',
'out.mp4',
]
def test_filter_concat__audio_video():
in1 = ffmpeg.input('in1.mp4')
in2 = ffmpeg.input('in2.mp4')
joined = ffmpeg.concat(in1.video, in1.audio, in2.hflip(), in2['a'], v=1, a=1).node
args = ffmpeg.output(joined[0], joined[1], 'out.mp4').get_args()
assert args == [
'-i',
'in1.mp4',
'-i',
'in2.mp4',
'-filter_complex',
'[1]hflip[s0];[0:v][0:a][s0][1:a]concat=a=1:n=2:v=1[s1][s2]',
'-map',
'[s1]',
'-map',
'[s2]',
'out.mp4',
]
def test_filter_concat__wrong_stream_count():
in1 = ffmpeg.input('in1.mp4')
in2 = ffmpeg.input('in2.mp4')
with pytest.raises(ValueError) as excinfo:
ffmpeg.concat(in1.video, in1.audio, in2.hflip(), v=1, a=1).node
assert (
str(excinfo.value)
== 'Expected concat input streams to have length multiple of 2 (v=1, a=1); got 3'
)
def test_filter_asplit():
out = _get_complex_filter_asplit_example()
args = out.get_args()
assert args == [
'-i',
TEST_INPUT_FILE1,
'-filter_complex',
(
'[0]vflip[s0];'
'[s0]asplit=2[s1][s2];'
'[s1]atrim=end=20:start=10[s3];'
'[s2]atrim=end=40:start=30[s4];'
'[s3][s4]concat=n=2[s5]'
),
'-map',
'[s5]',
TEST_OUTPUT_FILE1,
'-y',
]
def test__output__bitrate():
args = (
ffmpeg.input('in')
.output('out', video_bitrate=1000, audio_bitrate=200)
.get_args()
)
assert args == ['-i', 'in', '-b:v', '1000', '-b:a', '200', 'out']
@pytest.mark.parametrize('video_size', [(320, 240), '320x240'])
def test__output__video_size(video_size):
args = ffmpeg.input('in').output('out', video_size=video_size).get_args()
assert args == ['-i', 'in', '-video_size', '320x240', 'out']
def test_filter_normal_arg_escape(): def test_filter_normal_arg_escape():
"""Test string escaping of normal filter args (e.g. ``font`` param of ``drawtext`` """Test string escaping of normal filter args (e.g. ``font`` param of ``drawtext`` filter)."""
filter).
"""
def _get_drawtext_font_repr(font): def _get_drawtext_font_repr(font):
"""Build a command-line arg using drawtext ``font`` param and extract the """Build a command-line arg using drawtext ``font`` param and extract the ``-filter_complex`` arg."""
``-filter_complex`` arg. args = (ffmpeg
""" .input('in')
args = (
ffmpeg.input('in')
.drawtext('test', font='a{}b'.format(font)) .drawtext('test', font='a{}b'.format(font))
.output('out') .output('out')
.get_args() .get_args()
) )
assert args[:3] == ['-i', 'in', '-filter_complex'] assert args[:3] == ['-i', 'in', '-filter_complex']
assert args[4:] == ['-map', '[s0]', 'out'] assert args[4:] == ['-map', '[s0]', 'out']
match = re.match( match = re.match(r'\[0\]drawtext=font=a((.|\n)*)b:text=test\[s0\]', args[3], re.MULTILINE)
r'\[0\]drawtext=font=a((.|\n)*)b:text=test\[s0\]',
args[3],
re.MULTILINE,
)
assert match is not None, 'Invalid -filter_complex arg: {!r}'.format(args[3]) assert match is not None, 'Invalid -filter_complex arg: {!r}'.format(args[3])
return match.group(1) return match.group(1)
@ -416,15 +180,15 @@ def test_filter_normal_arg_escape():
def test_filter_text_arg_str_escape(): def test_filter_text_arg_str_escape():
"""Test string escaping of normal filter args (e.g. ``text`` param of ``drawtext`` """Test string escaping of normal filter args (e.g. ``text`` param of ``drawtext`` filter)."""
filter).
"""
def _get_drawtext_text_repr(text): def _get_drawtext_text_repr(text):
"""Build a command-line arg using drawtext ``text`` param and extract the """Build a command-line arg using drawtext ``text`` param and extract the ``-filter_complex`` arg."""
``-filter_complex`` arg. args = (ffmpeg
""" .input('in')
args = ffmpeg.input('in').drawtext('a{}b'.format(text)).output('out').get_args() .drawtext('a{}b'.format(text))
.output('out')
.get_args()
)
assert args[:3] == ['-i', 'in', '-filter_complex'] assert args[:3] == ['-i', 'in', '-filter_complex']
assert args[4:] == ['-map', '[s0]', 'out'] assert args[4:] == ['-map', '[s0]', 'out']
match = re.match(r'\[0\]drawtext=text=a((.|\n)*)b\[s0\]', args[3], re.MULTILINE) match = re.match(r'\[0\]drawtext=text=a((.|\n)*)b\[s0\]', args[3], re.MULTILINE)
@ -449,182 +213,73 @@ def test_filter_text_arg_str_escape():
assert expected == actual assert expected == actual
# def test_version(): #def test_version():
# subprocess.check_call(['ffmpeg', '-version']) # subprocess.check_call(['ffmpeg', '-version'])
def test__compile(): def test_run():
out_file = ffmpeg.input('dummy.mp4').output('dummy2.mp4')
assert out_file.compile() == ['ffmpeg', '-i', 'dummy.mp4', 'dummy2.mp4']
assert out_file.compile(cmd='ffmpeg.old') == [
'ffmpeg.old',
'-i',
'dummy.mp4',
'dummy2.mp4',
]
@pytest.mark.parametrize('pipe_stdin', [True, False])
@pytest.mark.parametrize('pipe_stdout', [True, False])
@pytest.mark.parametrize('pipe_stderr', [True, False])
@pytest.mark.parametrize('cwd', [None, '/tmp'])
def test__run_async(mocker, pipe_stdin, pipe_stdout, pipe_stderr, cwd):
process__mock = mock.Mock()
popen__mock = mocker.patch.object(subprocess, 'Popen', return_value=process__mock)
stream = _get_simple_example()
process = ffmpeg.run_async(
stream,
pipe_stdin=pipe_stdin,
pipe_stdout=pipe_stdout,
pipe_stderr=pipe_stderr,
cwd=cwd,
)
assert process is process__mock
expected_stdin = subprocess.PIPE if pipe_stdin else None
expected_stdout = subprocess.PIPE if pipe_stdout else None
expected_stderr = subprocess.PIPE if pipe_stderr else None
(args,), kwargs = popen__mock.call_args
assert args == ffmpeg.compile(stream)
assert kwargs == dict(
stdin=expected_stdin,
stdout=expected_stdout,
stderr=expected_stderr,
cwd=cwd,
)
def test__run():
stream = _get_complex_filter_example() stream = _get_complex_filter_example()
out, err = ffmpeg.run(stream) ffmpeg.run(stream)
assert out is None
assert err is None
@pytest.mark.parametrize('capture_stdout', [True, False]) def test_run_multi_output():
@pytest.mark.parametrize('capture_stderr', [True, False])
def test__run__capture_out(mocker, capture_stdout, capture_stderr):
mocker.patch.object(ffmpeg._run, 'compile', return_value=['echo', 'test'])
stream = _get_simple_example()
out, err = ffmpeg.run(
stream, capture_stdout=capture_stdout, capture_stderr=capture_stderr
)
if capture_stdout:
assert out == 'test\n'.encode()
else:
assert out is None
if capture_stderr:
assert err == ''.encode()
else:
assert err is None
def test__run__input_output(mocker):
mocker.patch.object(ffmpeg._run, 'compile', return_value=['cat'])
stream = _get_simple_example()
out, err = ffmpeg.run(stream, input='test'.encode(), capture_stdout=True)
assert out == 'test'.encode()
assert err is None
@pytest.mark.parametrize('capture_stdout', [True, False])
@pytest.mark.parametrize('capture_stderr', [True, False])
def test__run__error(mocker, capture_stdout, capture_stderr):
mocker.patch.object(ffmpeg._run, 'compile', return_value=['ffmpeg'])
stream = _get_complex_filter_example()
with pytest.raises(ffmpeg.Error) as excinfo:
out, err = ffmpeg.run(
stream, capture_stdout=capture_stdout, capture_stderr=capture_stderr
)
assert str(excinfo.value) == 'ffmpeg error (see stderr output for detail)'
out = excinfo.value.stdout
err = excinfo.value.stderr
if capture_stdout:
assert out == ''.encode()
else:
assert out is None
if capture_stderr:
assert err.decode().startswith('ffmpeg version')
else:
assert err is None
def test__run__multi_output():
in_ = ffmpeg.input(TEST_INPUT_FILE1) in_ = ffmpeg.input(TEST_INPUT_FILE1)
out1 = in_.output(TEST_OUTPUT_FILE1) out1 = in_.output(TEST_OUTPUT_FILE1)
out2 = in_.output(TEST_OUTPUT_FILE2) out2 = in_.output(TEST_OUTPUT_FILE2)
ffmpeg.run([out1, out2], overwrite_output=True) ffmpeg.run([out1, out2], overwrite_output=True)
def test__run__dummy_cmd(): def test_run_dummy_cmd():
stream = _get_complex_filter_example() stream = _get_complex_filter_example()
ffmpeg.run(stream, cmd='true') ffmpeg.run(stream, cmd='true')
def test__run__dummy_cmd_list(): def test_run_dummy_cmd_list():
stream = _get_complex_filter_example() stream = _get_complex_filter_example()
ffmpeg.run(stream, cmd=['true', 'ignored']) ffmpeg.run(stream, cmd=['true', 'ignored'])
def test__filter__custom(): def test_run_failing_cmd():
stream = _get_complex_filter_example()
with pytest.raises(subprocess.CalledProcessError):
ffmpeg.run(stream, cmd='false')
def test_custom_filter():
stream = ffmpeg.input('dummy.mp4') stream = ffmpeg.input('dummy.mp4')
stream = ffmpeg.filter(stream, 'custom_filter', 'a', 'b', kwarg1='c') stream = ffmpeg.filter_(stream, 'custom_filter', 'a', 'b', kwarg1='c')
stream = ffmpeg.output(stream, 'dummy2.mp4') stream = ffmpeg.output(stream, 'dummy2.mp4')
assert stream.get_args() == [ assert stream.get_args() == [
'-i', '-i', 'dummy.mp4',
'dummy.mp4', '-filter_complex', '[0]custom_filter=a:b:kwarg1=c[s0]',
'-filter_complex', '-map', '[s0]',
'[0]custom_filter=a:b:kwarg1=c[s0]', 'dummy2.mp4'
'-map',
'[s0]',
'dummy2.mp4',
] ]
def test__filter__custom_fluent(): def test_custom_filter_fluent():
stream = ( stream = (ffmpeg
ffmpeg.input('dummy.mp4') .input('dummy.mp4')
.filter('custom_filter', 'a', 'b', kwarg1='c') .filter_('custom_filter', 'a', 'b', kwarg1='c')
.output('dummy2.mp4') .output('dummy2.mp4')
) )
assert stream.get_args() == [ assert stream.get_args() == [
'-i', '-i', 'dummy.mp4',
'dummy.mp4', '-filter_complex', '[0]custom_filter=a:b:kwarg1=c[s0]',
'-filter_complex', '-map', '[s0]',
'[0]custom_filter=a:b:kwarg1=c[s0]', 'dummy2.mp4'
'-map',
'[s0]',
'dummy2.mp4',
] ]
def test__merge_outputs(): def test_merge_outputs():
in_ = ffmpeg.input('in.mp4') in_ = ffmpeg.input('in.mp4')
out1 = in_.output('out1.mp4') out1 = in_.output('out1.mp4')
out2 = in_.output('out2.mp4') out2 = in_.output('out2.mp4')
assert ffmpeg.merge_outputs(out1, out2).get_args() == [ assert ffmpeg.merge_outputs(out1, out2).get_args() == [
'-i', '-i', 'in.mp4', 'out1.mp4', 'out2.mp4'
'in.mp4',
'out1.mp4',
'out2.mp4',
] ]
assert ffmpeg.get_args([out1, out2]) == ['-i', 'in.mp4', 'out2.mp4', 'out1.mp4'] assert ffmpeg.get_args([out1, out2]) == [
'-i', 'in.mp4', 'out2.mp4', 'out1.mp4'
def test__input__start_time():
assert ffmpeg.input('in', ss=10.5).output('out').get_args() == [
'-ss',
'10.5',
'-i',
'in',
'out',
]
assert ffmpeg.input('in', ss=0.0).output('out').get_args() == [
'-ss',
'0.0',
'-i',
'in',
'out',
] ]
@ -633,54 +288,18 @@ def test_multi_passthrough():
out2 = ffmpeg.input('in2.mp4').output('out2.mp4') out2 = ffmpeg.input('in2.mp4').output('out2.mp4')
out = ffmpeg.merge_outputs(out1, out2) out = ffmpeg.merge_outputs(out1, out2)
assert ffmpeg.get_args(out) == [ assert ffmpeg.get_args(out) == [
'-i', '-i', 'in1.mp4',
'in1.mp4', '-i', 'in2.mp4',
'-i',
'in2.mp4',
'out1.mp4', 'out1.mp4',
'-map', '-map', '[1]', # FIXME: this should not be here (see #23)
'1', 'out2.mp4'
'out2.mp4',
] ]
assert ffmpeg.get_args([out1, out2]) == [ assert ffmpeg.get_args([out1, out2]) == [
'-i', '-i', 'in2.mp4',
'in2.mp4', '-i', 'in1.mp4',
'-i',
'in1.mp4',
'out2.mp4', 'out2.mp4',
'-map', '-map', '[1]', # FIXME: this should not be here (see #23)
'1', 'out1.mp4'
'out1.mp4',
]
def test_passthrough_selectors():
i1 = ffmpeg.input(TEST_INPUT_FILE1)
args = ffmpeg.output(i1['1'], i1['2'], TEST_OUTPUT_FILE1).get_args()
assert args == [
'-i',
TEST_INPUT_FILE1,
'-map',
'0:1',
'-map',
'0:2',
TEST_OUTPUT_FILE1,
]
def test_mixed_passthrough_selectors():
i1 = ffmpeg.input(TEST_INPUT_FILE1)
args = ffmpeg.output(i1['1'].hflip(), i1['2'], TEST_OUTPUT_FILE1).get_args()
assert args == [
'-i',
TEST_INPUT_FILE1,
'-filter_complex',
'[0:1]hflip[s0]',
'-map',
'[s0]',
'-map',
'0:2',
TEST_OUTPUT_FILE1,
] ]
@ -691,131 +310,33 @@ def test_pipe():
frame_count = 10 frame_count = 10
start_frame = 2 start_frame = 2
out = ( out = (ffmpeg
ffmpeg.input( .input('pipe:0', format='rawvideo', pixel_format='rgb24', video_size=(width, height), framerate=10)
'pipe:0',
format='rawvideo',
pixel_format='rgb24',
video_size=(width, height),
framerate=10,
)
.trim(start_frame=start_frame) .trim(start_frame=start_frame)
.output('pipe:1', format='rawvideo') .output('pipe:1', format='rawvideo')
) )
args = out.get_args() args = out.get_args()
assert args == [ assert args == [
'-f', '-f', 'rawvideo',
'rawvideo', '-video_size', '{}x{}'.format(width, height),
'-video_size', '-framerate', '10',
'{}x{}'.format(width, height), '-pixel_format', 'rgb24',
'-framerate', '-i', 'pipe:0',
'10',
'-pixel_format',
'rgb24',
'-i',
'pipe:0',
'-filter_complex', '-filter_complex',
'[0]trim=start_frame=2[s0]', '[0]trim=start_frame=2[s0]',
'-map', '-map', '[s0]',
'[s0]', '-f', 'rawvideo',
'-f', 'pipe:1'
'rawvideo',
'pipe:1',
] ]
cmd = ['ffmpeg'] + args cmd = ['ffmpeg'] + args
p = subprocess.Popen( p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
in_data = bytes( in_data = bytes(bytearray([random.randint(0,255) for _ in range(frame_size * frame_count)]))
bytearray([random.randint(0, 255) for _ in range(frame_size * frame_count)])
)
p.stdin.write(in_data) # note: this could block, in which case need to use threads p.stdin.write(in_data) # note: this could block, in which case need to use threads
p.stdin.close() p.stdin.close()
out_data = p.stdout.read() out_data = p.stdout.read()
assert len(out_data) == frame_size * (frame_count - start_frame) assert len(out_data) == frame_size * (frame_count - start_frame)
assert out_data == in_data[start_frame * frame_size :] assert out_data == in_data[start_frame*frame_size:]
def test__probe():
data = ffmpeg.probe(TEST_INPUT_FILE1)
assert set(data.keys()) == {'format', 'streams'}
assert data['format']['duration'] == '7.036000'
@pytest.mark.skipif(sys.version_info < (3, 3), reason='requires python3.3 or higher')
def test__probe_timeout():
with pytest.raises(subprocess.TimeoutExpired) as excinfo:
ffmpeg.probe(TEST_INPUT_FILE1, timeout=0)
assert 'timed out after 0 seconds' in str(excinfo.value)
def test__probe__exception():
with pytest.raises(ffmpeg.Error) as excinfo:
ffmpeg.probe(BOGUS_INPUT_FILE)
assert str(excinfo.value) == 'ffprobe error (see stderr output for detail)'
assert 'No such file or directory'.encode() in excinfo.value.stderr
def test__probe__extra_args():
data = ffmpeg.probe(TEST_INPUT_FILE1, show_frames=None)
assert set(data.keys()) == {'format', 'streams', 'frames'}
def get_filter_complex_input(flt, name):
m = re.search(r'\[([^]]+)\]{}(?=[[;]|$)'.format(name), flt)
if m:
return m.group(1)
else:
return None
def get_filter_complex_outputs(flt, name):
m = re.search(r'(^|[];]){}((\[[^]]+\])+)(?=;|$)'.format(name), flt)
if m:
return m.group(2)[1:-1].split('][')
else:
return None
def test__get_filter_complex_input():
assert get_filter_complex_input('', 'scale') is None
assert get_filter_complex_input('scale', 'scale') is None
assert get_filter_complex_input('scale[s3][s4];etc', 'scale') is None
assert get_filter_complex_input('[s2]scale', 'scale') == 's2'
assert get_filter_complex_input('[s2]scale;etc', 'scale') == 's2'
assert get_filter_complex_input('[s2]scale[s3][s4];etc', 'scale') == 's2'
def test__get_filter_complex_outputs():
assert get_filter_complex_outputs('', 'scale') is None
assert get_filter_complex_outputs('scale', 'scale') is None
assert get_filter_complex_outputs('scalex[s0][s1]', 'scale') is None
assert get_filter_complex_outputs('scale[s0][s1]', 'scale') == ['s0', 's1']
assert get_filter_complex_outputs('[s5]scale[s0][s1]', 'scale') == ['s0', 's1']
assert get_filter_complex_outputs('[s5]scale[s1][s0]', 'scale') == ['s1', 's0']
assert get_filter_complex_outputs('[s5]scale[s1]', 'scale') == ['s1']
assert get_filter_complex_outputs('[s5]scale[s1];x', 'scale') == ['s1']
assert get_filter_complex_outputs('y;[s5]scale[s1];x', 'scale') == ['s1']
def test__multi_output_edge_label_order():
scale2ref = ffmpeg.filter_multi_output(
[ffmpeg.input('x'), ffmpeg.input('y')], 'scale2ref'
)
out = ffmpeg.merge_outputs(
scale2ref[1].filter('scale').output('a'),
scale2ref[10000].filter('hflip').output('b'),
)
args = out.get_args()
flt_cmpl = args[args.index('-filter_complex') + 1]
out1, out2 = get_filter_complex_outputs(flt_cmpl, 'scale2ref')
assert out1 == get_filter_complex_input(flt_cmpl, 'scale')
assert out2 == get_filter_complex_input(flt_cmpl, 'hflip')

View File

@ -1,15 +0,0 @@
[tool.black]
skip-string-normalization = true
target_version = ['py27'] # TODO: drop Python 2 support (... "Soon").
include = '\.pyi?$'
exclude = '''
(
/(
\.eggs
| \.git
| \.tox
| \venv
| dist
)/
)
'''

5
requirements-base.txt Normal file
View File

@ -0,0 +1,5 @@
future
pytest
pytest-runner
sphinx
tox

View File

@ -1,40 +1,25 @@
alabaster==0.7.12 alabaster==0.7.10
atomicwrites==1.3.0 Babel==2.5.1
attrs==19.1.0 certifi==2017.7.27.1
Babel==2.7.0
certifi==2019.3.9
chardet==3.0.4 chardet==3.0.4
docutils==0.14 docutils==0.14
filelock==3.0.12 future==0.16.0
future==0.17.1 idna==2.6
idna==2.8 imagesize==0.7.1
imagesize==1.1.0 Jinja2==2.9.6
importlib-metadata==0.17 MarkupSafe==1.0
Jinja2==2.10.1 pluggy==0.5.2
MarkupSafe==1.1.1 py==1.4.34
more-itertools==7.0.0 Pygments==2.2.0
numpy==1.16.4 pytest==3.2.3
packaging==19.0 pytest-runner==3.0
pluggy==0.12.0 pytz==2017.3
py==1.8.0 requests==2.18.4
Pygments==2.4.2 six==1.11.0
pyparsing==2.4.0
pytest==4.6.1
pytest-mock==1.10.4
pytz==2019.1
requests==2.22.0
six==1.12.0
snowballstemmer==1.2.1 snowballstemmer==1.2.1
Sphinx==2.1.0 Sphinx==1.6.5
sphinxcontrib-applehelp==1.0.1 sphinxcontrib-websupport==1.0.1
sphinxcontrib-devhelp==1.0.1 tox==2.9.1
sphinxcontrib-htmlhelp==1.0.2 typing==3.6.2
sphinxcontrib-jsmath==1.0.1 urllib3==1.22
sphinxcontrib-qthelp==1.0.2 virtualenv==15.1.0
sphinxcontrib-serializinghtml==1.1.3
toml==0.10.0
tox==3.12.1
urllib3==1.25.3
virtualenv==16.6.0
wcwidth==0.1.7
zipp==0.5.1

View File

@ -1,27 +1,24 @@
from setuptools import setup from setuptools import setup
from textwrap import dedent from textwrap import dedent
import subprocess
version = '0.2.0' version = '0.1.8'
download_url = 'https://github.com/kkroening/ffmpeg-python/archive/v{}.zip'.format( download_url = 'https://github.com/kkroening/ffmpeg-python/archive/v{}.zip'.format(version)
version
)
long_description = dedent( long_description = dedent("""\
'''\
ffmpeg-python: Python bindings for FFmpeg ffmpeg-python: Python bindings for FFmpeg
========================================= =========================================
:Github: https://github.com/kkroening/ffmpeg-python :Github: https://github.com/kkroening/ffmpeg-python
:API Reference: https://kkroening.github.io/ffmpeg-python/ :API Reference: https://kkroening.github.io/ffmpeg-python/
''' """)
)
file_formats = [ file_formats = [
'aac', 'aac',
'ac3', 'ac3',
'avi', 'avi',
'bmp', 'bmp'
'flac', 'flac',
'gif', 'gif',
'mov', 'mov',
@ -60,8 +57,10 @@ keywords = misc_keywords + file_formats
setup( setup(
name='ffmpeg-python', name='ffmpeg-python',
packages=['ffmpeg'], packages=['ffmpeg'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
version=version, version=version,
description='Python bindings for FFmpeg - with complex filtering support', description='Python bindings for FFmpeg - with support for complex filtering',
author='Karl Kroening', author='Karl Kroening',
author_email='karlk@kralnet.us', author_email='karlk@kralnet.us',
url='https://github.com/kkroening/ffmpeg-python', url='https://github.com/kkroening/ffmpeg-python',
@ -69,16 +68,6 @@ setup(
keywords=keywords, keywords=keywords,
long_description=long_description, long_description=long_description,
install_requires=['future'], install_requires=['future'],
extras_require={
'dev': [
'future==0.17.1',
'numpy==1.16.4',
'pytest-mock==1.10.4',
'pytest==4.6.1',
'Sphinx==2.1.0',
'tox==3.12.1',
]
},
classifiers=[ classifiers=[
'Intended Audience :: Developers', 'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License', 'License :: OSI Approved :: Apache Software License',
@ -92,9 +81,5 @@ setup(
'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
], ],
) )

13
tox.ini
View File

@ -4,21 +4,10 @@
# and then run "tox" from this directory. # and then run "tox" from this directory.
[tox] [tox]
envlist = py27, py35, py36, py37, py38, py39, py310 envlist = py27, py33, py34, py35, py36, pypy
[gh-actions]
python =
2.7: py27
3.5: py35
3.6: py36
3.7: py37
3.8: py38
3.9: py39
3.10: py310
[testenv] [testenv]
commands = py.test -vv commands = py.test -vv
deps = deps =
future future
pytest pytest
pytest-mock