mirror of https://github.com/kkroening/ffmpeg-python.git
synced 2025-08-07 18:39:47 +08:00

Commit 65f68a8943: Merge 506d94ab01117545674200847b00d214b6619ae8 into 78fb6cf2f11cb93c6071c978a92a640f5743a9fb

.gitignore (vendored) | 4
@ -1,3 +1,5 @@
*.py[co]
.cache
.eggs
.tox/
@ -6,3 +8,5 @@ ffmpeg/tests/sample_data/out*.mp4
ffmpeg_python.egg-info/
venv*
build/

*~
Makefile (new file) | 19
@ -0,0 +1,19 @@
## Automate common development tasks

.PHONY: default
default: ffmpeg/detect.json


.tox/py37/bin/python:
	tox -e py37
	touch "$(@)"

.tox/py37/lib/python3.7/site-packages/pandas: .tox/py37/bin/python
	.tox/py37/bin/pip install requests lxml pandas
	touch "$(@)"

.PHONY: ffmpeg/detect.json
ffmpeg/detect.json: .tox/py37/lib/python3.7/site-packages/pandas
	.tox/py37/bin/python examples/get_detect_data.py >"$(@)"
examples/get_detect_data.py (new executable file) | 194
@ -0,0 +1,194 @@
#!/usr/bin/env python
"""
Retrieve and process all the external data for hardware detection.
"""

import sys
import collections
import math
import json

import requests
import pandas

from ffmpeg import _detect


PLATFORM_TO_PY = {
    'Apple': 'Darwin',
}

HWACCELINTRO_URL = 'https://trac.ffmpeg.org/wiki/HWAccelIntro'
API_TO_HWACCEL = {
    'AMF': 'amf',
    'NVENC/NVDEC/CUVID': 'cuvid',
    'Direct3D 11': 'd3d11va',
    'Direct3D 9 (DXVA2)': 'dxva2',
    'libmfx': 'libmfx',
    'MediaCodec': 'mediacodec',
    'Media Foundation': 'mediafoundation',
    'MMAL': 'mmal',
    'OpenCL': 'opencl',
    'OpenMAX': 'omx',
    'V4L2 M2M': 'v4l2m2m',
    'VAAPI': 'vaapi',
    'VDPAU': 'vdpau',
    'VideoToolbox': 'videotoolbox',
}

NVIDIA_GPU_MATRIX_URL = (
    'https://developer.nvidia.com/video-encode-decode-gpu-support-matrix')
NVIDIA_LINE_SUFFIXES = {'geforce': ['gtx titan', 'gtx', 'gt', 'rtx']}
NVIDIA_CODEC_COLUMN_PREFIXES = {
    'mpeg-1': 'mpeg1video', 'mpeg-2': 'mpeg2video',
    'vc-1': 'vc1',
    'vp8': 'vp8', 'vp9': 'vp9',
    'h.264': 'h264', 'h.265': 'hevc'}


def get_hwaccel_data():
    """
    Download the ffmpeg hwaccel API support matrix to detection data.
    """
    response = requests.get(HWACCELINTRO_URL)
    api_avail_table, impl_table = pandas.read_html(response.content)

    gpu_vendor_cols = api_avail_table.loc[1][1:]
    platform_cols = api_avail_table.loc[0][1:]
    api_rows = api_avail_table[0][2:]

    hwaccels = collections.OrderedDict()
    hwaccels['api_avail'] = platforms = collections.OrderedDict()
    for gpu_vendor_idx, gpu_vendor in enumerate(gpu_vendor_cols):
        platform = platform_cols[gpu_vendor_idx + 1]
        platform = PLATFORM_TO_PY.get(platform, platform)
        gpu_vendors = platforms.setdefault(platform, collections.OrderedDict())
        avail_hwaccels = gpu_vendors.setdefault(gpu_vendor, [])
        for api_idx, api in enumerate(api_rows):
            if api_avail_table[gpu_vendor_idx + 1][api_idx + 2] != 'N':
                avail_hwaccels.append(API_TO_HWACCEL[api])

    return hwaccels


def get_nvidia_data():
    """
    Download the NVIDIA GPU support matrix to detection data.
    """
    response = requests.get(NVIDIA_GPU_MATRIX_URL)
    tables = pandas.read_html(response.content)
    (
        nvenc_recent, nvenc_consumer, nvenc_workstation, nvenc_virt,
        nvdec_recent, nvdec_consumer, nvdec_workstation, nvdec_virt) = tables
    nv_coders = dict(
        encoders=(
            nvenc_recent, nvenc_consumer, nvenc_workstation, nvenc_virt),
        decoders=(
            nvdec_recent, nvdec_consumer, nvdec_workstation, nvdec_virt))
    nvidia = collections.OrderedDict(lines=[])

    # Compile aggregate data needed to parse individual rows
    for nv_coder_table in tables:
        for board in nv_coder_table['BOARD']:
            if board == 'BOARD':
                continue
            line = board.replace('\xa0', ' ').split(None, 1)[0].lower()
            if line not in nvidia['lines']:
                nvidia['lines'].append(line)
    for line, line_suffixes in NVIDIA_LINE_SUFFIXES.items():
        for line_suffix in reversed(line_suffixes):
            nvidia['lines'].insert(0, ' '.join((line, line_suffix)))

    for coder_type, nv_coder_tables in nv_coders.items():
        coder_data = nvidia[coder_type] = collections.OrderedDict(
            model_lines=collections.OrderedDict(),
            boards=collections.OrderedDict())
        for nv_coder_table in nv_coder_tables:
            for nv_coder_row_idx, nv_coder_row in nv_coder_table.iterrows():
                nv_coder_row_values = {
                    idx: cell for idx, cell in enumerate(nv_coder_row[1:]) if (
                        cell and
                        not (isinstance(cell, float) and math.isnan(cell)))}
                if not nv_coder_row_values:
                    # Divider row
                    continue

                # Assemble the data for this row to use for each model or range
                model_data = collections.OrderedDict()
                for key, value in nv_coder_row.items():
                    if isinstance(key, tuple):
                        if key[0] == key[1]:
                            key = key[0]
                        else:
                            key = ' '.join(key)
                    if value in {'YES', 'NO'}:
                        model_data[key] = value == 'YES'
                    else:
                        model_data[key] = value
                model_data['BOARD'] = model_data['BOARD'].replace('\xa0', ' ')
                # Add keys for the ffmpeg codec names for fast lookup
                for codec_prefix, codec in (
                        NVIDIA_CODEC_COLUMN_PREFIXES.items()):
                    for column_idx, column in enumerate(nv_coder_row.keys()):
                        if isinstance(column, tuple):
                            if column[0] == column[1]:
                                column = column[0]
                            else:
                                column = ' '.join(column)
                        if column.lower().startswith(codec_prefix):
                            model_data[codec] = nv_coder_row[
                                column_idx] == 'YES'
                            break
                    else:
                        # Assume encoder support is not available
                        model_data[codec] = False

                coder_data['boards'][model_data['BOARD']] = model_data

                _detect._parse_models(
                    model_lines=nvidia['lines'],
                    boards=model_data['BOARD'].lower(),
                    model_data=model_data['BOARD'],
                    model_lines_data=coder_data['model_lines'])

        # Cleanup any deviations from the convention where models from
        # multiple lines are in the same BOARD cell
        for model_line, model_line_data in coder_data['model_lines'].items():
            for line, line_suffixes in NVIDIA_LINE_SUFFIXES.items():
                if not model_line.startswith(line):
                    continue
                for model_num, boards in list(
                        model_line_data['models'].items()):
                    for line_suffix in line_suffixes:
                        if not model_num.startswith(line_suffix + ' '):
                            continue
                        coder_data['model_lines'][
                            ' '.join((line, line_suffix))]['models'][
                                model_num[len(line_suffix + ' '):]
                            ] = model_line_data['models'].pop(model_num)
        # Clean up some annoying clashes between the titan model line and
        # GeForce GTX model numbers
        del coder_data['model_lines']['geforce gtx titan']['models']['']
        coder_data['model_lines']['geforce gtx titan']['models'][
            'xp'] = coder_data['model_lines']['titan']['models'].pop('xp')
        coder_data['model_lines']['geforce gtx titan']['models'][
            'black'] = titan_black = coder_data['model_lines'][
                'titan']['models'].pop('black')
        coder_data['model_lines']['geforce gtx']['models'][
            'titan'] = titan_black

    return nvidia


def main():
    """
    Download ffmpeg detection data.
    """
    data = collections.OrderedDict(
        hwaccels=get_hwaccel_data(),
        nvidia=get_nvidia_data(),
    )
    json.dump(data, sys.stdout, indent=2)


if __name__ == '__main__':
    main()
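
For orientation, the script above writes a JSON document with two top-level keys, `hwaccels` and `nvidia`. The following is a minimal sketch of the expected shape, inferred from `get_hwaccel_data()` and `get_nvidia_data()` above; the concrete platform, vendor, and hwaccel values are illustrative placeholders, not data taken from the real pages:

# Hypothetical excerpt of the JSON written by examples/get_detect_data.py;
# the nesting follows the code above, the values are made up.
EXAMPLE_DETECT_DATA = {
    'hwaccels': {
        'api_avail': {                       # platform -> GPU vendor -> hwaccel names
            'Linux': {'NVIDIA': ['cuvid', 'vdpau', 'vaapi']},
            'Windows': {'AMD': ['amf', 'd3d11va', 'dxva2']},
        },
    },
    'nvidia': {
        'lines': ['geforce gtx', 'geforce gt', 'geforce', 'quadro'],
        'encoders': {'model_lines': {}, 'boards': {}},   # per-board codec flags
        'decoders': {'model_lines': {}, 'boards': {}},
    },
}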
ffmpeg/__init__.py

@ -1,11 +1,15 @@
from __future__ import unicode_literals
from . import nodes
from . import _build
from . import _detect
from . import _ffmpeg
from . import _filters
from . import _probe
from . import _run
from . import _view
from .nodes import *
from ._build import *
from ._detect import *
from ._ffmpeg import *
from ._filters import *
from ._probe import *
@ -14,6 +18,8 @@ from ._view import *

__all__ = (
    nodes.__all__
    + _build.__all__
    + _detect.__all__
    + _ffmpeg.__all__
    + _probe.__all__
    + _run.__all__
ffmpeg/_build.py (new file) | 350
@ -0,0 +1,350 @@
"""
Extract details about the ffmpeg build.
"""

import sys
import re
import subprocess
import json
import logging
import argparse

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    '--ffmpeg', default='ffmpeg',
    help='The path to the ffmpeg executable')


VERSION = dict(
    RE=re.compile(r' version (?P<version>[^ ]+) '))

MUXER = dict(
    RE=re.compile(
        r'^ (?P<demuxing>[D ])(?P<muxing>[E ]) '
        r'(?P<name>[^ ]+) +(?P<description>.+)$',
        re.M),
    FLAGS=dict(demuxing='D', muxing='E'))

CODEC = dict(
    RE=re.compile(
        r'^ (?P<decoding>[D.])(?P<encoding>[E.])'
        r'(?P<stream>[VAS.])(?P<intra_frame>[I.])'
        r'(?P<lossy>[L.])(?P<lossless>[S.]) '
        r'(?P<name>[^ ]+) +(?P<description>.+)$',
        re.M),
    FLAGS=dict(
        decoding='D', encoding='E',
        stream=dict(video='V', audio='A', subtitle='S'),
        intra_frame='I', lossy='L', lossless='S'),
    DESCRIPTION_RE=re.compile(
        r'^(?P<description>.+?) \((de|en)coders: [^)]+ \)'),
    CODERS_RE=re.compile(
        r' \((?P<type>(de|en)coders): (?P<coders>[^)]+) \)'))

HWACCEL = dict(
    SYNONYMS=dict(cuvid=['nvenc', 'nvdec', 'cuda']))

FILTER = dict(
    RE=re.compile(
        r'^ (?P<timeline>[T.])(?P<slice>[S.])(?P<command>[C.]) '
        r'(?P<name>[^ ]+) +(?P<io>[^ ]+) +(?P<description>.+)$',
        re.M),
    FLAGS=dict(timeline='T', slice='S', command='C'))

PIX_FMT = dict(
    RE=re.compile(
        r'^(?P<input>[I.])(?P<output>[O.])(?P<accelerated>[H.])'
        r'(?P<palleted>[P.])(?P<bitstream>[B.]) '
        r'(?P<name>[^ ]+) +(?P<components>[0-9]+) +(?P<bits>[0-9]+)$',
        re.M),
    FLAGS=dict(
        input='I', output='O', accelerated='H', palleted='P', bitstream='B'),
    INT_FIELDS={'components', 'bits'})


def _run(args):
    """
    Run the command and return stdout but only print stderr on failure.
    """
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        logger.error(stderr)
        raise subprocess.CalledProcessError(
            process.returncode, process.args, output=stdout, stderr=stderr)
    return stdout


def _get_line_fields(
        stdout, header_lines, line_re, flags={}, int_fields=set()):
    """
    Extract field values from a line using the regular expression.
    """
    non_fields = set(flags).union({'name'})
    lines = stdout.split('\n', header_lines)[header_lines]
    data = {}
    for match in line_re.finditer(lines):
        groupdict = match.groupdict()

        data[match.group('name')] = fields = {
            key: key in int_fields and int(value) or value
            for key, value in groupdict.items()
            if key not in non_fields}

        if flags:
            fields['flags'] = {}
            for key, flag in flags.items():
                if isinstance(flag, dict):
                    fields['flags'][key] = groupdict[key]
                    for sub_key, sub_flag in flag.items():
                        fields['flags'][sub_key] = groupdict[key] == sub_flag
                else:
                    fields['flags'][key] = groupdict[key] == flag

    return data


def get_version(cmd='ffmpeg'):
    """
    Extract the version of the ffmpeg build.
    """
    stdout = _run([cmd, '-version'])
    match = VERSION['RE'].search(stdout.split('\n')[0])
    return match.group('version')


def get_formats(cmd='ffmpeg'):
    """
    Extract the formats of the ffmpeg build.
    """
    stdout = _run([cmd, '-formats'])
    return _get_line_fields(stdout, 4, MUXER['RE'], MUXER['FLAGS'])


def get_demuxers(cmd='ffmpeg'):
    """
    Extract the demuxers of the ffmpeg build.
    """
    stdout = _run([cmd, '-demuxers'])
    return _get_line_fields(stdout, 4, MUXER['RE'], MUXER['FLAGS'])


def get_muxers(cmd='ffmpeg'):
    """
    Extract the muxers of the ffmpeg build.
    """
    stdout = _run([cmd, '-muxers'])
    return _get_line_fields(stdout, 4, MUXER['RE'], MUXER['FLAGS'])


def get_codecs(cmd='ffmpeg'):
    """
    Extract the codecs of the ffmpeg build.
    """
    stdout = _run([cmd, '-codecs'])
    codecs = _get_line_fields(stdout, 10, CODEC['RE'], CODEC['FLAGS'])
    for codec in codecs.values():
        for coders_match in CODEC['CODERS_RE'].finditer(codec['description']):
            coders = coders_match.group(3).split()
            if coders:
                codec[coders_match.group(1)] = coders
        description_match = CODEC['DESCRIPTION_RE'].search(
            codec['description'])
        if description_match is not None:
            codec['description'] = description_match.group('description')
    return codecs


def get_bsfs(cmd='ffmpeg'):
    """
    Extract the bsfs of the ffmpeg build.
    """
    stdout = _run([cmd, '-bsfs'])
    return stdout.split('\n')[1:-2]


def get_protocols(cmd='ffmpeg'):
    """
    Extract the protocols of the ffmpeg build.
    """
    stdout = [
        line.strip() for line in
        _run([cmd, '-protocols']).split('\n')]
    input_idx = stdout.index('Input:')
    output_idx = stdout.index('Output:')
    return dict(
        input=stdout[input_idx + 1:output_idx],
        output=stdout[output_idx + 1:-1])


def get_filters(cmd='ffmpeg'):
    """
    Extract the filters of the ffmpeg build.
    """
    stdout = _run([cmd, '-filters'])
    return _get_line_fields(stdout, 8, FILTER['RE'], FILTER['FLAGS'])


def get_pix_fmts(cmd='ffmpeg'):
    """
    Extract the pix_fmts of the ffmpeg build.
    """
    stdout = _run([cmd, '-pix_fmts'])
    return _get_line_fields(
        stdout, 8, PIX_FMT['RE'], PIX_FMT['FLAGS'], PIX_FMT['INT_FIELDS'])


def get_sample_fmts(cmd='ffmpeg'):
    """
    Extract the sample_fmts of the ffmpeg build.
    """
    stdout = _run([cmd, '-sample_fmts'])
    fmts = {}
    for line in stdout.split('\n')[1:-1]:
        name, depth = line.split()
        fmts[name] = int(depth)
    return fmts


def get_layouts(cmd='ffmpeg'):
    """
    Extract the layouts of the ffmpeg build.
    """
    stdout = _run([cmd, '-layouts']).split('\n')
    channels_idx = stdout.index('Individual channels:')
    layouts_idx = stdout.index('Standard channel layouts:')
    data = {}

    data['channels'] = channels = {}
    for line in stdout[channels_idx + 2:layouts_idx - 1]:
        name, description = line.split(None, 1)
        channels[name] = description

    data['layouts'] = layouts = {}
    for line in stdout[layouts_idx + 2:-1]:
        name, decomposition = line.split(None, 1)
        layouts[name] = decomposition.split('+')

    return data


def get_colors(cmd='ffmpeg'):
    """
    Extract the colors of the ffmpeg build.
    """
    stdout = _run([cmd, '-colors'])
    return dict(line.split() for line in stdout.split('\n')[1:-1])


def get_devices(cmd='ffmpeg'):
    """
    Extract the devices of the ffmpeg build.
    """
    stdout = _run([cmd, '-devices'])
    return _get_line_fields(stdout, 4, MUXER['RE'], MUXER['FLAGS'])


def get_hw_devices(cmd='ffmpeg'):
    """
    Extract the hardware devices of the ffmpeg build.
    """
    stdout = _run([cmd, '-init_hw_device', 'list'])
    return stdout.split('\n')[1:-2]


def get_hwaccels(cmd='ffmpeg'):
    """
    Extract the hwaccels of the ffmpeg build, including specific codecs.

    Return all the hardware acceleration APIs supported by this build
    including all the codecs that are specific to the API.
    """
    data = dict(codecs=get_codecs(cmd=cmd), hwaccels=[])

    stdout = _run([cmd, '-hwaccels'])
    hwaccel_names = stdout.split('\n')[1:-2]

    for hwaccel_name in hwaccel_names:
        hwaccel = dict(name=hwaccel_name)
        data['hwaccels'].append(hwaccel)
        hwaccel['codecs'] = hwaccel_codecs = {}
        for codec_name, codec in data['codecs'].items():
            hwaccel_codec = {}
            for coders_key in ('decoders', 'encoders'):
                matching_coders = []
                for coder in codec.get(coders_key, []):
                    for synonym in (
                            [hwaccel_name] +
                            HWACCEL['SYNONYMS'].get(hwaccel_name, [])):
                        if (
                                coder == synonym or
                                '_' + synonym in coder or
                                synonym + '_' in coder):
                            matching_coders.append(coder)
                            break
                if matching_coders:
                    hwaccel_codec[coders_key] = matching_coders
            if hwaccel_codec:
                hwaccel_codecs[codec_name] = hwaccel_codec

    return data


def get_build_data(cmd='ffmpeg'):
    """
    Extract details about the ffmpeg build.
    """
    hwaccels_data = get_hwaccels(cmd=cmd)
    return dict(
        version=get_version(cmd=cmd),
        formats=get_formats(cmd=cmd),
        demuxers=get_demuxers(cmd=cmd),
        muxers=get_muxers(cmd=cmd),
        codecs=hwaccels_data['codecs'],
        bsfs=get_bsfs(cmd=cmd),
        protocols=get_protocols(cmd=cmd),
        filters=get_filters(cmd=cmd),
        pix_fmts=get_pix_fmts(cmd=cmd),
        sample_fmts=get_sample_fmts(cmd=cmd),
        layouts=get_layouts(cmd=cmd),
        colors=get_colors(cmd=cmd),
        devices=get_devices(cmd=cmd),
        hw_devices=get_hw_devices(cmd=cmd),
        hwaccels=hwaccels_data['hwaccels'])


__all__ = [
    'get_build_data',
    'get_version',
    'get_formats',
    'get_demuxers',
    'get_muxers',
    'get_codecs',
    'get_bsfs',
    'get_protocols',
    'get_filters',
    'get_pix_fmts',
    'get_sample_fmts',
    'get_layouts',
    'get_colors',
    'get_devices',
    'get_hw_devices',
    'get_hwaccels',
]


def main(args=None):
    """
    Dump all ffmpeg build data to json.
    """
    args = parser.parse_args(args)
    data = get_build_data(args.ffmpeg)
    json.dump(data, sys.stdout, indent=2)


if __name__ == '__main__':
    main()
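
Because the new `_build` helpers are re-exported from the package (see the `__init__.py` hunk above), build introspection can be used directly from `ffmpeg`. A minimal usage sketch, assuming an `ffmpeg` binary is on the PATH; the keys follow `get_build_data()` above and the printed values depend entirely on the local build:

import ffmpeg

build = ffmpeg.get_build_data()          # runs `ffmpeg -version`, `-codecs`, etc.
print(build['version'])                  # version string parsed from `ffmpeg -version`
print(sorted(build['pix_fmts'])[:3])     # pixel format names parsed from `-pix_fmts`
# Encoders for a codec, if that codec is in this build (e.g. 'libx264', 'h264_nvenc'):
print(build['codecs'].get('h264', {}).get('encoders', []))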
ffmpeg/_detect.py (new file) | 358
@ -0,0 +1,358 @@
"""Detect optimal arguments for various options.

This module includes a number of constants used to attempt to detect the
options which will provide the best performance for a given OS/GPU/etc..

For most of these constants, it only matters that the best performing option
available for a given OS/platform/hardware rank first for that
OS/platform/hardware, not which OS/platform/hardware performs better. For
example, it doesn't matter if `vdpau` is lower than `cuvid` or vice versa,
because one is only available for Linux and the other for Windows. Similarly,
it doesn't matter how `amf` is ranked with respect to `cuvid` because one is
only available on NVidia GPUs and the other AMD. It *does* matter how
`cuvid`/`amf` are ranked with respect to `dxva2` because those could both be
available on the same OS and GPU.

Additions and suggestions for these constants are very much welcome,
especially if they come with benchmarks and/or good explanations from those
who understand this domain well. Contributions of more complicated or
involved detection logic may also be welcome, though the case will have to be
made more rigorously.
"""

import sys
import platform
import os
import copy
import collections
import re
import json
import logging
import argparse
import subprocess

import ffmpeg

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    '--ffmpeg', default='ffmpeg',
    help='The path to the ffmpeg executable')

# Separators to divide a range of models within a line
MODEL_RANGE_SEPARATORS = ['-', '>']

HWACCEL = dict(
    # List `hwaccel` options by order of expected performance when available.
    BY_PERFORMANCE=[
        # NVidia cross-OS
        'cuda', 'cuvid', 'nvdec',
        # NVidia Linux
        'vdpau',
        # AMD Windows
        'amf',
        # Intel Windows
        'qsv',
        # Linux, not GPU specific
        'vaapi', 'drm',
        # Windows, not GPU specific
        'd3d11va', 'dxva2'],
    OUTPUT_FORMATS={
        'nvdec': 'cuda',
        'vaapi': 'vaapi'})

GPU = dict(
    PRODUCT_RE=re.compile(r'(?P<chip>[^[]+)(\[(?P<board>[^]]+)\]|)'),
    WMI_PROPERTIES=collections.OrderedDict(
        vendor='AdapterCompatibility', board='VideoProcessor'))

# Loaded from JSON
DATA = None


def detect_gpus():
    """
    Detect the vendor, generation and model for each GPU if possible.
    """
    plat_sys = platform.system()
    gpus = []

    if plat_sys == 'Linux':
        # TODO: Android and other Linux'es that don't have `lshw`
        display_output = subprocess.check_output(
            ['lshw', '-class', 'display', '-json'], universal_newlines=True)
        displays_data = json.loads(
            display_output.strip().strip(','),
            object_pairs_hook=collections.OrderedDict)
        if not isinstance(displays_data, list):
            # TODO: Confirm this is how `lshw` handles multiple GPUs
            displays_data = [displays_data]
        for display_data in displays_data:
            gpu = collections.OrderedDict(
                vendor=display_data['vendor'].replace(' Corporation', ''))
            # TODO get multiple GPUs from lshw
            gpus.append(gpu)

            product_match = GPU['PRODUCT_RE'].search(display_data['product'])
            if product_match:
                gpu.update(**product_match.groupdict())
                if not gpu['board']:
                    gpu['board'] = gpu.pop('chip')

    elif plat_sys == 'Windows':
        import wmi
        for controller in wmi.WMI().Win32_VideoController():
            gpu = collections.OrderedDict()
            for key, wmi_prop in GPU['WMI_PROPERTIES'].items():
                value = controller.wmi_property(wmi_prop).value
                if value:
                    gpu[key] = value
            if gpu:
                gpus.append(gpu)

    else:
        # TODO Other platforms
        raise NotImplementedError(
            'GPU detection for {0!r} not supported yet'.format(plat_sys))

    if not gpus:
        raise ValueError('No GPUs detected')

    data = _get_data()
    for gpu in gpus:
        vendor_data = data.get(gpu.get('vendor', '').lower())
        if vendor_data:

            model_lines_data = _parse_models(
                model_lines=vendor_data['lines'],
                boards=gpu['board'].lower(), model_data={})
            gpu['model_line'] = list(model_lines_data.keys())[0]
            gpu['model_num'] = list(model_lines_data[
                gpu['model_line']]['models'].keys())[0]

            for coder_type in ['encoders', 'decoders']:
                model_line_data = vendor_data[coder_type]['model_lines'][
                    gpu['model_line']]
                coder_boards = model_line_data['models'].get(
                    gpu['model_num'])
                if coder_boards is None:
                    for model_range, boards in model_line_data[
                            'model_ranges']:
                        # TODO proper model range matching
                        if gpu['model_num'] in model_range:
                            coder_boards = boards
                            break
                if coder_boards is None:
                    continue
                gpu[coder_type] = vendor_data[coder_type]['boards'][
                    coder_boards]

    return gpus


def detect_hwaccels(hwaccels=None, cmd='ffmpeg'):
    """
    Order the available hardware accelerations by performance.
    """
    # Filter against what's available in the ffmpeg build
    hwaccels_data = ffmpeg.get_hwaccels(cmd=cmd)
    if hwaccels is None:
        # Consider all the available hwaccels
        hwaccels = hwaccels_data['hwaccels']
    else:
        # Support passing in a restricted set of hwaccels
        hwaccels = [
            hwaccel for hwaccel in hwaccels_data['hwaccels']
            if hwaccel['name'] in hwaccels]

    # Filter against which APIs are available on this OS+GPU
    data = _get_data()
    plat_sys = platform.system()
    gpus = detect_gpus()
    api_avail = set()
    for gpu in gpus:
        vendor_apis = data['hwaccels']['api_avail'][plat_sys].get(
            gpu['vendor'])
        if vendor_apis:
            api_avail.update(vendor_apis)
    hwaccels = [
        hwaccel for hwaccel in hwaccels if hwaccel['name'] in api_avail]

    # Filter encoders and decoders based on what's supported by the GPU
    for gpu in gpus:
        for coder_type in ['encoders', 'decoders']:
            coder_data = gpu.get(coder_type)
            if coder_data is None:
                continue
            for hwaccel in hwaccels:
                for codec, coders in hwaccel.get('codecs', {}).items():
                    coder_supported = coder_data.get(codec)
                    if coder_supported is None or coder_supported:
                        # This encoder/decoder is supported, no need to filter
                        # it out
                        continue

                    # This codec isn't supported by the GPU hardware
                    coders.pop(coder_type, None)

    hwaccels.sort(key=lambda hwaccel: (
        # Sort unranked hwaccels last, but in the order given by ffmpeg
        hwaccel['name'] in HWACCEL['BY_PERFORMANCE'] and 1 or 0,
        (
            # Sort ranked hwaccels per the constant
            hwaccel['name'] in HWACCEL['BY_PERFORMANCE'] and
            HWACCEL['BY_PERFORMANCE'].index(hwaccel['name']))))

    hwaccels_data['hwaccels'] = hwaccels
    return hwaccels_data


def detect_codecs(decoder, encoder, hwaccels=None, cmd='ffmpeg'):
    """
    Detect the optimal decoders and encoders on the optimal hwaccel.
    """
    hwaccels_data = detect_hwaccels(hwaccels, cmd=cmd)

    build_codecs = hwaccels_data['codecs']

    avail_decoders = build_codecs.get(decoder, {}).get('decoders', [])
    avail_encoders = build_codecs.get(encoder, {}).get('encoders', [])

    codecs_kwargs = []
    default_kwargs = collections.OrderedDict(
        output=collections.OrderedDict())
    if avail_encoders:
        default_kwargs['output']['codec'] = avail_encoders[0]
    for hwaccel in hwaccels_data['hwaccels']:

        if hwaccel['codecs']:
            # This hwaccel requires specific coders.
            for hwaccel_encoder in hwaccel['codecs'].get(
                    encoder, {}).get('encoders', []):
                # We have an accelerated encoder, include it.
                # Remove hwaccel codecs from future consideration.
                avail_encoders.remove(hwaccel_encoder)
                hwaccel_kwargs = collections.OrderedDict(
                    input=collections.OrderedDict(hwaccel=hwaccel['name']),
                    output=collections.OrderedDict(codec=hwaccel_encoder))
                if hwaccel['name'] in HWACCEL['OUTPUT_FORMATS']:
                    hwaccel_kwargs['input']['hwaccel_output_format'] = (
                        HWACCEL['OUTPUT_FORMATS'][hwaccel['name']])
                codecs_kwargs.append(hwaccel_kwargs)
                for hwaccel_decoder in hwaccel['codecs'].get(
                        decoder, {}).get('decoders', []):
                    if hwaccel_decoder in avail_decoders:
                        # We have an accelerated decoder, can make a minor but
                        # significant difference.
                        # Remove hwaccel codecs from future consideration.
                        hwaccel_kwargs['input']['codec'] = hwaccel_decoder
                        avail_decoders.remove(hwaccel_decoder)
                # Otherwise let ffmpeg choose the decoder

        else:
            # This hwaccel doesn't require specific coders.
            hwaccel_kwargs = copy.deepcopy(default_kwargs)
            hwaccel_kwargs['input'] = collections.OrderedDict(
                hwaccel=hwaccel['name'])
            codecs_kwargs.append(hwaccel_kwargs)

    codecs_kwargs.append(default_kwargs)
    return codecs_kwargs


__all__ = [
    'detect_gpus',
    'detect_hwaccels',
    'detect_codecs',
]


def _get_data():
    """
    Don't load the data JSON unless needed, cache in a global.
    """
    global DATA
    if DATA is None:
        with open(os.path.join(
                os.path.dirname(__file__), 'detect.json')) as data_opened:
            DATA = json.load(
                data_opened, object_pairs_hook=collections.OrderedDict)
    return DATA


def _parse_models(
        model_lines, boards, model_data,
        model_lines_data=None, model_line=None):
    """
    Parse model lines, sets and ranges from a boards string.
    """
    if model_lines_data is None:
        model_lines_data = collections.OrderedDict()

    boards = boards.strip().lower()
    model_line_positions = [
        (boards.index(next_model_line), idx, next_model_line)
        for idx, next_model_line in enumerate(model_lines)
        if next_model_line in boards]
    if model_line_positions:
        pos, idx, next_model_line = min(model_line_positions)
        model_group, next_boards = boards.split(next_model_line.lower(), 1)
    else:
        model_group = boards
        next_boards = ''

    model_group = model_group.strip()
    if model_group:
        # First item is a model range for the previous model line
        model_line_data = model_lines_data.setdefault(
            model_line, collections.OrderedDict(
                models=collections.OrderedDict(), model_ranges=[]))

        models = []
        for model_split in model_group.split('/'):
            models.extend(
                model.strip()
                for model in model_split.split('+'))

        for model_range in models:
            for model_range_separator in MODEL_RANGE_SEPARATORS:
                model_range_parameters = model_range.split(
                    model_range_separator)
                if len(model_range_parameters) > 1:
                    # This is a range of models
                    if model_range in model_line_data['model_ranges']:
                        model_line_data['model_ranges'][
                            model_line_data['model_ranges'].index(
                                model_range)] = model_data
                    else:
                        model_line_data['model_ranges'].append(
                            [model_range, model_data])
                    break
            else:
                model_line_data['models'][model_range] = model_data

    next_boards = next_boards.strip()
    if next_boards:
        return _parse_models(
            model_lines=model_lines, boards=next_boards,
            model_data=model_data, model_lines_data=model_lines_data,
            model_line=next_model_line)

    return model_lines_data


def main(args=None):
    """
    Dump all ffmpeg build data to json.
    """
    args = parser.parse_args(args)
    data = collections.OrderedDict(
        gpus=detect_gpus(),
        hwaccels=detect_hwaccels(cmd=args.ffmpeg),
        codecs=detect_codecs(cmd=args.ffmpeg))
    json.dump(data, sys.stdout, indent=2)


if __name__ == '__main__':
    main()
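
The following is a sketch (not part of this PR) of how the detection results might feed an actual transcode, assuming the package-level re-exports above and an H.264 input; `in.mp4` and `out.mp4` are placeholder file names, and the kwargs plumbing mirrors what `detect_codecs()` returns, a list of per-hwaccel `input`/`output` kwarg dicts with the best candidate first:

import ffmpeg

# Each entry is an OrderedDict with optional 'input' kwargs and 'output' kwargs;
# the final entry is the non-accelerated fallback.
candidates = ffmpeg.detect_codecs(decoder='h264', encoder='h264')
best = candidates[0]

stream = ffmpeg.input('in.mp4', **best.get('input', {}))
stream = ffmpeg.output(stream, 'out.mp4', **best['output'])
stream.run()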
ffmpeg/_probe.py

@ -1,3 +1,8 @@
"""
Run ffprobe on the file and return a JSON representation of the output.
"""

import collections
import json
import subprocess
from ._run import Error
@ -17,11 +22,13 @@ def probe(filename, cmd='ffprobe', **kwargs):
    args += convert_kwargs_to_cmd_line_args(kwargs)
    args += [filename]

    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Error('ffprobe', out, err)
    return json.loads(out.decode('utf-8'))
    return json.loads(out, object_pairs_hook=collections.OrderedDict)


__all__ = ['probe']
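
Because `probe()` now runs ffprobe with `universal_newlines=True` and parses with `object_pairs_hook=collections.OrderedDict`, callers get text output parsed into an `OrderedDict` that preserves ffprobe's key order. A small sketch of the observable change (the file name is a placeholder):

import ffmpeg

info = ffmpeg.probe('in.mp4')      # an OrderedDict after this change
print(type(info).__name__)         # 'OrderedDict'
print(list(info)[:2])              # top-level keys in ffprobe's own order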
ffmpeg/detect.json (new file) | 5779
File diff suppressed because it is too large
ffmpeg/tests/test_ffmpeg.py

@ -9,6 +9,8 @@ import random
import re
import subprocess

import six

try:
    import mock  # python 2
except ImportError:
@ -710,7 +712,7 @@ def test__probe__exception():
    with pytest.raises(ffmpeg.Error) as excinfo:
        ffmpeg.probe(BOGUS_INPUT_FILE)
    assert str(excinfo.value) == 'ffprobe error (see stderr output for detail)'
    assert 'No such file or directory'.encode() in excinfo.value.stderr
    assert 'No such file or directory' in excinfo.value.stderr


def test__probe__extra_args():
@ -718,6 +720,81 @@ def test__probe__extra_args():
    assert set(data.keys()) == {'format', 'streams', 'frames'}


def test__build_data():
    data = ffmpeg.get_build_data()
    assert set(data.keys()) == {
        'version', 'formats', 'demuxers', 'muxers', 'codecs', 'bsfs',
        'protocols', 'filters', 'pix_fmts', 'sample_fmts', 'layouts',
        'colors', 'devices', 'hw_devices', 'hwaccels'}

    assert isinstance(data['version'], six.string_types)

    assert isinstance(data['codecs'], dict)
    for codec, coders in data['codecs'].items():
        assert isinstance(codec, six.string_types)
        assert isinstance(coders, dict)
    assert isinstance(data['hwaccels'], list)
    for hwaccel in data['hwaccels']:
        assert isinstance(hwaccel, dict)
        assert 'name' in hwaccel

    for fields_key in {'formats', 'demuxers', 'muxers', 'filters'}:
        assert isinstance(data[fields_key], dict)

    list_keys = {'bsfs'}
    for list_key in list_keys:
        assert isinstance(data[list_key], list)

    assert isinstance(data['protocols'], dict)
    for protocol_key in {'input', 'output'}:
        assert protocol_key in data['protocols']
        assert isinstance(data['protocols'][protocol_key], list)


def test__detect():
    for hwaccels_data in [
            ffmpeg.detect_hwaccels(),
            ffmpeg.detect_hwaccels(['foohwaccel'])]:
        assert isinstance(hwaccels_data['hwaccels'], list)
        for hwaccel in hwaccels_data['hwaccels']:
            assert isinstance(hwaccel, dict)
            assert 'name' in hwaccel

    for codecs_kwargs in [
            ffmpeg.detect_codecs('h264', 'h264'),
            ffmpeg.detect_codecs(
                'h264', 'h264', ['foohwaccel'])]:
        for codec_kwargs in codecs_kwargs:
            assert 'output' in codec_kwargs
            assert isinstance(codec_kwargs['output'], dict)
            assert 'codec' in codec_kwargs['output']
            assert isinstance(
                codec_kwargs['output']['codec'],
                six.string_types)


def test__detect_parse_models():
    """
    Parse model lines, sets and ranges.
    """
    model_lines = ffmpeg._detect._parse_models(
        model_lines=['geforce rtx', 'geforce gtx', 'geforce gt', 'geforce'],
        boards=(
            'GeForce GT 630 > 640 GeForce GTX 650 / 660 '
            'GeForce GT 740+750'),
        model_data={})
    assert 'geforce gt' in model_lines
    assert 'models' in model_lines['geforce gt']
    assert '740' in model_lines['geforce gt']['models']
    assert '750' in model_lines['geforce gt']['models']
    assert 'model_ranges' in model_lines['geforce gt']
    assert '630 > 640' in model_lines['geforce gt']['model_ranges'][0]
    assert 'geforce gtx' in model_lines
    assert 'models' in model_lines['geforce gtx']
    assert '650' in model_lines['geforce gtx']['models']
    assert '660' in model_lines['geforce gtx']['models']


def get_filter_complex_input(flt, name):
    m = re.search(r'\[([^]]+)\]{}(?=[[;]|$)'.format(name), flt)
    if m:
setup.py | 8
@ -72,6 +72,9 @@ setup(
    long_description=long_description,
    install_requires=['future'],
    extras_require={
        'detect': [
            "pywin32; sys_platform == 'Windows'",
            "wmi; sys_platform == 'Windows'"],
        'dev': [
            'future==0.17.1',
            'numpy==1.16.4',
@ -95,4 +98,9 @@ setup(
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    entry_points={
        'console_scripts': [
            'ffmpeg-build-json=ffmpeg._build:main',
            'ffmpeg-detect=ffmpeg._detect:main'],
    },
)