diff --git a/doc/jupyter-demo.gif b/doc/jupyter-demo.gif
new file mode 100644
index 0000000..1954a4d
Binary files /dev/null and b/doc/jupyter-demo.gif differ
diff --git a/examples/README.md b/examples/README.md
index bf7997c..5b8df9d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -57,7 +57,10 @@ out, _ = (ffmpeg
 )
 ```
 
-## [JupyterLab/Notebook widgets](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
+## [Jupyter Frame Viewer](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
 
 <img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-screenshot.png" alt="jupyter screenshot">
 
+## [Jupyter Pipeline Editor](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
+
+<img src="https://raw.githubusercontent.com/kkroening/ffmpeg-python/master/doc/jupyter-demo.gif" alt="jupyter demo">
diff --git a/examples/ffmpeg-numpy.ipynb b/examples/ffmpeg-numpy.ipynb
index c64d9ab..b6d991b 100644
--- a/examples/ffmpeg-numpy.ipynb
+++ b/examples/ffmpeg-numpy.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 116,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -15,12 +15,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 117,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
     "probe = ffmpeg.probe('in.mp4')\n",
-    "video_info = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')\n",
+    "video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')\n",
     "width = int(video_info['width'])\n",
     "height = int(video_info['height'])\n",
     "num_frames = int(video_info['nb_frames'])"
@@ -28,32 +28,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 118,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "out, err = (\n",
-    "    ffmpeg\n",
-    "    .input('in.mp4')\n",
-    "    .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
-    "    .run(capture_stdout=True)\n",
-    ")\n",
-    "video = (\n",
-    "    np\n",
-    "    .frombuffer(out, np.uint8)\n",
-    "    .reshape([-1, height, width, 3])\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 115,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "17d13d7551114fb39a1fad933cf0398a",
+       "model_id": "5f63dc164956464c994ec58d86ee7cd9",
        "version_major": 2,
        "version_minor": 0
       },
@@ -66,11 +47,143 @@
     }
    ],
    "source": [
+    "out, err = (\n",
+    "    ffmpeg\n",
+    "    .input('in.mp4')\n",
+    "    .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
+    "    .run(capture_stdout=True)\n",
+    ")\n",
+    "video = (\n",
+    "    np\n",
+    "    .frombuffer(out, np.uint8)\n",
+    "    .reshape([-1, height, width, 3])\n",
+    ")\n",
+    "\n",
     "@interact(frame=(0, num_frames))\n",
     "def show_frame(frame=0):\n",
     "    plt.imshow(video[frame,:,:,:])"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "84bcac52195f47f8854f09acd7666b84",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "interactive(children=(Checkbox(value=True, description='enable_overlay'), Checkbox(value=True, description='en…"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from io import BytesIO\n",
+    "from PIL import Image\n",
+    "\n",
+    "\n",
+    "def extract_frame(stream, frame_num):\n",
+    "    while isinstance(stream, ffmpeg.nodes.OutputStream):\n",
+    "        stream = stream.node.incoming_edges[0].upstream_node.stream()\n",
+    "    out, _ = (\n",
+    "        stream\n",
+    "        .filter_('select', 'gte(n,{})'.format(frame_num))\n",
+    "        .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=1)\n",
+    "        .run(capture_stdout=True, capture_stderr=True)\n",
+    "    )\n",
+    "    return np.frombuffer(out, np.uint8).reshape([height, width, 3])\n",
+    "\n",
+    "\n",
+    "def png_to_np(png_bytes):\n",
+    "    buffer = BytesIO(png_bytes)\n",
+    "    pil_image = Image.open(buffer)\n",
+    "    return np.array(pil_image)\n",
+    "    \n",
+    "\n",
+    "def build_graph(\n",
+    "        enable_overlay, flip_overlay, enable_box, box_x, box_y,\n",
+    "        thickness, color):\n",
+    "\n",
+    "    stream = ffmpeg.input('in.mp4')\n",
+    "\n",
+    "    if enable_overlay:\n",
+    "        overlay = ffmpeg.input('overlay.png')\n",
+    "        if flip_overlay:\n",
+    "            overlay = overlay.hflip()\n",
+    "        stream = stream.overlay(overlay)\n",
+    "\n",
+    "    if enable_box:\n",
+    "        stream = stream.drawbox(\n",
+    "            box_x, box_y, 120, 120, color=color, t=thickness)\n",
+    "\n",
+    "    return stream.output('out.mp4')\n",
+    "\n",
+    "\n",
+    "def show_image(ax, stream, frame_num):\n",
+    "    try:\n",
+    "        image = extract_frame(stream, frame_num)\n",
+    "        ax.imshow(image)\n",
+    "        ax.axis('off')\n",
+    "    except ffmpeg.Error as e:\n",
+    "        print(e.stderr.decode())\n",
+    "\n",
+    "\n",
+    "def show_graph(ax, stream, detail):\n",
+    "    data = ffmpeg.view(stream, detail=detail, pipe=True)\n",
+    "    image = png_to_np(data)\n",
+    "    ax.imshow(image, aspect='equal', interpolation='hanning')\n",
+    "    ax.set_xlim(0, 1100)\n",
+    "    ax.axis('off')\n",
+    "\n",
+    "\n",
+    "@interact(\n",
+    "    frame_num=(0, num_frames),\n",
+    "    box_x=(0, 200),\n",
+    "    box_y=(0, 200),\n",
+    "    thickness=(1, 40),\n",
+    "    color=['red', 'green', 'magenta', 'blue'],\n",
+    ")\n",
+    "def f(\n",
+    "        enable_overlay=True,\n",
+    "        enable_box=True,\n",
+    "        flip_overlay=True,\n",
+    "        graph_detail=False,\n",
+    "        frame_num=0,\n",
+    "        box_x=50,\n",
+    "        box_y=50,\n",
+    "        thickness=5,\n",
+    "        color='red'):\n",
+    "\n",
+    "    stream = build_graph(\n",
+    "        enable_overlay,\n",
+    "        flip_overlay,\n",
+    "        enable_box,\n",
+    "        box_x,\n",
+    "        box_y,\n",
+    "        thickness,\n",
+    "        color\n",
+    "    )\n",
+    "\n",
+    "    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15,4))\n",
+    "    plt.tight_layout()\n",
+    "    show_image(ax0, stream, frame_num)\n",
+    "    show_graph(ax1, stream, graph_detail)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/examples/in.mp4 b/examples/in.mp4
new file mode 100644
index 0000000..2c7d59e
Binary files /dev/null and b/examples/in.mp4 differ
diff --git a/examples/overlay.png b/examples/overlay.png
new file mode 100644
index 0000000..5da0087
Binary files /dev/null and b/examples/overlay.png differ
diff --git a/examples/requirements.txt b/examples/requirements.txt
index ca8ae60..24830b8 100644
--- a/examples/requirements.txt
+++ b/examples/requirements.txt
@@ -1,4 +1,9 @@
 ffmpeg-python
 gevent
 google-cloud-speech
+graphviz
+ipywidgets
+jupyter
+matplotlib
+Pillow
 tqdm