freddyaboulton HF staff committed on
Commit
2b9474e
·
verified ·
1 Parent(s): ba51911

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +8 -8
  2. requirements.txt +4 -0
  3. run.ipynb +1 -0
  4. run.py +48 -0
README.md CHANGED
@@ -1,12 +1,12 @@
 
1
  ---
2
- title: Streaming Filter Unified Main
3
- emoji: 🌖
4
- colorFrom: red
5
- colorTo: green
6
  sdk: gradio
7
- sdk_version: 4.44.1
8
- app_file: app.py
9
  pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+
2
  ---
3
+ title: streaming_filter_unified_main
4
+ emoji: 🔥
5
+ colorFrom: indigo
6
+ colorTo: indigo
7
  sdk: gradio
8
+ sdk_version: 5.0.0
9
+ app_file: run.py
10
  pinned: false
11
+ hf_oauth: true
12
  ---
 
 
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio-client @ git+https://github.com/gradio-app/gradio@bbf9ba7e997022960c621f72baa891185bd03732#subdirectory=client/python
2
+ https://gradio-pypi-previews.s3.amazonaws.com/bbf9ba7e997022960c621f72baa891185bd03732/gradio-5.0.0-py3-none-any.whl
3
+ opencv-python
4
+ numpy
run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter_unified"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "\n", "css=\"\"\".my-group {max-width: 500px !important; max-height: 500px !important;}\n", " .my-column {display: flex !important; justify-content: center !important; align-items: center !important};\"\"\"\n", "\n", "with gr.Blocks(css=css) as demo:\n", " with gr.Column(elem_classes=[\"my-column\"]):\n", " with gr.Group(elem_classes=[\"my-group\"]):\n", " transform = gr.Dropdown(choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\", label=\"Transformation\")\n", " input_img = gr.Image(sources=[\"webcam\"], 
type=\"numpy\", streaming=True)\n", " input_img.stream(transform_cv2, [input_img, transform], [input_img], time_limit=30, stream_every=0.1)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import cv2
4
+
5
def transform_cv2(frame, transform):
    """Apply the selected visual effect to one webcam frame.

    Parameters
    ----------
    frame : np.ndarray
        RGB image from the webcam stream, shape (H, W, 3) — assumed from
        the gr.Image(type="numpy") source; TODO confirm channel order.
    transform : str
        One of "cartoon", "edges", or "flip"; any other value falls
        through to the flip behavior.

    Returns
    -------
    np.ndarray
        The transformed frame (same height/width as the input).
    """
    if transform == "cartoon":
        # prepare color: downsample twice, smooth repeatedly, upsample twice
        img_color = cv2.pyrDown(cv2.pyrDown(frame))
        for _ in range(6):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
        img_color = cv2.pyrUp(cv2.pyrUp(img_color))
        # BUGFIX: pyrDown/pyrUp only round-trips the shape exactly when both
        # dimensions are multiples of 4; otherwise img_color ends up a few
        # pixels off and cv2.bitwise_and below raises on the shape mismatch.
        # Resize back to the original frame size so any resolution works.
        if img_color.shape[:2] != frame.shape[:2]:
            img_color = cv2.resize(img_color, (frame.shape[1], frame.shape[0]))

        # prepare edges: threshold a median-blurred grayscale copy
        img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        img_edges = cv2.adaptiveThreshold(
            cv2.medianBlur(img_edges, 7),
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            9,
            2,
        )
        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
        # combine color and edges: black edge pixels mask out the color image
        return cv2.bitwise_and(img_color, img_edges)
    elif transform == "edges":
        # perform edge detection; Canny returns single-channel, replicate to 3
        # (GRAY2BGR and GRAY2RGB are equivalent for a gray source)
        return cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
    else:
        # default / "flip": mirror the frame vertically
        return np.flipud(frame)
33
+
34
+
35
# Compact, centered layout for the webcam widget.
css = """.my-group {max-width: 500px !important; max-height: 500px !important;}
.my-column {display: flex !important; justify-content: center !important; align-items: center !important};"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_classes=["my-column"]):
        with gr.Group(elem_classes=["my-group"]):
            # Which filter to apply to each incoming frame.
            transform = gr.Dropdown(
                choices=["cartoon", "edges", "flip"],
                value="flip",
                label="Transformation",
            )
            # The webcam component doubles as the output display: every
            # streamed frame is transformed and written back to it.
            input_img = gr.Image(sources=["webcam"], type="numpy", streaming=True)
            input_img.stream(
                transform_cv2,
                [input_img, transform],
                [input_img],
                time_limit=30,     # cap each streaming session at 30 s
                stream_every=0.1,  # sample a frame every 100 ms
            )


if __name__ == "__main__":
    demo.launch()