Spaces:
Running
Running
rephrase export steps
Browse files
app.py
CHANGED
@@ -18,7 +18,7 @@ if HF_TOKEN:
|
|
18 |
repo = Repository(local_dir=DATA_DIR, clone_from=DATASET_REPO_URL, token=HF_TOKEN)
|
19 |
|
20 |
|
21 |
-
def export(token: str, model_id: str, task: str) -> str:
|
22 |
if token == "" or model_id == "":
|
23 |
return """
|
24 |
### Invalid input 🙁
|
@@ -78,24 +78,22 @@ TITLE = """
|
|
78 |
"
|
79 |
>
|
80 |
<h1 style="font-weight: 900; margin-bottom: 10px; margin-top: 10px;">
|
81 |
-
Export your model to OpenVINO
|
82 |
</h1>
|
83 |
</div>
|
84 |
"""
|
85 |
|
86 |
DESCRIPTION = """
|
87 |
-
This Space
|
88 |
|
89 |
To export your model you need:
|
90 |
- A read-access token from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens).
|
91 |
-
|
92 |
-
|
93 |
-
- A [task](https://huggingface.co/docs/optimum/main/en/exporters/task_manager#pytorch) that will be used to load the model before exporting it. If set to "auto", the task will be automatically inferred.
|
94 |
|
95 |
That's it! 🔥
|
96 |
|
97 |
After the model conversion, we will open a PR against the source repo.
|
98 |
-
You will then be able to load the resulting model and run inference using [Optimum Intel](https://huggingface.co/docs/optimum/intel/inference).
|
99 |
"""
|
100 |
|
101 |
with gr.Blocks() as demo:
|
@@ -116,18 +114,13 @@ with gr.Blocks() as demo:
|
|
116 |
label="Model name",
|
117 |
placeholder="distilbert-base-uncased-finetuned-sst-2-english",
|
118 |
)
|
119 |
-
input_task = gr.Textbox(
|
120 |
-
value="auto",
|
121 |
-
max_lines=1,
|
122 |
-
label='Task (can be left to "auto", will be automatically inferred)',
|
123 |
-
)
|
124 |
|
125 |
btn = gr.Button("Export")
|
126 |
output = gr.Markdown(label="Output")
|
127 |
|
128 |
btn.click(
|
129 |
fn=export,
|
130 |
-
inputs=[input_token, input_model
|
131 |
outputs=output,
|
132 |
)
|
133 |
|
|
|
18 |
repo = Repository(local_dir=DATA_DIR, clone_from=DATASET_REPO_URL, token=HF_TOKEN)
|
19 |
|
20 |
|
21 |
+
def export(token: str, model_id: str, task: str = "auto") -> str:
|
22 |
if token == "" or model_id == "":
|
23 |
return """
|
24 |
### Invalid input 🙁
|
|
|
78 |
"
|
79 |
>
|
80 |
<h1 style="font-weight: 900; margin-bottom: 10px; margin-top: 10px;">
|
81 |
+
Export your model to OpenVINO
|
82 |
</h1>
|
83 |
</div>
|
84 |
"""
|
85 |
|
86 |
DESCRIPTION = """
|
87 |
+
This Space uses [Optimum Intel](https://huggingface.co/docs/optimum/intel/inference) to automatically export your model to the OpenVINO format.
|
88 |
|
89 |
To export your model you need:
|
90 |
- A read-access token from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens).
|
91 |
+
- A model id from the Hub (for example: [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english))
|
92 |
+
|
|
|
93 |
|
94 |
That's it! 🔥
|
95 |
|
96 |
After the model conversion, we will open a PR against the source repo.
|
|
|
97 |
"""
|
98 |
|
99 |
with gr.Blocks() as demo:
|
|
|
114 |
label="Model name",
|
115 |
placeholder="distilbert-base-uncased-finetuned-sst-2-english",
|
116 |
)
|
|
|
|
|
|
|
|
|
|
|
117 |
|
118 |
btn = gr.Button("Export")
|
119 |
output = gr.Markdown(label="Output")
|
120 |
|
121 |
btn.click(
|
122 |
fn=export,
|
123 |
+
inputs=[input_token, input_model],
|
124 |
outputs=output,
|
125 |
)
|
126 |
|