import logging
import os
import re
from pathlib import Path
import gradio as gr
import requests
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
logging.basicConfig(
    level=logging.INFO,  # Set the logging level to INFO or any other desired level
    format="%(asctime)s - %(message)s",  # Define the log message format
    datefmt="%Y-%m-%d %H:%M:%S",  # Define the timestamp format
)
logger = logging.getLogger("my_logger")
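# The chat model is called through the hosted Inference API, which needs a Hugging Face access token.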
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise Exception("HF_TOKEN environment variable is required to call the remote API.")
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
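# Build a distil-whisper speech-recognition pipeline for transcribing voice requests.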
def init_speech_to_text_model():
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    model_id = "distil-whisper/distil-medium.en"
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
    )
    model.to(device)
    processor = AutoProcessor.from_pretrained(model_id)
    return pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        torch_dtype=torch_dtype,
        device=device,
    )
whisper_pipe = init_speech_to_text_model()
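# Regex that pulls the updated Python code block out of the chat model's markdown reply.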
code_pattern = r'```python\n(.*?)```'
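# Default app code that pre-populates the code editor.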
starting_app_code = """import gradio as gr
def greet(name):
    return "Hello " + name + "!"
with gr.Blocks(theme="monochrome") as demo:
    name = gr.Textbox(label="Name", value="World")
    output = gr.Textbox(label="Output Box")
    greet_btn = gr.Button("Greet")
    greet_btn.click(fn=greet, inputs=name, outputs=output)
    name.submit(fn=greet, inputs=name, outputs=output)
if __name__ == "__main__":
    demo.css = "footer {visibility: hidden}"
    demo.launch()
"""
html_template = Path('gradio-lite-playground.html').read_text()
pattern = r"# APP CODE START(.*?)# APP CODE END"
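# Runs once on page load: creates an iframe inside #demoDiv and writes the gradio-lite page template into it.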
load_js = f"""() => {{
    const htmlString = '<iframe class="my-frame" width="100%" height="512px" src="about:blank"></iframe>';
    const parser = new DOMParser();
    const doc = parser.parseFromString(htmlString, 'text/html');
    const iframe = doc.querySelector('.my-frame');
    const div = document.getElementById('demoDiv');
    div.appendChild(iframe);
    const frame = document.querySelector('.my-frame');
    frame.contentWindow.document.open('text/html', 'replace');
    frame.contentWindow.document.write(`{html_template}`);
    frame.contentWindow.document.close();
}}"""
# TODO: Works but is inefficient because the iframe has to be reloaded each time
update_iframe_js = f"""(code) => {{
    const pattern = /# APP CODE START(.*?)# APP CODE END/gs;
    const template = `{html_template}`;
    const completedTemplate = template.replace(pattern, code);
    const oldFrame = document.querySelector('.my-frame');
    oldFrame.remove();
    const htmlString = '<iframe class="my-frame" width="100%" height="512px" src="about:blank"></iframe>';
    const parser = new DOMParser();
    const doc = parser.parseFromString(htmlString, 'text/html');
    const iframe = doc.querySelector('.my-frame');
    const div = document.getElementById('demoDiv');
    div.appendChild(iframe);
    const frame = document.querySelector('.my-frame');
    frame.contentWindow.document.open('text/html', 'replace');
    frame.contentWindow.document.write(completedTemplate);
    frame.contentWindow.document.close();
}}"""
copy_snippet_js = f"""async (code) => {{
    console.log(`COPYING CODE SNIPPET`);
    const pattern = /# APP CODE START(.*?)# APP CODE END/gs;
    const template = `<div id="KiteWindApp">\n<script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
<gradio-lite>\n# APP CODE START\n\n# APP CODE END\n</gradio-lite>\n</div>\n`;
    // Step 1: Generate the HTML content
    const completedTemplate = template.replace(pattern, code);
    const snippet = completedTemplate;
    // Step 2: Copy the snippet to the clipboard
    await navigator.clipboard.writeText(snippet);
}}"""
download_code_js = f"""(code) => {{
    const pattern = /# APP CODE START(.*?)# APP CODE END/gs;
    const template = `{html_template}`;
    // Step 1: Generate the HTML content
    const completedTemplate = template.replace(pattern, code);
    // Step 2: Create a Blob from the HTML content
    const blob = new Blob([completedTemplate], {{ type: "text/html" }});
    // Step 3: Create a URL for the Blob
    const url = URL.createObjectURL(blob);
    // Step 4: Create a download link
    const downloadLink = document.createElement("a");
    downloadLink.href = url;
    downloadLink.download = "gradio-lite-app.html"; // Specify the filename for the download
    // Step 5: Trigger a click event on the download link
    downloadLink.click();
    // Clean up by revoking the URL
    URL.revokeObjectURL(url);
}}"""
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
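# Ask the chat model for updated app code and extract the Python block from its reply.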
def generate_text(code, prompt):
    logger.info(f"Calling API with prompt:\n{prompt}")
    prompt = f"```python\n{code}```\nGiven the code above return only updated code for the following request:\n{prompt}\n<|assistant|>"
    params = {"max_new_tokens": 512}
    output = query({
        "inputs": prompt,
        "parameters": params,
    })
    if 'error' in output:
        logger.warning(f'Language model call failed: {output["error"]}')
        raise gr.Warning(f'Language model call failed: {output["error"]}')
    logger.info(f'API RESPONSE\n{output[0]["generated_text"]}')
    assistant_reply = output[0]["generated_text"].split('<|assistant|>')[1]
    match = re.search(code_pattern, assistant_reply, re.DOTALL)
    if match is None:
        logger.warning('No code block found in the model reply; keeping the current app code.')
        return assistant_reply, code, None
    new_code = match.group(1)
    logger.info(f'NEW CODE:\n{new_code}')
    return assistant_reply, new_code, None
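# Transcribe recorded audio with the whisper pipeline; returning None clears the audio input.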
def transcribe(audio):
    result = whisper_pipe(audio)
    return result["text"], None
def copy_notify(code):
    gr.Info("App code snippet copied!")
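# Assemble the UI: live preview, voice/text chat requests, code editor, and export buttons.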
with gr.Blocks() as demo:
    gr.Markdown("<h1 align=\"center\">KiteWind 🪁🌬️</h1>")
    gr.Markdown(
        "<h4 align=\"center\">Chat-assisted web app creator by <a href=\"https://huggingface.co/gstaff\">@gstaff</a></h4>")
    with gr.Row():
        with gr.Column():
            gr.Markdown("## 1. Run your app in the browser!")
            html = gr.HTML(value='<div id="demoDiv"></div>')
    gr.Markdown("## 2. Customize using voice requests!")
    with gr.Row():
        with gr.Column():
            with gr.Group():
                in_audio = gr.Audio(label="Record a voice request", source='microphone', type='filepath')
                in_prompt = gr.Textbox(label="Or type a text request and press Enter",
                                       placeholder="Need an idea? Try one of these:\n- Add a button to reverse the name\n- Change the greeting to Hola\n- Put the reversed name output into a separate textbox\n- Change the theme from monochrome to soft")
                out_text = gr.TextArea(label="Chat Assistant Response")
                clear = gr.ClearButton([in_prompt, in_audio, out_text])
        with gr.Column():
            code_area = gr.Code(label="App Code - You can also edit directly and then click Update App",
                                language='python', value=starting_app_code)
            update_btn = gr.Button("Update App", variant="primary")
            code_update_params = {'fn': None, 'inputs': code_area, 'outputs': None, '_js': update_iframe_js}
            gen_text_params = {'fn': generate_text, 'inputs': [code_area, in_prompt], 'outputs': [out_text, code_area]}
            transcribe_params = {'fn': transcribe, 'inputs': [in_audio], 'outputs': [in_prompt, in_audio]}
            update_btn.click(**code_update_params)
            in_prompt.submit(**gen_text_params).then(**code_update_params)
            in_audio.stop_recording(**transcribe_params).then(**gen_text_params).then(**code_update_params)
    with gr.Row():
        with gr.Column():
            gr.Markdown("## 3. Export your app to share!")
            copy_snippet_btn = gr.Button("Copy app snippet to paste in another page")
            copy_snippet_btn.click(copy_notify, code_area, None, _js=copy_snippet_js)
            download_btn = gr.Button("Download app as a standalone file")
            download_btn.click(None, code_area, None, _js=download_code_js)
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Current limitations")
            with gr.Accordion("Click to view", open=False):
                gr.Markdown(
                    "- Only gradio-lite apps using the python standard libraries and gradio are supported\n- The chat hasn't been tuned on gradio library data; it may make mistakes\n- The app needs to fully reload each time it is changed")
    demo.load(None, None, None, _js=load_js)
demo.css = "footer {visibility: hidden}"
if __name__ == "__main__":
    demo.queue().launch()