Spaces:
Sleeping
Sleeping
File size: 1,666 Bytes
e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 42ff3ef 62c8779 e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 42ff3ef f34b9ef e14fbd5 42ff3ef e14fbd5 42ff3ef e14fbd5 d691d44 e14fbd5 42ff3ef e14fbd5 42ff3ef d691d44 e14fbd5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
from llama_cpp import Llama
from typing import Optional, Dict, Union
from huggingface_hub import hf_hub_download
import gradio as gr
import time
# Download the model from Hugging Face
# Fetches the quantized (Q4_K_M) GGUF weights for the phi-3-sql model into
# the local HF cache and returns the local file path.
# NOTE(review): `use_auth_token` is deprecated in recent huggingface_hub
# releases in favor of `token=` — confirm the installed version still
# accepts it before upgrading the dependency.
model_path = hf_hub_download(
repo_id="omeryentur/phi-3-sql",
filename="phi-3-sql.Q4_K_M.gguf",
use_auth_token=True
)
# Initialize the Llama model
# n_ctx=512 caps the context window (prompt + completion tokens);
# n_threads=1 keeps CPU usage minimal — presumably sized for a
# constrained Space instance.
llm = Llama(
model_path=model_path,
n_ctx=512,
n_threads=1,
)
def generate_sql_query(text_input_schema: str, text_input_question: str):
    """Generate a SQL query from a schema description and a question.

    Builds the chat-style prompt expected by the phi-3-sql model, runs the
    local llama.cpp model, and returns the result wrapped in a dict for the
    Gradio JSON output. Any failure is returned as ``{"error": <message>}``
    instead of raising, so the UI never crashes.
    """
    try:
        # Prompt template the fine-tuned model was trained on.
        prompt = f"""
<|system|>
{text_input_schema}
<|user|>
{text_input_question}
<|sql|>"""
        # Run inference; temperature=0 makes decoding deterministic.
        response = llm(
            prompt,
            max_tokens=512,
            temperature=0,
            stop=["<end_of_turn>"]
        )
        # The model returns a single choice; strip surrounding whitespace.
        sql_text = response['choices'][0]['text'].strip()
        return {"sql_query": sql_text}
    except Exception as exc:
        # Surface the failure to the UI rather than propagating it.
        return {"error": str(exc)}
# Build the Gradio interface: schema/question inputs on top, JSON result below.
with gr.Blocks() as demo:
    gr.Markdown("# Sql Query")
    with gr.Row():
        with gr.Column():
            schema_box = gr.TextArea(label="Schema")
            question_box = gr.Textbox(label="question")
            run_button = gr.Button("Create Sql Query")
    with gr.Row():
        with gr.Column():
            result_json = gr.JSON(label="Sql Query:")
    # Wire the button to the inference function.
    run_button.click(
        fn=generate_sql_query,
        inputs=[schema_box, question_box],
        outputs=[result_json],
    )

# Launch only when executed directly.
if __name__ == "__main__":
    demo.launch()