from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st
import torch
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
def LOAD_GEMMA():
    # Load the fine-tuned GEMMA-2B NL-to-SQL model and its tokenizer on CPU.
    model_id = "aryachakraborty/GEMMA-2B-NL-SQL"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id).cpu()
    return tokenizer, model
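
# Usage sketch for LOAD_GEMMA, kept commented out so the module stays
# importable. Assumption: the GEMMA fine-tune accepts the same Alpaca-style
# "### Instruction / ### Input / ### Response" prompt used by the other
# loaders below; the schema and question here are illustrative only.
#
#   tokenizer, model = LOAD_GEMMA()
#   prompt = ("### Instruction: list all employee names. "
#             "### Input: CREATE TABLE employees (id INT, name TEXT); "
#             "### Response:")
#   inputs = tokenizer(prompt, return_tensors="pt")
#   outputs = model.generate(**inputs, max_new_tokens=64)
#   print(tokenizer.decode(outputs[0], skip_special_tokens=True))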
def DeepSeekCoder(user_input, context):
    # Load the fine-tuned DeepSeek Coder 1.3B model and its tokenizer on CPU.
    model_id = 'aryachakraborty/DeepSeek_1.3B_Fine_Tuned'
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id).cpu()
    device = torch.device("cpu")

    # Alpaca-style prompt; the f-string already fills in the user question and
    # the table schema.
    alpaca_prompt = f"""You are an AI programming assistant, utilizing the Deepseek Coder model, developed by arya chakraborty, and your task is to convert natural language to sql queries. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.
Below are sql tables schemas paired with instruction that describes a task. Using valid SQLite, write a response that appropriately completes the request for the provided tables.
### Instruction: {user_input}. ### Input: {context}
### Response:
"""
    # The prompt is already fully formatted, so no extra .format() call is needed.
    inputs = tokenizer([alpaca_prompt], return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=30)
    output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the generated SQL after the "### Response:" marker.
    response_portion = output.split("### Response:")[-1].strip()
    return response_portion
def LOAD_GEMMA_GGUF(user_input, context):
    # Load the quantized GGUF model from a local path; a raw string avoids
    # backslash-escape problems in the Windows path.
    llm = CTransformers(
        model=r'D:\ISnartech Folder\Project_Folder\Streamlit APP\GgufModels\Q4_K_M.gguf',
        # model_type='llama',
        config={'max_new_tokens': 256, 'temperature': 0.01}
    )
    # Plain (non-f) string so PromptTemplate can fill the placeholders itself;
    # an f-string here would consume {user_input} and {context} too early and
    # leave PromptTemplate with no variables to substitute.
    alpaca_prompt = """Below are sql tables schemas paired with instruction that describes a task. Using valid SQLite, write a response that appropriately completes the request for the provided tables.
### Instruction: {user_input}. ### Input: {context}
### Response:
"""
    prompt = PromptTemplate(input_variables=['user_input', 'context'], template=alpaca_prompt)
    response = llm(prompt.format(user_input=user_input, context=context))
    return response
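
# Minimal Streamlit wiring sketch. Assumption: the original app collected a
# question plus a table schema and displayed the generated SQL; the widget
# labels and the choice of DeepSeekCoder as the backend here are illustrative,
# not the original UI.
if __name__ == "__main__":
    st.title("Natural Language to SQL")
    context = st.text_area("Table schema (CREATE TABLE ...)")
    user_input = st.text_input("Question")
    if st.button("Generate SQL") and user_input and context:
        st.code(DeepSeekCoder(user_input, context), language="sql")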