# --- Legacy experiment 1: sentiment analysis / summarization UI (kept for reference) ---
# import streamlit as st
# from transformers import pipeline
#
# pipe = pipeline("sentiment-analysis")
# summarizer = pipeline("summarization")  # was missing in the original; needed below
# col1, col2 = st.columns(2)
# with col1:
#     x = st.button("Sentiment Analysis")
# with col2:
#     y = st.button("Text Summarization")
# if x:
#     t = st.text_input("Enter the Text")
#     st.write(pipe(t))
# if y:
#     t1 = st.text_input("Enter the Text for Summarization")
#     st.write(summarizer(t1))
# --- Legacy experiment 2: insurance-domain BERT / pipeline helpers (kept for reference) ---
# import streamlit as st
# from transformers import pipeline
# from transformers import AutoTokenizer, AutoModel
#
# tokenizer = AutoTokenizer.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # model = AutoModel.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # Use a pipeline as a high-level helper
# # pipe = pipeline("feature-extraction")
# pipe = pipeline("summarization")
# t = st.text_input("Enter the Text")
# st.write(pipe(t))
# --- Legacy experiment 3: synthetic tabular data with CTGAN (ydata-synthetic, kept for reference) ---
# import pandas as pd
# import numpy as np
# import streamlit as st
# from os import getcwd
# from ydata_synthetic.synthesizers.regular import RegularSynthesizer
# from ydata_synthetic.synthesizers import ModelParameters, TrainParameters
#
# text_file = st.file_uploader("Upload the Data File")
# st.write("-------------------------")
# if text_file is not None:
#     df = pd.read_csv(text_file)
#     dd_list = df.columns
#     cat_cols = st.multiselect("Select the Categorical Columns", dd_list)
#     num_cols = st.multiselect("Select the Numerical Columns", dd_list)
#     Output_file = st.text_input('Enter Output File Name')
#     s = st.number_input('Enter the Sample Size', value=1000)  # value=, not min_value
#     OP = Output_file + '.csv'
#     sub = st.button('Submit')
#     if sub:
#         batch_size = 50
#         epochs = 3
#         learning_rate = 2e-4
#         beta_1 = 0.5
#         beta_2 = 0.9
#         ctgan_args = ModelParameters(batch_size=batch_size,
#                                      lr=learning_rate,
#                                      betas=(beta_1, beta_2))
#         train_args = TrainParameters(epochs=epochs)
#         synth = RegularSynthesizer(modelname='ctgan', model_parameters=ctgan_args)
#         synth.fit(data=df, train_arguments=train_args, num_cols=num_cols, cat_cols=cat_cols)
#         df_syn = synth.sample(s)
#         df_syn.to_csv(OP)
#         c = getcwd() + '/' + OP
#         with open(c, "rb") as file:
#             st.download_button(label=':blue[Download]', data=file, file_name=OP, mime="text/csv")  # CSV, not image/png
#         st.success("Thanks for using the app !!!")
# --- Active app: insurance Q&A with a LoRA fine-tune of phi-2 ---
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# torch.set_default_device("cuda")  # uncomment to run on GPU
model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)

i = st.text_input('Prompt', 'What Does Basic Homeowners Insurance Cover?')
# Prompt format used in the model card example:
# inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
inputs = tokenizer(i, return_tensors="pt", return_attention_mask=False)
outputs = model.generate(**inputs, max_length=1024)
text = tokenizer.batch_decode(outputs)[0]
st.write(text)  # render in the app instead of printing to the server console
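# A minimal sketch (an assumption, not part of the original app): the commented
# example above suggests the model was fine-tuned on an
# "### Instruction: ...\n### Response: " template, so wrapping the raw user prompt
# in that template and trimming the echoed prompt from the decoded output may yield
# cleaner answers. The helper names build_prompt / extract_response are hypothetical.
#
# def build_prompt(question: str) -> str:
#     # Wrap the user's question in the instruction/response template.
#     return f"### Instruction: {question}\n### Response: "
#
# def extract_response(decoded: str, prompt: str) -> str:
#     # generate() echoes the prompt tokens; keep only the newly generated text.
#     return decoded[len(prompt):].strip()
#
# prompt = build_prompt(i)
# inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
# outputs = model.generate(**inputs, max_new_tokens=512)
# answer = extract_response(tokenizer.batch_decode(outputs)[0], prompt)
# st.write(answer)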