# Earlier experiment (kept commented out): Streamlit buttons for sentiment analysis and text summarization.
# import streamlit as st
# from transformers import pipeline
# # pipe = pipeline("sentiment-analysis")
# # summarizer = pipeline("summarization")  # needed for the summarization branch below
# # col1, col2 = st.columns(2)
# # with col1:
# #     x = st.button("Sentiment Analysis")
# # with col2:
# #     y = st.button("Text Summarization")
# # if x:
# #     t = st.text_input("Enter the Text")
# #     st.write(pipe(t))
# # if y:
# #     t1 = st.text_input("Enter the Text for Summarization")
# #     st.write(summarizer(t1))
# Earlier experiment (kept commented out): llmware industry-bert-insurance embeddings and a summarization pipeline.
# from transformers import AutoTokenizer, AutoModel
# import streamlit as st
# tokenizer = AutoTokenizer.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # model = AutoModel.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # Use a pipeline as a high-level helper
# from transformers import pipeline
# # pipe = pipeline("feature-extraction")
# t = st.text_input("Enter the Text")
# pipe = pipeline("summarization")
# st.write(pipe(t))
# Earlier experiment (kept commented out): synthetic tabular data generation with ydata-synthetic's CTGAN synthesizer.
# import pandas as pd
# import numpy as np
# from ydata_synthetic.synthesizers.regular import RegularSynthesizer
# from ydata_synthetic.synthesizers import ModelParameters, TrainParameters
# import streamlit as st
# from os import getcwd
# text_file = st.file_uploader("Upload the Data File")
# st.write("-------------------------")
# if text_file is not None:
#     df = pd.read_csv(text_file)
#     dd_list = df.columns
#     cat_cols = st.multiselect("Select the Categorical Columns", dd_list)
#     num_cols = st.multiselect("Select the Numerical Columns", dd_list)
#     Output_file = st.text_input('Enter Output File Name')
#     s = st.number_input('Enter the Sample Size', 1000)
#     OP = Output_file + '.csv'
#     sub = st.button('Submit')
#     if sub:
#         batch_size = 50
#         epochs = 3
#         learning_rate = 2e-4
#         beta_1 = 0.5
#         beta_2 = 0.9
#         ctgan_args = ModelParameters(batch_size=batch_size,
#                                      lr=learning_rate,
#                                      betas=(beta_1, beta_2))
#         train_args = TrainParameters(epochs=epochs)
#         synth = RegularSynthesizer(modelname='ctgan', model_parameters=ctgan_args)
#         synth.fit(data=df, train_arguments=train_args, num_cols=num_cols, cat_cols=cat_cols)
#         df_syn = synth.sample(s)
#         df_syn.to_csv(OP)
#         c = getcwd()
#         c = c + '/' + OP
#         with open(c, "rb") as file:
#             st.download_button(label=':blue[Download]', data=file, file_name=OP, mime="text/csv")
#         st.success("Thanks for using the app !!!")
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Use the GPU when one is available; fall back to CPU so the script still runs on CPU-only Spaces.
torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

# Load the insurance-QA fine-tuned Phi-2 LoRA model and its tokenizer.
model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)

# Build an instruction-style prompt, generate a response, and print it.
inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
outputs = model.generate(**inputs, max_length=1024)
text = tokenizer.batch_decode(outputs)[0]
print(text)
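
# Optional follow-up (kept commented out, like the experiments above): the same generation step could be
# exposed through a Streamlit form. This is only a sketch; the widget labels and the reuse of
# max_length=1024 are illustrative choices, not part of the original app.
# import streamlit as st
# question = st.text_input("Enter an insurance question")
# if st.button("Ask"):
#     prompt = f"### Instruction: {question}\n### Response: "
#     enc = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
#     out = model.generate(**enc, max_length=1024)
#     st.write(tokenizer.batch_decode(out)[0])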