Spaces:
Sleeping
Sleeping
basics working
Browse files- app.py +45 -2
- requirements.txt +2 -0
app.py
CHANGED
@@ -1,4 +1,47 @@
|
|
1 |
import streamlit as st
|
|
|
|
|
2 |
|
3 |
-
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Streamlit app: Knowledge Extraction demo.

Answers free-text questions with a GPT-Neo-125M model fine-tuned on
"question: ..." formatted prompts.  The heavy tokenizer/model objects are
loaded once and cached in ``st.session_state`` because Streamlit re-runs
this whole script on every user interaction.
"""
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Centered page title.  (Fixed invalid CSS unit: '30x' -> '30px'.)
big_text = """
<div style='text-align: center;'>
<h1 style='font-size: 30px;'>Knowledge Extraction 1</h1>
</div>
"""
st.markdown(big_text, unsafe_allow_html=True)

# One-time initialization guard: only load the model on the first run of
# the session, not on every rerun triggered by widget interaction.
if 'is_initialized' not in st.session_state:
    st.session_state['is_initialized'] = True
    st.session_state.model_name = "EleutherAI/gpt-neo-125M"
    # Tokenizer from the base model; weights from the fine-tuned checkpoint.
    # NOTE(review): assumes the fine-tune kept the base vocabulary — confirm
    # against the checkpoint if decoding ever looks garbled.
    st.session_state.tokenizer = AutoTokenizer.from_pretrained(
        st.session_state.model_name
    )
    st.session_state.model = AutoModelForCausalLM.from_pretrained(
        "zmbfeng/gpt-neo-125M_untethered_100_epochs_multiple_paragraph"
    )
    if torch.cuda.is_available():
        st.session_state.device = torch.device("cuda")
        print("Using GPU:", torch.cuda.get_device_name(0))
    else:
        st.session_state.device = torch.device("cpu")
        print("GPU is not available, using CPU instead.")
    st.session_state.model.to(st.session_state.device)

query = st.text_input("Enter your query")
if query:
    # The model was fine-tuned on "question: ..." style prompts, so the
    # user's raw query is wrapped in that template before generation.
    prompt = "question: " + query
    input_ids = st.session_state.tokenizer(
        prompt, return_tensors="pt"
    ).input_ids.to(st.session_state.device)
    # Generate a response.  Near-zero temperature makes sampling
    # effectively deterministic (exact result for single paragraph);
    # pad_token_id=eos silences the "no pad token" warning for GPT-Neo.
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        output = st.session_state.model.generate(
            input_ids,
            max_length=2048,
            do_sample=True,
            temperature=0.01,
            pad_token_id=st.session_state.tokenizer.eos_token_id,
        )
    # Decode the output, dropping special tokens (EOS padding etc.).
    response = st.session_state.tokenizer.decode(
        output[0], skip_special_tokens=True
    )
    st.write(response)
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
transformers
torch
|