Maddy90 committed · verified · Commit 6957aa9 · Parent: 3f1efa3

Update app.py

Files changed (1): app.py (+23, -8)
app.py CHANGED
@@ -1,11 +1,26 @@
- # Use Auto-tokenizer
-
  import streamlit as st
- from transformers import pipeline

- pipe = pipeline('sentiment-analysis')
- text = st.text_area('Enter some text!')

- if text:
-     out = pipe(text)
-     st.json(out)
  import streamlit as st
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load model and tokenizer
+ model_name = "gpt-3.5-turbo"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ def generate_blog_post(topic):
+     prompt = f"Write a detailed blog post about {topic}."
+     inputs = tokenizer.encode(prompt, return_tensors="pt")
+     outputs = model.generate(inputs, max_length=512, num_return_sequences=1)
+     blog_post = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return blog_post

+ # Streamlit interface
+ st.title("Blog Post Generator")
+ st.write("Enter a topic to generate a detailed blog post.")

+ topic = st.text_input("Topic", "")
+ if st.button("Generate Blog Post"):
+     if topic:
+         blog_post = generate_blog_post(topic)
+         st.write(blog_post)
+     else:
+         st.write("Please enter a topic to generate a blog post.")
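
Reviewer note: "gpt-3.5-turbo" is an OpenAI API model, not a checkpoint hosted on the Hugging Face Hub, so the AutoModelForCausalLM.from_pretrained(model_name) call in the new app.py will fail at startup. The sketch below shows the same app wired to an openly hosted causal-LM checkpoint; "gpt2" is only an assumed stand-in, and the st.cache_resource loader plus the max_new_tokens / do_sample / pad_token_id generation settings are illustrative choices, not part of this commit.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # assumption: any causal-LM checkpoint hosted on the Hub

@st.cache_resource  # load the model once per session, not on every script rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return tokenizer, model

def generate_blog_post(topic: str) -> str:
    tokenizer, model = load_model()
    prompt = f"Write a detailed blog post about {topic}."
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,                    # budget for generated tokens only
        do_sample=True,                        # sampled decoding for more varied text
        pad_token_id=tokenizer.eos_token_id,   # GPT-2 has no pad token; reuse EOS
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

st.title("Blog Post Generator")
st.write("Enter a topic to generate a detailed blog post.")

topic = st.text_input("Topic", "")
if st.button("Generate Blog Post"):
    if topic:
        st.write(generate_blog_post(topic))
    else:
        st.write("Please enter a topic to generate a blog post.")

Caching the loader matters here because Streamlit reruns the whole script on every interaction; without st.cache_resource the model would be reloaded on each button click.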