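# Streamlit chat app that talks to GPT-4o-mini through the g4f client.
# To try it locally (assuming this file is saved as app.py):
#   pip install streamlit g4f
#   streamlit run app.py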
import streamlit as st
from g4f.client import Client
# Initialize the GPT-4o-mini client
client = Client()

# Page setup: title, icon, and layout
st.set_page_config(
    page_title="GPT-4o Chat",
    page_icon="🌟",
    layout="centered"
)

# Initialize session state: stores the chat history across reruns
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Display the app title
st.title("GPT-4o-mini Chat 🚀")

# Replay the stored chat history in the UI
for message in st.session_state.chat_history:
    with st.chat_message(message['role']):
        # Render each message under its role (user or assistant)
        st.markdown(message['content'])

# User input box
user_prompt = st.chat_input("Ask GPT-4o-mini a question!")

if user_prompt:
    # Show the user's message in the UI and store it in session state
    st.chat_message("user").markdown(user_prompt)
    st.session_state.chat_history.append({"role": "user", "content": user_prompt})

    # Send the conversation to the GPT-4o-mini model and generate a response
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},  # system prompt
            *st.session_state.chat_history  # include the previous conversation
        ]
    )

    # Extract the model's reply
    final_response = response.choices[0].message.content

    # Show the GPT response in the UI and store it in session state
    st.session_state.chat_history.append({"role": "assistant", "content": final_response})
    with st.chat_message("assistant"):
        st.markdown(final_response)