File size: 1,676 Bytes
c0325b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import streamlit as st
from g4f.client import Client

# Initialize the g4f client (routes requests to a GPT-4o-mini provider).
client = Client()

# Page configuration: browser-tab title, icon, and centered layout.
st.set_page_config(
    page_title="GPT-4o Chat",
    page_icon="πŸ“",
    layout="centered"
)

# Session-state initialization: the chat transcript must live in
# st.session_state because Streamlit re-runs this script top-to-bottom
# on every user interaction.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# App title.
st.title("GPT-4o-mini Chat πŸ€–")

# Replay the stored transcript so earlier messages survive each rerun.
for message in st.session_state.chat_history:
    with st.chat_message(message['role']):
        # Each entry carries its own role ("user"/"assistant"), so the
        # widget renders the correct speaker bubble.
        st.markdown(message['content'])

# Chat input box; returns None until the user submits a prompt.
user_prompt = st.chat_input("GPT-4o-miniμ—κ²Œ μ§ˆλ¬Έν•΄λ³΄μ„Έμš”!")

if user_prompt:
    # Echo the user's message immediately and persist it in the transcript.
    st.chat_message("user").markdown(user_prompt)
    st.session_state.chat_history.append({"role": "user", "content": user_prompt})

    # Send the system prompt plus the full transcript to the model.
    # The request is a network call to a third-party provider, so guard it:
    # an unhandled failure would crash the rerun and leave the transcript
    # ending in an unanswered user message.
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant"},  # system prompt
                *st.session_state.chat_history  # include prior conversation turns
            ]
        )
        # Extract the assistant's reply text.
        final_response = response.choices[0].message.content
    except Exception as exc:
        # Surface the failure in the UI and halt this rerun; the user can
        # simply send another message to retry.
        st.error(f"Request failed: {exc}")
        st.stop()

    # Show the assistant's reply and persist it in the transcript.
    st.session_state.chat_history.append({"role": "assistant", "content": final_response})
    with st.chat_message("assistant"):
        st.markdown(final_response)