from langchain.llms import OpenAI
from dotenv import load_dotenv
import streamlit as st
import os

# Load environment variables (e.g. the OpenAI API key) from a .env file
load_dotenv()


# Send the question to the model and return the generated answer text
def get_response_ai(question):
    # Note: gpt-3.5-turbo is a chat model; newer LangChain versions warn here and suggest ChatOpenAI
    llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.75)
    response = llm(question)
    return response


st.set_page_config(page_title='Q&A Demo')
st.header('Langchain Application')

question = st.text_input("Input :", key="input")
submit = st.button('Ask a question')

# Call the model only after the user clicks the button,
# so the API is not hit on every Streamlit rerun with an empty input
if submit:
    response = get_response_ai(question)
    st.subheader("The Response is")
    st.write(response)
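
To try the app, the API key is expected in a .env file next to the script, since load_dotenv() reads it from there and LangChain's OpenAI wrapper looks for OPENAI_API_KEY in the environment; the script name app.py below is only an assumed example:

# .env (assumed contents)
OPENAI_API_KEY=your-openai-api-key

# launch the app (assuming the script is saved as app.py)
streamlit run app.py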