from langchain.tools import AIPluginTool
from langchain.utilities import WikipediaAPIWrapper
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.tools import MoveFileTool, format_tool_to_openai_function
from langchain.tools import BaseTool, StructuredTool, Tool, tool
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain import LLMMathChain, SerpAPIWrapper
import gradio as gr
import os
import openai
import gradio as gr
from gradio import ChatInterface
import time
# Read the OpenAI API key from the environment so it is never hard-coded.
openai.api_key = os.getenv("OPENAI_API_KEY")
# NOTE(review): key may be None if the env var is unset — the client call in
# predict() would then fail at request time; verify the Space secret is set.
def predict(inputs, chatbot):
    """Stream an assistant reply for *inputs*, replaying *chatbot* history.

    Parameters
    ----------
    inputs : str
        The latest user message.
    chatbot : list of (user, assistant) pairs
        Prior exchanges as supplied by ``gr.ChatInterface``.

    Yields
    ------
    str
        The assistant reply accumulated so far; Gradio re-renders the
        message on every yield, producing a streaming effect.
    """
    # Rebuild the full conversation: system prompt, then history, then
    # the new user message.
    messages = [{"role": "system", "content": "You are a discord bot called 'QuteAI', make your response like human chatting, humans do not response using lists while explaining things and don't say long sentences. Use markdown in response."}]
    for user_msg, assistant_msg in chatbot:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": inputs})

    # Stream the completion from an OpenAI-compatible endpoint.
    client = openai.OpenAI(base_url="https://api.chatanywhere.tech/v1")
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.7,
        stream=True,
    )

    # Accumulate delta tokens and yield the growing reply after each chunk.
    partial = ""
    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta:
            partial += delta
            yield partial
# Wrap predict() as a chat endpoint. The ChatInterface is rendered inside a
# hidden Row: this Space exists to be driven programmatically (as a discord
# bot via gradio_client), not used interactively.
interface = gr.ChatInterface(predict)

with gr.Blocks() as demo:
    # NOTE: the python code fence below was previously unclosed, which broke
    # the rendered Markdown — the closing ``` is the fix.
    gr.Markdown("""
    # GPT 3.5 Discord Bot powered by gradio!
    To use this space as a discord bot, first install the gradio_client

    ```bash
    pip install gradio_client
    ```

    Then run the following command

    ```python
    client = grc.Client.duplicate("gradio-discord-bots/gpt-35-turbo", private=False, secrets={"OPENAI_API_KEY": "<your-key-here>"}, sleep_timeout=2880)
    client.deploy_discord(api_names=["chat"])
    ```
    """)
    with gr.Row(visible=False):
        interface.render()

# queue() enables generator (streaming) responses before launching the app.
demo.queue().launch()