Canstralian committed on
Commit
ea290b1
·
verified ·
1 Parent(s): 9a247cf

Upload 14 files

Browse files
hugging_steam-master/.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Python 3",
3
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
4
+ "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye",
5
+ "customizations": {
6
+ "codespaces": {
7
+ "openFiles": [
8
+ "README.md",
9
+ "streamlit_app.py"
10
+ ]
11
+ },
12
+ "vscode": {
13
+ "settings": {},
14
+ "extensions": [
15
+ "ms-python.python",
16
+ "ms-python.vscode-pylance"
17
+ ]
18
+ }
19
+ },
20
+ "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt; pip3 install --user streamlit; echo 'βœ… Packages installed and Requirements met'",
21
+ "postAttachCommand": {
22
+ "server": "streamlit run streamlit_app.py --server.enableCORS false --server.enableXsrfProtection false"
23
+ },
24
+ "portsAttributes": {
25
+ "8501": {
26
+ "label": "Application",
27
+ "onAutoForward": "openPreview"
28
+ }
29
+ },
30
+ "forwardPorts": [
31
+ 8501
32
+ ]
33
+ }
hugging_steam-master/.streamlit/config.toml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ [theme]
2
+ primaryColor="#60b4ff"
3
+ backgroundColor="#FFFFFF"
4
+ secondaryBackgroundColor="#F0F2F6"
5
+ textColor="#262730"
6
+ font="sans serif"
7
+
8
+ [server]
9
+ enableStaticServing = true
hugging_steam-master/README.md ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🤗💬 HugChat App
2
+ ```
3
+ This app is an LLM-powered chatbot built using Streamlit and HugChat.
4
+ ```
5
+
6
+ [HugChat](https://github.com/Soulter/hugging-chat-api) is an unofficial port to the [HuggingFace Chat](https://huggingface.co/chat/) API that is powered by the [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model.
7
+
8
+ ## Demo App
9
+
10
+ [![Streamlit App](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://hugchat.streamlit.app/)
11
+
12
+ ## Disclaimer
13
+ The following disclaimer is from the GitHub repo from the authors of the [HugChat](https://github.com/Soulter/hugging-chat-api) port.
14
+ > When you use this project, it means that you have agreed to the following two requirements of the HuggingChat:
15
+ >
16
+ > AI is an area of active research with known problems such as biased generation and misinformation. Do not use this application for high-stakes decisions or advice. Your conversations will be shared with model authors.
17
+
18
+
19
+ ## Libraries used
20
+
21
+ This app is built using the following Python libraries:
22
+ - [Streamlit](https://streamlit.io/)
23
+ - [HugChat](https://github.com/Soulter/hugging-chat-api)
24
+ - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model
hugging_steam-master/app_v1.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from streamlit_chat import message
3
+ from streamlit_extras.colored_header import colored_header
4
+ from streamlit_extras.add_vertical_space import add_vertical_space
5
+ from hugchat import hugchat
6
+ import os
7
+
8
+ # Streamlit page config
9
+ st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")
10
+
11
+ # Sidebar contents
12
+ with st.sidebar:
13
+ st.title('πŸ€—πŸ’¬ HugChat App')
14
+ st.markdown('''
15
+ ## About
16
+ This app is an LLM-powered chatbot built using:
17
+ - [Streamlit](https://streamlit.io/)
18
+ - [HugChat](https://github.com/Soulter/hugging-chat-api)
19
+ - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model
20
+
21
+ πŸ’‘ Note: No API key required!
22
+ ''')
23
+ add_vertical_space(5)
24
+ st.write('Made with ❀️ by [Data Professor](https://youtube.com/dataprofessor)')
25
+
26
+ # Initialize chatbot and session state
27
+ if 'chatbot' not in st.session_state:
28
+ # Create ChatBot instance
29
+ st.session_state.chatbot = hugchat.ChatBot()
30
+
31
+ if 'generated' not in st.session_state:
32
+ st.session_state['generated'] = ["I'm HugChat, How may I help you?"]
33
+
34
+ if 'past' not in st.session_state:
35
+ st.session_state['past'] = ['Hi!']
36
+
37
+ # Layout of input/response containers
38
+ input_container = st.container()
39
+ colored_header(label='', description='', color_name='blue-30')
40
+ response_container = st.container()
41
+
42
+ # User input
43
+ def get_text():
44
+ return st.text_input("You: ", "", key="input")
45
+
46
+ with input_container:
47
+ user_input = get_text()
48
+
49
+ # AI Response Generation
50
+ def generate_response(prompt):
51
+ try:
52
+ response = st.session_state.chatbot.chat(prompt)
53
+ return response
54
+ except Exception as e:
55
+ return f"An error occurred: {e}"
56
+
57
+ # Display conversation
58
+ with response_container:
59
+ if user_input:
60
+ response = generate_response(user_input)
61
+ st.session_state.past.append(user_input)
62
+ st.session_state.generated.append(response)
63
+
64
+ if st.session_state['generated']:
65
+ for i in range(len(st.session_state['generated'])):
66
+ message(st.session_state['past'][i], is_user=True, key=f"{i}_user")
67
+ message(st.session_state["generated"][i], key=f"{i}")
hugging_steam-master/app_v2.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
from streamlit_extras.add_vertical_space import add_vertical_space
from hugchat import hugchat
from hugchat.login import Login

st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")

# Sidebar contents
with st.sidebar:
    st.title('🤗💬 HugChat App')

    st.header('Hugging Face Login')
    # type='password' masks the e-mail too; kept for parity with the original UI.
    hf_email = st.text_input('Enter E-mail:', type='password')
    hf_pass = st.text_input('Enter password:', type='password')

    st.markdown('''
    ## About
    This app is an LLM-powered chatbot built using:
    - [Streamlit](https://streamlit.io/)
    - [HugChat](https://github.com/Soulter/hugging-chat-api)
    - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model

    ''')
    add_vertical_space(5)
    st.write('Made with ❤️ by [Data Professor](https://youtube.com/dataprofessor)')

# Generate empty lists for generated and past.
## generated stores AI generated responses
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["I'm HugChat, How may I help you?"]
## past stores User's questions
if 'past' not in st.session_state:
    st.session_state['past'] = ['Hi!']

# Layout of input/response containers
input_container = st.container()
colored_header(label='', description='', color_name='blue-30')
response_container = st.container()

# User input
def get_text():
    """Render the user input box and return the entered text."""
    return st.text_input("You: ", "", key="input")

with input_container:
    user_input = get_text()

# Response output
def generate_response(prompt, email, passwd):
    """Log in to Hugging Face and return the chatbot's reply to *prompt*.

    NOTE(review): a fresh login is performed for every message, which is
    slow and may be rate-limited — consider caching the chatbot in
    st.session_state.
    """
    sign = Login(email, passwd)
    cookies = sign.login()
    sign.saveCookies()
    # Create ChatBot from the authenticated session cookies
    chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
    return chatbot.chat(prompt)

## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
    if user_input and hf_email and hf_pass:
        response = generate_response(user_input, hf_email, hf_pass)
        st.session_state.past.append(user_input)
        st.session_state.generated.append(response)

    if st.session_state['generated']:
        for i in range(len(st.session_state['generated'])):
            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
            message(st.session_state["generated"][i], key=str(i))
hugging_steam-master/app_v3.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from hugchat import hugchat
from hugchat.login import Login

# App title
st.set_page_config(page_title="🤗💬 HugChat")

# Hugging Face Credentials
with st.sidebar:
    st.title('🤗💬 HugChat')
    if ('EMAIL' in st.secrets) and ('PASS' in st.secrets):
        st.success('HuggingFace Login credentials already provided!', icon='✅')
        hf_email = st.secrets['EMAIL']
        hf_pass = st.secrets['PASS']
    else:
        hf_email = st.text_input('Enter E-mail:', type='password')
        hf_pass = st.text_input('Enter password:', type='password')
        if not (hf_email and hf_pass):
            st.warning('Please enter your credentials!', icon='⚠️')
        else:
            st.success('Proceed to entering your prompt message!', icon='👉')
    st.markdown('📖 Learn how to build this app in this [blog](https://blog.streamlit.io/how-to-build-an-llm-powered-chatbot-with-streamlit/)!')

# Store LLM generated responses
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

# Display or clear chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

# Function for generating LLM response
def generate_response(prompt_input, email, passwd):
    """Log in to Hugging Face and return the chatbot's reply.

    The full chat history is folded into a single prompt so the model
    sees the whole conversation, then *prompt_input* is appended.
    """
    # Hugging Face Login
    sign = Login(email, passwd)
    cookies = sign.login()
    # Create ChatBot
    chatbot = hugchat.ChatBot(cookies=cookies.get_dict())

    # BUG FIX: initialize the dialogue ONCE before the loop. The original
    # reset string_dialogue on every iteration, so only the last history
    # message ever reached the model.
    string_dialogue = "You are a helpful assistant."
    for dict_message in st.session_state.messages:
        if dict_message["role"] == "user":
            string_dialogue += "User: " + dict_message["content"] + "\n\n"
        else:
            string_dialogue += "Assistant: " + dict_message["content"] + "\n\n"

    prompt = f"{string_dialogue} {prompt_input} Assistant: "
    return chatbot.chat(prompt)

# User-provided prompt (disabled until credentials are supplied)
if prompt := st.chat_input(disabled=not (hf_email and hf_pass)):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt, hf_email, hf_pass)
            st.write(response)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)
hugging_steam-master/img/placeholder.md ADDED
@@ -0,0 +1 @@
 
 
1
+
hugging_steam-master/img/streamlit.png ADDED
hugging_steam-master/langchain_app.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from langchain.chains import ConversationChain
from hugchat import hugchat
from hugchat.login import Login

st.set_page_config(page_title="HugChat - An LLM-powered Streamlit app")
st.title('🤗💬 HugChat App')

# Hugging Face Credentials
with st.sidebar:
    st.header('Hugging Face Login')
    hf_email = st.text_input('Enter E-mail:', type='password')
    hf_pass = st.text_input('Enter password:', type='password')

# Store AI generated responses
if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant", "content": "I'm HugChat, How may I help you?"}]

# Display existing chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Function for generating LLM response
def generate_response(prompt, email, passwd):
    """Log in to Hugging Face and run *prompt* through a ConversationChain.

    NOTE(review): hugchat.ChatBot is not a LangChain LLM subclass, so
    ConversationChain(llm=chatbot) likely fails type validation — verify
    against the installed langchain version before relying on this path.
    """
    # Hugging Face Login
    sign = Login(email, passwd)
    cookies = sign.login()
    sign.saveCookies()
    # Create ChatBot from the authenticated session cookies
    chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
    chain = ConversationChain(llm=chatbot)
    return chain.run(input=prompt)

# Prompt for user input and save
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# If last message is not from assistant, we need to generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
    # Call LLM
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt, hf_email, hf_pass)
            st.write(response)

    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)
hugging_steam-master/notebook/HugChat.ipynb ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "markdown",
19
+ "source": [
20
+ "# **How to use HugChat - the unofficial Hugging Chat API**"
21
+ ],
22
+ "metadata": {
23
+ "id": "R4FiZs77vKXr"
24
+ }
25
+ },
26
+ {
27
+ "cell_type": "markdown",
28
+ "source": [
29
+ "## **Install prerequisite libraries**"
30
+ ],
31
+ "metadata": {
32
+ "id": "6jmfeB1PvL8_"
33
+ }
34
+ },
35
+ {
36
+ "cell_type": "code",
37
+ "execution_count": null,
38
+ "metadata": {
39
+ "colab": {
40
+ "base_uri": "https://localhost:8080/"
41
+ },
42
+ "id": "g9bTbqH96XS2",
43
+ "outputId": "452331b1-e565-4228-f241-9e80cfb956f6"
44
+ },
45
+ "outputs": [
46
+ {
47
+ "output_type": "stream",
48
+ "name": "stdout",
49
+ "text": [
50
+ "Collecting hugchat==0.1.0\n",
51
+ " Downloading hugchat-0.1.0-py3-none-any.whl (24 kB)\n",
52
+ "Collecting python-dotenv\n",
53
+ " Downloading python_dotenv-1.0.0-py3-none-any.whl (19 kB)\n",
54
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from hugchat==0.1.0) (2.27.1)\n",
55
+ "Collecting requests-toolbelt (from hugchat==0.1.0)\n",
56
+ " Downloading requests_toolbelt-1.0.0-py2.py3-none-any.whl (54 kB)\n",
57
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.5/54.5 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
58
+ "\u001b[?25hRequirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->hugchat==0.1.0) (1.26.16)\n",
59
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->hugchat==0.1.0) (2023.7.22)\n",
60
+ "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->hugchat==0.1.0) (2.0.12)\n",
61
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->hugchat==0.1.0) (3.4)\n",
62
+ "Installing collected packages: python-dotenv, requests-toolbelt, hugchat\n",
63
+ "Successfully installed hugchat-0.1.0 python-dotenv-1.0.0 requests-toolbelt-1.0.0\n"
64
+ ]
65
+ }
66
+ ],
67
+ "source": [
68
+ "! pip install hugchat==0.1.0 python-dotenv"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "markdown",
73
+ "source": [
74
+ "## **Load Hugging Face credentials**"
75
+ ],
76
+ "metadata": {
77
+ "id": "tXioNOYMv1ti"
78
+ }
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "source": [
83
+ "from dotenv import dotenv_values\n",
84
+ "\n",
85
+ "secrets = dotenv_values('hf.env')"
86
+ ],
87
+ "metadata": {
88
+ "id": "tdJjllXGueGX"
89
+ },
90
+ "execution_count": null,
91
+ "outputs": []
92
+ },
93
+ {
94
+ "cell_type": "code",
95
+ "source": [
96
+ "hf_email = secrets['EMAIL']\n",
97
+ "hf_pass = secrets['PASS']"
98
+ ],
99
+ "metadata": {
100
+ "id": "tNHBfLF788f2"
101
+ },
102
+ "execution_count": null,
103
+ "outputs": []
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "source": [
108
+ "## **LLM response generation**"
109
+ ],
110
+ "metadata": {
111
+ "id": "_yc18ezBzML6"
112
+ }
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "source": [
117
+ "from hugchat import hugchat\n",
118
+ "from hugchat.login import Login"
119
+ ],
120
+ "metadata": {
121
+ "id": "H21niuMc8xcv"
122
+ },
123
+ "execution_count": null,
124
+ "outputs": []
125
+ },
126
+ {
127
+ "cell_type": "code",
128
+ "source": [
129
+ "# Function for generating LLM response\n",
130
+ "def generate_response(prompt_input, email, passwd):\n",
131
+ " # Hugging Face Login\n",
132
+ " sign = Login(email, passwd)\n",
133
+ " cookies = sign.login()\n",
134
+ " # Create ChatBot\n",
135
+ " chatbot = hugchat.ChatBot(cookies=cookies.get_dict())\n",
136
+ " return chatbot.chat(prompt_input)"
137
+ ],
138
+ "metadata": {
139
+ "id": "4k9iUzyWzwSh"
140
+ },
141
+ "execution_count": null,
142
+ "outputs": []
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "source": [
147
+ "prompt = \"What is Streamlit?\"\n",
148
+ "response = generate_response(prompt, hf_email, hf_pass)"
149
+ ],
150
+ "metadata": {
151
+ "id": "TD0YIqY1zQmK"
152
+ },
153
+ "execution_count": null,
154
+ "outputs": []
155
+ },
156
+ {
157
+ "cell_type": "code",
158
+ "source": [
159
+ "response"
160
+ ],
161
+ "metadata": {
162
+ "id": "2Om92sQ4z3e2",
163
+ "colab": {
164
+ "base_uri": "https://localhost:8080/",
165
+ "height": 125
166
+ },
167
+ "outputId": "89e9b29a-3a54-4e61-dd13-b75395fa82b6"
168
+ },
169
+ "execution_count": null,
170
+ "outputs": [
171
+ {
172
+ "output_type": "execute_result",
173
+ "data": {
174
+ "text/plain": [
175
+ "'Streamlit is a lightweight Python library for creating interactive data analysis tools such as visualizations and reports. It makes use of modern web technologies like React, Redux, and WebSockets to provide fast, responsive UI components that can be used to build custom user interfaces. At its core, Streamlit provides a set of high-level abstractions built on top of NumPy and Pandas that make it easy to create beautiful charts, tables, maps, and other types of output. It works seamlessly with popular libraries like Altair, Bokeh, Matplotlib, and Seaborn, allowing you to combine their unique strengths into powerful analytical tools. Overall, Streamlit simplifies the process of building data science applications by providing a unified interface that integrates development environments with deployment tools, making it possible to iterate quickly and easily share results.'"
176
+ ],
177
+ "application/vnd.google.colaboratory.intrinsic+json": {
178
+ "type": "string"
179
+ }
180
+ },
181
+ "metadata": {},
182
+ "execution_count": 11
183
+ }
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "code",
188
+ "source": [],
189
+ "metadata": {
190
+ "id": "aIWbxGM-1LDh"
191
+ },
192
+ "execution_count": null,
193
+ "outputs": []
194
+ }
195
+ ]
196
+ }
hugging_steam-master/notebook/hf.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ EMAIL='[email protected]'
2
+ PASS='xxxxxxxxxxx'
hugging_steam-master/requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit
2
+ hugchat==0.1.0
hugging_steam-master/streamlit.png ADDED
hugging_steam-master/streamlit_app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset
from gtts import gTTS
import os
import re
import random

# Enable Dark Mode and Custom CSS
st.markdown(
    """
    <style>
    body {
        background-color: #121212;
        color: white;
    }
    .css-1d391kg {
        background-color: #333;
    }
    .stButton > button {
        background-color: #6200ee;
        color: white;
    }
    .stTextInput input {
        background-color: #333;
        color: white;
    }
    </style>
    """,
    unsafe_allow_html=True,
)

# Load models and datasets.
# FIX: cache the heavy loads — Streamlit re-executes the whole script on
# every user interaction, and reloading a 7B-parameter model per rerun is
# prohibitively slow.
@st.cache_resource(show_spinner="Loading Code Llama model...")
def _load_code_llama():
    """Load the Code Llama model and tokenizer once per server process."""
    model = AutoModelForCausalLM.from_pretrained("meta-llama/CodeLlama-7B-Python")
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/CodeLlama-7B-Python")
    return model, tokenizer

@st.cache_resource(show_spinner="Loading wordlist dataset...")
def _load_wordlists():
    """Load the Canstralian/Wordlists dataset once per server process."""
    return load_dataset("Canstralian/Wordlists")

try:
    code_llama_model, code_llama_tokenizer = _load_code_llama()
except Exception as e:
    st.error(f"Error loading model: {e}")
    code_llama_model = None
    code_llama_tokenizer = None

try:
    wordlist_dataset = _load_wordlists()
except Exception as e:
    st.error(f"Error loading Wordlist dataset: {e}")
    wordlist_dataset = None

# Initialize chat history storage
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you?"}]
# Function to validate the prompt using regular expressions
def validate_prompt(prompt: str) -> bool:
    """
    Validates that the input prompt is non-empty and contains only
    reasonable chat characters.

    Args:
        prompt (str): The input prompt to be validated.
    Returns:
        bool: True if the prompt is valid, False otherwise.
    """
    # Reject empty or whitespace-only input outright.
    if not prompt or not prompt.strip():
        return False
    # FIX: the previous pattern rejected apostrophes, quotes, colons and
    # hyphens, so ordinary chat like "don't" or "10:30" was refused.
    # Allow word characters, whitespace, and common punctuation/symbols.
    return bool(re.match(r"^[\w\s.,;:!?'\"()\[\]{}@#&*/+=%$^~<>|\\-]+$", prompt))
# Function to convert text to speech
def text_to_speech(text: str) -> None:
    """
    Converts text to speech using gTTS, saves it as an MP3 file, and plays
    it back in the user's browser.

    Args:
        text (str): The text to be converted to speech.
    """
    try:
        tts = gTTS(text, lang='en')
        tts.save("response.mp3")
        # FIX: play via st.audio so the USER hears it. The previous
        # os.system("mpg321 response.mp3") played audio on the server host
        # (and silently did nothing when mpg321 was not installed).
        st.audio("response.mp3")
    except Exception as e:
        st.error(f"Error generating speech: {e}")
# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Function to generate chatbot response
def generate_response(prompt: str) -> str:
    """
    Generates a response from the assistant based on the user input.

    Dispatch order: Code Llama for prompts mentioning "python" (if the
    model loaded), then keyword overrides for "osint" and "wordlist".

    Args:
        prompt (str): The user's input prompt.
    Returns:
        str: The generated response from the assistant.
    """
    lowered = prompt.lower()

    if code_llama_model and code_llama_tokenizer:
        if "python" in lowered:
            # Use the Code Llama model for code-related queries.
            inputs = code_llama_tokenizer(prompt, return_tensors="pt")
            # NOTE(review): max_length=150 includes the prompt tokens, so a
            # long prompt leaves little room for the completion — confirm
            # whether max_new_tokens was intended.
            outputs = code_llama_model.generate(**inputs, max_length=150, num_return_sequences=1)
            response = code_llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
        else:
            response = "I'm here to assist with your queries."
    else:
        response = "Model not loaded. Please try again later."

    # Keyword overrides take precedence over the model output above.
    if "osint" in lowered:
        response = "OSINT data analysis coming soon!"
    elif "wordlist" in lowered and wordlist_dataset:
        # Fetch and display a random entry from the Wordlist dataset.
        # assumes the dataset has a "train" split with a "text" column — TODO confirm
        wordlist_entry = random.choice(wordlist_dataset["train"])["text"]
        response = f"Here's a random wordlist entry: {wordlist_entry}"

    return response
# User input handling
if prompt := st.chat_input():
    # Validate user input before touching the history.
    if validate_prompt(prompt):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

        # Generate and display the assistant's response.
        with st.chat_message("assistant"):
            with st.spinner("Assistant is typing..."):
                response = generate_response(prompt)
                st.write(response)

        # Text-to-Speech integration for the assistant's response.
        text_to_speech(response)

        # Store the assistant's response in the chat history.
        st.session_state.messages.append({"role": "assistant", "content": response})
    else:
        st.warning("Invalid input. Please ensure your input contains only valid characters.")

# User Feedback Section
# FIX: index=None leaves the selectbox unselected, so the thank-you banner
# only appears after the user actually picks an option (previously the
# first option was pre-selected and the banner always showed).
feedback = st.selectbox(
    "How was your experience?",
    ["😊 Excellent", "😐 Okay", "😕 Poor"],
    index=None,
    placeholder="Select an option",
)
if feedback:
    st.success(f"Thank you for your feedback: {feedback}", icon="✅")