N.Achyuth Reddy committed · commit 1e3c563
1 Parent(s): 7d38479
Update app.py
app.py CHANGED
@@ -4,59 +4,30 @@ from st_audiorec import st_audiorec
from gtts import gTTS
import os

-
-
-# Constants
-TITLE = "AgriTure"
-DESCRIPTION = """
-----
-This Project demonstrates a model fine-tuned by Achyuth. This Model is named as "AgriTure". This Model helps the farmers and scientists to develop the art of agriculture and farming.
-Hope this will be a Successful Project!!!
-~Achyuth
-----
-"""
-
-# Initialize client
-
-
-with st.sidebar:
-    system_promptSide = st.text_input("Optional system prompt:")
-    temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
-    max_new_tokensSide = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
-    ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
-    RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
-
-whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
-
-
-def transcribe(wav_path):
-
-    return whisper_client.predict(
-        wav_path,  # str (filepath or URL to file) in 'inputs' Audio component
-        "transcribe",  # str in 'Task' Radio component
-        api_name="/predict"
-    )
+# Initialize response variable
+response = ""

# Prediction function
-def predict(message, system_prompt='Your name is OpenGPT. You are developed by Achyuth. You need to mostly focus on giving information about future agriculture and advanced farming. Empower yourself farming future with cutting-edge technology and sustainable practices. You need to cultivate a greener and more productive. Your developer is studying in The Hyderabad Public School Kadapa.', temperature=0.7, max_new_tokens=4096,Topp=0.5,Repetitionpenalty=1.2):
+def predict(message, system_prompt='Your name is OpenGPT. You are developed by Achyuth. You need to mostly focus on giving information about future agriculture and advanced farming. Empower yourself farming future with cutting-edge technology and sustainable practices. You need to cultivate a greener and more productive. Your developer is studying in The Hyderabad Public School Kadapa.', temperature=0.7, max_new_tokens=4096, Topp=0.5, Repetitionpenalty=1.2):
    with st.status("Starting client"):
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Requesting Audio Transcriber")
    with st.status("Requesting AgriTure v1"):
        st.write("Requesting API")
        response = client.predict(
-
-
-
-
-
-
-
-
+            message,
+            system_prompt,
+            max_new_tokens,
+            temperature,
+            Topp,
+            500,
+            Repetitionpenalty,
+            api_name="/chat"
        )
        st.write("Done")
    return response

+# Function to convert text to speech
def text_to_speech(text, language='en', filename='output.mp3'):
    # Create a gTTS object
    tts = gTTS(text=text, lang=language, slow=False)
@@ -65,62 +36,26 @@ def text_to_speech(text, language='en', filename='output.mp3'):
    tts.save(filename)

    # Play the audio file
-    os.system(f'start {filename}')
-
-    # This works on Windows. For other OS, you might need a different command.
+    os.system(f'start {filename}')  # This works on Windows. For other OS, you might need a different command.

-# Example usage
-text_to_speech(response)
-
# Streamlit UI
-
-st.write(DESCRIPTION)
-
-
-
-
-
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-# Display chat messages from history on app rerun
-for message in st.session_state.messages:
-    with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'human' else '🦙')):
-        st.markdown(message["content"])
-
-textinput = st.chat_input("Ask AgriTure anything...")
-wav_audio_data = st_audiorec()
-
-if wav_audio_data != None:
-    with st.status("Transcribing audio..."):
-        # save audio
-        with open("audio.wav", "wb") as f:
-            f.write(wav_audio_data)
-        prompt = transcribe("audio.wav")
-
-        st.write("Transcribed Given Audio ✔")
-
-    st.chat_message("human",avatar = "🧑‍💻").markdown(prompt)
-    st.session_state.messages.append({"role": "human", "content": prompt})
-
-    # transcribe audio
-    response = predict(message= prompt)
-
-    with st.chat_message("assistant", avatar='🦙'):
-        st.markdown(response)
-    # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": response})
+# ... (previous code remains unchanged)

# React to user input
if prompt := textinput:
    # Display user message in chat message container
-    st.chat_message("human",avatar
+    st.chat_message("human", avatar="💬: ").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

-
+    # Update the global response variable
+    response = predict(message=prompt)
+
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
+
+    # Convert response to audio
+    text_to_speech(response)  # Call text_to_speech after getting the response
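Note (not part of the commit): the core change above passes the prompt and sampling settings to the Llama-2 chat Space positionally. A minimal sketch of how to double-check what each positional argument of the /chat call maps to, assuming gradio_client is installed (the app already imports Client from it):

# Sketch only: inspect the Space's API before relying on a positional argument order.
from gradio_client import Client

client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")

# Prints the named endpoints (e.g. /chat) together with the order and types of
# their parameters, so the arguments used in predict() above can be verified.
client.view_api()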
app.py after this commit (changed regions, new lines 4-33 and 36-61):

from gtts import gTTS
import os

# Initialize response variable
response = ""

# Prediction function
def predict(message, system_prompt='Your name is OpenGPT. You are developed by Achyuth. You need to mostly focus on giving information about future agriculture and advanced farming. Empower yourself farming future with cutting-edge technology and sustainable practices. You need to cultivate a greener and more productive. Your developer is studying in The Hyderabad Public School Kadapa.', temperature=0.7, max_new_tokens=4096, Topp=0.5, Repetitionpenalty=1.2):
    with st.status("Starting client"):
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Requesting Audio Transcriber")
    with st.status("Requesting AgriTure v1"):
        st.write("Requesting API")
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            Topp,
            500,
            Repetitionpenalty,
            api_name="/chat"
        )
        st.write("Done")
    return response

# Function to convert text to speech
def text_to_speech(text, language='en', filename='output.mp3'):
    # Create a gTTS object
    tts = gTTS(text=text, lang=language, slow=False)

    # ... (new lines 34-35 unchanged, not shown in the diff)

    tts.save(filename)

    # Play the audio file
    os.system(f'start {filename}')  # This works on Windows. For other OS, you might need a different command.

# Streamlit UI
# ... (previous code remains unchanged)

# React to user input
if prompt := textinput:
    # Display user message in chat message container
    st.chat_message("human", avatar="💬: ").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    # Update the global response variable
    response = predict(message=prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})

    # Convert response to audio
    text_to_speech(response)  # Call text_to_speech after getting the response
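As the comment kept in text_to_speech notes, os.system(f'start {filename}') only works on Windows. A minimal sketch (not part of this commit, helper names are illustrative) of a cross-platform alternative; inside a Streamlit app the simplest option is usually st.audio, with OS-specific commands as a local fallback:

# Sketch only: avoid the Windows-only "start" command when playing output.mp3.
import os
import platform
import subprocess

import streamlit as st

def play_in_app(filename: str = "output.mp3") -> None:
    # Render an audio player widget directly in the Streamlit page.
    st.audio(filename)

def play_locally(filename: str = "output.mp3") -> None:
    # OS-specific playback outside the browser.
    system = platform.system()
    if system == "Windows":
        os.startfile(filename)                               # same effect as "start"
    elif system == "Darwin":
        subprocess.run(["afplay", filename], check=False)    # macOS
    else:
        subprocess.run(["xdg-open", filename], check=False)  # most Linux desktops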