Spaces:
Sleeping
Sleeping
File size: 4,245 Bytes
86b946a 43773a4 d968fe8 86b946a a197dc7 ef5afd7 d4138a4 86b946a a197dc7 2e97054 c88185a e07786f c88185a e07786f a197dc7 dfb995a 2e97054 dfb995a a197dc7 86b946a 5938dff a79aa4c a197dc7 d4138a4 019cdf0 86b946a a197dc7 3405778 f845a0a dfb995a 124bb90 13ab025 124bb90 13ab025 124bb90 dfb995a 124bb90 3405778 dfb995a 883b37e f845a0a 883b37e a197dc7 883b37e a197dc7 883b37e 911c5ef 883b37e e0d541d 883b37e 3405778 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec
# Constants
# App title and the markdown blurb rendered at the top of the page.
TITLE = "Llama2 70B Chatbot"
DESCRIPTION = """
This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, a Llama 2 model with 70B parameters fine-tuned for chat instructions.
| Model | Llama2 | Llama2-hf | Llama2-chat | Llama2-chat-hf |
|---|---|---|---|---|
| 70B | [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) |
---
"""
# Initialize client
# Sidebar sampling controls. NOTE(review): these slider values are never
# forwarded to predict() below — the call site passes only the message, so
# the sliders currently have no effect. Confirm whether that is intentional.
with st.sidebar:
    # system_promptSide = st.text_input("Optional system prompt:")
    temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    max_new_tokensSide = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
    # ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    # RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)

# Gradio client for the hosted Whisper large-v2 Space, used by transcribe().
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
def transcribe(wav_path):
    """Send a local WAV file to the hosted Whisper Space and return the transcription.

    wav_path: filepath (or URL) of the audio to transcribe, uploaded to the
    remote Gradio endpoint's 'inputs' Audio component.
    """
    # The endpoint's second argument selects the task on its Radio component.
    task = "transcribe"
    return whisper_client.predict(wav_path, task, api_name="/predict")
# Prediction function
def predict(message, system_prompt='', temperature=0.7, max_new_tokens=4096,Topp=0.5,Repetitionpenalty=1.2):
    """Query the hosted Llama-2-70b chat endpoint and return its text reply.

    message: the user prompt; system_prompt: optional system instruction;
    temperature / max_new_tokens / Topp / Repetitionpenalty: sampling knobs
    forwarded verbatim to the remote Gradio API. Progress is surfaced in the
    Streamlit UI via st.status/st.write.
    """
    with st.status("Starting client"):
        llama_client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
        st.write("Requesting client")
    with st.status("Requesting LLama-2"):
        st.write("Requesting API")
        # Positional order must match the remote endpoint's signature exactly.
        result = llama_client.predict(
            message,
            system_prompt,
            temperature,
            max_new_tokens,
            Topp,
            Repetitionpenalty,
            api_name="/chat_1",
        )
        st.write("Done")
    return result
# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)

# Chat history lives in session_state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'human' else '🦙')):
        st.markdown(message["content"])

# Two input paths: a typed chat box and a microphone recording
# (st_audiorec returns the recorded audio as WAV bytes, or None).
textinput = st.chat_input("Ask LLama-2-70b anything...")
wav_audio_data = st_audiorec()
# Voice input: when a recording exists, transcribe it and feed it through the
# same chat pipeline as typed input.
if wav_audio_data is not None:  # fix: identity check against None (PEP 8), not `!= None`
    with st.status("Transcribing audio"):
        # Persist the recorded bytes so the Whisper client can upload a file path.
        with open("audio.wav", "wb") as f:
            f.write(wav_audio_data)
        prompt = transcribe("audio.wav")
        st.write("Transcribed audio")
    # Echo the transcribed prompt as the user's message and record it.
    st.chat_message("human", avatar="🧑‍💻").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})
    # Ask the model and render its reply.
    response = predict(message=prompt)
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# React to user input typed into the chat box.
if textinput:
    prompt = textinput
    # Echo the user's message in the chat container and record it in history.
    st.chat_message("human", avatar="🧑‍💻").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})
    # Query the model (sidebar sliders intentionally not forwarded here,
    # matching the original call site).
    response = predict(message=prompt)
    # Render the assistant's reply and record it as well.
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
|