import spaces
import torch
import gradio as gr
from transformers import pipeline
from llama_cpp import Llama
import os

MODEL_NAME = "openai/whisper-large-v3-turbo"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000

device = 0 if torch.cuda.is_available() else "cpu"

# Initialize the Whisper transcription pipeline
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
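# chunk_length_s=30 enables chunked long-form transcription: the pipeline splits
# the audio into 30-second windows (the length Whisper handles natively), with
# overlap, and stitches the chunk transcriptions back together.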

# Load the Llama model for SOAP note generation
llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")
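# NOTE: "model.gguf" is assumed to be a chat-tuned, quantized GGUF checkpoint
# committed to the root of this Space (the relative path is resolved against the
# working directory); substitute the filename of whichever model you actually use.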

# Prompts for SOAP note generation
sys_prompt = "You are a world-class clinical assistant."
task_prompt = """
Convert the following transcribed conversation into a clinical SOAP note.
The text includes dialogue between a physician and a patient. Clearly distinguish between the physician's and the patient's statements.
Extract and organize the information into the relevant sections of a SOAP note:
- Subjective (symptoms and patient statements),
- Objective (clinical findings and observations; these may be missing if the physician has not conducted a physical exam or has not verbally stated findings),
- Assessment (diagnosis or potential diagnoses; list the top 5 most likely diagnoses based on the subjective findings, incorporating objective findings where available),
- Plan (treatment and follow-up).
Ensure the note is concise, clear, and accurately reflects the conversation.
"""

# Transcribe audio inputs with Whisper; the ZeroGPU decorator requests GPU time
# for each call (this Space runs on ZeroGPU hardware)
@spaces.GPU
def transcribe(inputs, task):
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
    return text
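# Example (local sanity check; "sample_visit.wav" is a placeholder path):
#   print(transcribe("sample_visit.wav", "transcribe"))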

# Generate a SOAP note from a transcript using the Llama model
def generate_soap(transcribed_text):
    messages = [{"role": "system", "content": sys_prompt}]
    messages.append({"role": "user", "content": f"{task_prompt}\n{transcribed_text}"})
    # Stream the completion and accumulate the chunks into a single string
    stream_response = llm.create_chat_completion(messages=messages, temperature=0.7, max_tokens=2048, stream=True)
    response = ""
    for chunk in stream_response:
        if "content" in chunk["choices"][0]["delta"]:
            response += chunk["choices"][0]["delta"]["content"]
    return response
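# Example (hypothetical dialogue snippet):
#   note = generate_soap("Physician: What brings you in today? Patient: I've had a cough for two weeks...")
#   print(note)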

# Gradio interfaces for the different inputs
demo = gr.Blocks(theme=gr.themes.Ocean())

# Interface for microphone transcription
mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    title="Audio Transcribe",
    description="Transcribe long-form microphone or audio inputs.",
)

# Interface for file-upload transcription
file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath", label="Audio file"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    title="Audio Transcribe",
)

# SOAP note generation interface
soap_note = gr.Interface(
    fn=generate_soap,
    inputs="text",
    outputs="text",
    title="Generate Clinical SOAP Note",
    description="Convert a transcribed conversation into a clinical SOAP note with structured sections (Subjective, Objective, Assessment, Plan).",
)

# Tabbed transcription UI with the SOAP note interface rendered below it
with demo:
    gr.TabbedInterface([mf_transcribe, file_transcribe], ["Microphone", "Audio file"])
    # Paste the transcript from either tab into the SOAP note box below to
    # generate the note
    soap_note.render()
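
# Optional sketch (an assumption, not wired up in this Space): connect the
# transcript directly into SOAP generation with explicit Blocks events, so the
# note regenerates whenever a new transcript arrives:
#
#   with gr.Blocks() as wired_demo:
#       audio_in = gr.Audio(sources=["upload"], type="filepath", label="Audio file")
#       transcript = gr.Textbox(label="Transcript")
#       note = gr.Textbox(label="SOAP note")
#       audio_in.change(lambda a: transcribe(a, "transcribe"), audio_in, transcript)
#       transcript.change(generate_soap, transcript, note)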

demo.queue().launch(ssr_mode=False)