Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -4,8 +4,9 @@ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
|
|
4 |
from pydub import AudioSegment
|
5 |
from sentence_transformers import SentenceTransformer, util
|
6 |
import spacy
|
7 |
-
spacy.cli
|
8 |
import json
|
|
|
9 |
import ollama
|
10 |
|
11 |
# Audio conversion from MP4 to MP3
|
@@ -50,8 +51,8 @@ soap_embeddings = {section: embedder.encode(prompt, convert_to_tensor=True) for
|
|
50 |
def ollama_query(user_prompt, soap_note):
|
51 |
combined_prompt = f"User Instructions:\n{user_prompt}\n\nContext:\n{soap_note}"
|
52 |
try:
|
53 |
-
response = ollama.
|
54 |
-
return response
|
55 |
except Exception as e:
|
56 |
return f"Error generating response: {e}"
|
57 |
|
|
|
4 |
# Third-party imports: audio handling, sentence embeddings, NLP, transcription, local LLM.
from pydub import AudioSegment
from sentence_transformers import SentenceTransformer, util
import spacy

# Ensure the small English spaCy pipeline is present before it is loaded.
# Fixed: the original line read `spacy.cli download("en_core_web_sm")` —
# a missing attribute dot, which is a SyntaxError. The correct call is
# spacy.cli.download(...). NOTE(review): downloading at import time hits the
# network on every cold start; consider guarding with a try/except around
# spacy.load("en_core_web_sm") instead — TODO confirm deployment constraints.
spacy.cli.download("en_core_web_sm")

import json
from faster_whisper import WhisperModel
import ollama

# Audio conversion from MP4 to MP3
|
|
|
51 |
def ollama_query(user_prompt, soap_note):
    """Send the user's instructions plus SOAP-note context to a local Ollama model.

    Parameters
    ----------
    user_prompt : str
        Free-form instructions from the user.
    soap_note : str
        SOAP note text supplied as context for the model.

    Returns
    -------
    str
        The model's reply text on success, or a human-readable
        "Error generating response: ..." string if the call fails
        (daemon not running, model not pulled, bad response, etc.).
    """
    combined_prompt = f"User Instructions:\n{user_prompt}\n\nContext:\n{soap_note}"
    try:
        response = ollama.chat(
            model="llama2:7b-uncensored",
            messages=[{"role": "user", "content": combined_prompt}],
        )
        # Fixed: ollama.chat() puts the reply at ['message']['content'];
        # the original read response['text'], which raises KeyError and made
        # every call fall through to the error branch below.
        return response['message']['content']
    except Exception as e:
        # Best-effort by design: surface the failure as a string rather than
        # crashing the app UI. Preserves the original error-message format.
        return f"Error generating response: {e}"
|
58 |
|