karthi311 committed (verified)
Commit c16939e · 1 Parent(s): 992b560

Update app.py

Files changed (1):
  1. app.py +13 -12
app.py CHANGED
@@ -1,13 +1,12 @@
 import torch
 import gradio as gr
-from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 from pydub import AudioSegment
 from sentence_transformers import SentenceTransformer, util
 import spacy
 spacy.cli.download("en_core_web_sm")
 import json
 from faster_whisper import WhisperModel
-import ollama
 
 # Audio conversion from MP4 to MP3
 def convert_mp4_to_mp3(mp4_path, mp3_path):
@@ -34,8 +33,8 @@ def load_faster_whisper():
 nlp = spacy.load("en_core_web_sm")
 embedder = SentenceTransformer("all-MiniLM-L6-v2")
 
-tokenizer = AutoTokenizer.from_pretrained("Mahalingam/DistilBart-Med-Summary")
-model = AutoModelForSeq2SeqLM.from_pretrained("Mahalingam/DistilBart-Med-Summary")
+tokenizer = AutoTokenizer.from_pretrained("aws-prototyping/MegaBeam-Mistral-7B-512k")
+model = AutoModelForCausalLM.from_pretrained("aws-prototyping/MegaBeam-Mistral-7B-512k")
 
 summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
 
@@ -47,12 +46,14 @@ soap_prompts = {
 }
 soap_embeddings = {section: embedder.encode(prompt, convert_to_tensor=True) for section, prompt in soap_prompts.items()}
 
-# Ollama Llama 2 Model Query function
-def ollama_query(user_prompt, soap_note):
+# Query function for MegaBeam-Mistral-7B
+def megabeam_query(user_prompt, soap_note):
     combined_prompt = f"User Instructions:\n{user_prompt}\n\nContext:\n{soap_note}"
     try:
-        response = ollama.chat(model="llama2:7b-uncensored", messages=[{"role": "user", "content": combined_prompt}])
-        return response['text']
+        inputs = tokenizer(combined_prompt, return_tensors="pt")
+        outputs = model.generate(**inputs, max_length=512)
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return response
     except Exception as e:
         return f"Error generating response: {e}"
 
@@ -139,7 +140,7 @@ def process_file(mp4_file, user_prompt):
     soap_note = soap_analysis(transcription)
     print("SOAP Notes: ", soap_note)
 
-    template_output = ollama_query(user_prompt, soap_note)
+    template_output = megabeam_query(user_prompt, soap_note)
     print("Template: ", template_output)
 
     json_output = convert_to_json(template_output)
@@ -151,7 +152,7 @@ def process_text(text, user_prompt):
     soap_note = soap_analysis(text)
     print(soap_note)
 
-    template_output = ollama_query(user_prompt, soap_note)
+    template_output = megabeam_query(user_prompt, soap_note)
    print(template_output)
     json_output = convert_to_json(template_output)
 
@@ -170,7 +171,7 @@ def launch_gradio():
         ],
         outputs=[
             gr.Textbox(label="SOAP Note"),
-            gr.Textbox(label="Generated Template from Llama 2"),
+            gr.Textbox(label="Generated Template from MegaBeam-Mistral-7B"),
             gr.Textbox(label="JSON Output"),
         ],
     )
@@ -183,7 +184,7 @@ def launch_gradio():
         ],
        outputs=[
             gr.Textbox(label="SOAP Note"),
-            gr.Textbox(label="Generated Template from Llama 2"),
+            gr.Textbox(label="Generated Template from MegaBeam-Mistral-7B"),
             gr.Textbox(label="JSON Output"),
         ],
     )
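
For reference, the new generation path introduced by this commit can be exercised on its own. Below is a minimal sketch, assuming the model id from the diff, enough GPU memory for a 7B checkpoint, and that accelerate is installed for device_map="auto"; the illustrative prompt text is hypothetical. One caveat worth noting: generate()'s max_length (as used in the commit) counts the prompt tokens too, while max_new_tokens bounds only the continuation.

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    # Model id taken from the diff; any causal-LM checkpoint follows the same path.
    model_id = "aws-prototyping/MegaBeam-Mistral-7B-512k"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto"  # assumes accelerate is installed
    )

    # Same prompt shape as megabeam_query() in the commit; the SOAP note is illustrative.
    combined_prompt = "User Instructions:\nFill the discharge template.\n\nContext:\nSubjective: ..."
    inputs = tokenizer(combined_prompt, return_tensors="pt").to(model.device)

    # max_new_tokens caps only the generated continuation; the commit's
    # max_length=512 also counts prompt tokens, so a long SOAP note can
    # leave little or no room for the answer.
    outputs = model.generate(**inputs, max_new_tokens=512)

    # Slice off the echoed prompt so only the model's continuation is returned;
    # decoding outputs[0] whole (as the commit does) includes the prompt text.
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    print(response)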