ghadaAlmuaikel committed on
Commit
fdd08ab
1 Parent(s): 57dff25

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -205,7 +205,7 @@ def process_best_match(best_match, language):
     best_story = best_match["Story"]
 
     # Translate to Arabic if the language is Arabic
-    if language == "Arabic":
+    if language == "Arabic" or language == "ar":
         best_story_translated = translate_story_to_arabic(best_story)
         info_html = f"<div dir='rtl' style='font-size: 18px; color: white; font-family: Arial, sans-serif;'>{best_story_translated}</div>"
         audio_file = text_to_speech_arabic(best_story_translated)
@@ -220,7 +220,8 @@ def compare_images(image, language):
 def compare_images(image, language):
     try:
         inputs = processor(images=image, return_tensors="pt")
-        image_features = model.get_image_features(**inputs)
+        inputs = {k: v.to(device) for k, v in inputs.items()}
+        image_features = model.get_image_features(**inputs).to(device)
 
         best_score = -2.0
         best_match_idx = None
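
The first hunk widens the Arabic branch so it also matches the ISO code "ar", not just the display name. If the language value comes from a UI selector it may also vary in case, so a slightly more defensive check (hypothetical, not part of this commit) could normalize the string first:

# Hypothetical helper; the commit itself only adds the extra "ar" comparison.
def is_arabic(language: str) -> bool:
    return language.strip().lower() in {"arabic", "ar"}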
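
The second hunk moves every tensor produced by the processor onto the same device as the model before calling get_image_features, which avoids CPU/GPU device-mismatch errors when the model runs on a GPU. A minimal sketch of that pattern, assuming the app sets up device, model, and processor roughly as below (the CLIP checkpoint name and the torch.no_grad() wrapper are illustrative, not shown in this commit):

import torch
from transformers import CLIPModel, CLIPProcessor

# Assumed setup: pick a device and move the model to it once at startup.
device = "cuda" if torch.cuda.is_available() else "cpu"
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)

def encode_image(image):
    # Preprocess on CPU, then move each input tensor to the model's device.
    inputs = processor(images=image, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():  # inference only, no gradients needed
        image_features = model.get_image_features(**inputs)
    return image_features

Note that once the inputs are on the model's device, the output of get_image_features is already on that device, so the trailing .to(device) the commit adds is effectively a no-op, though harmless.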