RASMUS committed on
Commit
097264f
·
verified ·
1 Parent(s): 34fc62a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -37
app.py CHANGED
@@ -24,6 +24,7 @@ import time
24
  from pytube import YouTube
25
 
26
  headers = {'Authorization': os.environ['DeepL_API_KEY']}
 
27
 
28
 
29
  import torch
@@ -36,10 +37,10 @@ combined_models = []
36
  combined_models.extend(whisper_models)
37
  combined_models.extend(custom_models)
38
 
39
- usage = requests.get('https://api.deepl.com/v2/usage', headers=headers)
40
- usage = json.loads(usage.text)
41
- deepL_character_usage = str(usage['character_count'])
42
- print("deepL_character_usage")
43
 
44
 
45
 
@@ -219,6 +220,7 @@ def get_youtube(video_url):
219
 
220
  def speech_to_text(video_file_path, selected_source_lang, whisper_model):
221
  """
 
222
  # Youtube with translated subtitles using OpenAI Whisper and Opus-MT models.
223
  # Currently supports only English audio
224
  This space allows you to:
@@ -301,18 +303,18 @@ def speech_to_text(video_file_path, selected_source_lang, whisper_model):
301
  print("Error creating srt df")
302
 
303
 
304
- try:
305
- usage = requests.get('https://api-free.deepl.com/v2/usage', headers=headers)
306
- usage = json.loads(usage.text)
307
- char_count = str(usage['character_count'])
308
 
309
- print('Usage is at: ' + str(usage['character_count']) + ' characters')
310
 
311
- if usage['character_count'] >= 490000:
312
- print("USAGE CLOSE TO LIMIT")
313
 
314
- except Exception as e:
315
- print('Error with DeepL API requesting usage count')
316
 
317
 
318
  return df
@@ -341,32 +343,32 @@ def translate_transcriptions(df, selected_translation_lang_2):
341
  'tag_spitting': 'xml',
342
  'target_lang': DeepL_language_codes_for_translation.get(selected_translation_lang_2)
343
  }
344
- try:
345
-
346
- usage = requests.get('https://api-free.deepl.com/v2/usage', headers=headers)
347
- usage = json.loads(usage.text)
348
- deepL_character_usage = str(usage['character_count'])
349
- try:
350
- print('Usage is at: ' + deepL_character_usage + 'characters')
351
- except Exception as e:
352
- print(e)
353
 
354
- if int(deepL_character_usage) <= 490000:
355
- print("STILL CHARACTERS LEFT")
356
- response = requests.post('https://api-free.deepl.com/v2/translate', headers=headers, data=data)
 
 
 
 
 
 
 
 
357
 
358
- # Print the response from the server
359
- translated_sentences = json.loads(response.text)
360
- translated_sentences = translated_sentences['translations'][0]['text'].split('\n')
361
- df['translation'] = translated_sentences
362
-
363
- else:
364
- df['translation'] = df['text']
365
-
366
- except Exception as e:
367
- print("EXCEPTION WITH DEEPL API")
368
- print(e)
369
- df['translation'] = df['text']
370
 
371
  print("translations done")
372
 
@@ -576,6 +578,7 @@ with demo:
576
  If spoken language is not in the list, translation might not work. In this case original transcriptions are used.
577
  ''')
578
  gr.Markdown(f'''
 
579
  DeepL API character usage:
580
  {deepL_character_usage if deepL_character_usage is not None else ''}/500 000 characters
581
  If usage is over 490 000 characters original transcriptions will be used for subtitles. This value might not properly update so if you get transcriptions in original language that might be the reason.
 
24
  from pytube import YouTube
25
 
26
  headers = {'Authorization': os.environ['DeepL_API_KEY']}
27
+ deepL_character_usage = 'UNKOWN'
28
 
29
 
30
  import torch
 
37
  combined_models.extend(whisper_models)
38
  combined_models.extend(custom_models)
39
 
40
+ #usage = requests.get('https://api.deepl.com/v2/usage', headers=headers)
41
+ #usage = json.loads(usage.text)
42
+ #deepL_character_usage = str(usage['character_count'])
43
+ #print("deepL_character_usage")
44
 
45
 
46
 
 
220
 
221
  def speech_to_text(video_file_path, selected_source_lang, whisper_model):
222
  """
223
+ # PLEASE NOTE CURRENTLY TRANSLATIONS DO NOT WORK BECAUSE OF ISSUES WITH DEEPL
224
  # Youtube with translated subtitles using OpenAI Whisper and Opus-MT models.
225
  # Currently supports only English audio
226
  This space allows you to:
 
303
  print("Error creating srt df")
304
 
305
 
306
+ # try:
307
+ # #usage = requests.get('https://api-free.deepl.com/v2/usage', headers=headers)
308
+ # #usage = json.loads(usage.text)
309
+ # #char_count = str(usage['character_count'])
310
 
311
+ # #print('Usage is at: ' + str(usage['character_count']) + ' characters')
312
 
313
+ # #if usage['character_count'] >= 490000:
314
+ # # print("USAGE CLOSE TO LIMIT")
315
 
316
+ # except Exception as e:
317
+ # print('Error with DeepL API requesting usage count')
318
 
319
 
320
  return df
 
343
  'tag_spitting': 'xml',
344
  'target_lang': DeepL_language_codes_for_translation.get(selected_translation_lang_2)
345
  }
346
+ # try:
 
 
 
 
 
 
 
 
347
 
348
+ # #usage = requests.get('https://api-free.deepl.com/v2/usage', headers=headers)
349
+ # #usage = json.loads(usage.text)
350
+ # #deepL_character_usage = str(usage['character_count'])
351
+ # #try:
352
+ # # print('Usage is at: ' + deepL_character_usage + 'characters')
353
+ # #except Exception as e:
354
+ # # print(e)
355
+ # #
356
+ # #if int(deepL_character_usage) <= 490000:
357
+ # # print("STILL CHARACTERS LEFT")
358
+ # # response = requests.post('https://api-free.deepl.com/v2/translate', headers=headers, data=data)
359
 
360
+ # # Print the response from the server
361
+ # translated_sentences = json.loads(response.text)
362
+ # translated_sentences = translated_sentences['translations'][0]['text'].split('\n')
363
+ # df['translation'] = translated_sentences
364
+
365
+ # else:
366
+ # df['translation'] = df['text']
367
+
368
+ # except Exception as e:
369
+ print("EXCEPTION WITH DEEPL API")
370
+ print(e)
371
+ df['translation'] = df['text']
372
 
373
  print("translations done")
374
 
 
578
  If spoken language is not in the list, translation might not work. In this case original transcriptions are used.
579
  ''')
580
  gr.Markdown(f'''
581
+ DEEPL DOES NOT WORK CURRENTLY, Sorry for inconvenience
582
  DeepL API character usage:
583
  {deepL_character_usage if deepL_character_usage is not None else ''}/500 000 characters
584
  If usage is over 490 000 characters original transcriptions will be used for subtitles. This value might not properly update so if you get transcriptions in original language that might be the reason.