from typing import Any, Dict, Optional
WORDING : Dict[str, Any] =\
{
'conda_not_activated': 'Conda is not activated',
'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',
'ffmpeg_not_installed': 'FFmpeg is not installed',
'creating_temp': 'Creating temporary resources',
'extracting_frames': 'Extracting frames with a resolution of {resolution} and {fps} frames per second',
'extracting_frames_succeed': 'Extracting frames succeeded',
'extracting_frames_failed': 'Extracting frames failed',
'analysing': 'Analysing',
'processing': 'Processing',
'downloading': 'Downloading',
'temp_frames_not_found': 'Temporary frames not found',
'copying_image': 'Copying image with a resolution of {resolution}',
'copying_image_succeed': 'Copying image succeeded',
'copying_image_failed': 'Copying image failed',
'finalizing_image': 'Finalizing image with a resolution of {resolution}',
'finalizing_image_succeed': 'Finalizing image succeeded',
'finalizing_image_skipped': 'Finalizing image skipped',
'merging_video': 'Merging video with a resolution of {resolution} and {fps} frames per second',
'merging_video_succeed': 'Merging video succeeded',
'merging_video_failed': 'Merging video failed',
'skipping_audio': 'Skipping audio',
'restoring_audio_succeed': 'Restoring audio succeeded',
'restoring_audio_skipped': 'Restoring audio skipped',
'clearing_temp': 'Clearing temporary resources',
'processing_stopped': 'Processing stopped',
'processing_image_succeed': 'Processing to image succeeded in {seconds} seconds',
'processing_image_failed': 'Processing to image failed',
'processing_video_succeed': 'Processing to video succeeded in {seconds} seconds',
'processing_video_failed': 'Processing to video failed',
'model_download_not_done': 'The model download is not complete',
'model_file_not_present': 'The model file is not present',
'select_image_source': 'Select an image for source path',
'select_audio_source': 'Select an audio for source path',
'select_video_target': 'Select a video for target path',
'select_image_or_video_target': 'Select an image or video for target path',
'select_file_or_directory_output': 'Select a file or directory for output path',
'no_source_face_detected': 'No source face detected',
'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded',
'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly',
'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',
'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly',
'stream_not_loaded': 'Stream {stream_mode} could not be loaded',
'point': '.',
'comma': ',',
'colon': ':',
'question_mark': '?',
'exclamation_mark': '!',
'help':
{
# installer
'install_dependency': 'select the variant of {dependency} to install',
'skip_conda': 'skip the conda environment check',
# general
'source': 'choose single or multiple source images or audios',
'target': 'choose single target image or video',
'output': 'specify the output file or directory',
# misc
'force_download': 'force automatic downloads and exit',
'skip_download': 'omit automatic downloads and remote lookups',
'headless': 'run the program without a user interface',
'log_level': 'adjust the message severity displayed in the terminal',
# execution
'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)',
'execution_thread_count': 'specify the number of parallel threads while processing',
'execution_queue_count': 'specify the number of frames each thread is processing',
# memory
'video_memory_strategy': 'balance fast frame processing and low VRAM usage',
'system_memory_limit': 'limit the available RAM that can be used while processing',
# face analyser
'face_analyser_order': 'specify the order in which the face analyser detects faces',
'face_analyser_age': 'filter the detected faces based on their age',
'face_analyser_gender': 'filter the detected faces based on their gender',
'face_detector_model': 'choose the model responsible for detecting the face',
'face_detector_size': 'specify the size of the frame provided to the face detector',
'face_detector_score': 'filter the detected faces based on the confidence score',
'face_landmarker_score': 'filter the detected landmarks based on the confidence score',
# face selector
'face_selector_mode': 'use reference based tracking or simple matching',
'reference_face_position': 'specify the position used to create the reference face',
'reference_face_distance': 'specify the desired similarity between the reference face and target face',
'reference_frame_number': 'specify the frame used to create the reference face',
# face mask
'face_mask_types': 'mix and match different face mask types (choices: {choices})',
'face_mask_blur': 'specify the degree of blur applied to the box mask',
'face_mask_padding': 'apply top, right, bottom and left padding to the box mask',
'face_mask_regions': 'choose the facial features used for the region mask (choices: {choices})',
# frame extraction
'trim_frame_start': 'specify the start frame of the target video',
'trim_frame_end': 'specify the end frame of the target video',
'temp_frame_format': 'specify the temporary resources format',
'keep_temp': 'keep the temporary resources after processing',
# output creation
'output_image_quality': 'specify the image quality which translates to the compression factor',
'output_image_resolution': 'specify the image output resolution based on the target image',
'output_video_encoder': 'specify the encoder used for the video compression',
'output_video_preset': 'balance fast video processing and video file size',
'output_video_quality': 'specify the video quality which translates to the compression factor',
'output_video_resolution': 'specify the video output resolution based on the target video',
'output_video_fps': 'specify the video output fps based on the target video',
'skip_audio': 'omit the audio from the target video',
# frame processors
'frame_processors': 'load a single or multiple frame processors (choices: {choices}, ...)',
'face_debugger_items': 'load a single or multiple face debugger items (choices: {choices})',
'face_enhancer_model': 'choose the model responsible for enhancing the face',
'face_enhancer_blend': 'blend the enhanced face into the previous face',
'face_swapper_model': 'choose the model responsible for swapping the face',
'frame_colorizer_model': 'choose the model responsible for colorizing the frame',
'frame_colorizer_blend': 'blend the colorized frame into the previous frame',
'frame_colorizer_size': 'specify the size of the frame provided to the frame colorizer',
'frame_enhancer_model': 'choose the model responsible for enhancing the frame',
'frame_enhancer_blend': 'blend the enhanced frame into the previous frame',
'lip_syncer_model': 'choose the model responsible for syncing the lips',
# uis
'ui_layouts': 'launch a single or multiple UI layouts (choices: {choices}, ...)'
},
'uis':
{
# general
'start_button': 'START',
'stop_button': 'STOP',
'clear_button': 'CLEAR',
# about
'donate_button': 'DONATE',
# benchmark
'benchmark_results_dataframe': 'BENCHMARK RESULTS',
# benchmark options
'benchmark_runs_checkbox_group': 'BENCHMARK RUNS',
'benchmark_cycles_slider': 'BENCHMARK CYCLES',
# common options
'common_options_checkbox_group': 'OPTIONS',
# execution
'execution_providers_checkbox_group': 'EXECUTION PROVIDERS',
# execution queue count
'execution_queue_count_slider': 'EXECUTION QUEUE COUNT',
# execution thread count
'execution_thread_count_slider': 'EXECUTION THREAD COUNT',
# face analyser
'face_analyser_order_dropdown': 'FACE ANALYSER ORDER',
'face_analyser_age_dropdown': 'FACE ANALYSER AGE',
'face_analyser_gender_dropdown': 'FACE ANALYSER GENDER',
'face_detector_model_dropdown': 'FACE DETECTOR MODEL',
'face_detector_size_dropdown': 'FACE DETECTOR SIZE',
'face_detector_score_slider': 'FACE DETECTOR SCORE',
'face_landmarker_score_slider': 'FACE LANDMARKER SCORE',
# face masker
'face_mask_types_checkbox_group': 'FACE MASK TYPES',
'face_mask_blur_slider': 'FACE MASK BLUR',
'face_mask_padding_top_slider': 'FACE MASK PADDING TOP',
'face_mask_padding_right_slider': 'FACE MASK PADDING RIGHT',
'face_mask_padding_bottom_slider': 'FACE MASK PADDING BOTTOM',
'face_mask_padding_left_slider': 'FACE MASK PADDING LEFT',
'face_mask_region_checkbox_group': 'FACE MASK REGIONS',
# face selector
'face_selector_mode_dropdown': 'FACE SELECTOR MODE',
'reference_face_gallery': 'REFERENCE FACE',
'reference_face_distance_slider': 'REFERENCE FACE DISTANCE',
# frame processors
'frame_processors_checkbox_group': 'FRAME PROCESSORS',
# frame processors options
'face_debugger_items_checkbox_group': 'FACE DEBUGGER ITEMS',
'face_enhancer_model_dropdown': 'FACE ENHANCER MODEL',
'face_enhancer_blend_slider': 'FACE ENHANCER BLEND',
'face_swapper_model_dropdown': 'FACE SWAPPER MODEL',
'frame_colorizer_model_dropdown': 'FRAME COLORIZER MODEL',
'frame_colorizer_blend_slider': 'FRAME COLORIZER BLEND',
'frame_colorizer_size_dropdown': 'FRAME COLORIZER SIZE',
'frame_enhancer_model_dropdown': 'FRAME ENHANCER MODEL',
'frame_enhancer_blend_slider': 'FRAME ENHANCER BLEND',
'lip_syncer_model_dropdown': 'LIP SYNCER MODEL',
# memory
'video_memory_strategy_dropdown': 'VIDEO MEMORY STRATEGY',
'system_memory_limit_slider': 'SYSTEM MEMORY LIMIT',
# output
'output_image_or_video': 'OUTPUT',
# output options
'output_path_textbox': 'OUTPUT PATH',
'output_image_quality_slider': 'OUTPUT IMAGE QUALITY',
'output_image_resolution_dropdown': 'OUTPUT IMAGE RESOLUTION',
'output_video_encoder_dropdown': 'OUTPUT VIDEO ENCODER',
'output_video_preset_dropdown': 'OUTPUT VIDEO PRESET',
'output_video_quality_slider': 'OUTPUT VIDEO QUALITY',
'output_video_resolution_dropdown': 'OUTPUT VIDEO RESOLUTION',
'output_video_fps_slider': 'OUTPUT VIDEO FPS',
# preview
'preview_image': 'PREVIEW',
'preview_frame_slider': 'PREVIEW FRAME',
# source
'source_file': 'SOURCE',
# target
'target_file': 'TARGET',
# temp frame
'temp_frame_format_dropdown': 'TEMP FRAME FORMAT',
# trim frame
'trim_frame_start_slider': 'TRIM FRAME START',
'trim_frame_end_slider': 'TRIM FRAME END',
# webcam
'webcam_image': 'WEBCAM',
# webcam options
'webcam_mode_radio': 'WEBCAM MODE',
'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',
'webcam_fps_slider': 'WEBCAM FPS'
}
}
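
# Formatting sketch (illustrative, not part of the module): most entries embed
# str.format() placeholders such as {resolution}, {fps}, {seconds} and {choices}
# that the caller is expected to fill in, for example:
# WORDING.get('extracting_frames').format(resolution = '1920x1080', fps = 25)
# -> 'Extracting frames with a resolution of 1920x1080 and 25 frames per second'
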
def get(key : str) -> Optional[str]:
	if '.' in key:
		section, name = key.split('.')
		if section in WORDING and name in WORDING[section]:
			return WORDING[section][name]
	if key in WORDING:
		return WORDING[key]
	return None
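

# Minimal usage sketch (illustrative, not part of the module): top-level keys
# resolve directly, while dotted keys such as 'help.source' address an entry
# inside a nested section of WORDING.
if __name__ == '__main__':
	assert get('ffmpeg_not_installed') == WORDING['ffmpeg_not_installed']
	assert get('help.source') == WORDING['help']['source']
	assert get('unknown.key') is None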