Update subgen.py

McCloudS
2024-02-10 21:23:38 -07:00
committed by GitHub
parent b018368d70
commit 815a2dd04c


@@ -48,7 +48,7 @@ path_mapping_from = os.getenv('PATH_MAPPING_FROM', '/tv')
 path_mapping_to = os.getenv('PATH_MAPPING_TO', '/Volumes/TV')
 model_location = os.getenv('MODEL_PATH', '.')
 transcribe_folders = os.getenv('TRANSCRIBE_FOLDERS', '')
-transcribe_or_translate = os.getenv('TRANSCRIBE_OR_TRANSLATE', 'translate')
+transcribe_or_translate = os.getenv('TRANSCRIBE_OR_TRANSLATE', 'transcribe')
 force_detected_language_to = os.getenv('FORCE_DETECTED_LANGUAGE_TO', '')
 hf_transformers = convert_to_bool(os.getenv('HF_TRANSFORMERS', False))
 hf_batch_size = int(os.getenv('HF_BATCH_SIZE', 24))
@@ -268,6 +268,7 @@ def start_model():
         model = stable_whisper.load_faster_whisper(whisper_model, download_root=model_location, device=transcribe_device, cpu_threads=whisper_threads, num_workers=concurrent_transcriptions, compute_type=compute_type)

 def delete_model():
+    global clear_vram_on_complete
     if clear_vram_on_complete:
         if len(files_to_transcribe) == 0:
             global model
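
For context, a minimal sketch of how the os.getenv default changed in the first hunk resolves; the demo values and print calls are illustrative only and are not part of the commit:

    import os

    # After this commit, an unset TRANSCRIBE_OR_TRANSLATE falls back to 'transcribe'.
    print(os.getenv('TRANSCRIBE_OR_TRANSLATE', 'transcribe'))  # -> 'transcribe' when unset

    # Anyone relying on the old default must now opt in to translation explicitly.
    os.environ['TRANSCRIBE_OR_TRANSLATE'] = 'translate'
    print(os.getenv('TRANSCRIBE_OR_TRANSLATE', 'transcribe'))  # -> 'translate'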