Update subgen.py
--- a/subgen.py
+++ b/subgen.py
@@ -59,7 +59,7 @@ append = convert_to_bool(os.getenv('APPEND', False))
 reload_script_on_change = convert_to_bool(os.getenv('RELOAD_SCRIPT_ON_CHANGE', False))
 model_prompt = os.getenv('USE_MODEL_PROMPT', 'False')
 custom_model_prompt = os.getenv('CUSTOM_MODEL_PROMPT', '')
-custom_parameters = os.getenv('CUSTOM_PARAMETERS', '')
+custom_regroup = os.getenv('CUSTOM_REGROUP', '')
 
 if transcribe_device == "gpu":
     transcribe_device = "cuda"
@@ -337,10 +337,10 @@ def asr(
     audio_data = np.frombuffer(audio_file.file.read(), np.int16).flatten().astype(np.float32) / 32768.0
     if model_prompt:
         custom_model_prompt = greetings_translations.get(language, '') or custom_model_prompt
-    if custom_parameters:
-        # Convert the string to a dictionary
-        params_dict = dict(param.split('=') for param in custom_parameters.split(', '))
-        result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt, **params_dict)
+    if custom_regroup:
+        result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt, regroup=custom_regroup)
+    else:
+        result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt)
     appendLine(result)
     elapsed_time = time.time() - start_time
     minutes, seconds = divmod(int(elapsed_time), 60)
@@ -440,10 +440,10 @@ def gen_subtitles(file_path: str, transcribe_or_translate: str, front=True, forc
     if force_detected_language_to:
         forceLanguage = force_detected_language_to
         logging.info(f"Forcing language to {forceLanguage}")
-    if custom_parameters:
-        # Convert the string to a dictionary
-        params_dict = dict(param.split('=') for param in custom_parameters.split(', '))
-        result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt, **params_dict)
+    if custom_regroup:
+        result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt, regroup=custom_regroup)
+    else:
+        result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt)
     appendLine(result)
     result.to_srt_vtt(get_file_name_without_extension(file_path) + subextension, word_level=word_level_highlight)
     elapsed_time = time.time() - start_time
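Taken together, the commit drops the CUSTOM_PARAMETERS string-splitting (which built a params_dict and expanded it into transcribe_stable) and instead forwards a single CUSTOM_REGROUP value as the regroup argument. A minimal sketch of the new branch, outside the diff; the behavior description of regroup reflects stable-ts accepting a boolean or regrouping-algorithm string, and the kwargs name here is illustrative only:

import os

# Illustrative only: an empty CUSTOM_REGROUP (the default) skips the branch,
# matching the else path in the diff above.
custom_regroup = os.getenv('CUSTOM_REGROUP', '')

extra_kwargs = {}
if custom_regroup:
    # Forwarded unchanged, i.e. model.transcribe_stable(..., regroup=custom_regroup)
    extra_kwargs['regroup'] = custom_regroup

# result = model.transcribe_stable(audio_data, task=task, input_sr=16000,
#                                  language=language, initial_prompt=custom_model_prompt,
#                                  **extra_kwargs)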