diff --git a/subgen.py b/subgen.py
index a75ea8f..9041ad2 100644
--- a/subgen.py
+++ b/subgen.py
@@ -59,7 +59,7 @@ append = convert_to_bool(os.getenv('APPEND', False))
 reload_script_on_change = convert_to_bool(os.getenv('RELOAD_SCRIPT_ON_CHANGE', False))
 model_prompt = os.getenv('USE_MODEL_PROMPT', 'False')
 custom_model_prompt = os.getenv('CUSTOM_MODEL_PROMPT', '')
-custom_parameters = os.getenv('CUSTOM_PARAMETERS', '')
+custom_regroup = os.getenv('CUSTOM_REGROUP', '')
 
 if transcribe_device == "gpu":
     transcribe_device = "cuda"
@@ -337,10 +337,10 @@ def asr(
         audio_data = np.frombuffer(audio_file.file.read(), np.int16).flatten().astype(np.float32) / 32768.0
         if model_prompt:
             custom_model_prompt = greetings_translations.get(language, '') or custom_model_prompt
-        if custom_parameters:
-            # Convert the string to a dictionary
-            params_dict = dict(param.split('=') for param in custom_parameters.split(', '))
-            result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt, **params_dict)
+        if custom_regroup:
+            result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt, regroup=custom_regroup)
+        else:
+            result = model.transcribe_stable(audio_data, task=task, input_sr=16000, language=language, progress_callback=progress, initial_prompt=custom_model_prompt)
         appendLine(result)
         elapsed_time = time.time() - start_time
         minutes, seconds = divmod(int(elapsed_time), 60)
@@ -440,10 +440,10 @@ def gen_subtitles(file_path: str, transcribe_or_translate: str, front=True, forc
         if force_detected_language_to:
             forceLanguage = force_detected_language_to
             logging.info(f"Forcing language to {forceLanguage}")
-        if custom_parameters:
-            # Convert the string to a dictionary
-            params_dict = dict(param.split('=') for param in custom_parameters.split(', '))
-            result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt, **params_dict)
+        if custom_regroup:
+            result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt, regroup=custom_regroup)
+        else:
+            result = model.transcribe_stable(file_path, language=forceLanguage, task=transcribe_or_translate, progress_callback=progress, initial_prompt=custom_model_prompt)
         appendLine(result)
         result.to_srt_vtt(get_file_name_without_extension(file_path) + subextension, word_level=word_level_highlight)
         elapsed_time = time.time() - start_time
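
For context, stable-ts accepts a regroup argument that can be a custom regrouping-algorithm string, and the new CUSTOM_REGROUP value is forwarded to it unchanged instead of being parsed into keyword arguments like the old CUSTOM_PARAMETERS string was. Below is a minimal standalone sketch of that pass-through, assuming stable-ts is installed; the model size, audio path, and the regroup string mentioned in the comment are illustrative placeholders, not values taken from this PR.

import os

import stable_whisper

# An unset or empty CUSTOM_REGROUP falls back to stable-ts's default regrouping.
# A custom regrouping-algorithm string (e.g. something like 'cm_sl=84_sl=42++++++1',
# shown here only as a placeholder) can be supplied via the environment instead.
custom_regroup = os.getenv('CUSTOM_REGROUP', '')

model = stable_whisper.load_model('base')  # placeholder model size

if custom_regroup:
    # Forward the regrouping-algorithm string straight through to stable-ts.
    result = model.transcribe('audio.wav', regroup=custom_regroup)
else:
    # No custom string supplied: let stable-ts apply its default regrouping.
    result = model.transcribe('audio.wav')

result.to_srt_vtt('audio.srt', word_level=False)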