Cleaning up globals

This commit is contained in:
McCloudS
2024-09-30 22:44:28 -06:00
committed by GitHub
parent 8e73530070
commit 09968003d2

View File

def convert_to_bool(in_bool):
    """Interpret *in_bool* (any type) as a boolean flag.

    The value is stringified and lower-cased, then matched against the
    accepted truthy spellings; anything else — including None, 0, and
    unrecognised strings — yields False.
    """
    truthy_spellings = ('true', 'on', '1', 'y', 'yes')
    return str(in_bool).lower() in truthy_spellings
# --- Runtime configuration -------------------------------------------------
# Each setting is read once from the environment at import time; the default
# in the second argument applies when the variable is unset.
plextoken = os.getenv('PLEXTOKEN', 'token here')
plexserver = os.getenv('PLEXSERVER', 'http://192.168.1.111:32400')
jellyfintoken = os.getenv('JELLYFINTOKEN', 'token here')
jellyfinserver = os.getenv('JELLYFINSERVER', 'http://192.168.1.111:8096')
whisper_model = os.getenv('WHISPER_MODEL', 'medium')
whisper_threads = int(os.getenv('WHISPER_THREADS', 4))
concurrent_transcriptions = int(os.getenv('CONCURRENT_TRANSCRIPTIONS', 2))
transcribe_device = os.getenv('TRANSCRIBE_DEVICE', 'cpu')
procaddedmedia = convert_to_bool(os.getenv('PROCADDEDMEDIA', True))
procmediaonplay = convert_to_bool(os.getenv('PROCMEDIAONPLAY', True))
namesublang = os.getenv('NAMESUBLANG', 'aa')
skipifinternalsublang = os.getenv('SKIPIFINTERNALSUBLANG', 'eng')
webhookport = int(os.getenv('WEBHOOKPORT', 9000))
word_level_highlight = convert_to_bool(os.getenv('WORD_LEVEL_HIGHLIGHT', False))
debug = convert_to_bool(os.getenv('DEBUG', True))
use_path_mapping = convert_to_bool(os.getenv('USE_PATH_MAPPING', False))
path_mapping_from = os.getenv('PATH_MAPPING_FROM', r'/tv')
path_mapping_to = os.getenv('PATH_MAPPING_TO', r'/Volumes/TV')
model_location = os.getenv('MODEL_PATH', './models')
monitor = convert_to_bool(os.getenv('MONITOR', False))
transcribe_folders = os.getenv('TRANSCRIBE_FOLDERS', '')
transcribe_or_translate = os.getenv('TRANSCRIBE_OR_TRANSLATE', 'transcribe')
force_detected_language_to = os.getenv('FORCE_DETECTED_LANGUAGE_TO', '').lower()
clear_vram_on_complete = convert_to_bool(os.getenv('CLEAR_VRAM_ON_COMPLETE', True))
compute_type = os.getenv('COMPUTE_TYPE', 'auto')
append = convert_to_bool(os.getenv('APPEND', False))
reload_script_on_change = convert_to_bool(os.getenv('RELOAD_SCRIPT_ON_CHANGE', False))
# NOTE(review): kept as the raw string ('False' by default) rather than a
# bool — downstream code may compare against the string; confirm before
# switching this to convert_to_bool().
model_prompt = os.getenv('USE_MODEL_PROMPT', 'False')
custom_model_prompt = os.getenv('CUSTOM_MODEL_PROMPT', '')
lrc_for_audio_files = convert_to_bool(os.getenv('LRC_FOR_AUDIO_FILES', True))
custom_regroup = os.getenv('CUSTOM_REGROUP', 'cm_sl=84_sl=42++++++1')
# Fix: wrap in int() so the value is numeric whether or not the env var is
# set (previously an env override produced a str while the default was int,
# unlike webhookport / whisper_threads above).
detect_language_length = int(os.getenv('DETECT_LANGUAGE_LENGTH', 30))
skipifexternalsub = convert_to_bool(os.getenv('SKIPIFEXTERNALSUB', False))

# SUBGEN_KWARGS may hold a Python dict literal of extra keyword arguments.
try:
    kwargs = ast.literal_eval(os.getenv('SUBGEN_KWARGS', '{}') or '{}')
except (ValueError, SyntaxError):
    # Fix: ast.literal_eval raises SyntaxError (not ValueError) on malformed
    # input such as '{bad'; catch both so a bad env var cannot crash startup.
    kwargs = {}
    logging.info("kwargs (SUBGEN_KWARGS) is an invalid dictionary, defaulting to empty '{}'")
# "gpu" is accepted as a user-friendly alias for the CUDA device name.
if transcribe_device == "gpu":
    transcribe_device = "cuda"

# Subtitle file suffixes embed the base model name (the part before the
# first '.', e.g. "medium" from "medium.en") and the configured language tag;
# the SDH variant adds a ".sdh" marker before the ".srt" extension.
subextension = f".subgen.{whisper_model.partition('.')[0]}.{namesublang}.srt"
subextensionSDH = f".subgen.{whisper_model.partition('.')[0]}.{namesublang}.sdh.srt"
# FastAPI application instance that serves the webhook endpoints.
app = FastAPI()
# Transcription model handle; no model is loaded at import time — presumably
# assigned later in the file when first needed (TODO confirm).
model = None
@@ -103,7 +90,7 @@ docker_status = "Docker" if in_docker else "Standalone"
# NOTE(review): presumably records when status was last printed; starts unset.
last_print_time = None

# start queue
# FIFO job queue consumed by transcription_worker (defined below).
task_queue = queue.Queue()
def transcription_worker(): def transcription_worker():