add queue tracking for Bazarr

General clean up: removed the old array that tracked in-flight items and moved that tracking onto the task queue, now covering Bazarr as well. Bazarr requests don't obey CONCURRENT_TRANSCRIPTIONS, which is probably for the best, since those requests have a fixed timeout they need to respond by.
Author: McCloudS
Date: 2024-04-20 11:28:26 -06:00
Committer: GitHub
Parent: 2e6bf94b09
Commit: 8a6e625365


@@ -109,7 +109,6 @@ update_env_variables()
 app = FastAPI()
 model = None
-files_to_transcribe = []
 in_docker = os.path.exists('/.dockerenv')
 docker_status = "Docker" if in_docker else "Standalone"
@@ -122,8 +121,11 @@ task_queue = queue.Queue()
 def transcription_worker():
     while True:
         task = task_queue.get()
-        gen_subtitles(task['path'], task['transcribe_or_translate'], task['force'],task['force_language'])
-        task_queue.task_done()
+        if 'Bazarr-' in task['path']:
+            logging.info(f"Skipping processing for {task['path']} as it is handled by ASR.")
+        else:
+            gen_subtitles(task['path'], task['transcribe_or_translate'], task['force_language'])
+            task_queue.task_done()
         # show queue
         logging.debug(f"There are {task_queue.qsize()} tasks left in the queue.")
@@ -309,7 +311,6 @@ def receive_tautulli_webhook(
             fullpath = file
             logging.debug("Path of file: " + fullpath)
-            # gen_subtitles(path_mapping(fullpath), transcribe_or_translate, True)
             gen_subtitles_queue(path_mapping(fullpath), transcribe_or_translate, True)
     else:
         return {
@@ -337,7 +338,6 @@ def receive_plex_webhook(
         fullpath = get_plex_file_name(plex_json['Metadata']['ratingKey'], plexserver, plextoken)
         logging.debug("Path of file: " + fullpath)
-        # gen_subtitles(path_mapping(fullpath), transcribe_or_translate, True)
         gen_subtitles_queue(path_mapping(fullpath), transcribe_or_translate, True)
         refresh_plex_metadata(plex_json['Metadata']['ratingKey'], plexserver, plextoken)
         logging.info(f"Metadata for item {plex_json['Metadata']['ratingKey']} refreshed successfully.")
@@ -363,7 +363,6 @@ def receive_jellyfin_webhook(
         fullpath = get_jellyfin_file_name(ItemId, jellyfinserver, jellyfintoken)
         logging.debug(f"Path of file: {fullpath}")
-        # gen_subtitles(path_mapping(fullpath), transcribe_or_translate, True)
         gen_subtitles_queue(path_mapping(fullpath), transcribe_or_translate, True)
         try:
             refresh_jellyfin_metadata(ItemId, jellyfinserver, jellyfintoken)
@@ -397,7 +396,6 @@ def receive_emby_webhook(
     if event == "library.new" and procaddedmedia or event == "playback.start" and procmediaonplay:
         logging.debug("Path of file: " + fullpath)
-        # gen_subtitles(path_mapping(fullpath), transcribe_or_translate, True)
         gen_subtitles_queue(path_mapping(fullpath), transcribe_or_translate, True)

     return ""
@@ -424,14 +422,17 @@ def asr(
     try:
         logging.info(f"Transcribing file from Bazarr/ASR webhook")
         result = None
-        random_name = random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=6)
+        random_name = ''.join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=6))
         if force_detected_language_to:
             language = force_detected_language_to
         start_time = time.time()
         start_model()
-        files_to_transcribe.insert(0, f"Bazarr-asr-{random_name}")
+        task_id = { 'path': f"Bazarr-asr-{random_name}" }
+        task_queue.put(task_id)

         audio_data = np.frombuffer(audio_file.file.read(), np.int16).flatten().astype(np.float32) / 32768.0
         if model_prompt:
             custom_prompt = greetings_translations.get(language, '') or custom_model_prompt
@@ -446,8 +447,7 @@ def asr(
     except Exception as e:
         logging.info(f"Error processing or transcribing Bazarr {audio_file.filename}: {e}")
     finally:
-        if f"Bazarr-asr-{random_name}" in files_to_transcribe:
-            files_to_transcribe.remove(f"Bazarr-asr-{random_name}")
+        task_queue.task_done()
         delete_model()
     if result:
         return StreamingResponse(
@@ -470,8 +470,11 @@ def detect_language(
     logging.info(f"Detect language is set to detect on the first {detect_language_length} seconds of the audio.")
     try:
         start_model()
-        random_name = random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=6)
-        files_to_transcribe.insert(0, f"Bazarr-detect-language-{random_name}")
+        random_name = ''.join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=6))
+        task_id = { 'path': f"Bazarr-detect-language-{random_name}" }
+        task_queue.put(task_id)

         audio_data = np.frombuffer(audio_file.file.read(), np.int16).flatten().astype(np.float32) / 32768.0
         detected_lang_code = model.transcribe_stable(whisper.pad_or_trim(audio_data, detect_language_length * 16000), input_sr=16000).language
@@ -479,8 +482,7 @@ def detect_language(
         logging.info(f"Error processing or transcribing Bazarr {audio_file.filename}: {e}")
     finally:
-        if f"Bazarr-detect-language-{random_name}" in files_to_transcribe:
-            files_to_transcribe.remove(f"Bazarr-detect-language-{random_name}")
+        task_queue.task_done()
         delete_model()

     return {"detected_language": whisper_languages.get(detected_lang_code, detected_lang_code) , "language_code": detected_lang_code}
@@ -492,7 +494,7 @@ def start_model():
         model = stable_whisper.load_faster_whisper(whisper_model, download_root=model_location, device=transcribe_device, cpu_threads=whisper_threads, num_workers=concurrent_transcriptions, compute_type=compute_type)

 def delete_model():
-    if clear_vram_on_complete and len(files_to_transcribe) == 0:
+    if clear_vram_on_complete and task_queue.qsize() == 0:
         global model
         logging.debug("Queue is empty, clearing/releasing VRAM")
         model = None
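
One subtlety in the VRAM gate above, an observation rather than something the commit states: `queue.Queue.qsize()` counts items still waiting in the queue and excludes a task the worker has already fetched but not finished, so the check approximates "no work pending" much like the old `len(files_to_transcribe) == 0` did. A sketch restating the new check, with that caveat spelled out in comments (the logic follows the diff; the comments are added here):

    def delete_model():
        # Release the model (and its VRAM) only when nothing is queued.
        # qsize() is advisory: it excludes a task already pulled by the
        # worker, so this mirrors the commit's behavior rather than a
        # strict "no work in flight" guarantee.
        global model
        if clear_vram_on_complete and task_queue.qsize() == 0:
            logging.debug("Queue is empty, clearing/releasing VRAM")
            model = None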
@@ -509,25 +511,17 @@ def write_lrc(result, file_path):
             fraction = int((segment.start - int(segment.start)) * 100)
             file.write(f"[{minutes:02d}:{seconds:02d}.{fraction:02d}] {segment.text}\n")

-def gen_subtitles(file_path: str, transcription_type: str, add_to_front=True, force_language=None) -> None:
+def gen_subtitles(file_path: str, transcription_type: str, force_language=None) -> None:
     """Generates subtitles for a video file.

     Args:
         file_path: str - The path to the video file.
         transcription_type: str - The type of transcription or translation to perform.
-        add_to_front: bool - Whether to add the file to the front of the transcription queue. Default is True.
         force_language: str - The language to force for transcription or translation. Default is None.
     """

     try:
-        if add_to_front:
-            files_to_transcribe.insert(0, file_path)
-        else:
-            files_to_transcribe.append(file_path)
         logging.info(f"Added {os.path.basename(file_path)} for transcription.")
-        #logging.info(f"{len(files_to_transcribe)} files in the queue for transcription")

         logging.info(f"Transcribing file: {os.path.basename(file_path)}")
         start_time = time.time()
@@ -562,21 +556,15 @@ def gen_subtitles(file_path: str, transcription_type: str, add_to_front=True, fo
         logging.info(f"Error processing or transcribing {file_path}: {e}")
     finally:
-        if file_path in files_to_transcribe:
-            files_to_transcribe.remove(file_path)
         delete_model()

-def gen_subtitles_queue(file_path: str, transcription_type: str, add_to_front=True, force_language=None) -> None:
+def gen_subtitles_queue(file_path: str, transcription_type: str, force_language=None) -> None:
     global task_queue

     if not has_audio(file_path):
         logging.debug(f"{file_path} doesn't have any audio to transcribe!")
         return
-    if file_path in files_to_transcribe:
-        logging.info(f"File {os.path.basename(file_path)} is already in the transcription list. Skipping.")
-        return

     message = None
     if has_subtitle_language(file_path, skipifinternalsublang):
         message = f"{file_path} already has an internal subtitle we want, skipping generation"
@@ -591,7 +579,6 @@ def gen_subtitles_queue(file_path: str, transcription_type: str, add_to_front=Tr
     task = {
         'path': file_path,
         'transcribe_or_translate': transcription_type,
-        'force': add_to_front,
         'force_language':force_language
     }
     task_queue.put(task)
@@ -766,8 +753,7 @@ if monitor:
             if has_audio(file_path):
                 # Call the gen_subtitles function
                 logging.info(f"File: {path_mapping(file_path)} was added")
-                # gen_subtitles(path_mapping(file_path), transcribe_or_translate, False)
-                gen_subtitles_queue(path_mapping(file_path), transcribe_or_translate, False)
+                gen_subtitles_queue(path_mapping(file_path), transcribe_or_translate)

     def on_created(self, event):
         self.create_subtitle(event)

     def on_modified(self, event):
@@ -782,13 +768,11 @@ def transcribe_existing(transcribe_folders, forceLanguage=None):
         for root, dirs, files in os.walk(path):
             for file in files:
                 file_path = os.path.join(root, file)
-                # gen_subtitles(path_mapping(file_path), transcribe_or_translate, False, forceLanguage)
-                gen_subtitles_queue(path_mapping(file_path), transcribe_or_translate, False, forceLanguage)
+                gen_subtitles_queue(path_mapping(file_path), transcribe_or_translate, forceLanguage)
         # if the path specified was actually a single file and not a folder, process it
         if os.path.isfile(path):
             if has_audio(path):
-                # gen_subtitles(path_mapping(path), transcribe_or_translate, False, forceLanguage)
-                gen_subtitles_queue(path_mapping(path), transcribe_or_translate, False, forceLanguage)
+                gen_subtitles_queue(path_mapping(path), transcribe_or_translate, forceLanguage)

     # Set up the observer to watch for new files
     if monitor:
         observer = Observer()