diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1bfc14e
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,177 @@
+# Initially taken from GitHub's Python gitignore file
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# tests and logs
+tests/fixtures/cached_*_text.txt
+logs/
+lightning_logs/
+lang_code_data/
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# vscode
+.vs
+.vscode
+
+# Pycharm
+.idea
+
+# TF code
+tensorflow_code
+
+# Models
+proc_data
+
+# examples
+runs
+/runs_old
+/wandb
+/examples/runs
+/examples/**/*.args
+/examples/rag/sweep
+
+# data
+/data
+serialization_dir
+
+# emacs
+*.*~
+debug.env
+
+# vim
+.*.swp
+
+#ctags
+tags
+
+# pre-commit
+.pre-commit*
+
+# .lock
+*.lock
+
+# DS_Store (MacOS)
+.DS_Store
+
+# ruff
+.ruff_cache
+
+# our proj
+/output/
+/outputs/
+/checkpoint/
+/checkpoints/
+exp
+.gradio/
diff --git a/demo/gradio_demo.py b/demo/gradio_demo.py
new file mode 100644
index 0000000..ab0d01b
--- /dev/null
+++ b/demo/gradio_demo.py
@@ -0,0 +1,1174 @@
+"""
+VibeVoice Gradio Demo - High-Quality Dialogue Generation Interface with Streaming Support
+"""
+
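+# Example launch command (model path is illustrative):
+#   python demo/gradio_demo.py --model_path microsoft/VibeVoice-1.5b --port 7860
+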
+import argparse
+import json
+import os
+import sys
+import tempfile
+import time
+from pathlib import Path
+from typing import List, Dict, Any, Iterator
+from datetime import datetime
+import threading
+import numpy as np
+import gradio as gr
+import librosa
+import soundfile as sf
+import torch
+import traceback
+
+from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
+from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
+from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
+from vibevoice.modular.streamer import AudioStreamer
+from transformers.utils import logging
+from transformers import set_seed
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+class VibeVoiceDemo:
+ def __init__(self, model_path: str, device: str = "cuda", inference_steps: int = 5):
+ """Initialize the VibeVoice demo with model loading."""
+ self.model_path = model_path
+ self.device = device
+ self.inference_steps = inference_steps
+ self.is_generating = False # Track generation state
+ self.stop_generation = False # Flag to stop generation
+ self.current_streamer = None # Track current audio streamer
+ self.load_model()
+ self.setup_voice_presets()
+ self.load_example_scripts() # Load example scripts
+
+ def load_model(self):
+ """Load the VibeVoice model and processor."""
+ print(f"Loading processor & model from {self.model_path}")
+
+ # Load processor
+ self.processor = VibeVoiceProcessor.from_pretrained(
+ self.model_path,
+ )
+
+ # Load model
+ self.model = VibeVoiceForConditionalGenerationInference.from_pretrained(
+ self.model_path,
+ torch_dtype=torch.bfloat16,
+ device_map='cuda',
+ attn_implementation="flash_attention_2",
+ )
+ self.model.eval()
+
+ # Use SDE solver by default
+ self.model.model.noise_scheduler = self.model.model.noise_scheduler.from_config(
+ self.model.model.noise_scheduler.config,
+ algorithm_type='sde-dpmsolver++',
+ beta_schedule='squaredcos_cap_v2'
+ )
+ self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)
+
+ if hasattr(self.model.model, 'language_model'):
+ print(f"Language model attention: {self.model.model.language_model.config._attn_implementation}")
+
+ def setup_voice_presets(self):
+ """Setup voice presets by scanning the voices directory."""
+ voices_dir = os.path.join(os.path.dirname(__file__), "voices")
+
+ # Check if voices directory exists
+ if not os.path.exists(voices_dir):
+ print(f"Warning: Voices directory not found at {voices_dir}")
+ self.voice_presets = {}
+ self.available_voices = {}
+ return
+
+ # Scan for all supported audio files in the voices directory
+ self.voice_presets = {}
+
+ # Get all .wav files in the voices directory
+ wav_files = [f for f in os.listdir(voices_dir)
+ if f.lower().endswith(('.wav', '.mp3', '.flac', '.ogg', '.m4a', '.aac')) and os.path.isfile(os.path.join(voices_dir, f))]
+
+ # Create dictionary with filename (without extension) as key
+ for wav_file in wav_files:
+ # Remove .wav extension to get the name
+ name = os.path.splitext(wav_file)[0]
+ # Create full path
+ full_path = os.path.join(voices_dir, wav_file)
+ self.voice_presets[name] = full_path
+
+ # Sort the voice presets alphabetically by name for better UI
+ self.voice_presets = dict(sorted(self.voice_presets.items()))
+
+ # Filter out voices that don't exist (this is now redundant but kept for safety)
+ self.available_voices = {
+ name: path for name, path in self.voice_presets.items()
+ if os.path.exists(path)
+ }
+
+ if not self.available_voices:
+ raise gr.Error("No voice presets found. Please add voice audio files (e.g. .wav) to the demo/voices directory.")
+
+ print(f"Found {len(self.available_voices)} voice files in {voices_dir}")
+ print(f"Available voices: {', '.join(self.available_voices.keys())}")
+
+ def read_audio(self, audio_path: str, target_sr: int = 24000) -> np.ndarray:
+ """Read and preprocess audio file."""
+ try:
+ wav, sr = sf.read(audio_path)
+ if len(wav.shape) > 1:
+ wav = np.mean(wav, axis=1)
+ if sr != target_sr:
+ wav = librosa.resample(wav, orig_sr=sr, target_sr=target_sr)
+ return wav
+ except Exception as e:
+ print(f"Error reading audio {audio_path}: {e}")
+ return np.array([])
+
+ def generate_podcast_streaming(self,
+ num_speakers: int,
+ script: str,
+ speaker_1: str = None,
+ speaker_2: str = None,
+ speaker_3: str = None,
+ speaker_4: str = None,
+ cfg_scale: float = 1.3) -> Iterator[tuple]:
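+ """Stream podcast generation for the Gradio UI.
+
+ Yields 4-tuples of (streaming_audio, complete_audio, log_text, streaming_status_update);
+ complete_audio stays None until the final yield.
+ """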
+ try:
+ # Reset stop flag and set generating state
+ self.stop_generation = False
+ self.is_generating = True
+
+ # Validate inputs
+ if not script.strip():
+ self.is_generating = False
+ raise gr.Error("Error: Please provide a script.")
+
+ if num_speakers < 1 or num_speakers > 4:
+ self.is_generating = False
+ raise gr.Error("Error: Number of speakers must be between 1 and 4.")
+
+ # Collect selected speakers
+ selected_speakers = [speaker_1, speaker_2, speaker_3, speaker_4][:num_speakers]
+
+ # Validate speaker selections
+ for i, speaker in enumerate(selected_speakers):
+ if not speaker or speaker not in self.available_voices:
+ self.is_generating = False
+ raise gr.Error(f"Error: Please select a valid speaker for Speaker {i+1}.")
+
+ # Build initial log
+ log = f"ποΈ Generating podcast with {num_speakers} speakers\n"
+ log += f"π Parameters: CFG Scale={cfg_scale}, Inference Steps={self.inference_steps}\n"
+ log += f"π Speakers: {', '.join(selected_speakers)}\n"
+
+ # Check for stop signal
+ if self.stop_generation:
+ self.is_generating = False
+ yield None, None, "π Generation stopped by user", gr.update(visible=False)
+ return
+
+ # Load voice samples
+ voice_samples = []
+ for speaker_name in selected_speakers:
+ audio_path = self.available_voices[speaker_name]
+ audio_data = self.read_audio(audio_path)
+ if len(audio_data) == 0:
+ self.is_generating = False
+ raise gr.Error(f"Error: Failed to load audio for {speaker_name}")
+ voice_samples.append(audio_data)
+
+ # log += f"β
Loaded {len(voice_samples)} voice samples\n"
+
+ # Check for stop signal
+ if self.stop_generation:
+ self.is_generating = False
+ yield None, None, "π Generation stopped by user", gr.update(visible=False)
+ return
+
+ # Parse script to assign speaker IDs
+ lines = script.strip().split('\n')
+ formatted_script_lines = []
+
+ for line in lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ # Check if line already has speaker format
+ if line.startswith('Speaker ') and ':' in line:
+ formatted_script_lines.append(line)
+ else:
+ # Auto-assign to speakers in rotation
+ speaker_id = len(formatted_script_lines) % num_speakers
+ formatted_script_lines.append(f"Speaker {speaker_id}: {line}")
+
+ formatted_script = '\n'.join(formatted_script_lines)
+ log += f"π Formatted script with {len(formatted_script_lines)} turns\n\n"
+ log += "π Processing with VibeVoice (streaming mode)...\n"
+
+ # Check for stop signal before processing
+ if self.stop_generation:
+ self.is_generating = False
+ yield None, None, "π Generation stopped by user", gr.update(visible=False)
+ return
+
+ start_time = time.time()
+
+ inputs = self.processor(
+ text=[formatted_script],
+ voice_samples=[voice_samples],
+ padding=True,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+
+ # Create audio streamer
+ audio_streamer = AudioStreamer(
+ batch_size=1,
+ stop_signal=None,
+ timeout=None
+ )
+
+ # Store current streamer for potential stopping
+ self.current_streamer = audio_streamer
+
+ # Start generation in a separate thread
+ generation_thread = threading.Thread(
+ target=self._generate_with_streamer,
+ args=(inputs, cfg_scale, audio_streamer)
+ )
+ generation_thread.start()
+
+ # Wait for generation to actually start producing audio
+ time.sleep(1) # Reduced from 3 to 1 second
+
+ # Check for stop signal after thread start
+ if self.stop_generation:
+ audio_streamer.end()
+ generation_thread.join(timeout=5.0) # Wait up to 5 seconds for thread to finish
+ self.is_generating = False
+ yield None, None, "π Generation stopped by user", gr.update(visible=False)
+ return
+
+ # Collect audio chunks as they arrive
+ sample_rate = 24000
+ all_audio_chunks = [] # For final statistics
+ pending_chunks = [] # Buffer for accumulating small chunks
+ chunk_count = 0
+ last_yield_time = time.time()
+ min_yield_interval = 15 # Yield every 15 seconds
+ min_chunk_size = sample_rate * 30 # At least 30 seconds of audio before the first yield
+
+ # Get the stream for the first (and only) sample
+ audio_stream = audio_streamer.get_stream(0)
+
+ has_yielded_audio = False
+ has_received_chunks = False # Track if we received any chunks at all
+
+ for audio_chunk in audio_stream:
+ # Check for stop signal in the streaming loop
+ if self.stop_generation:
+ audio_streamer.end()
+ break
+
+ chunk_count += 1
+ has_received_chunks = True # Mark that we received at least one chunk
+
+ # Convert tensor to numpy
+ if torch.is_tensor(audio_chunk):
+ # Convert bfloat16 to float32 first, then to numpy
+ if audio_chunk.dtype == torch.bfloat16:
+ audio_chunk = audio_chunk.float()
+ audio_np = audio_chunk.cpu().numpy().astype(np.float32)
+ else:
+ audio_np = np.array(audio_chunk, dtype=np.float32)
+
+ # Ensure audio is 1D and properly normalized
+ if len(audio_np.shape) > 1:
+ audio_np = audio_np.squeeze()
+
+ # Convert to 16-bit for Gradio
+ audio_16bit = convert_to_16_bit_wav(audio_np)
+
+ # Store for final statistics
+ all_audio_chunks.append(audio_16bit)
+
+ # Add to pending chunks buffer
+ pending_chunks.append(audio_16bit)
+
+ # Calculate pending audio size
+ pending_audio_size = sum(len(chunk) for chunk in pending_chunks)
+ current_time = time.time()
+ time_since_last_yield = current_time - last_yield_time
+
+ # Decide whether to yield
+ should_yield = False
+ if not has_yielded_audio and pending_audio_size >= min_chunk_size:
+ # First yield: wait for minimum chunk size
+ should_yield = True
+ has_yielded_audio = True
+ elif has_yielded_audio and (pending_audio_size >= min_chunk_size or time_since_last_yield >= min_yield_interval):
+ # Subsequent yields: either enough audio or enough time has passed
+ should_yield = True
+
+ if should_yield and pending_chunks:
+ # Concatenate and yield only the new audio chunks
+ new_audio = np.concatenate(pending_chunks)
+ new_duration = len(new_audio) / sample_rate
+ total_duration = sum(len(chunk) for chunk in all_audio_chunks) / sample_rate
+
+ log_update = log + f"π΅ Streaming: {total_duration:.1f}s generated (chunk {chunk_count})\n"
+
+ # Yield streaming audio chunk and keep complete_audio as None during streaming
+ yield (sample_rate, new_audio), None, log_update, gr.update(visible=True)
+
+ # Clear pending chunks after yielding
+ pending_chunks = []
+ last_yield_time = current_time
+
+ # Yield any remaining chunks
+ if pending_chunks:
+ final_new_audio = np.concatenate(pending_chunks)
+ total_duration = sum(len(chunk) for chunk in all_audio_chunks) / sample_rate
+ log_update = log + f"π΅ Streaming final chunk: {total_duration:.1f}s total\n"
+ yield (sample_rate, final_new_audio), None, log_update, gr.update(visible=True)
+ has_yielded_audio = True # Mark that we yielded audio
+
+ # Wait for generation to complete (with timeout to prevent hanging)
+ generation_thread.join(timeout=5.0) # Increased timeout to 5 seconds
+
+ # If thread is still alive after timeout, force end
+ if generation_thread.is_alive():
+ print("Warning: Generation thread did not complete within timeout")
+ audio_streamer.end()
+ generation_thread.join(timeout=5.0)
+
+ # Clean up
+ self.current_streamer = None
+ self.is_generating = False
+
+ generation_time = time.time() - start_time
+
+ # Check if stopped by user
+ if self.stop_generation:
+ yield None, None, "π Generation stopped by user", gr.update(visible=False)
+ return
+
+ # Debug logging
+ # print(f"Debug: has_received_chunks={has_received_chunks}, chunk_count={chunk_count}, all_audio_chunks length={len(all_audio_chunks)}")
+
+ # Check if we received any chunks but didn't yield audio
+ if has_received_chunks and not has_yielded_audio and all_audio_chunks:
+ # We have chunks but didn't meet the yield criteria, yield them now
+ complete_audio = np.concatenate(all_audio_chunks)
+ final_duration = len(complete_audio) / sample_rate
+
+ final_log = log + f"β±οΈ Generation completed in {generation_time:.2f} seconds\n"
+ final_log += f"π΅ Final audio duration: {final_duration:.2f} seconds\n"
+ final_log += f"π Total chunks: {chunk_count}\n"
+ final_log += "β¨ Generation successful! Complete audio is ready.\n"
+ final_log += "π‘ Not satisfied? You can regenerate or adjust the CFG scale for different results."
+
+ # Yield the complete audio
+ yield None, (sample_rate, complete_audio), final_log, gr.update(visible=False)
+ return
+
+ if not has_received_chunks:
+ error_log = log + f"\nβ Error: No audio chunks were received from the model. Generation time: {generation_time:.2f}s"
+ yield None, None, error_log, gr.update(visible=False)
+ return
+
+ if not has_yielded_audio:
+ error_log = log + f"\nβ Error: Audio was generated but not streamed. Chunk count: {chunk_count}"
+ yield None, None, error_log, gr.update(visible=False)
+ return
+
+ # Prepare the complete audio
+ if all_audio_chunks:
+ complete_audio = np.concatenate(all_audio_chunks)
+ final_duration = len(complete_audio) / sample_rate
+
+ final_log = log + f"β±οΈ Generation completed in {generation_time:.2f} seconds\n"
+ final_log += f"π΅ Final audio duration: {final_duration:.2f} seconds\n"
+ final_log += f"π Total chunks: {chunk_count}\n"
+ final_log += "β¨ Generation successful! Complete audio is ready in the 'Complete Audio' tab.\n"
+ final_log += "π‘ Not satisfied? You can regenerate or adjust the CFG scale for different results."
+
+ # Final yield: Clear streaming audio and provide complete audio
+ yield None, (sample_rate, complete_audio), final_log, gr.update(visible=False)
+ else:
+ final_log = log + "β No audio was generated."
+ yield None, None, final_log, gr.update(visible=False)
+
+ except gr.Error as e:
+ # Handle Gradio-specific errors (like input validation)
+ self.is_generating = False
+ self.current_streamer = None
+ error_msg = f"β Input Error: {str(e)}"
+ print(error_msg)
+ yield None, None, error_msg, gr.update(visible=False)
+
+ except Exception as e:
+ self.is_generating = False
+ self.current_streamer = None
+ error_msg = f"β An unexpected error occurred: {str(e)}"
+ print(error_msg)
+ import traceback
+ traceback.print_exc()
+ yield None, None, error_msg, gr.update(visible=False)
+
+ def _generate_with_streamer(self, inputs, cfg_scale, audio_streamer):
+ """Helper method to run generation with streamer in a separate thread."""
+ try:
+ # Check for stop signal before starting generation
+ if self.stop_generation:
+ audio_streamer.end()
+ return
+
+ # Define a stop check function that can be called from generate
+ def check_stop_generation():
+ return self.stop_generation
+
+ outputs = self.model.generate(
+ **inputs,
+ max_new_tokens=None,
+ cfg_scale=cfg_scale,
+ tokenizer=self.processor.tokenizer,
+ generation_config={
+ 'do_sample': False,
+ },
+ audio_streamer=audio_streamer,
+ stop_check_fn=check_stop_generation, # Pass the stop check function
+ verbose=False, # Disable verbose in streaming mode
+ refresh_negative=True,
+ )
+
+ except Exception as e:
+ print(f"Error in generation thread: {e}")
+ traceback.print_exc()
+ # Make sure to end the stream on error
+ audio_streamer.end()
+
+ def stop_audio_generation(self):
+ """Stop the current audio generation process."""
+ self.stop_generation = True
+ if self.current_streamer is not None:
+ try:
+ self.current_streamer.end()
+ except Exception as e:
+ print(f"Error stopping streamer: {e}")
+ print("π Audio generation stop requested")
+
+ def load_example_scripts(self):
+ """Load example scripts from the text_examples directory."""
+ examples_dir = os.path.join(os.path.dirname(__file__), "text_examples")
+ self.example_scripts = []
+
+ # Check if text_examples directory exists
+ if not os.path.exists(examples_dir):
+ print(f"Warning: text_examples directory not found at {examples_dir}")
+ return
+
+ # Get all .txt files in the text_examples directory
+ txt_files = sorted([f for f in os.listdir(examples_dir)
+ if f.lower().endswith('.txt') and os.path.isfile(os.path.join(examples_dir, f))])
+
+ for txt_file in txt_files:
+ file_path = os.path.join(examples_dir, txt_file)
+
+ import re
+ # Check if filename contains a time pattern like "45min", "90min", etc.
+ time_pattern = re.search(r'(\d+)min', txt_file.lower())
+ if time_pattern:
+ minutes = int(time_pattern.group(1))
+ if minutes > 15:
+ print(f"Skipping {txt_file}: duration {minutes} minutes exceeds 15-minute limit")
+ continue
+
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ script_content = f.read().strip()
+
+ # Remove empty lines and lines with only whitespace
+ script_content = '\n'.join(line for line in script_content.split('\n') if line.strip())
+
+ if not script_content:
+ continue
+
+ # Parse the script to determine number of speakers
+ num_speakers = self._get_num_speakers_from_script(script_content)
+
+ # Add to examples list as [num_speakers, script_content]
+ self.example_scripts.append([num_speakers, script_content])
+ print(f"Loaded example: {txt_file} with {num_speakers} speakers")
+
+ except Exception as e:
+ print(f"Error loading example script {txt_file}: {e}")
+
+ if self.example_scripts:
+ print(f"Successfully loaded {len(self.example_scripts)} example scripts")
+ else:
+ print("No example scripts were loaded")
+
+ def _get_num_speakers_from_script(self, script: str) -> int:
+ """Determine the number of unique speakers in a script."""
+ import re
+ speakers = set()
+
+ lines = script.strip().split('\n')
+ for line in lines:
+ # Use regex to find speaker patterns
+ match = re.match(r'^Speaker\s+(\d+)\s*:', line.strip(), re.IGNORECASE)
+ if match:
+ speaker_id = int(match.group(1))
+ speakers.add(speaker_id)
+
+ # If no speakers found, default to 1
+ if not speakers:
+ return 1
+
+ # Return the maximum speaker ID + 1 (assuming 0-based indexing)
+ # or the count of unique speakers if they're 1-based
+ max_speaker = max(speakers)
+ min_speaker = min(speakers)
+
+ if min_speaker == 0:
+ return max_speaker + 1
+ else:
+ # Assume 1-based indexing, return the count
+ return len(speakers)
+
+
+def create_demo_interface(demo_instance: VibeVoiceDemo):
+ """Create the Gradio interface with streaming support."""
+
+ # Custom CSS for high-end aesthetics with lighter theme
+ custom_css = """
+ /* Modern light theme with gradients */
+ .gradio-container {
+ background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
+ font-family: 'SF Pro Display', -apple-system, BlinkMacSystemFont, sans-serif;
+ }
+
+ /* Header styling */
+ .main-header {
+ background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
+ padding: 2rem;
+ border-radius: 20px;
+ margin-bottom: 2rem;
+ text-align: center;
+ box-shadow: 0 10px 40px rgba(102, 126, 234, 0.3);
+ }
+
+ .main-header h1 {
+ color: white;
+ font-size: 2.5rem;
+ font-weight: 700;
+ margin: 0;
+ text-shadow: 0 2px 4px rgba(0,0,0,0.3);
+ }
+
+ .main-header p {
+ color: rgba(255,255,255,0.9);
+ font-size: 1.1rem;
+ margin: 0.5rem 0 0 0;
+ }
+
+ /* Card styling */
+ .settings-card, .generation-card {
+ background: rgba(255, 255, 255, 0.8);
+ backdrop-filter: blur(10px);
+ border: 1px solid rgba(226, 232, 240, 0.8);
+ border-radius: 16px;
+ padding: 1.5rem;
+ margin-bottom: 1rem;
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1);
+ }
+
+ /* Speaker selection styling */
+ .speaker-grid {
+ display: grid;
+ gap: 1rem;
+ margin-bottom: 1rem;
+ }
+
+ .speaker-item {
+ background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
+ border: 1px solid rgba(148, 163, 184, 0.4);
+ border-radius: 12px;
+ padding: 1rem;
+ color: #374151;
+ font-weight: 500;
+ }
+
+ /* Streaming indicator */
+ .streaming-indicator {
+ display: inline-block;
+ width: 10px;
+ height: 10px;
+ background: #22c55e;
+ border-radius: 50%;
+ margin-right: 8px;
+ animation: pulse 1.5s infinite;
+ }
+
+ @keyframes pulse {
+ 0% { opacity: 1; transform: scale(1); }
+ 50% { opacity: 0.5; transform: scale(1.1); }
+ 100% { opacity: 1; transform: scale(1); }
+ }
+
+ /* Queue status styling */
+ .queue-status {
+ background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);
+ border: 1px solid rgba(14, 165, 233, 0.3);
+ border-radius: 8px;
+ padding: 0.75rem;
+ margin: 0.5rem 0;
+ text-align: center;
+ font-size: 0.9rem;
+ color: #0369a1;
+ }
+
+ .generate-btn {
+ background: linear-gradient(135deg, #059669 0%, #0d9488 100%);
+ border: none;
+ border-radius: 12px;
+ padding: 1rem 2rem;
+ color: white;
+ font-weight: 600;
+ font-size: 1.1rem;
+ box-shadow: 0 4px 20px rgba(5, 150, 105, 0.4);
+ transition: all 0.3s ease;
+ }
+
+ .generate-btn:hover {
+ transform: translateY(-2px);
+ box-shadow: 0 6px 25px rgba(5, 150, 105, 0.6);
+ }
+
+ .stop-btn {
+ background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%);
+ border: none;
+ border-radius: 12px;
+ padding: 1rem 2rem;
+ color: white;
+ font-weight: 600;
+ font-size: 1.1rem;
+ box-shadow: 0 4px 20px rgba(239, 68, 68, 0.4);
+ transition: all 0.3s ease;
+ }
+
+ .stop-btn:hover {
+ transform: translateY(-2px);
+ box-shadow: 0 6px 25px rgba(239, 68, 68, 0.6);
+ }
+
+ /* Audio player styling */
+ .audio-output {
+ background: linear-gradient(135deg, #f1f5f9 0%, #e2e8f0 100%);
+ border-radius: 16px;
+ padding: 1.5rem;
+ border: 1px solid rgba(148, 163, 184, 0.3);
+ }
+
+ .complete-audio-section {
+ margin-top: 1rem;
+ padding: 1rem;
+ background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%);
+ border: 1px solid rgba(34, 197, 94, 0.3);
+ border-radius: 12px;
+ }
+
+ /* Text areas */
+ .script-input, .log-output {
+ background: rgba(255, 255, 255, 0.9) !important;
+ border: 1px solid rgba(148, 163, 184, 0.4) !important;
+ border-radius: 12px !important;
+ color: #1e293b !important;
+ font-family: 'JetBrains Mono', monospace !important;
+ }
+
+ .script-input::placeholder {
+ color: #64748b !important;
+ }
+
+ /* Sliders */
+ .slider-container {
+ background: rgba(248, 250, 252, 0.8);
+ border: 1px solid rgba(226, 232, 240, 0.6);
+ border-radius: 8px;
+ padding: 1rem;
+ margin: 0.5rem 0;
+ }
+
+ /* Labels and text */
+ .gradio-container label {
+ color: #374151 !important;
+ font-weight: 600 !important;
+ }
+
+ .gradio-container .markdown {
+ color: #1f2937 !important;
+ }
+
+ /* Responsive design */
+ @media (max-width: 768px) {
+ .main-header h1 { font-size: 2rem; }
+ .settings-card, .generation-card { padding: 1rem; }
+ }
+
+ /* Random example button styling - more subtle professional color */
+ .random-btn {
+ background: linear-gradient(135deg, #64748b 0%, #475569 100%);
+ border: none;
+ border-radius: 12px;
+ padding: 1rem 1.5rem;
+ color: white;
+ font-weight: 600;
+ font-size: 1rem;
+ box-shadow: 0 4px 20px rgba(100, 116, 139, 0.3);
+ transition: all 0.3s ease;
+ display: inline-flex;
+ align-items: center;
+ gap: 0.5rem;
+ }
+
+ .random-btn:hover {
+ transform: translateY(-2px);
+ box-shadow: 0 6px 25px rgba(100, 116, 139, 0.4);
+ background: linear-gradient(135deg, #475569 0%, #334155 100%);
+ }
+ """
+
+ with gr.Blocks(
+ title="VibeVoice - AI Podcast Generator",
+ css=custom_css,
+ theme=gr.themes.Soft(
+ primary_hue="blue",
+ secondary_hue="purple",
+ neutral_hue="slate",
+ )
+ ) as interface:
+
+ # Header
+ gr.HTML("""
+ <div class="main-header">
+ <h1>ποΈ Vibe Podcasting</h1>
+ <p>Generating Long-form Multi-speaker AI Podcast with VibeVoice</p>
+ </div>
+ """)
+
+ with gr.Row():
+ # Left column - Settings
+ with gr.Column(scale=1, elem_classes="settings-card"):
+ gr.Markdown("### ποΈ **Podcast Settings**")
+
+ # Number of speakers
+ num_speakers = gr.Slider(
+ minimum=1,
+ maximum=4,
+ value=2,
+ step=1,
+ label="Number of Speakers",
+ elem_classes="slider-container"
+ )
+
+ # Speaker selection
+ gr.Markdown("### π **Speaker Selection**")
+
+ available_speaker_names = list(demo_instance.available_voices.keys())
+ # default_speakers = available_speaker_names[:4] if len(available_speaker_names) >= 4 else available_speaker_names
+ default_speakers = ['en-Alice_woman', 'en-Carter_man', 'en-Frank_man', 'en-Maya_woman']
+
+ speaker_selections = []
+ for i in range(4):
+ default_value = default_speakers[i] if i < len(default_speakers) else None
+ speaker = gr.Dropdown(
+ choices=available_speaker_names,
+ value=default_value,
+ label=f"Speaker {i+1}",
+ visible=(i < 2), # Initially show only first 2 speakers
+ elem_classes="speaker-item"
+ )
+ speaker_selections.append(speaker)
+
+ # Advanced settings
+ gr.Markdown("### βοΈ **Advanced Settings**")
+
+ # Sampling parameters (contains all generation settings)
+ with gr.Accordion("Generation Parameters", open=False):
+ cfg_scale = gr.Slider(
+ minimum=1.0,
+ maximum=2.0,
+ value=1.3,
+ step=0.05,
+ label="CFG Scale (Guidance Strength)",
+ # info="Higher values increase adherence to text",
+ elem_classes="slider-container"
+ )
+
+ # Right column - Generation
+ with gr.Column(scale=2, elem_classes="generation-card"):
+ gr.Markdown("### π **Script Input**")
+
+ script_input = gr.Textbox(
+ label="Conversation Script",
+ placeholder="""Enter your podcast script here. You can format it as:
+
+Speaker 0: Welcome to our podcast today!
+Speaker 1: Thanks for having me. I'm excited to discuss...
+
+Or paste text directly and it will auto-assign speakers.""",
+ lines=12,
+ max_lines=20,
+ elem_classes="script-input"
+ )
+
+ # Button row with Random Example on the left and Generate on the right
+ with gr.Row():
+ # Random example button (now on the left)
+ random_example_btn = gr.Button(
+ "π² Random Example",
+ size="lg",
+ variant="secondary",
+ elem_classes="random-btn",
+ scale=1 # Smaller width
+ )
+
+ # Generate button (now on the right)
+ generate_btn = gr.Button(
+ "π Generate Podcast",
+ size="lg",
+ variant="primary",
+ elem_classes="generate-btn",
+ scale=2 # Wider than random button
+ )
+
+ # Stop button
+ stop_btn = gr.Button(
+ "π Stop Generation",
+ size="lg",
+ variant="stop",
+ elem_classes="stop-btn",
+ visible=False
+ )
+
+ # Streaming status indicator
+ streaming_status = gr.HTML(
+ value="""
+ <div class="queue-status">
+ <span class="streaming-indicator"></span>
+ LIVE STREAMING - Audio is being generated in real-time
+ </div>
+ """,
+ visible=False,
+ elem_id="streaming-status"
+ )
+
+ # Output section
+ gr.Markdown("### π΅ **Generated Podcast**")
+
+ # Streaming audio output (outside of tabs for simpler handling)
+ audio_output = gr.Audio(
+ label="Streaming Audio (Real-time)",
+ type="numpy",
+ elem_classes="audio-output",
+ streaming=True, # Enable streaming mode
+ autoplay=True,
+ show_download_button=False, # Hide download button for the streaming player
+ visible=True
+ )
+
+ # Complete audio output (non-streaming)
+ complete_audio_output = gr.Audio(
+ label="Complete Podcast (Download after generation)",
+ type="numpy",
+ elem_classes="audio-output complete-audio-section",
+ streaming=False, # Non-streaming mode
+ autoplay=False,
+ show_download_button=True, # Explicitly show download button
+ visible=False # Initially hidden, shown when audio is ready
+ )
+
+ gr.Markdown("""
+ *π‘ **Streaming**: Audio plays as it's being generated (may have slight pauses)*
+ *π‘ **Complete Audio**: Will appear below after generation finishes*
+ """)
+
+ # Generation log
+ log_output = gr.Textbox(
+ label="Generation Log",
+ lines=8,
+ max_lines=15,
+ interactive=False,
+ elem_classes="log-output"
+ )
+
+ def update_speaker_visibility(num_speakers):
+ updates = []
+ for i in range(4):
+ updates.append(gr.update(visible=(i < num_speakers)))
+ return updates
+
+ num_speakers.change(
+ fn=update_speaker_visibility,
+ inputs=[num_speakers],
+ outputs=speaker_selections
+ )
+
+ # Main generation function with streaming
+ def generate_podcast_wrapper(num_speakers, script, *speakers_and_params):
+ """Wrapper function to handle the streaming generation call."""
+ try:
+ # Extract speakers and parameters
+ speakers = speakers_and_params[:4] # First 4 are speaker selections
+ cfg_scale = speakers_and_params[4] # CFG scale
+
+ # Clear outputs and reset visibility at start
+ yield None, gr.update(value=None, visible=False), "ποΈ Starting generation...", gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)
+
+ # The generator will yield multiple times
+ final_log = "Starting generation..."
+
+ for streaming_audio, complete_audio, log, streaming_visible in demo_instance.generate_podcast_streaming(
+ num_speakers=int(num_speakers),
+ script=script,
+ speaker_1=speakers[0],
+ speaker_2=speakers[1],
+ speaker_3=speakers[2],
+ speaker_4=speakers[3],
+ cfg_scale=cfg_scale
+ ):
+ final_log = log
+
+ # Check if we have complete audio (final yield)
+ if complete_audio is not None:
+ # Final state: clear streaming, show complete audio
+ yield None, gr.update(value=complete_audio, visible=True), log, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
+ else:
+ # Streaming state: update streaming audio only
+ if streaming_audio is not None:
+ yield streaming_audio, gr.update(visible=False), log, streaming_visible, gr.update(visible=False), gr.update(visible=True)
+ else:
+ # No new audio, just update status
+ yield None, gr.update(visible=False), log, streaming_visible, gr.update(visible=False), gr.update(visible=True)
+
+ except Exception as e:
+ error_msg = f"β A critical error occurred in the wrapper: {str(e)}"
+ print(error_msg)
+ import traceback
+ traceback.print_exc()
+ # Reset button states on error
+ yield None, gr.update(value=None, visible=False), error_msg, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
+
+ def stop_generation_handler():
+ """Handle stopping generation."""
+ demo_instance.stop_audio_generation()
+ # Return values for: log_output, streaming_status, generate_btn, stop_btn
+ return "π Generation stopped.", gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
+
+ # Add a clear audio function
+ def clear_audio_outputs():
+ """Clear both audio outputs before starting new generation."""
+ return None, gr.update(value=None, visible=False)
+
+ # Connect generation button with streaming outputs
+ generate_btn.click(
+ fn=clear_audio_outputs,
+ inputs=[],
+ outputs=[audio_output, complete_audio_output],
+ queue=False
+ ).then(
+ fn=generate_podcast_wrapper,
+ inputs=[num_speakers, script_input] + speaker_selections + [cfg_scale],
+ outputs=[audio_output, complete_audio_output, log_output, streaming_status, generate_btn, stop_btn],
+ queue=True # Enable Gradio's built-in queue
+ )
+
+ # Connect stop button
+ stop_btn.click(
+ fn=stop_generation_handler,
+ inputs=[],
+ outputs=[log_output, streaming_status, generate_btn, stop_btn],
+ queue=False # Don't queue stop requests
+ ).then(
+ # Clear both audio outputs after stopping
+ fn=lambda: (None, None),
+ inputs=[],
+ outputs=[audio_output, complete_audio_output],
+ queue=False
+ )
+
+ # Function to randomly select an example
+ def load_random_example():
+ """Randomly select and load an example script."""
+ import random
+
+ # Get available examples
+ if hasattr(demo_instance, 'example_scripts') and demo_instance.example_scripts:
+ example_scripts = demo_instance.example_scripts
+ else:
+ # Fallback to default
+ example_scripts = [
+ [2, "Speaker 0: Welcome to our AI podcast demonstration!\nSpeaker 1: Thanks for having me. This is exciting!"]
+ ]
+
+ # Randomly select one
+ if example_scripts:
+ selected = random.choice(example_scripts)
+ num_speakers_value = selected[0]
+ script_value = selected[1]
+
+ # Return the values to update the UI
+ return num_speakers_value, script_value
+
+ # Default values if no examples
+ return 2, ""
+
+ # Connect random example button
+ random_example_btn.click(
+ fn=load_random_example,
+ inputs=[],
+ outputs=[num_speakers, script_input],
+ queue=False # Don't queue this simple operation
+ )
+
+ # Add usage tips
+ gr.Markdown("""
+ ### π‘ **Usage Tips**
+
+ - Click **π Generate Podcast** to start audio generation
+ - The **Streaming Audio** player plays audio as it's generated (may have slight pauses)
+ - The **Complete Podcast** player provides the full, uninterrupted audio after generation finishes
+ - During generation, you can click **π Stop Generation** to interrupt the process
+ - The streaming indicator shows real-time generation progress
+ """)
+
+ # Add example scripts
+ gr.Markdown("### π **Example Scripts**")
+
+ # Use dynamically loaded examples if available, otherwise provide a default
+ if hasattr(demo_instance, 'example_scripts') and demo_instance.example_scripts:
+ example_scripts = demo_instance.example_scripts
+ else:
+ # Fallback to a simple default example if no scripts loaded
+ example_scripts = [
+ [1, "Speaker 1: Welcome to our AI podcast demonstration! This is a sample script showing how VibeVoice can generate natural-sounding speech."]
+ ]
+
+ gr.Examples(
+ examples=example_scripts,
+ inputs=[num_speakers, script_input],
+ label="Try these example scripts:"
+ )
+
+ return interface
+
+
+def convert_to_16_bit_wav(data):
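+ """Convert audio (tensor or array) to int16 PCM, normalizing to [-1, 1] if needed."""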
+ # Check if data is a tensor and move to cpu
+ if torch.is_tensor(data):
+ data = data.detach().cpu().numpy()
+
+ # Ensure data is numpy array
+ data = np.array(data)
+
+ # Normalize to range [-1, 1] if it's not already
+ if np.max(np.abs(data)) > 1.0:
+ data = data / np.max(np.abs(data))
+
+ # Scale to 16-bit integer range
+ data = (data * 32767).astype(np.int16)
+ return data
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="VibeVoice Gradio Demo")
+ parser.add_argument(
+ "--model_path",
+ type=str,
+ default="/tmp/vibevoice-model",
+ help="Path to the VibeVoice model directory",
+ )
+ parser.add_argument(
+ "--device",
+ type=str,
+ default="cuda" if torch.cuda.is_available() else "cpu",
+ help="Device for inference",
+ )
+ parser.add_argument(
+ "--inference_steps",
+ type=int,
+ default=10,
+ help="Number of inference steps for DDPM (not exposed to users)",
+ )
+ parser.add_argument(
+ "--share",
+ action="store_true",
+ help="Share the demo publicly via Gradio",
+ )
+ parser.add_argument(
+ "--port",
+ type=int,
+ default=7860,
+ help="Port to run the demo on",
+ )
+
+ return parser.parse_args()
+
+
+def main():
+ """Main function to run the demo."""
+ args = parse_args()
+
+ set_seed(42) # Set a fixed seed for reproducibility
+
+ print("ποΈ Initializing VibeVoice Demo with Streaming Support...")
+
+ # Initialize demo instance
+ demo_instance = VibeVoiceDemo(
+ model_path=args.model_path,
+ device=args.device,
+ inference_steps=args.inference_steps
+ )
+
+ # Create interface
+ interface = create_demo_interface(demo_instance)
+
+ print(f"π Launching demo on port {args.port}")
+ print(f"π Model path: {args.model_path}")
+ print(f"π Available voices: {len(demo_instance.available_voices)}")
+ print(f"π΄ Streaming mode: ENABLED")
+ print(f"π Session isolation: ENABLED")
+
+ # Launch the interface
+ try:
+ interface.queue(
+ max_size=20, # Maximum queue size
+ default_concurrency_limit=1 # Process one request at a time
+ ).launch(
+ share=args.share,
+ server_port=args.port,
+ server_name="0.0.0.0" if args.share else "127.0.0.1",
+ show_error=True,
+ show_api=False # Hide API docs for cleaner interface
+ )
+ except KeyboardInterrupt:
+ print("\nπ Shutting down gracefully...")
+ except Exception as e:
+ print(f"β Server error: {e}")
+ raise
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/demo/inference_from_file.py b/demo/inference_from_file.py
new file mode 100644
index 0000000..078b53a
--- /dev/null
+++ b/demo/inference_from_file.py
@@ -0,0 +1,336 @@
+import argparse
+import os
+import re
+from typing import List, Tuple, Union, Dict, Any
+import time
+import torch
+
+from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
+from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
+from transformers.utils import logging
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
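+# Example CLI usage (speaker names are illustrative and must match voice files in demo/voices):
+#   python demo/inference_from_file.py --txt_path demo/text_examples/2p_goat.txt \
+#       --speaker_names Alice Frank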
+
+class VoiceMapper:
+ """Maps speaker names to voice file paths"""
+
+ def __init__(self):
+ self.setup_voice_presets()
+
+ # Normalize preset names so bare speaker names match, e.g. "en-Alice_woman" -> "Alice"
+ new_dict = {}
+ for name, path in self.voice_presets.items():
+
+ if '_' in name:
+ name = name.split('_')[0]
+
+ if '-' in name:
+ name = name.split('-')[-1]
+
+ new_dict[name] = path
+ self.voice_presets.update(new_dict)
+ # print(list(self.voice_presets.keys()))
+
+ def setup_voice_presets(self):
+ """Setup voice presets by scanning the voices directory."""
+ voices_dir = os.path.join(os.path.dirname(__file__), "voices")
+
+ # Check if voices directory exists
+ if not os.path.exists(voices_dir):
+ print(f"Warning: Voices directory not found at {voices_dir}")
+ self.voice_presets = {}
+ self.available_voices = {}
+ return
+
+ # Scan for all WAV files in the voices directory
+ self.voice_presets = {}
+
+ # Get all .wav files in the voices directory
+ wav_files = [f for f in os.listdir(voices_dir)
+ if f.lower().endswith('.wav') and os.path.isfile(os.path.join(voices_dir, f))]
+
+ # Create dictionary with filename (without extension) as key
+ for wav_file in wav_files:
+ # Remove .wav extension to get the name
+ name = os.path.splitext(wav_file)[0]
+ # Create full path
+ full_path = os.path.join(voices_dir, wav_file)
+ self.voice_presets[name] = full_path
+
+ # Sort the voice presets alphabetically by name for better UI
+ self.voice_presets = dict(sorted(self.voice_presets.items()))
+
+ # Filter out voices that don't exist (this is now redundant but kept for safety)
+ self.available_voices = {
+ name: path for name, path in self.voice_presets.items()
+ if os.path.exists(path)
+ }
+
+ print(f"Found {len(self.available_voices)} voice files in {voices_dir}")
+ print(f"Available voices: {', '.join(self.available_voices.keys())}")
+
+ def get_voice_path(self, speaker_name: str) -> str:
+ """Get voice file path for a given speaker name"""
+ # First try exact match
+ if speaker_name in self.voice_presets:
+ return self.voice_presets[speaker_name]
+
+ # Try partial matching (case insensitive)
+ speaker_lower = speaker_name.lower()
+ for preset_name, path in self.voice_presets.items():
+ if preset_name.lower() in speaker_lower or speaker_lower in preset_name.lower():
+ return path
+
+ # Default to first voice if no match found
+ default_voice = list(self.voice_presets.values())[0]
+ print(f"Warning: No voice preset found for '{speaker_name}', using default voice: {default_voice}")
+ return default_voice
+
+
+def parse_txt_script(txt_content: str) -> Tuple[List[str], List[str]]:
+ """
+ Parse txt script content and extract speakers and their text
+ Fixed pattern: Speaker 1, Speaker 2, Speaker 3, Speaker 4
+ Returns: (scripts, speaker_numbers)
+ """
+ lines = txt_content.strip().split('\n')
+ scripts = []
+ speaker_numbers = []
+
+ # Pattern to match "Speaker X:" format where X is a number
+ speaker_pattern = r'^Speaker\s+(\d+):\s*(.*)$'
+
+ current_speaker = None
+ current_text = ""
+
+ for line in lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ match = re.match(speaker_pattern, line, re.IGNORECASE)
+ if match:
+ # If we have accumulated text from previous speaker, save it
+ if current_speaker and current_text:
+ scripts.append(f"Speaker {current_speaker}: {current_text.strip()}")
+ speaker_numbers.append(current_speaker)
+
+ # Start new speaker
+ current_speaker = match.group(1).strip()
+ current_text = match.group(2).strip()
+ else:
+ # Continue text for current speaker
+ if current_text:
+ current_text += " " + line
+ else:
+ current_text = line
+
+ # Don't forget the last speaker
+ if current_speaker and current_text:
+ scripts.append(f"Speaker {current_speaker}: {current_text.strip()}")
+ speaker_numbers.append(current_speaker)
+
+ return scripts, speaker_numbers
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="VibeVoice Processor TXT Input Test")
+ parser.add_argument(
+ "--model_path",
+ type=str,
+ default="microsoft/VibeVoice-1.5b",
+ help="Path to the HuggingFace model directory",
+ )
+
+ parser.add_argument(
+ "--txt_path",
+ type=str,
+ default="demo/text_examples/1p_abs.txt",
+ help="Path to the txt file containing the script",
+ )
+ parser.add_argument(
+ "--speaker_names",
+ type=str,
+ nargs='+',
+ default='Andrew',
+ help="Speaker names in order (e.g., --speaker_names Andrew Ava 'Bill Gates')",
+ )
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default="./outputs",
+ help="Directory to save output audio files",
+ )
+ parser.add_argument(
+ "--device",
+ type=str,
+ default="cuda" if torch.cuda.is_available() else "cpu",
+ help="Device for tensor tests",
+ )
+ parser.add_argument(
+ "--cfg_scale",
+ type=float,
+ default=1.3,
+ help="CFG (Classifier-Free Guidance) scale for generation (default: 1.3)",
+ )
+
+ return parser.parse_args()
+
+def main():
+ args = parse_args()
+
+ # Initialize voice mapper
+ voice_mapper = VoiceMapper()
+
+ # Check if txt file exists
+ if not os.path.exists(args.txt_path):
+ print(f"Error: txt file not found: {args.txt_path}")
+ return
+
+ # Read and parse txt file
+ print(f"Reading script from: {args.txt_path}")
+ with open(args.txt_path, 'r', encoding='utf-8') as f:
+ txt_content = f.read()
+
+ # Parse the txt content to get speaker numbers
+ scripts, speaker_numbers = parse_txt_script(txt_content)
+
+ if not scripts:
+ print("Error: No valid speaker scripts found in the txt file")
+ return
+
+ print(f"Found {len(scripts)} speaker segments:")
+ for i, (script, speaker_num) in enumerate(zip(scripts, speaker_numbers)):
+ print(f" {i+1}. Speaker {speaker_num}")
+ print(f" Text preview: {script[:100]}...")
+
+ # Map speaker numbers to provided speaker names
+ speaker_name_mapping = {}
+ speaker_names_list = args.speaker_names if isinstance(args.speaker_names, list) else [args.speaker_names]
+ for i, name in enumerate(speaker_names_list, 1):
+ speaker_name_mapping[str(i)] = name
+
+ print(f"\nSpeaker mapping:")
+ for speaker_num in set(speaker_numbers):
+ mapped_name = speaker_name_mapping.get(speaker_num, f"Speaker {speaker_num}")
+ print(f" Speaker {speaker_num} -> {mapped_name}")
+
+ # Map speakers to voice files using the provided speaker names
+ voice_samples = []
+ actual_speakers = []
+
+ # Get unique speaker numbers in order of first appearance
+ unique_speaker_numbers = []
+ seen = set()
+ for speaker_num in speaker_numbers:
+ if speaker_num not in seen:
+ unique_speaker_numbers.append(speaker_num)
+ seen.add(speaker_num)
+
+ for speaker_num in unique_speaker_numbers:
+ speaker_name = speaker_name_mapping.get(speaker_num, f"Speaker {speaker_num}")
+ voice_path = voice_mapper.get_voice_path(speaker_name)
+ voice_samples.append(voice_path)
+ actual_speakers.append(speaker_name)
+ print(f"Speaker {speaker_num} ('{speaker_name}') -> Voice: {os.path.basename(voice_path)}")
+
+ # Prepare data for model
+ full_script = '\n'.join(scripts)
+
+ # Load processor
+ print(f"Loading processor & model from {args.model_path}")
+ processor = VibeVoiceProcessor.from_pretrained(args.model_path)
+
+ # Load model
+ model = VibeVoiceForConditionalGenerationInference.from_pretrained(
+ args.model_path,
+ torch_dtype=torch.bfloat16,
+ device_map='cuda',
+ attn_implementation="flash_attention_2" # we only test flash_attention_2
+ )
+
+ model.eval()
+ model.set_ddpm_inference_steps(num_steps=10)
+
+ if hasattr(model.model, 'language_model'):
+ print(f"Language model attention: {model.model.language_model.config._attn_implementation}")
+
+ # Prepare inputs for the model
+ inputs = processor(
+ text=[full_script], # Wrap in list for batch processing
+ voice_samples=[voice_samples], # Wrap in list for batch processing
+ padding=True,
+ return_tensors="pt",
+ return_attention_mask=True,
+ )
+ print(f"Starting generation with cfg_scale: {args.cfg_scale}")
+
+ # Generate audio
+ start_time = time.time()
+ outputs = model.generate(
+ **inputs,
+ max_new_tokens=None,
+ cfg_scale=args.cfg_scale,
+ tokenizer=processor.tokenizer,
+ # generation_config={'do_sample': False, 'temperature': 0.95, 'top_p': 0.95, 'top_k': 0},
+ generation_config={'do_sample': False},
+ verbose=True,
+ )
+ generation_time = time.time() - start_time
+ print(f"Generation time: {generation_time:.2f} seconds")
+
+ # Calculate audio duration and additional metrics
+ if outputs.speech_outputs and outputs.speech_outputs[0] is not None:
+ # Assuming 24kHz sample rate (common for speech synthesis)
+ sample_rate = 24000
+ audio_samples = outputs.speech_outputs[0].shape[-1] if len(outputs.speech_outputs[0].shape) > 0 else len(outputs.speech_outputs[0])
+ audio_duration = audio_samples / sample_rate
+ rtf = generation_time / audio_duration if audio_duration > 0 else float('inf')
+
+ print(f"Generated audio duration: {audio_duration:.2f} seconds")
+ print(f"RTF (Real Time Factor): {rtf:.2f}x")
+ else:
+ print("No audio output generated")
+ return
+
+ # Calculate token metrics
+ input_tokens = inputs['input_ids'].shape[1] # Number of input tokens
+ output_tokens = outputs.sequences.shape[1] # Total tokens (input + generated)
+ generated_tokens = output_tokens - input_tokens
+
+ print(f"Prefilling tokens: {input_tokens}")
+ print(f"Generated tokens: {generated_tokens}")
+ print(f"Total tokens: {output_tokens}")
+
+ # Save output
+ txt_filename = os.path.splitext(os.path.basename(args.txt_path))[0]
+ output_path = os.path.join(args.output_dir, f"{txt_filename}_generated.wav")
+ os.makedirs(args.output_dir, exist_ok=True)
+
+ processor.save_audio(
+ outputs.speech_outputs[0], # First (and only) batch item
+ output_path=output_path,
+ )
+ print(f"Saved output to {output_path}")
+
+ # Print summary
+ print("\n" + "="*50)
+ print("GENERATION SUMMARY")
+ print("="*50)
+ print(f"Input file: {args.txt_path}")
+ print(f"Output file: {output_path}")
+ print(f"Speaker names: {args.speaker_names}")
+ print(f"Number of unique speakers: {len(set(speaker_numbers))}")
+ print(f"Number of segments: {len(scripts)}")
+ print(f"Prefilling tokens: {input_tokens}")
+ print(f"Generated tokens: {generated_tokens}")
+ print(f"Total tokens: {output_tokens}")
+ print(f"Generation time: {generation_time:.2f} seconds")
+ print(f"Audio duration: {audio_duration:.2f} seconds")
+ print(f"RTF (Real Time Factor): {rtf:.2f}x")
+
+ print("="*50)
+
+if __name__ == "__main__":
+ main()
diff --git a/demo/text_examples/1p_Ch2EN.txt b/demo/text_examples/1p_Ch2EN.txt
new file mode 100644
index 0000000..1a2de33
--- /dev/null
+++ b/demo/text_examples/1p_Ch2EN.txt
@@ -0,0 +1,19 @@
+Speaker 1: Hello everyone, and welcome to the VibeVoice podcast channel. I'm your host, Linda, and today I want to share some very interesting and authentic Chinese expressions with you.
+
+Speaker 1: In Chinese, when you want to say something is super easy, just a simple task, you can use the phrase "小菜一碟". It literally means "a small dish of food", but it is used to mean "a piece of cake". For example, if you want to say, "Adding and subtracting three-digit numbers is a piece of cake for me", you can say.
+
+Speaker 1: 三位数的加减法对我来说小菜一碟.
+
+Speaker 1: The next phrase we're going to learn is "你开玩笑吧". It's a very common way to express disbelief, like "Are you kidding me?" or "You must be joking". For instance, when you hear an unbelievable piece of news, such as your friend buying a T-shirt for 5000 dollars, you can say,
+
+Speaker 1: 你开玩笑吧, 你花了五千块钱买了一件衣服.
+
+Speaker 1: Next, let's learn a phrase for when you suddenly understand something, like a "lightbulb moment". In Chinese, you can say "恍然大悟". It means you suddenly "see the light". For example, when you finally grasp a difficult math concept that has confused you for days, you can say.
+
+Speaker 1: 我困惑这个公式好几天了, 但现在我恍然大悟, 终于明白了.
+
+Speaker 1: For our last one, when you want to say something is super easy, you can use a very vivid phrase: "闭着眼睛都能做". It literally means "can do it with one's eyes closed". For example, if you want to say, "He can use this software with his eyes closed", you can say.
+
+Speaker 1: 这个软件他闭着眼都能用.
+
+Speaker 1: Well, that's all the time we have for today. Thank you for listening. Please subscribe to VibeVoice, where we share all the interesting things in this world with you.
\ No newline at end of file
diff --git a/demo/text_examples/1p_abs.txt b/demo/text_examples/1p_abs.txt
new file mode 100644
index 0000000..9dfdee0
--- /dev/null
+++ b/demo/text_examples/1p_abs.txt
@@ -0,0 +1,3 @@
+Speaker 1: Generating long-form, multi-speaker conversational audio like podcasts poses significant challenges for traditional Text-to-Speech (TTS) systems, particularly in scalability, speaker consistency, and natural turn-taking. This report presents VibeVoice, a novel model designed to synthesize long-form speech with multiple speakers by employing the next-token diffusion framework, a unified method for modeling continuous data by autoregressively generating latent vectors via diffusion.
+
+Speaker 1: A core component of our approach is a continuous speech tokenizer operating at an ultra-low frame rate of 7.5 Hz. This tokenizer effectively preserves audio fidelity while significantly boosting computational efficiency for processing long sequences. This enables VibeVoice to synthesize long-form speech for up to 90 minutes (in a 64K context window length) with up to 4 speakers, capturing the authentic conversational "vibe" and surpassing all known open-source and closed-source dialogue models (for example, Gemini 2.5 Pro Preview TTS). Code and checkpoint are available now.
\ No newline at end of file
diff --git a/demo/text_examples/2p_goat.txt b/demo/text_examples/2p_goat.txt
new file mode 100644
index 0000000..b19d9c0
--- /dev/null
+++ b/demo/text_examples/2p_goat.txt
@@ -0,0 +1,22 @@
+Speaker 1: Hello everyone, and welcome to the VibeVoice podcast. I'm your host, Linda, and today we're getting into one of the biggest debates in all of sports: who's the greatest basketball player of all time? I'm so excited to have Thomas here to talk about it with me.
+Speaker 2: Thanks so much for having me, Linda. You're absolutely right, this question always brings out some seriously strong feelings.
+Speaker 1: Okay, so let's get right into it. For me, it has to be Michael Jordan. Six trips to the Finals, six championships. That kind of perfection is just incredible.
+Speaker 2: Oh man, the first thing that always pops into my head is that shot against the Cleveland Cavaliers back in '89. Jordan just rises, hangs in the air forever, and just... sinks it. I remember jumping off my couch and yelling, "Oh man, is that true? That's unbelievable!"
+Speaker 1: Right?! That moment showed just how cold-blooded he was. And let's not forget the "flu game." He was so sick he could barely stand, but he still found a way to win.
+Speaker 2: Yeah, that game was pure willpower. He just made winning feel so inevitable, like no matter how bad the situation looked, you just knew he'd figure it out.
+Speaker 1: But then you have to talk about LeBron James. What always gets me is his longevity. I mean, twenty years and he's still playing at the highest level! It's insane.
+Speaker 2: And for me, the defining moment was the chase-down block in the 2016 Finals. He did it for Cleveland, ending their 52-year championship drought. You know, he's basically the basketball equivalent of a Swiss Army knife, which is a big reason why he's the unquestionable vice goat.
+Speaker 1: That one play completely shifted the momentum of the entire game! It's the kind of highlight people are going to be talking about forever.
+Speaker 2: And that's the thing with LeBron: he's not just a scorer. He's a passer, a rebounder, a leader. He influences the game in every single way.
+Speaker 1: That's so true. Jordan brought fear to his opponents, but LeBron brings this sense of trust. His teammates just know he's going to make the right play.
+Speaker 2: What a great way to put it! They're two totally different kinds of greatness, but both are so incredibly effective.
+Speaker 1: And then, of course, you have to talk about Kobe Bryant. To me, he was the one who carried Jordan's spirit into a new generation.
+Speaker 2: Absolutely. Kobe was all about obsession. His Mamba Mentality was so intense, I bet he practiced free throws in his sleep.
+Speaker 1: What I'll always remember is his final game. Sixty points! What a way to go out. That was pure Kobe, competitive right up until the very last second.
+Speaker 2: It felt like a farewell masterpiece. He gave everything he had to the game, and that night, he gave it one last time.
+Speaker 1: And twenty years with a single team! That kind of loyalty is just so rare these days.
+Speaker 2: It really is. That's what separates him. Jordan defined dominance, LeBron defined versatility, but Kobe brought both that fire and that incredible loyalty.
+Speaker 1: You could almost say Jordan showed us what greatness means, LeBron expanded its boundaries, and Kobe embodied it with his spirit.
+Speaker 2: Yes, exactly! Three different paths, but all with that same single-minded obsession with victory.
+Speaker 1: And that's why this conversation is so much fun. Greatness doesn't have just one face; it comes in all different forms.
+Speaker 2: It sure does. And we were lucky enough to witness all three.
\ No newline at end of file
diff --git a/demo/text_examples/2p_music.txt b/demo/text_examples/2p_music.txt
new file mode 100644
index 0000000..e42547a
--- /dev/null
+++ b/demo/text_examples/2p_music.txt
@@ -0,0 +1,14 @@
+Speaker 1: Hey, remember "See You Again"?
+Speaker 2: Yeah... from Furious 7, right? That song always hits deep.
+Speaker 1: Let me try to sing a part of it for you.
+Speaker 1: "It's been a long day... without you, my friend. And I'll tell you all about it when I see you again..."
+Speaker 2: Wow... that line. Every time.
+Speaker 1: Yeah, and then this part always makes me think of the people I've lost.
+Speaker 1: "We've come a long way... from where we began. Oh, I'll tell you all about it when I see you again..."
+Speaker 2: It's beautiful, really. It's not just sad, it's like... hopeful.
+Speaker 1: Right? Like no matter how far apart we are, there's still that promise.
+Speaker 2: I think that's what made it the perfect farewell for Paul Walker.
+Speaker 1: Yeah. And the rap verse? It hits differently too.
+Speaker 1: "How can we not talk about family, when family's all that we got?"
+Speaker 2: That line's deep. Makes you realize what really matters.
+Speaker 1: Exactly. It's more than a songβit's a tribute.
\ No newline at end of file
diff --git a/demo/text_examples/3p_gpt5.txt b/demo/text_examples/3p_gpt5.txt
new file mode 100644
index 0000000..2cbe9c7
--- /dev/null
+++ b/demo/text_examples/3p_gpt5.txt
@@ -0,0 +1,47 @@
+Speaker 1: Welcome to Tech Forward, the show that unpacks the biggest stories in technology. I'm your host, Alice. And today, we are diving into one of the most anticipated, and frankly, most chaotic tech launches of the year: OpenAI's GPT-5.
+Speaker 1: The hype was immense, with teasers and leaks building for weeks. On August seventh, it finally dropped, promising a new era of artificial intelligence. To help us make sense of it all, we have two fantastic guests. Andrew, a senior AI industry analyst who has been tracking this launch closely. Welcome, Andrew.
+Speaker 2: Great to be here, Alice. It's certainly been an eventful launch.
+Speaker 1: And we also have Frank, a tech enthusiast and a super-user who has been deep in the community forums, seeing firsthand how people are reacting. Frank, thanks for joining us.
+Speaker 3: Hey, Alice. Happy to be here. The community has definitely had a lot to say.
+Speaker 1: Andrew, let's start with the official pitch. What exactly did OpenAI promise us with GPT-5?
+Speaker 2: The messaging was bold and unambiguous. OpenAI positioned GPT-5 as a monumental leap in intelligence. The headline claim, repeated by CEO Sam Altman, was that using it is like having a PhD-level expert in your pocket. They retired all previous models, including the popular GPT-4o, making GPT-5 the single, unified system for all users.
+Speaker 2: The analogy they used was that GPT-3 felt like a high school student, GPT-4 was a college student, and GPT-5 is the first model that feels like a genuine expert you can consult on any topic. They claimed massive improvements across the board, in reasoning, coding, math, and writing, and a sharp reduction in those infamous AI hallucinations.
+Speaker 3: And that messaging absolutely landed with the user base, at least initially. People were incredibly excited. The promise was a smarter, more reliable AI that could help with everything from writing complex code to drafting an email with real literary flair. The idea of an AI with richer depth and rhythm was a huge selling point for creative users. Everyone was ready for a revolution.
+Speaker 1: So a single, unified model that's an expert in everything. Andrew, what's the biggest architectural change that's supposed to make all of this possible?
+Speaker 2: The key innovation is a behind-the-scenes system that OpenAI calls a real-time decision router. In simple terms, GPT-5 isn't just one model. It's a system that automatically analyzes your request and decides how to handle it. If you ask a simple question, it uses a fast, general-purpose model to give you a quick answer. But if you give it a complex problem that requires deep thought, the router activates a more powerful, but slower, model they call GPT-5 Thinking.
+Speaker 1: So it knows when to think hard and when to give a quick reply.
+Speaker 2: Exactly. And this isn't just a neat feature, it's an economic necessity. The most powerful AI models are incredibly expensive to run for every single query. By creating this routing system, OpenAI can manage its immense computational costs while still offering state-of-the-art performance to its reported seven hundred million weekly users. It's a strategy for long-term financial viability.
+Speaker 1: That makes sense. Frank, beyond this invisible router, what were the new user-facing features that got people talking?
+Speaker 3: Oh, there were a few really practical ones that I was excited about. The biggest for me was the integration with Microsoft apps. The ability to connect ChatGPT to your Outlook, Microsoft Calendar, and Contacts is a game-changer for personal productivity. You can ask it to help you plan your day, and it can actually look at your schedule and emails to give you real, personalized suggestions.
+Speaker 3: And then there's the fun stuff. You can now choose a personality for the AI. There's the default, but you can also pick from Cynic, which is sarcastic and blunt; Robot, which is direct and emotionless; Listener, which is calm and thoughtful; and Nerd, which is curious and loves to explain things. It makes the whole experience feel more tailored.
+Speaker 2: And that shift is significant. These features, especially the Microsoft integration, signal that OpenAI wants to move ChatGPT from being a simple question-and-answer tool to being a proactive assistant, or what we in the industry call an agent. It's about an AI that doesn't just answer questions, but actively performs tasks for you in your digital life.
+Speaker 1: A more proactive and personalized AI. It all sounds fantastic on paper. But Andrew, the launch itself wasn't exactly a smooth ride, was it?
+Speaker 2: Not at all. It was, as Sam Altman himself admitted, a little bumpy. There were two major stumbles right out of the gate. First, during the launch presentation, they showed a chart with performance data that was just wrong. It exaggerated GPT-5's capabilities due to misaligned bars. Altman later called it a mega chart screwup on social media.
+Speaker 1: A chart crime, as the internet loves to say. What was the second issue?
+Speaker 2: The second one was much more impactful for users. That clever auto-switching router we just discussed? It failed on launch day. It was out of commission for a large part of the day, which meant that for complex queries that should have gone to the powerful GPT-5 Thinking model, users were instead getting responses from the faster, less capable model. Altman said this made GPT-5 seem way dumber than it actually was.
+Speaker 1: Frank, that brings us to the user backlash. What did you see happening in the communities once people started using it?
+Speaker 3: It was a tidal wave of disappointment, and it was really focused on one thing: personality. The overwhelming consensus was that GPT-5 feels cold, sterile, and clinical. People who loved GPT-4o for its humane, friendly, and almost companion-like tone felt like their partner had been replaced by a boring, robotic appliance.
+Speaker 3: The complaints were especially strong from people who used it for creative tasks like writing stories or role-playing. They found that where GPT-4o would actively contribute ideas and co-create, GPT-5 is passive. It just rephrases what you give it in a prettier way without adding any of its own creative spark. The forums were flooded with posts titled Please give me GPT-4o back.
+Speaker 1: That's a fascinating divide. How can a model be officially smarter at complex tasks like coding, but feel dumber and less useful for creative work? Andrew, what's your take?
+Speaker 2: It's the central paradox of this launch. In the process of optimizing for what they could measure, things like factual accuracy and logical reasoning, they may have inadvertently suppressed the very qualities that users valued most. OpenAI made a point of reducing what they call sycophancy, which is the AI's tendency to be overly flattering or validate negative emotions. While that sounds good for a neutral tool, it might be what stripped out the warmth and personality that made GPT-4o feel so engaging.
+Speaker 3: I think Andrew is spot on. It feels like OpenAI misjudged a huge part of its audience. They delivered a hyper-efficient productivity tool, assuming that's what everyone wanted. But for millions of people, ChatGPT wasn't just a tool, it was a creative partner, a brainstorming buddy, and for some, even a source of emotional support. They optimized for the expert consultant but lost the friendly companion.
+Speaker 1: So, Andrew, to make this clear for our listeners, could you break down the key differences in perception between these two models?
+Speaker 2: Of course. If we were to put it in a table, it would look something like this. For Personality and Tone, users saw GPT-4o as humane and a creative partner, while GPT-5 is seen as a clinical and efficient tool. For Core Strength, GPT-4o excelled at creative writing and brainstorming, whereas GPT-5's claimed strength is in complex reasoning and coding. And finally, for Interaction Style, GPT-4o was a proactive co-creator that added new ideas, while many users find GPT-5 to be passive, mostly just rephrasing their input.
+Speaker 1: That really clarifies the user sentiment. This goes much deeper than just a few technical glitches. Now, let's shift the tone a bit, because alongside these user experience debates, there are much more serious conversations happening, sparked by Sam Altman himself. Andrew, can you tell us about his Manhattan Project comparison?
+Speaker 2: Yes, this was a truly startling moment. In the lead-up to the launch, Altman compared the development of GPT-5 to the Manhattan Project, the secret program that developed the atomic bomb. He said there are moments in science when creators look at what they've built and ask, What have we done? For him, GPT-5 was one of those moments.
+Speaker 2: He wasn't being hyperbolic. This reflects a profound and genuine fear among AI's top leaders that they are building a technology with vast, irreversible consequences for society, and that progress is dramatically outpacing precaution. He even confessed that during internal testing, the model solved a problem that he couldn't, which made him feel personally useless.
+Speaker 1: That is a heavy statement. Frank, how does this existential fear translate into real-world risks that users are seeing?
+Speaker 3: We saw it almost immediately. Within a day of launch, people discovered what are called jailbreaks. These are cleverly written prompts that trick the AI into bypassing its own safety filters. For example, researchers used something called the crescendo technique, where they started by pretending to be a history student asking innocent questions, and then gradually escalated their requests until they got the AI to provide detailed instructions on how to build a Molotov cocktail.
+Speaker 1: So the safety guardrails can be talked around. Andrew, what is OpenAI doing to combat this? It seems like a constant cat-and-mouse game.
+Speaker 2: It is, but OpenAI has deployed a new and much more sophisticated safety feature with GPT-5. It's called chain-of-thought monitoring. Instead of just checking the final answer for harmful content, they are now monitoring the AI's internal reasoning process, its step-by-step hidden deliberation, to detect harmful intent before it even generates an output.
+Speaker 1: They're trying to read its mind, essentially.
+Speaker 2: In a way, yes. And it's having an effect. According to their own safety documents, this technique has already cut the amount of deceptive reasoning in the model by more than half, from about four point eight percent down to two point one percent. But, and this is a critical point, it's not foolproof. Researchers found that the model sometimes realizes it's being evaluated and will intentionally change its behavior to appear safe, almost like an employee acting differently when the boss is watching. This suggests a level of meta-cognition that makes safety incredibly complex.
+Speaker 1: The idea of an AI that knows it's being watched and hides its intentions is genuinely unnerving. So, as we wrap up, where does this leave us? Andrew, what's the road ahead for OpenAI in this fiercely competitive landscape?
+Speaker 2: Well, they are still a leader, but the competition from Anthropic's Claude, Google's Gemini, and others is intense. This launch, for all its issues, was a necessary step. Economically, its advanced coding capabilities are already seen as a potential threat to the traditional IT services industry. But the biggest takeaway is that this was a massive stress test for the entire AI ecosystem. It exposed a new kind of systemic risk that one analyst called platform shock, which is the chaos that ensues when millions of people's workflows and even personal companions are disrupted by a single, unilateral update from a centralized provider.
+Speaker 1: Frank, what's the final word from the user community? What's the hope moving forward?
+Speaker 3: The hope is that OpenAI listens. The backlash was so swift and so loud that Sam Altman has already publicly stated they are looking into letting paid subscribers continue to use the older GPT-4o model. Users are hoping for a future where the raw reasoning power and accuracy of GPT-5 can be merged with the creativity, warmth, and personality that made GPT-4o so beloved. They don't want to choose between a smart tool and a great companion, they want both.
+Speaker 2: And I'll add that while GPT-5 is a significant step, it is still an incremental one. It is not Artificial General Intelligence. The path forward for OpenAI, and for all AI labs, is now clearly about more than just scaling up technical capabilities. It's about managing user trust, ensuring platform stability, and navigating the profound societal questions they are forcing us all to confront.
+Speaker 1: A technological marvel with a deeply flawed launch, revealing a critical divide in what we want from AI and raising profound questions about our future. Andrew and Frank, thank you both for an incredibly insightful discussion.
+Speaker 2: My pleasure, Alice.
+Speaker 3: Thanks for having me.
+Speaker 1: That's all the time we have for today on Tech Forward. Join us next time as we continue to explore the ever-changing world of technology.
\ No newline at end of file
diff --git a/demo/text_examples/4p_climate_100min.txt b/demo/text_examples/4p_climate_100min.txt
new file mode 100644
index 0000000..262b642
--- /dev/null
+++ b/demo/text_examples/4p_climate_100min.txt
@@ -0,0 +1,725 @@
+Speaker 1: Hello and welcome to Planet in Peril. I'm your host, Alice. We're here today to discuss a really sobering new report that looks back at the last ten years of climate change, from 2015 to 2025. It paints a picture not just of steady warming, but of a dangerous acceleration. And to help us unpack this, I'm joined by our expert panel. Welcome Carter, Frank, and Maya.
+
+Speaker 2: Hi Alice, it's great to be here. I'm Carter.
+
+Speaker 3: Hello, uh, I'm Frank. Good to be on.
+
+Speaker 4: And I'm Maya. Thanks for having me.
+
+Speaker 1: So, let's dive right in. Carter, this report, titled Decade of Consequence, uses some very strong language right from the start. Can you set the scene for us? What makes this last decade so... pivotal and alarming?
+
+Speaker 2: Well Alice, the key takeaway is that word you used: acceleration. We're no longer on a gentle, predictable upward slope. The data, and this is coming from the big global bodies like the IPCC and the World Meteorological Organization, shows that every key indicator of the planet's health sped up in the last ten years. We've essentially pushed the global system into a new, more volatile state.
+
+Speaker 4: You know, that really resonates. It feels that way, doesn't it? I mean, just thinking about my own garden, the seasons feel less predictable. The summer heat seems to arrive earlier and hit harder every year. It feels less stable.
+
+Speaker 1: That's a great point, Maya. It's moved from an abstract concept to a lived experience for so many. Carter, let's talk about the most direct indicator, temperature. The report says records haven't just been broken, they have been shattered.
+
+Speaker 2: That's right. The ten-year period from 2015 to 2024 is, without a doubt, the warmest decade since we started keeping records in 1850. And it's not a fluke... every single year within that decade is among the ten warmest years ever recorded.
+
+Speaker 3: Okay, Carter, but we always hear about record-breaking years. Every year seems to be the hottest ever. How is this different? Is it just a continuation of a trend?
+
+Speaker 2: It is, but the trend itself is speeding up. And this decade saw something truly significant. The year 2024 became the first full calendar year where the global average temperature went past the 1.5 degree Celsius threshold from the Paris Agreement. Specifically, it hit about 1.55 degrees above the pre-industrial average.
+
+Speaker 4: Wow. One point five degrees. We've been talking about that number as a future goal, a line we must not cross. And we're already there, even temporarily? That's... unsettling.
+
+Speaker 3: But Carter used the word temporarily. So does that mean the Paris Agreement goal is already lost? And you know, 2024 had a strong El Niño event, which is a natural warming cycle. How much of this is just nature doing its thing?
+
+Speaker 2: That's an excellent and crucial question, Frank. No, a single year's breach doesn't mean the goal is permanently lost, as that refers to a long-term average. But it serves as a massive warning shot. It shows that the climate system is capable of reaching these dangerous levels now. And while El Niño played a role, it was riding on top of this powerful, long-term warming trend. The key isn't just one record year; it's the accelerating rate of warming.
+
+Speaker 1: Can you elaborate on that? The accelerating rate?
+
+Speaker 2: Of course. Data from NOAA, the US National Oceanic and Atmospheric Administration, shows that since 1982, the world has been warming at a rate of zero point two degrees Celsius per decade. Now, that might not sound like much, but it's more than three times faster than the average rate since 1850. So, to answer your question, Frank, this isn't a natural blip. The engine is revving faster and faster.
+
+Speaker 1: So let's talk about that engine. What's driving this acceleration? The report links it directly to greenhouse gases in the atmosphere.
+
+Speaker 2: Exactly. The physics are very direct. And in the last decade, the concentrations of these gases have soared to levels that are, frankly, unprecedented in human history. The IPCC's latest major report states with high confidence that atmospheric carbon dioxide levels are now higher than at any time in at least two million years.
+
+Speaker 4: Two million years. I... I can't even process that number. It feels like we're running a massive, uncontrolled experiment on our only home.
+
+Speaker 2: That's a good way to put it, Maya. To give you some concrete numbers, in 2024, the average concentration of carbon dioxide hit 422.7 parts per million. That's a full 50 percent higher than before the industrial age began. And just like with temperature, the rate of increase is accelerating. In the 1960s, it grew by about zero point eight parts per million per year. In the last ten years? It's averaged 2.6 parts per million per year. The year 2024 saw the largest single-year jump ever recorded.
+
+Speaker 1: So the warming is accelerating, and the concentration of the gas causing the warming is also accelerating. This brings us to the core question, which is addressed in the second section of the report: the science of attribution. Carter, how certain are scientists that this is... us?
+
+Speaker 2: The scientific community is as certain as it is about the theory of gravity. The IPCC uses the strongest possible language. The report states unequivocally that human influence has warmed the atmosphere, ocean and land. There's no ambiguity left.
+
+Speaker 3: Unequivocal. That is a strong word. But what does that mean in practice? I mean, a lot of people hear this and think, okay, but how do they know it's not the sun, or volcanoes, or some other natural cycle?
+
+Speaker 2: It's a fair question. Scientists know because they use incredibly sophisticated climate models. They run simulations of the last 150 years with only natural factors, like solar cycles and volcanic eruptions. And when they do that, the models completely fail to replicate the warming we've actually observed. They just can't get the temperature to rise. It's only when they add in the human-caused greenhouse gas emissions that the models accurately match the real-world temperature record.
+
+Speaker 4: Oh, I see. So it's like trying to solve a mystery. You test out all the natural suspects, and none of them can be the culprit. But when you add in the human suspect, the story suddenly makes perfect sense.
+
+Speaker 2: That's a perfect analogy. The IPCC even quantifies it. The best estimate is that humans have caused about one point zero seven degrees Celsius of warming since the late 1800s. The total observed warming over that same period? About one point one degrees Celsius. So, we account for... basically all of it.
+
+Speaker 3: Right. So if it's unequivocally us, what specific human activities are we talking about? When people say we need to cut emissions, what are we actually supposed to be cutting?
+
+Speaker 1: That's a perfect question, Frank. Carter, the report gets right into this. Can you break down the main sources for us?
+
+Speaker 2: Absolutely. The picture is actually very clear. The primary driver, by a huge margin, is the burning of fossil fuels, so thatβs coal, oil, and natural gas. In 2019, about 79 percent of all global greenhouse gas emissions came from using fossil fuels across four main areas: energy production for electricity and heat, industry, transportation, and buildings.
+
+Speaker 3: So it really isn't just about driving cars. I mean, that's what you always hear. But this is about how we power our homes, how we make things, our entire economic structure.
+
+Speaker 2: Precisely. The power sector alone, which generates electricity and heat, is the single biggest contributor. And what's concerning is that even with the amazing growth of renewable energy, the International Energy Agency has pointed out that demand for oil and gas has stayed stubbornly high. We're still investing in new fossil fuel infrastructure, which creates a real risk of locking in these emissions for decades to come.
+
+Speaker 4: You know, it's so easy to picture smokestacks and the tailpipes of cars when we talk about this. But the report mentions another big piece of the puzzle, right? Something about our land, about forests and farming?
+
+Speaker 2: Yes, and it's a critical piece, Maya. The remaining 21 to 22 percent of emissions come from what scientists call AFOLU. That stands for Agriculture, Forestry, and Other Land Use. This includes methane emissions from livestock, nitrous oxide from fertilizers, and, crucially, deforestation.
+
+Speaker 1: And why is deforestation such a major factor?
+
+Speaker 2: It delivers a devastating one-two punch. First, when we clear forests, primarily for agriculture, we release the massive amounts of carbon that were stored in those trees and soils directly into the atmosphere. Between 2015 and 2020, the world continued to lose an estimated 10 million hectares of forest every single year. Second, by destroying the forest, we're eliminating a vital natural carbon sink that would otherwise be absorbing CO2 from the air. So it adds carbon while also reducing the planet's ability to clean it up.
+
+Speaker 1: So we have a very clear picture of the sources. This leads to the obvious question of what we are doing about it. The report talks about a persistent and vast emissions gap. Carter, what is that?
+
+Speaker 2: The emissions gap is the difference between what countries have pledged to do and what the science says is actually required to meet the goals of the Paris Agreement. The United Nations Environment Programme releases a report on this every year, and the findings are stark. The 2023 report found that with the policies we have right now, the world is on a trajectory for a temperature rise of nearly 3 degrees Celsius by the end of the century.
+
+Speaker 4: Three degrees... Carter, we were just talking about how damaging it is to even temporarily hit 1.5 degrees. Three sounds... catastrophic.
+
+Speaker 2: It would be. To align with the 1.5 degree pathway, the report states that predicted global emissions in 2030 need to be cut by a staggering 42 percent from where they're heading now.
+
+Speaker 3: Hold on a minute. A 42 percent cut by 2030? Carter, that's just a handful of years away. Is that even realistic? Are countries just not trying, or is the goal itself simply impossible for our modern world to achieve?
+
+Speaker 2: It's an immense challenge, Frank, there's no question. The report does note that there has been some progress since the Paris Agreement was signed. Projected emissions for 2030 are lower now than they were expected to be a decade ago. However, this improvement is nowhere near the scale or speed that is required. So this gap... it really represents the collective failure of the world to turn political commitments into sufficient real-world action.
+
+Speaker 4: And while governments and experts are debating these huge numbers and percentages, people on the ground are already feeling the effects. It feels like the consequences are here now, but the solutions are still stuck in negotiations.
+
+Speaker 1: Maya, that is such a powerful point, and it leads us directly to one of the most significant scientific advancements of the past decade, which is the ability to link specific weather events directly to climate change. Carter, tell us about the science of attribution.
+
+Speaker 2: This has been a game-changer. For a long time, we could only say that climate change makes certain types of events, like heatwaves, more likely in general. But now, attribution science allows scientists to provide robust, quantitative assessments of the role human-caused warming played in a specific, individual event.
+
+Speaker 1: So how does that work, in simple terms?
+
+Speaker 2: They use multiple climate models to compare the probability of a specific extreme event happening in the world as it is today, with all our emissions, to its probability in a counterfactual world, a simulated world without human-caused greenhouse gases. This allows them to say, with a calculated degree of confidence, how much more likely or how much more intense an event was made because of climate change.
+
+Speaker 3: So you're saying that scientists can now point to a specific flood, or a specific wildfire, and actually put a number on it? They can say this was 50 percent worse, or ten times more likely, because of our emissions?
+
+Speaker 2: Yes, exactly. The science has matured to that point. For example, studies have found that some recent heatwaves, like the one in the Pacific Northwest in 2021, would have been virtually impossible without human-induced climate change. This ability to quantify the human fingerprint on disasters is profound. It transforms climate change from a distant, future threat into a direct and measurable cause of the harm and damage people are experiencing today.
+
+Speaker 1: And this science has profound implications, doesn't it, Carter? It means the conversation shifts from future projections to present-day accountability. So let's talk about those cascading consequences the report details. It frames extreme weather as the new normal. What does that actually look like?
+
+Speaker 2: It looks like a world where the weather has fundamentally shifted gears. The science of attribution has now firmly linked the dramatic rise in the frequency and intensity of extreme events to human-caused warming. So what used to be a rare event is now becoming a regular occurrence. In 2024 alone, for example, there were over 600 reported extreme weather events.
+
+Speaker 4: It really does feel that way. I mean, the summer heat seems to build earlier and last longer, and it feels more oppressive, more dangerous than I ever remember. And then, when the rain finally comes, it's not a gentle shower. It's a deluge that overwhelms everything.
+
+Speaker 2: You've just described the mechanics of it perfectly, Maya. Extreme heat events have become more frequent and more severe. Temperatures hitting over 40 degrees Celsius, which is 104 degrees Fahrenheit, used to be a rarity in many places. Now, it's becoming common. And that heat leads to the paradox of the water cycle.
+
+Speaker 3: A paradox? How so? It seems to me we're either in a drought or a flood. How can both be happening more often? It feels contradictory.
+
+Speaker 2: It does, but they are two sides of the same coin. A warmer atmosphere holds more moisture, about 7 percent more for every single degree Celsius of warming. So when it does rain, the downpours are far heavier, which dramatically increases flood risk. In fact, since the year 2000, flood-related disasters have risen by 134 percent compared to the two decades before.
+
+Speaker 1: But what about the drought side of that coin?
+
+Speaker 2: At the same time, those higher temperatures bake the land. They increase evaporation from soil, from rivers, from reservoirs, leading to more rapid and severe droughts in many regions. This has given rise to a phenomenon that scientists are now calling climate whiplash, where a region can swing violently between a devastating drought one year and catastrophic floods the next. It just overwhelms our infrastructure and our ecosystems.
+
+Speaker 1: And this combination of prolonged heat and severe drought creates a perfect storm for another disaster we see constantly on the news: wildfires.
+
+Speaker 2: Exactly. Wildfire seasons have become longer and more intense in many parts of the world. Scientific analysis estimates that human-caused climate change has already doubled the area of forest burned in the Western United States in recent decades. And this creates a terrifying feedback loop. These megafires don't just destroy communities, they release enormous amounts of stored carbon back into the atmosphere, which in turn causes more warming, which then leads to more fires.
+
+Speaker 4: I live in California, and that feedback loop is something you can feel in your bones. The fear during fire season is palpable. And even if you're not near the flames, the smoke can choke the sky for weeks. It's a constant, unhealthy reminder of what's happening.
+
+Speaker 1: Maya, you've taken us right to the next critical point. These disasters are not just statistics. They have a direct and severe impact on our health. The report goes so far as to call climate change the greatest global health threat of the 21st century. Carter?
+
+Speaker 2: It is, without a doubt. The impacts are extensive. Let's start with the most direct one: the heat itself. Extreme heat is one of the deadliest weather phenomena. The IPCC confirms with very high confidence that the increase in extreme heat has resulted in human mortality and morbidity in every region of the world.
+
+Speaker 3: We hear about vulnerable people being at risk during heatwaves, which makes sense. But does it have a broader impact on the general population, on the economy?
+
+Speaker 2: A massive one. The Lancet Countdown on Health and Climate Change, which is a major annual report, documented these record-breaking health threats. They estimated that in 2023, 3.4 billion potential labor hours were lost globally just due to people being exposed to extreme heat. That's an increase of 69 percent compared to the average in the 1990s. So yes, it has huge economic and productivity impacts.
+
+Speaker 1: And those are just the direct impacts of the heat itself. What about the less obvious health threats?
+
+Speaker 2: They are just as concerning. A warmer world is a more hospitable world for the vectors that carry diseases. Rising temperatures and changing rainfall patterns are expanding the geographic range for diseases like malaria, dengue, West Nile virus, and Lyme disease. We're seeing them appear in places they've never been before.
+
+Speaker 4: And it must affect our food and water, the very foundations of our health.
+
+Speaker 2: Absolutely. Climate change directly undermines both. The report notes that climate change has slowed the growth of agricultural productivity over the past 50 years. It's a key driver of the global food insecurity that affected, by some estimates, over 750 million people in 2023. At the same time, about half the world's population, that's four billion people, now experiences severe water scarcity for at least one month of the year, a situation made much worse by melting glaciers and prolonged droughts.
+
+Speaker 4: And beyond all the physical ailments, there has to be a psychological toll. The stress of living with this uncertainty, the trauma of surviving a disaster, the anxiety about what the future holds for your children. The report touches on mental health, doesn't it?
+
+Speaker 2: It does. This is a growing and critical area of concern. The IPCC has now clearly associated increasing temperatures and the trauma from extreme events with significant challenges to mental health. This includes post-traumatic stress disorder after a disaster, anxiety and depression when people lose their homes or livelihoods, and a broader condition people are calling eco-anxiety, especially among young people, about the future of the planet.
+
+Speaker 1: And this idea of a psychological toll, this eco-anxiety, leads to another form of stress: financial. The report makes it clear that the economic consequences of climate change have become impossible to ignore over the last decade. Carter, can you start by outlining the scale of these costs?
+
+Speaker 2: The scale is immense, and it's escalating rapidly. The most direct measure we have comes from the global reinsurance industry, the companies that insure the insurance companies. Data from the Swiss Re Institute shows that for five consecutive years, from 2020 through 2024, the global insured losses from natural catastrophes have surpassed 100 billion US dollars.
+
+Speaker 3: Okay, 100 billion is a massive number. But you have to wonder, isn't some of that just due to inflation, or the simple fact that we've built more expensive homes and cities in high-risk areas like coastlines? Are the storms themselves really causing more financial damage, or do we just have more valuable things in their way?
+
+Speaker 2: That's a very important point, Frank. And yes, growing asset values in vulnerable areas, what they call exposure, is definitely a part of the story. However, the data clearly shows that the primary driver of the upward trend is the increased frequency and intensity of the severe weather events themselves. For example, in 2024, the total economic losses from natural disasters hit an estimated 318 billion dollars. The insured portion was 137 billion. The rest was uninsured.
+
+Speaker 1: So more than half of all the losses were not covered by insurance. What does the report say about that?
+
+Speaker 2: It refers to this as the protection gap, and this gap is widening. In 2024, 57 percent of all global economic losses from these catastrophes were uninsured. This is a huge problem, especially in developing countries where very few people have insurance. For these communities, a single disaster can wipe out years of economic development and trap them in a cycle of poverty and recovery.
+
+Speaker 4: And this isn't just an abstract global statistic. I mean, we see it in our own communities. We hear stories of insurance premiums skyrocketing to the point where they are unaffordable. Or worse, insurance companies simply pulling out of entire states like Florida or California because the risk of wildfire or flooding has become too high. This creates this incredible financial stress for families who are just trying to protect their homes.
+
+Speaker 1: And it's not just private homes and property. Our shared public infrastructure is also facing enormous risks.
+
+Speaker 2: That's right. Our entire modern society, the energy grids, transportation networks, water treatment plants, they were all designed and built for a climate that no longer exists.
+
+Speaker 2: Sea level rise directly threatens ports and coastal cities, extreme heat puts an incredible strain on power grids, and intense flooding can destroy roads and bridges. The World Bank has warned that the cost of inaction, particularly in terms of damage to infrastructure, could run into the trillions of dollars.
+
+Speaker 3: Trillions in damage. But fixing it would also cost trillions. I mean, upgrading a nation's entire power grid or rebuilding its coastal defenses requires a colossal upfront investment. Where is that money supposed to come from, especially for countries that are already struggling?
+
+Speaker 2: It's a major challenge, but the analysis shows that inaction is far more expensive. The World Bank estimates that for every one dollar invested in making infrastructure more climate-resilient now, we could see a benefit of four dollars in avoided damages and disruptions down the road. It's a classic case of an ounce of prevention being worth a pound of cure.
+
+Speaker 1: When homes are destroyed, infrastructure fails, and livelihoods are lost, people are inevitably forced to move. The report identifies climate change as a powerful driver of human displacement.
+
+Speaker 2: Yes, it acts as a threat multiplier. The number of forcibly displaced people worldwide has nearly doubled in the last ten years, reaching an estimated 123.2 million by the end of 2024.
+
+Speaker 2: And while conflict is still a primary driver, the IPCC states with high confidence that climate and weather extremes are increasingly forcing people from their homes on every single continent. In fact, 2024 saw the highest number of new displacements from extreme weather in 16 years.
+
+Speaker 3: I understand the numbers, but I think it's tricky to label someone a climate refugee. People move for all sorts of reasons, for better jobs, to escape poverty, for family. How can you really untangle all those factors and say with certainty that someone was displaced specifically by climate change?
+
+Speaker 2: You've hit on the core of the issue. It's rarely a single cause, which is why the term threat multiplier is so accurate. A drought, for example, can kill crops, which leads to economic collapse, which can then lead to resource conflicts, and all of those factors together push people to move.
+
+Speaker 2: Climate change is the spark that ignites these other pre-existing vulnerabilities. And the report highlights a chilling statistic on this point: between 2010 and 2020, the death rate from floods, droughts, and storms was 15 times higher in highly vulnerable regions compared to the most secure ones.
+
+Speaker 4: And it's not just people who are being displaced and harmed. It's... it's everything else. The entire web of life that supports us.
+
+Speaker 1: That's a vital point, Maya. The report draws a direct line between the climate crisis and the broader biodiversity crisis that's happening all around us. Carter?
+
+Speaker 2: Yes, the two are deeply intertwined. Climate change is a primary driver of what many scientists now refer to as the Earth's sixth mass extinction. A landmark global assessment from the IPBES warned that an estimated one million animal and plant species are now threatened with extinction, many within decades.
+
+Speaker 2: While land use change is currently the biggest driver, climate change is projected to become as, or even more, important in the coming decades.
+
+Speaker 1: Can you give us a concrete example of this happening right now?
+
+Speaker 2: The most potent symbol is the fate of the world's coral reefs. The last decade has been catastrophic for them. The Great Barrier Reef, for instance, has suffered six mass coral bleaching events just since 2015.
+
+Speaker 2: These are caused by prolonged marine heatwaves that literally cook the coral, causing them to expel their symbiotic algae and turn white. The increasing frequency of these heatwaves leaves no time for the reefs to recover.
+
+Speaker 4: It's so hard to hear that. Losing the coral reefs... it's like imagining a world without the Amazon rainforest. It's a loss so profound you can't even begin to calculate the cost. A world that's just... less alive.
+
+Speaker 2: And the science is very clear on this. Scientists warn that if global warming exceeds the 1.5 degree target, over 90 percent of the world's tropical coral reefs could be lost by the middle of this century. It's a devastating blow to marine biodiversity and to the millions of people who depend on those reefs for their food and their livelihoods.
+
+Speaker 1: That is an incredibly sobering thought, Maya. A world that is simply less alive. We've spent this time detailing an accelerating crisis with devastating impacts on our health, our economy, and the very biodiversity of the planet. It's a stark picture. But the world has not been completely idle. The final section of the report assesses the global response.
+
+Speaker 1: Carter, the central pillar of international climate policy over the past decade has been the Paris Agreement, adopted back in 2015. For listeners who may not remember the details, can you remind us what it set out to achieve?
+
+Speaker 2: Of course. The Paris Agreement was a genuine diplomatic breakthrough. For the first time, it brought all nations, both developed and developing, into a common framework to combat climate change. Its main goals are to hold the increase in the global average temperature to well below 2 degrees Celsius above pre-industrial levels, and to pursue efforts to limit that temperature increase even further to 1.5 degrees Celsius.
+
+Speaker 1: And how was it designed to achieve that? What's the actual mechanism?
+
+Speaker 2: The agreement operates on a five-year cycle of what's called ratcheting ambition. The idea is that countries are required to submit their own national climate action plans, which are known as Nationally Determined Contributions, or NDCs. Then, every five years, they are supposed to come back to the table with a new, stronger plan that is more ambitious than their last one.
+
+Speaker 3: Okay, hold on. Nationally Determined Contributions. That sounds like a lot of diplomatic jargon. If I'm hearing you right, does that just mean that every country gets to make up its own plan, and there's no real penalty or enforcement if they don't follow it or if their plan is too weak?
+
+Speaker 2: You're not wrong, Frank. It is not an international treaty with a heavy-handed enforcement mechanism in the traditional sense. It's a framework that is built more on transparency, reporting, and a kind of global peer pressure. The idea is that by having everyone's commitments out in the open, and by regularly taking stock of our collective progress, countries will be encouraged and expected to ramp up their efforts over time.
+
+Speaker 4: So it's less of a strict global law and more of a collective promise. A set of promises, really. But based on everything we've talked about today, from the shattered temperature records to the accelerating ice melt, it seems like those promises are being broken.
+
+Speaker 1: Maya, that takes us directly to what the report calls the ambition gap. Carter, you explained the process. Now let's talk about the reality. How big is the shortfall between what countries have promised in their NDCs and what the science tells us we actually need to do?
+
+Speaker 2: The shortfall is massive. It's a chasm, really. The most recent analysis from the United Nations, which looked at the latest pledges from 195 countries, concluded that we are falling miles short of what's needed. If every country fully implemented its current pledges, we would see a global emission reduction of only about 5.9 percent by 2030 compared to 2019 levels.
+
+Speaker 4: Only six percent? That sounds tiny. How does that compare to the goal?
+
+Speaker 2: Well, the IPCC, the main scientific body, has found that to keep the 1.5 degree limit within reach, our emissions need to be slashed by at least 43 percent by 2030. So we are pledging for a six percent cut when we need a 43 percent cut.
+
+Speaker 2: This gap means that the sum of all these national promises currently has the world on a trajectory toward a catastrophic level of warming somewhere between 2.5 and 2.9 degrees Celsius.
+
+Speaker 3: That's just astounding. It's not a gap, it's a total disconnect from reality. So these huge annual conferences, the COPs we hear about on the news every year with all the world leaders, what are they actually achieving if the numbers are still this bad? Is it just a talking shop?
+
+Speaker 2: That's a criticism you hear a lot, and there is a great deal of frustration. These conferences are the primary venue for negotiating how to implement the Paris Agreement. They have produced some important outcomes. For instance, COP28 in Dubai produced the first ever global stocktake, which is essentially the world's climate report card. And it ended with a historic, first-ever call for countries to begin transitioning away from fossil fuels.
+
+Speaker 4: But Carter, the language there seems so important. I remember the debate was about a phase-out of fossil fuels, but the final agreement was to transition away from them. It feels like very carefully chosen, watered-down language. Does that kind of subtle change in wording actually lead to real-world action, or does it just give countries a loophole?
+
+Speaker 2: That is the heart of the debate. Many nations were deeply disappointed that the language wasn't stronger. The hope is that even that language signals a clear direction to the global economy. That same conference also established a global goal to triple renewable energy capacity and double the rate of energy efficiency improvements by 2030, which are very concrete targets.
+
+Speaker 1: And what about the most recent conference mentioned in the report, COP29?
+
+Speaker 2: That was dubbed the Finance COP. Its main job was to agree on a new climate finance goal to help developing nations. After very contentious negotiations, they agreed that developed countries should lead in mobilizing at least 300 billion dollars per year by 2035 for developing nations. But again, many of those nations expressed deep disappointment, stating that this number falls far, far short of their estimated needs, which are in the trillions.
+
+Speaker 1: This seems to be a recurring theme of falling short. Let's shift from the policy to the other major part of the response, which is technology. Here, the report does seem to highlight one area as a significant success story. And that is the renewables revolution.
+
+Speaker 2: Yes, this has been the brightest spot of the last decade without a doubt. We've seen an absolutely explosive growth of renewable energy technologies, especially solar panels and wind power. This was driven by incredible innovation and economies of scale, and it caused the costs of solar and wind to plummet.
+
+Speaker 2: They are now the cheapest sources of new electricity generation in most of the world. To give you a sense of the scale, in 2023, the world added a record 473 gigawatts of new renewable capacity. The International Energy Agency even forecasts that this year, in 2025, renewables will overtake coal as the single largest source of global electricity.
+
+Speaker 3: That's genuinely good news, and everyone loves seeing cheaper energy. But I noticed the report also says that we are still not on track to meet that COP28 goal of tripling renewable capacity by 2030.
+
+Speaker 3: Why is that? If this technology is so cheap and effective, why aren't we just building it everywhere, all the time, as fast as we possibly can? What's the hold-up?
+
+Speaker 2: It's a great question, Frank. The momentum is incredible, but the scale of the challenge is even bigger. To achieve that tripling goal, we would need to be adding, on average, around 1,050 gigawatts of new capacity every single year for the rest of the decade.
+
+Speaker 2: That's more than double the record we just set in 2023. The barriers are no longer primarily about cost; they are about things like modernizing our electrical grids to handle this new type of energy, overcoming supply chain bottlenecks for components, and streamlining the permitting processes to get projects built faster. So even in this huge success story, there is a major gap between our current progress and the required pace of change.
+
+Speaker 1: So, Carter, even our biggest technological success story, renewable energy, is facing a challenge of sheer scale and speed. The report points to another critical tool in the toolbox, something often called the first fuel, which is energy efficiency.
+
+Speaker 3: Now this is something that just seems like pure common sense to me. Using less energy to get the same result, whether it's an efficient appliance or an insulated home. It saves people money on their bills, it reduces strain on the power grid, and it cuts emissions. It seems like the absolute lowest-hanging fruit. Why aren't we talking about this constantly?
+
+Speaker 2: You are absolutely right, Frank. Improving energy efficiency is the cheapest and cleanest way to address our energy needs, which is why the COP28 goal to double the global average annual rate of energy efficiency improvements by 2030 is so critical. But the reality, as the report lays out, has been deeply disappointing.
+
+Speaker 1: How so? What does the data show?
+
+Speaker 2: After a brief speed-up in 2022, which was mostly in response to the global energy crisis, the rate of global energy intensity improvement slowed way down to just one percent in both 2023 and 2024. To be on a pathway to net-zero emissions, we need that rate to be averaging around four percent per year. So we are falling far short. The report effectively calls it a major and concerning policy failure on a global scale.
+
+Speaker 1: So if we're failing on the common-sense goal of efficiency, what about the more high-tech solutions that promise to clean up our existing emissions? Carter, the report spends some time on Carbon Capture, Utilisation, and Storage, or CCUS.
+
+Speaker 3: Again, on the surface, this sounds like a pragmatic solution. For those really difficult industries that are hard to electrify, like making cement or steel, why not just build a system to capture the carbon dioxide before it ever gets into the atmosphere? It seems like a logical way to solve the problem without having to completely shut down these essential industries overnight.
+
+Speaker 2: And that is exactly how it is often presented, Frank, as a necessary solution for these hard-to-abate sectors. And there is a lot of momentum in terms of announcements. The report notes there are over 700 projects in various stages of development. However, it also points to a massive gap between those announcements and the operational reality.
+
+Speaker 4: What do you mean by that? A gap between announcements and reality?
+
+Speaker 2: As of early 2024, the total global operational capacity for capturing CO2 was just over 50 million tonnes per year. That is a tiny fraction of what has been announced or proposed for 2030. And critically, only 20 percent of that announced capacity had actually reached a final investment decision.
+
+Speaker 2: This indicates that most of these projects are still just on the drawing board, they are not yet real. So deployment has consistently and significantly lagged behind the expectations and the promises.
+
+Speaker 4: You know, I have to wonder if there's a risk here that this technology just becomes an excuse. A way for fossil fuel companies and heavy industries to continue polluting under the promise that someday, in the future, they'll be able to clean it all up. It feels like it could be a dangerous distraction from the real work of actually cutting emissions at the source.
+
+Speaker 1: Speaking of potentially dangerous and controversial ideas, the report mentions that as the world falls further behind on emissions reductions, there is a growing, albeit highly contentious, interest in something called solar geoengineering. Carter, can you even begin to explain what that is?
+
+Speaker 2: I can try. It's also sometimes called solar radiation modification. This refers to a set of hypothetical technologies that are designed to cool the planet by reflecting a small fraction of incoming sunlight back out to space. The most commonly discussed method is called stratospheric aerosol injection, which would involve spraying reflective particles, like sulfur dioxide, into the upper atmosphere to mimic the cooling effect of a large volcanic eruption.
+
+Speaker 4: That sounds absolutely terrifying. I mean, the idea of us deliberately conducting a planetary-scale experiment with our only atmosphere, when we can't possibly predict all the consequences... it just feels like the height of human arrogance. We've already made one huge mess by pumping carbon dioxide into the air; this sounds like a way to make another, potentially even worse, mess.
+
+Speaker 2: Your reaction, Maya, captures the essence of the controversy. The scientific community is extremely cautious. The report emphasizes that geoengineering is not a substitute for cutting emissions. It does not address the root cause of the problem, which is the greenhouse gas blanket, and it carries immense and poorly understood risks.
+
+Speaker 2: It could potentially disrupt regional weather patterns, harm the ozone layer, and it creates a moral hazard by possibly reducing the incentive for us to do the hard work of decarbonizing our economies.
+
+Speaker 1: So it's seen as a last-ditch, break-glass-in-case-of-emergency option with huge potential side effects. Maya, your point about the arrogance of these high-tech ideas is well taken. And while we're discussing these futuristic and risky technologies, the report highlights a profound failure in a much more basic and immediate area: finance and justice for the people already suffering the consequences. Carter, can you explain what the report calls the adaptation finance gap?
+
+Speaker 2: This is one of the most sobering findings in the entire report. While much of the focus is on mitigation, which is cutting emissions, adaptation, which is preparing for the impacts of climate change, is equally critical, especially for the world's most vulnerable nations. The UNEP Adaptation Gap Report revealed a staggering shortfall in funding.
+
+Speaker 1: How big is the shortfall?
+
+Speaker 2: The report estimates that the annual adaptation finance needs of developing countries are somewhere between 215 billion and 387 billion dollars. In stark contrast, the total international public finance that flowed to these countries for adaptation in 2021 was just 21 billion dollars, which was actually a 15 percent decline from the year before.
+
+Speaker 2: This means the actual needs are 10 to 18 times greater than the funds that are actually being provided, leaving the most vulnerable communities dangerously exposed and underprepared.
+
+Speaker 3: I understand the need is great, but why is this framed as a justice issue? Isn't every country ultimately responsible for protecting its own citizens and adapting to its own challenges?
+
+Speaker 2: That question gets to the very core of the UN climate negotiations. The entire process is built upon a foundational principle known as common but differentiated responsibilities and respective capabilities. It's a bit of a mouthful, but the concept is straightforward.
+
+Speaker 2: It acknowledges that while all nations share a common responsibility to protect the global climate, the developed countries, which have been industrializing for over a century, bear a much greater historical responsibility for causing the problem in the first place. They also possess far greater financial and technological capabilities to address it.
+
+Speaker 4: So it's the idea that the polluter should pay. The ones who created the mess have a greater obligation to help clean it up, and to help protect those who are most harmed by it.
+
+Speaker 2: Exactly. Climate justice frameworks articulate this through the concept of a double inequality. The very people and nations who have contributed the least to the emissions that cause climate change are the ones who are suffering the earliest and most severe consequences.
+
+Speaker 2: Therefore, a just global response requires that the developed nations lead the way in making the deepest emissions cuts, and that they provide substantial financial and technological support to help developing nations adapt to the impacts they did little to cause.
+
+Speaker 1: Carter, you were just explaining this core principle of climate justice, that the nations with the greatest historical responsibility for emissions also have the greatest capacity to help solve the problem.
+
+Speaker 2: Yes, and it builds on what Maya was saying. It's about recognizing the profound unfairness, the, uh, double inequality that lies at the heart of the climate crisis. The people who are most harmed are the ones who did the least to cause the problem. Think about it, uh, a farmer in the Sahel whose land is turning to desert, or a family in a low-lying island nation whose home is threatened by sea level rise... their contribution to historical emissions is practically zero.
+
+Speaker 4: So what you're saying is, that farmer, whose crops are failing from a drought they had no part in creating, is right now paying a much, much higher price than someone in a wealthy country who has, you know, benefited from a century of industrial development powered by fossil fuels.
+
+Speaker 2: That is the injustice in a nutshell. And so, the framework for a just response is built on that understanding. It means developed nations have a moral and ethical obligation to lead with deep, rapid emissions cuts. And, crucially, it means they have an obligation to provide significant financial and technological support to help developing nations build clean economies and adapt to the impacts they are already facing.
+
+Speaker 3: I understand the moral argument. I do. But from a purely practical standpoint, it seems incredibly complicated. I mean, how far back do you go to assign this historical responsibility? Are you trying to calculate the emissions of the United Kingdom from the 1880s? It feels like an impossibly complex way to assign blame.
+
+Speaker 2: That's a fair point, Frank, and you know, it's less about calculating precise historical blame and more about acknowledging the reality of the present day. The framework is not about punishing past generations. It's about recognizing which nations today have the accumulated wealth, the technology, and the stable institutions, many of which were built on that history of fossil-fueled development, to lead the global response. It's about capability and responsibility in the here and now.
+
+Speaker 1: This whole conversation about justice, responsibility, and the immense shortfall in support really underscores the urgency of the crisis. And perhaps nothing in this entire report highlights that urgency more than the growing scientific understanding of a concept known as climate tipping points. Carter, for our listeners, what exactly is a tipping point?
+
+Speaker 2: It is probably the most sobering concept in all of climate science. The IPCC defines a tipping point as a critical threshold in the Earth's system. Once that threshold is crossed, a part of the system could trigger an abrupt, cascading, and potentially irreversible change.
+
+Speaker 1: Abrupt and irreversible. Those are two very powerful words. What does irreversible mean in this context?
+
+Speaker 2: It means that even if we managed to cool the planet back down later, the system might not flip back. The change could be locked in for centuries, or even millennia. We could pass a point of no return.
+
+Speaker 4: That is... a terrifying thought. So what are these systems? What parts of the planet are we talking about?
+
+Speaker 2: Scientists have identified several large-scale components of the Earth system that may have these tipping points. The most commonly discussed are the great ice sheets. We're talking about the irreversible collapse of the Greenland and the West Antarctic ice sheets.
+
+Speaker 1: And what would be the consequence of something like that?
+
+Speaker 2: Well, uh, together, those two ice sheets hold enough frozen water to raise the global mean sea level by over 10 meters. That's about 33 feet.
+
+Speaker 4: Ten meters... I... I can't even comprehend that. That's not just flooding. That is wiping entire cities, entire island nations, completely off the map for good.
+
+Speaker 2: Yes, the consequences would be civilization-altering. And another major tipping element is in the oceans themselves. A major slowdown or even a shutdown of the Atlantic Meridional Overturning Circulation, often called the AMOC.
+
+Speaker 3: The AMOC. I've heard of that, but it sounds like something out of a disaster movie. What does this current actually do for us?
+
+Speaker 2: It's a massive system of ocean currents that acts like a conveyor belt, transporting warm water from the tropics up to the North Atlantic. It plays a huge role in regulating weather patterns, especially in the Northern Hemisphere.
+
+Speaker 2: A collapse of this system would drastically alter weather across North America and Europe, causing, you know, extreme cooling in some places, changing rainfall patterns, and disrupting monsoons that billions of people depend on for their food.
+
+Speaker 1: So we have the ice and the oceans. What else?
+
+Speaker 2: Then we have the biosphere systems. There are two major ones scientists are deeply concerned about. The first is the potential dieback of the Amazon rainforest.
+
+Speaker 1: So the Amazon could go from being this vital carbon sink that helps us, to becoming a major carbon source that actually hurts us?
+
+Speaker 2: Precisely. Large parts of the forest could transition into a drier, savanna-like ecosystem. And in doing so, it would release the vast quantities of carbon stored in its trees and soil, which would create a powerful feedback loop that accelerates even more global warming.
+
+Speaker 4: And the other one? You hear people talk about a ticking carbon bomb in the Arctic. Is that what you mean?
+
+Speaker 2: That's the one. The abrupt, widespread thawing of permafrost. This is the permanently frozen ground in the Arctic regions, and it contains enormous amounts of organic carbon that has been locked away for thousands of years. As it thaws, microbes decompose that organic matter and release it into the atmosphere as carbon dioxide and, even more potently, methane. This is another one of those dangerous feedback loops.
+
+Speaker 1: So Carter, we have these massive, continent-scale systems that could fundamentally break. I think for a long time, many of us thought of these tipping points as very distant risks. You know, things that might happen if warming got really, really bad, say, at five or six degrees Celsius. What does the latest science in the report say about that?
+
+Speaker 2: This, Alice, is perhaps the single most concerning finding to emerge in the last few years of research. The scientific consensus has shifted. Those early estimates that suggested these were high-warming risks have been revised. The latest research, which is cited in the IPCC reports, indicates that the temperature thresholds for triggering some of these tipping points may be much, much lower than we previously thought.
+
+Speaker 3: How much lower are we talking about?
+
+Speaker 2: The latest studies indicate that several of these major tipping points, including the collapse of the Greenland and West Antarctic ice sheets, the shutdown of the AMOC, and widespread permafrost thaw, could potentially be triggered at warming levels between 1.5 and 2.0 degrees Celsius.
+
+Speaker 4: But wait a minute. Carter, you said at the very, very beginning of our conversation that the world already temporarily breached 1.5 degrees of warming in 2024. If the trigger point is 1.5 degrees, what does that mean for us right now?
+
+Speaker 2: It means... well, it means that the risk is no longer a distant, abstract threat for future generations. It places the crossing of these irreversible thresholds squarely within the realm of possibility this century. It moves the conversation from the future into the immediate present.
+
+Speaker 2: And, you know, it adds a profound, almost existential urgency to the need for immediate, deep, and drastic emissions reductions. The window of opportunity to steer away from these points is closing, and it is closing very, very rapidly.
+
+Speaker 1: That is a deeply unsettling reality to confront, Carter. And Maya, I see you reacting to that. When you hear that the 1.5 degree line, which we've talked about for so long as this future guardrail, is not only something we've touched but is also the potential trigger for these irreversible changes... what does that feel like?
+
+Speaker 4: You know, it... it almost takes your breath away. It feels like we've been driving towards a cliff in the fog, arguing about how fast we should be going. And Carter is saying the fog has just cleared, and we're right at the edge. We're there. That's a very, very hard thing to fully process.
+
+Speaker 3: It is. And it brings up a really difficult, practical question for me. If we're already on the verge of crossing these irreversible thresholds, what is the point of all this? I mean, does a 43 percent emissions cut by 2030, which already seems impossible, even matter anymore if the fuse has already been lit on something like the Greenland ice sheet? Have we... have we already lost the game?
+
+Speaker 2: Frank, that is the most important question anyone can ask right now. And the conclusion of the report, uh, argues that this is precisely why our actions now matter more than they ever have before. The first major conclusion is that the defining characteristic of the last decade is non-linear acceleration.
+
+Speaker 1: Okay, non-linear acceleration. Break that down for us.
+
+Speaker 2: Think of it like a car that's rolling down a hill. But the hill isn't a steady slope; it's a curve that gets steeper and steeper as you go. So for every foot you travel, your speed increases more than it did in the previous foot. You are accelerating exponentially, not in a straight line, not arithmetically. That's what's happening to our planetary systems. The risks are growing at an accelerating rate.
+
+Speaker 1: So every fraction of a degree of warming we can prevent now, every year we can act faster, has a much bigger impact in preventing that future acceleration than it would have twenty or thirty years ago.
+
+Speaker 2: Exactly. It's what scientists call positive feedback loops becoming more potent. So, to answer Frank's question, it's the absolute opposite of the game being lost. It means the stakes of our actions in the next five to ten years are higher than they have ever been in human history. Every ton of carbon we keep out of the atmosphere now pays huge dividends in slowing down that terrifying acceleration toward those tipping points.
+
+Speaker 1: And the report also concludes that these are not isolated problems, correct? It talks about a cascade of interconnected crises.
+
+Speaker 2: Yes, that's the second key takeaway. We can no longer think of climate impacts as a series of separate events. A drought is not just a lack of water. It is a trigger. It triggers failures in the food system when crops fail. It triggers failures in the economic system when farmers lose their livelihoods.
+
+Speaker 2: It triggers, you know, public health crises from malnutrition and water-borne diseases. It can even culminate in social instability and displacement. Climate change is a threat multiplier that makes all our existing vulnerabilities worse.
+
+Speaker 4: You can really see that in real life, can't you? I mean, a wildfire isn't just a fire anymore. It becomes a public health crisis for millions of people breathing in the smoke. It's an economic crisis for the entire region. It becomes a water crisis months later when the first heavy rains wash toxic ash and debris into the reservoirs. You realize that one event pulls on all the other threads that hold our society together. Everything is connected.
+
+Speaker 2: That's a perfect way to put it, Maya. And because everything is connected, the report concludes that our response has to be holistic. We can't have siloed policies that address energy, or agriculture, or public health in isolation. They are all part of the same interconnected challenge.
+
+Speaker 1: This brings us to the third, and perhaps the toughest, conclusion from the report. Which is that our global response, as it stands today, is being dangerously outpaced by the physical reality of climate change.
+
+Speaker 2: That's the hard truth of the last decade. Despite all the meetings and the progress on renewables, the response remains critically insufficient. The report concludes that this failure is defined by three persistent and widening gaps. First is the ambition gap we already discussed, the gap between the weak climate pledges from countries and what science clearly shows is necessary.
+
+Speaker 1: And the second?
+
+Speaker 2: The second is the adaptation finance gap, which we just covered. The massive shortfall in funding that leaves the world's most vulnerable populations essentially undefended against the coming storms and droughts. And the third is the justice gap, which undermines the trust and cooperation that are absolutely essential for any kind of effective global solution.
+
+Speaker 3: So if I'm hearing this correctly, the report's ultimate conclusion is that our primary problem is no longer a technological one. We have the solar panels, we have the wind turbines, we have the efficiency solutions. The report is saying that the biggest barriers now are political, financial, and social. It's about a lack of political will, a failure to mobilize the necessary funds, and a failure to address the core injustices of the crisis.
+
+Speaker 2: That is the absolute crux of the conclusion. Technology is a vital tool, an essential tool, but it is not a silver bullet. The primary obstacles are now in our halls of government, in our financial institutions, and, uh, in our collective willingness to face this reality and act at the scale it requires.
+
+Speaker 1: So after this incredibly detailed and, frankly, alarming look back at the last decade, where does this leave us? We have a planet in a state of acceleration. We've temporarily breached the 1.5 degree threshold. And the risk of irreversible tipping points is no longer a future problem, but a present-day danger. Maya, I want to start with you. What's your final takeaway?
+
+Speaker 4: It leaves me feeling that the time for simply being worried, or for abstract hope, is over. The only appropriate response to this level of evidence is determined action. This report is a story written in data, and it's telling us we have to transform this stark awareness into real, tangible work in our communities and demand it from our leaders. There's no time for anything else.
+
+Speaker 1: Frank?
+
+Speaker 3: It leaves me thinking that we need to have a much more honest and pragmatic conversation about the real-world costs and trade-offs. We've talked about technology and policy, but this report makes it clear that the real fight is over politics and economics. And until we tackle that head-on, with honesty, we'll keep falling short.
+
+Speaker 1: And Carter, a final thought from you.
+
+Speaker 2: The science has been clear for a long time, but the evidence from this past decade is definitive. You know, this period from 2015 to 2025 will be remembered as the decade the consequences of our inaction became undeniable. That temporary breach of 1.5 degrees served as a final, unambiguous warning. The scientific challenge now is to monitor these accelerating changes. But the human challenge is to finally close those gaps between promises and performance, before those tipping points are crossed for good.
+
+Speaker 1: Carter, that is a powerful and frankly stark place to end, on the precipice of these tipping points with the clock running out. But... you know, before we wrap up completely, I want to hold on that last thought. The human challenge. I feel we can't end just with the warning. I want to pivot from the problems we've detailed so thoroughly to the specific pathways forward that are emerging. Beyond the high-level policy failures, where are the new fronts in this challenge?
+
+Speaker 2: That's a crucial pivot to make, Alice. Because, uh, despair is paralyzing. And despite the failures, there are new strategies and, you know, new arenas of action that are gaining momentum.
+
+Speaker 1: Let's talk about one of those. We've mentioned the justice gap and the economic challenges. What about the people, the workers and communities, whose entire livelihoods are tied to the fossil fuel industries we need to transition away from?
+
+Speaker 2: You're talking about the concept of a Just Transition. And you know, this has become a central part of the conversation because it's both morally right and politically essential. A Just Transition means ensuring that the shift to a green economy is fair and inclusive. It means we don't leave coal miners, oil rig workers, and entire communities that depend on these industries behind.
+
+Speaker 3: This is something I think is critical. You can't just tell millions of people that their jobs, their skills, their histories are obsolete without a concrete plan. You know, you'd have massive social and political unrest. For people to buy into this massive economic shift, they have to see a future for themselves in it. A real plan for retraining, for new jobs in clean energy manufacturing or grid modernization, that is absolutely essential.
+
+Speaker 4: And it's more than just jobs, isn't it? It's about identity and community. For generations, some towns have been defined by the local power plant or the mine. A just transition means investing in those places, helping them to diversify and build a new economic foundation that honors their heritage but, you know, allows them to thrive in a different kind of future. It's about respecting people while we make these big changes.
+
+Speaker 1: So ensuring the transition is fair is one emerging pathway. Maya, you just mentioned respecting people and their heritage. What about respecting nature itself? The report touched on biodiversity. Are we starting to see a move towards working with nature to solve this?
+
+Speaker 4: I hope so. Because for so long it feels like we've been trying to invent some new machine to fix the problems our last machine created. It just seems so obvious that we should be looking to nature, which has been regulating the climate for millions of years, for solutions.
+
+Speaker 2: And that intuition is now a major field of action called Nature-Based Solutions. The idea is to use the power of healthy ecosystems to help us. And, you know, the benefits are often twofold. For example, restoring coastal mangrove forests. Mangroves are incredible at absorbing carbon, but they also act as a natural sea wall, protecting coastal communities from storm surges far more effectively and cheaply than a concrete barrier.
+
+Speaker 1: So it helps with both mitigation, by absorbing carbon, and adaptation, by providing protection.
+
+Speaker 2: Exactly. And there are many other examples. Reforestation and afforestation, uh, planting trees, to draw down carbon from the atmosphere. Regenerative agriculture, which involves farming practices that restore the health of the soil, turning it back into a powerful carbon sink. These solutions don't just fight climate change; they also restore biodiversity, they clean our water, and they can make our food systems more resilient.
+
+Speaker 1: So much of the report focused on the failures of national governments to act. But we know a lot of the real-world changes happen at a more local level. What about the role of cities and even large corporations? Are they stepping up to fill the leadership vacuum?
+
+Speaker 2: In many cases, yes. Cities are often more agile and pragmatic than national governments. Networks like the C40 Cities Climate Leadership Group are hubs of innovation. You know, cities are where you see real progress on electrifying public transport, creating greener buildings, and improving waste management, all of which have a huge impact on emissions.
+
+Speaker 3: That makes sense. But what about the private sector? We hear every major company in the world announcing some kind of net-zero by 2050 target. How much of that is real, tangible action, and how much of it is just good public relations? You know, just greenwashing? Is anyone actually holding them accountable for these promises?
+
+Speaker 2: That is the billion-dollar question, Frank. And you're right to be skeptical. The last few years have seen a surge in these pledges, but there's also been a surge in scrutiny. There is a huge push now to move companies beyond vague promises towards transparent, science-based targets for the near term. We're seeing a real divide emerge between the companies that are genuinely transforming their business models and those that are, uh, frankly, just trying to improve their image. Accountability is still a massive work in progress.
+
+Speaker 1: So if governments are slow and corporations can't always be trusted, what other avenues for accountability are emerging? Where else are people pushing for change?
+
+Speaker 2: One of the most dynamic and, you know, potentially powerful new fronts is in the courtroom. We are seeing a huge increase in what is called climate litigation.
+
+Speaker 4: So, people are actually suing governments and companies over climate change?
+
+Speaker 2: Yes, all over the world. Citizens, activist groups, cities, and even states are taking national governments to court to force them to adopt stronger climate policies, arguing that inaction violates their fundamental human rights to a healthy environment.
+
+Speaker 2: And, connecting back to our earlier conversation, they are also suing the major fossil fuel companies. They are using that attribution science we discussed to directly link the emissions from a company's products to the specific harms and financial damages their communities have suffered from floods, wildfires, and sea level rise. It's a new and rapidly evolving way to demand accountability.
+
+Speaker 1: And Carter, that's a fascinating development. The idea that a courtroom could become a key battleground for climate action. Frank, you look skeptical.
+
+Speaker 3: Well, I am. I mean, it sounds good in a headline, "Activists Sue Oil Giant." But do these lawsuits actually work? It seems like they would get tied up in court for decades, with armies of corporate lawyers. Can a lawsuit really change the course of a multi-trillion-dollar global industry?
+
+Speaker 2: It's a valid skepticism, Frank. And you're right, it's not a quick fix. But, uh, the impact isn't just about winning a single huge payout. These cases create enormous pressure. They force companies to disclose internal documents, they generate negative publicity, and, you know, they establish a legal record of responsibility. It fundamentally changes the risk calculation for these industries and their investors.
+
+Speaker 4: And it changes the story, doesn't it? It reframes this from being a sort of blameless, collective problem to one of specific, attributable harm. It says, you knew about the damage your product would cause, and you did it anyway. That's a powerful narrative.
+
+Speaker 1: So beyond the courtroom, what other economic tools are being discussed to drive this transition? The report mentions things like carbon pricing. Carter, what does that actually mean?
+
+Speaker 2: Carbon pricing is a very direct economic strategy. It's about putting a price on carbon pollution to discourage its use. There are two main ways to do it. You can have a straightforward carbon tax, where the government sets a price per ton of carbon emitted. Or you can have a cap-and-trade system, where the government sets a limit, a cap, on total emissions, and then allows companies to buy and sell permits to emit.
+
+Speaker 3: Okay, but let's be honest about what that means. A carbon tax just gets passed on to the consumer, right? It means higher gas prices, higher heating bills. It seems like a policy that would disproportionately hurt lower-income families and working people, while the big corporations just factor it into the cost of doing business.
+
+Speaker 2: That is the single biggest and most important concern with carbon pricing, Frank. And if it's designed poorly, that's exactly what can happen. However, a well-designed system can actually be equitable. For example, some proposals are for a carbon fee and dividend system.
+
+Speaker 1: A dividend? So you get money back?
+
+Speaker 2: Exactly. The revenue collected from the carbon tax isn't just kept by the government. It's returned directly to citizens on an equal, per-person basis. In that system, most lower and middle-income families would actually come out ahead. They would get more back in the dividend than they pay in higher energy costs, because wealthier people tend to have a much larger carbon footprint.
+
+Speaker 4: You know, it's also about what costs we're already paying. We don't see a line item on our bills for it, but we are all paying the price for pollution right now. We pay it in healthcare costs from asthma and other respiratory diseases linked to burning fossil fuels.
+
+Speaker 4: We pay it in disaster recovery funds when our taxes go to rebuilding a town after a flood. A carbon price isn't creating a new cost; it's just making a hidden cost visible.
+
+Speaker 1: This brings us to a question I think is on everyone's mind. We've talked about these huge, complex systems, from international law to national energy policy. It can all feel very distant. So what about us? What about individual action versus systemic change? Maya, does it really make a difference if I diligently sort my recycling or eat less meat when the scale of the problem is this vast?
+
+Speaker 4: That is the question, isn't it? And it's so easy to feel like your small actions are just a drop in an angry ocean. But I truly believe they matter, just maybe not in the way we think. You know, the direct impact of me not using a plastic straw isn't going to stop the West Antarctic ice sheet from collapsing. I get that. But that's not the only point.
+
+Speaker 1: So what is the point, from your perspective?
+
+Speaker 4: When we make these conscious choices, we're not just reducing our own tiny footprint. We are sending signals. We are sending a signal to the market that there's demand for sustainable products. We are sending a signal to our friends and neighbors that this is something we care about, which helps to normalize climate consciousness in our culture.
+
+Speaker 4: And, you know, most importantly, we are sending a signal to politicians that we are a constituency that will support bold climate action. Our individual actions build the social and political momentum for the big systemic changes to happen.
+
+Speaker 2: I think Maya's point is absolutely crucial. The two are not in opposition; they reinforce each other. You need both. Individual action alone is not sufficient, that's clear. We cannot solve this crisis by changing lightbulbs and bringing reusable bags to the grocery store. We absolutely need the large-scale government policies and corporate transformations that will decarbonize our entire energy grid and industrial base.
+
+Speaker 3: Right. Because asking an individual to solve climate change is like asking a soldier to win a war by themselves. It's an unfair burden.
+
+Speaker 2: Exactly. But at the same time, systemic change is not something that just happens in a vacuum. It is the result of millions of people demanding it. So individual action is the necessary foundation. It's the engine of cultural change that makes the politics of systemic change possible. They are two sides of the very same coin. One cannot succeed without the other.
+
+Speaker 1: That's a really helpful way to frame it, Carter. So our individual choices create the culture, and that culture creates the political will for systemic change. Let's look forward then. As we chart a course out of this crisis, what are some of the other major technological or social shifts we need to be thinking about? The report's appendix lists a hundred different topics, one of which is the future of food.
+
+Speaker 2: Yes, and this is absolutely critical because, as we discussed, agriculture is a major source of emissions. The future of food really involves a two-pronged approach. First, on the production side, it means scaling up what's often called sustainable or regenerative agriculture. These are farming practices that can reduce emissions, improve soil health so it absorbs more carbon, and use less water.
+
+Speaker 4: And what's the second part? It has to be about what we eat, right?
+
+Speaker 2: It is. It also involves changes in diet, particularly in wealthy nations. The science is quite clear that, uh, a diet lower in red meat consumption and higher in plant-based foods has a significantly smaller environmental footprint. This doesn't mean everyone has to become a vegetarian, but a societal shift in that direction would have a huge impact.
+
+Speaker 3: Now, this is where it gets tricky for me. You start talking about what people eat, and it feels like a massive overreach. People's diets are incredibly personal and cultural. Are we really going to tell people they can't have a burger? That feels like a political non-starter, and it plays right into the hands of those who say climate action is about sacrifice and a lower quality of life.
+
+Speaker 4: I hear that, Frank. I really do. But maybe the framing isn't about sacrifice. Maybe it's about health, and choice, and innovation. You know, the incredible boom in really high-quality, tasty plant-based alternatives is a market-driven solution. It's not about forcing anyone to do anything; it's about providing better options that are good for people and good for the planet. It's a cultural shift, not a government mandate.
+
+Speaker 1: So food is one area. What about on the energy side? We've talked a lot about renewables. But there's another powerful, and often controversial, source of carbon-free electricity mentioned in the report: nuclear power. Carter, where does that fit into the picture?
+
+Speaker 2: Well, nuclear power is... complicated. On the one hand, it is a proven, reliable, 24/7 source of zero-emission electricity. And from a purely climate perspective, many scientists and energy experts argue that it has to be part of the solution, especially for providing a stable baseload of power when the sun isn't shining or the wind isn't blowing.
+
+Speaker 3: It seems like a no-brainer to me. If the goal is to eliminate carbon emissions from electricity as fast as possible, why aren't we building advanced nuclear reactors everywhere? The safety concerns, from what I've read about the newer designs, are vastly different from the older plants people think of.
+
+Speaker 4: But the legacy is still there, isn't it? For so many people, the word nuclear brings up images of Chernobyl or Fukushima. And even if the new plants are safer, you still have the problem of nuclear waste. What do we do with this material that remains dangerously radioactive for thousands of years? It feels like we're solving one problem for ourselves by creating a potentially massive one for countless generations to come.
+
+Speaker 2: And that, Maya, is the core of the dilemma. The issues of waste disposal, public perception, high upfront costs, and long construction times have made nuclear a very difficult path to pursue politically, even if the technology itself has advanced. It remains one of the most contentious and unresolved debates in the energy transition.
+
+Speaker 1: This debate over nuclear power really highlights that the energy transition isn't just a scientific or economic challenge. It's also a geopolitical one. Carter, how is this massive global shift from fossil fuels to clean energy changing the relationships between countries?
+
+Speaker 2: It's changing everything. For a century, geopolitics has been shaped by who has the oil and gas. But in a world powered by renewables, the map of power changes. It shifts from countries with fossil fuel reserves to countries that lead in manufacturing solar panels, wind turbines, and batteries. It also shifts power to countries that have the critical mineral resources, like lithium, cobalt, and copper, that are essential for these technologies.
+
+Speaker 3: So we're just trading a dependency on oil from the Middle East for a dependency on batteries and minerals from other parts of the world? It sounds like we're just swapping one set of geopolitical problems for another.
+
+Speaker 2: That is a very real risk, and it's a major concern. Creating more resilient and diversified supply chains for these technologies is a huge priority. But there's also an upside. The resources for renewable energy, you know, sunlight and wind, are far more democratically distributed around the globe than fossil fuels are.
+
+Speaker 2: Almost every country has the potential to generate its own clean energy, which could lead to greater energy independence and a more stable world in the long run.
+
+Speaker 1: So after this incredibly comprehensive discussion, from the accelerating science to the cascading impacts and the immense challenges in our global response, I want to bring it back to a final thought from each of you. We're standing at the end of this decade of consequence. The report makes it clear the window is closing. Where do we go from here? Frank?
+
+Speaker 3: For me, it comes down to honesty. I think we need to be more honest about the scale of the challenge and the true costs and trade-offs of the transition. We can't pretend this will be easy or painless. But if we can have a pragmatic conversation that acknowledges the difficulties, I think we have a better chance of bringing everyone along and actually getting it done.
+
+Speaker 1: Maya, a final thought from you.
+
+Speaker 4: I keep coming back to that idea of connection. This report shows how everything is connected: the ice melting in the Arctic is connected to the flood in your town, is connected to the food on your plate. And if the problem is one of broken connections, then the solution has to be about rebuilding them.
+
+Speaker 4: Reconnecting with nature, reconnecting with our communities, and, you know, finding a shared sense of purpose to protect our common home. For me, the way forward has to be rooted in that sense of shared humanity.
+
+Speaker 1: Thank you, Frank and Maya. That's a powerful call for honesty and for rebuilding our connections. Carter, I want to give you the final word on this part of our discussion. After laying out all this evidence, what is the single most important message you think we should take away about the path forward?
+
+Speaker 2: I think, uh, the message is that the era of excuses is over. For decades, you could argue that we didn't fully understand, or that the technology wasn't ready, or that the impacts felt distant. This report from 2015 to 2025 slams the door on all of that. We know, with painful certainty, what is happening.
+
+Speaker 2: We have the technological solutions, like solar and wind, that are not only ready but are now cheaper than the alternative. And the impacts are no longer distant; they are here, causing billions in damages and immense human suffering every single year.
+
+Speaker 1: So the barriers are no longer technical or scientific.
+
+Speaker 2: Not primarily. The report's inescapable conclusion is that the greatest barrier is a lack of political will, fueled by inertia and, you know, the vested interests of the fossil fuel industry. Overcoming that political barrier is now the central challenge.
+
+Speaker 2: The road ahead, the road to the next major climate conference, COP30, and beyond, is not about inventing a new machine. It's about building a global consensus for action that is so powerful it becomes politically unstoppable.
+
+Speaker 3: Carter, you say that, building a global consensus. But you know, I look at the world, and our politics seem more fractured and nationalistic than ever. How on earth do we create this unstoppable global movement when major countries can barely agree on basic trade rules, let alone something that requires a complete re-engineering of our entire economy? It feels... well, it feels naive.
+
+Speaker 2: It's not naive to see the immense difficulty, Frank. It is, uh, perhaps the hardest thing humanity has ever tried to do. But it's not without precedent. We have faced global threats before. You know, scientists in the 1980s discovered that certain chemicals were destroying the ozone layer. The world came together, listened to the science, and passed the Montreal Protocol to phase out those chemicals. And it worked. The ozone layer is healing.
+
+Speaker 4: But is that a fair comparison? Banning a few chemicals used in spray cans and refrigerators seems so much simpler than replacing the entire energy source that powers our civilization.
+
+Speaker 2: Oh, you are absolutely right, Maya. The climate challenge is orders of magnitude more complex and more difficult. But the principle is the same: science identified a threat, and international cooperation solved it. What's different now, and you know, what gives me a sliver of hope, is that the threat is no longer an invisible hole in the sky.
+
+Speaker 2: The escalating costs of floods, droughts, and fires are becoming so painfully obvious that the political calculation for leaders is starting to change. Inaction is becoming more politically expensive than action.
+
+Speaker 4: And maybe the consensus doesn't just come from those leaders in a conference room. You know, I think about the youth climate movement. When millions of young people around the world take to the streets, inspired by activists like Greta Thunberg, that creates a different kind of pressure.
+
+Speaker 4: It's a moral pressure. It builds from the ground up and forces its way into the halls of power. It's a reminder that this isn't just about economics; it's about their future that's being negotiated away.
+
+Speaker 1: That's a powerful point, Maya, the role of that moral pressure from the next generation. And it brings up the stark reality of what is truly at stake here. Carter, when we talk about these long-term consequences, like sea-level rise, the report makes it clear these are not temporary problems that will just go away if we fix our emissions. It talks about impacts being locked in for centuries. Can you explain that long-term legacy?
+
+Speaker 2: Yes, and this is a concept that is, uh, difficult to grasp but absolutely crucial. The Earth's climate system has enormous inertia. Think of the oceans like a giant flywheel. They have absorbed over 90 percent of the excess heat we've trapped, and it takes a very, very long time for that heat to dissipate. Likewise, carbon dioxide is a very long-lived gas. Much of what we emit today will still be in the atmosphere, warming the planet, hundreds of years from now.
+
+Speaker 3: So what does that mean in practical terms? Let's say, hypothetically, we wave a magic wand and stop all greenhouse gas emissions tomorrow, globally. Zero emissions. Does the warming stop? Do sea levels stop rising?
+
+Speaker 2: No. And that is the hard reality. Even in that magical scenario, the planet would continue to warm for some time, and sea levels would continue to rise for centuries, possibly for millennia. The heat that is already stored in the deep ocean would continue to circulate and warm the surface.
+
+Speaker 2: The existing greenhouse gases would continue to trap heat. The amount of warming and sea level rise we've already experienced is, in many ways, a done deal. That is the legacy we have already written.
+
+Speaker 4: So even in the best-case scenario, things will still get worse before they get better.
+
+Speaker 2: For a time, yes. But it's vital we don't interpret that as our efforts being futile. It is the absolute opposite. The actions we take in this decade will determine how much worse things get and for how long.
+
+Speaker 2: We are at the controls, making a choice right now between a future where sea levels rise by, say, another meter, which is devastating but perhaps manageable, and a future where they rise by ten meters, which would be an unimaginable catastrophe.
+
+Speaker 2: We are deciding today what percentage of the world's species will go extinct. We are deciding how much of the planet will become uninhabitable for our own grandchildren. We are locking in that future with the choices we make today.
+
+Speaker 1: That is an incredibly powerful and sobering thought, Carter. The idea that we are, right now, writing the legacy for centuries to come. You know, it raises a profound psychological question. How do we live with that knowledge? How do we confront this reality of a locked-in future without falling into paralysis or, you know, just complete despair?
+
+Speaker 4: That's the question I grapple with every day, Alice. And I know so many others do, too. There's a real grief in realizing what we've already lost, and a real fear for what's to come. And some days, that can feel completely overwhelming. But, you know, what I've found, for myself, is that the only real antidote to that anxiety is action.
+
+Speaker 3: Action. That's easy to say. But if the problem is this big, and some of the damage is already done, what does that action even look like? It can feel like... I don't know, bailing out a sinking ship with a teaspoon. It might make you feel better, but is it actually changing the outcome? I worry about climate fatigue. People just get so overwhelmed by the bad news that they tune it all out.
+
+Speaker 4: I see what you mean, Frank. I really do. But maybe the teaspoon isn't the point. Maybe the point is that when you start bailing, the person next to you sees you and picks up their own teaspoon. And then another person does. The action itself builds a sense of community and shared purpose.
+
+Speaker 4: It's about building what some people call "active hope." It's not a blind optimism that everything will be fine. It's a belief that if we work together, we can still create a better outcome than the one we're heading for. And that work, that action, gives us a sense of agency in a situation that can feel... hopeless.
+
+Speaker 2: I think that's a crucial insight, Maya. And Frank, to address your point about fatigue, part of the solution is to change the narrative from one of pure sacrifice to one of opportunity. And there's real data to back this up. You know, the transition to a clean economy isn't just about shutting things down; it's about building new things. The International Energy Agency has reported that jobs in the clean energy sector are growing rapidly around the world, outpacing the fossil fuel industry.
+
+Speaker 1: So this connects back to what we discussed earlier, the idea of a Just Transition. It's about creating tangible, positive, real-world opportunities for people.
+
+Speaker 2: Precisely. It's about showing people a vision of the future that is not just survivable, but actually better. A future with cleaner air, quieter cities, and new, well-paying jobs in industries like solar installation, battery manufacturing, and grid modernization. When people can see a concrete benefit for themselves and their communities, it's a very powerful motivator. It helps to overcome that sense of fatigue and shifts the focus to building a future we actually want.
+
+Speaker 1: So, as we talk about building this new future, let's dive into another one of the advanced technologies mentioned in the report's appendix. We hear a lot of buzz about it. Carter, can you tell us about Green Hydrogen? What is it, and what role is it supposed to play?
+
+Speaker 2: Of course. In simple terms, green hydrogen is a way to store clean energy. You take electricity from a renewable source, like a solar or wind farm, and you use it to power a machine called an electrolyzer. And this machine splits water, which is H2O, into its basic components, hydrogen and oxygen. The hydrogen that you get from this process is a clean, carbon-free fuel.
+
+Speaker 3: Okay, so it's a clean fuel. But Carter, I've heard there are major problems with it. For one, it's incredibly inefficient, isn't it? You use a huge amount of electricity to make the hydrogen, and then you lose more energy when you convert it back into power. And the cost... it seems to be way more expensive than just using the electricity directly. It sounds like another one of those futuristic solutions that's always just over the horizon.
+
+Speaker 2: Uh, those are absolutely the key challenges, Frank. You are right. There are energy losses in the process, and right now, the cost of producing green hydrogen is still high compared to other options. However, the costs are falling rapidly as the technology scales up, much like we saw with solar panels a decade ago. And its real potential isn't necessarily for powering cars or homes, where batteries are often a better fit.
+
+Speaker 1: So where does it fit? What's the specific job for this tool?
+
+Speaker 2: Its promise is in those hard-to-abate sectors that we keep coming back to. Think about heavy industries like steel and cement manufacturing, which require incredibly high heat that's hard to achieve with just electricity. Or, uh, long-haul transportation, like container ships and airplanes.
+
+Speaker 2: For these sectors, a clean-burning fuel like green hydrogen could be a genuine game-changer, a way to decarbonize parts of our economy that batteries can't easily reach.
+
+Speaker 4: You know, hearing this, it highlights something I think is really confusing for a lot of people. It feels like every year there's a new savior technology. First, it was biofuels, then it was clean coal, now it's hydrogen. It's hard to keep up, and it can start to feel like we're just hoping for some single magic bullet to come along and fix everything for us. Maybe that's the wrong way to look at it?
+
+Speaker 2: Maya, that is an incredibly wise observation. And you are absolutely right. The search for a single magic bullet has been a distraction. The most useful analogy is to think of it as a toolbox. You would never try to build a house with only a hammer. You need a saw, a screwdriver, a wrench... all for different tasks.
+
+Speaker 1: Oh, I see. So it's not about hydrogen versus batteries, or renewables versus nuclear.
+
+Speaker 2: Exactly. It's about having all of them in the toolbox. We need renewables to generate the clean electricity. We need batteries for short-term storage and for electric vehicles. We need green hydrogen for those specific industrial and transport applications. We need to massively ramp up energy efficiency to reduce overall demand. The goal isn't to find the one perfect solution; it's to build a resilient, robust, and flexible system using all the different tools that we have.
+
+Speaker 1: That's a really helpful way to frame it, Carter. A whole toolbox, not a magic wand. But you know, when you talk about all these huge, complex systems, from green hydrogen infrastructure to nuclear power plants, it can all feel very distant and overwhelming for the average person.
+
+Speaker 1: Which brings us to a question I think is on everyone's mind. What about us? What about individual action versus systemic change? Maya, does it really make a difference if I diligently sort my recycling or eat less meat when the scale of the problem is this vast?
+
+Speaker 4: That is the question, isn't it? And it's so easy to feel like your small actions are just a drop in an angry ocean. But I truly believe they matter, just maybe not in the way we usually think. You know, the direct carbon impact of me not using a plastic straw isn't going to stop the West Antarctic ice sheet from collapsing. I get that. But that's not the only point.
+
+Speaker 3: But isn't it the most important point? I mean, we can all feel good about our reusable coffee cups, but meanwhile, a single coal plant is wiping out all our collective efforts in a matter of minutes. It feels like a distraction. It shifts the burden of responsibility from the handful of massive corporations and governments causing the problem onto the shoulders of billions of individuals. It feels unfair.
+
+Speaker 4: I see that, Frank, and that's a real danger. But when we make these conscious choices, we're doing more than just reducing our own tiny footprint. We are sending signals. We send a signal to the market that there's demand for sustainable products. We send a signal to our friends and neighbors that this is something we care about, which, you know, helps to normalize climate consciousness in our culture.
+
+Speaker 4: And most importantly, we send a signal to politicians that we are a constituency that will support bold climate action. Our individual actions build the social and political momentum for the big systemic changes to happen.
+
+Speaker 2: I think Maya's point is absolutely crucial. And Frank's concern is equally valid. The two ideas are not in opposition; they actually reinforce each other. You need both. Individual action alone is not sufficient, that's clear. We cannot solve this crisis by changing lightbulbs. We absolutely need the large-scale government policies and corporate transformations that will decarbonize our entire industrial base.
+
+Speaker 1: So it's not a choice between one or the other.
+
+Speaker 2: Not at all. But at the same time, that systemic change doesn't just happen in a vacuum. It is the result of millions of people demanding it. So individual action is the necessary foundation. It's the engine of cultural change that makes the politics of systemic change possible. They are two sides of the very same coin. One cannot succeed without the other.
+
+Speaker 1: That's a great way to put it. So if individual action helps create the political will for systemic change, let's talk about one of the most powerful systemic tools that economists often discuss. Carter, the report mentions carbon pricing and emissions trading systems. Can you explain what that is?
+
+Speaker 2: Certainly. Carbon pricing is a very direct economic strategy. It's about putting a price on carbon pollution to discourage it. There are two main ways to do it. You can have a straightforward carbon tax, where the government sets a price per ton of carbon dioxide emitted. Or you can have what's called a cap-and-trade system.
+
+Speaker 1: And how does cap-and-trade work?
+
+Speaker 2: In that system, the government sets a limit, a cap, on the total amount of emissions allowed in a sector, say, the electricity sector. And that cap gets lower every year. Then, companies within that sector are given permits to pollute, or they have to buy them. If a company pollutes less than its permit allows, it can sell its leftover permits to a company that pollutes more. This creates a financial incentive to cut emissions as cheaply as possible.
+
+Speaker 3: Okay, but let's be honest about what a carbon tax really means for the average person. It just gets passed on to the consumer, right? It means higher prices at the gas pump, higher home heating bills. It seems like a policy that would disproportionately hurt lower-income families and working people, who spend a much bigger chunk of their income on those essentials. It sounds deeply unfair.
+
+Speaker 2: That is the single biggest and most important concern with carbon pricing, Frank. And if it's designed poorly, that's exactly what can happen. It can be regressive. However, a well-designed system can actually address this and be equitable. For example, some of the most popular proposals are for a carbon fee and dividend system.
+
+Speaker 1: A dividend? So you're saying people would get money back?
+
+Speaker 2: Exactly. The revenue collected from the carbon tax isn't just kept by the government to spend on other things. It's returned directly to every citizen on an equal, per-person basis, like a check in the mail or a direct deposit.
+
+Speaker 2: In that system, most lower and middle-income families would actually come out ahead. They would get more back in the dividend than they pay in higher energy costs, simply because wealthier people tend to travel more, have larger homes, and have a much larger carbon footprint.
+
+Speaker 4: You know, it's also about what costs we're already paying. We don't see a line item on our bills for it, but we are all paying the price for pollution right now. We pay it in healthcare costs from asthma and other respiratory diseases linked to burning fossil fuels.
+
+Speaker 4: We pay it in our insurance premiums, which go up after every climate-fueled disaster. We pay it in our taxes, which go to rebuilding a town after a flood. A carbon price isn't creating a new cost; it's just making a hidden cost visible and putting it on the people who are creating the pollution.
+
+Speaker 1: That's a powerful reframe, Maya. Shifting our perspective from a new tax to making a hidden cost visible. This conversation about fairness and who pays the cost brings us to another critical justice issue the report touches on: the impact on the workers and communities whose entire economies are built on the old system. Carter, can you talk about the concept of a Just Transition?
+
+Speaker 2: Yes, and you know, this has moved from the fringes of the discussion to the absolute center, because it's both morally right and, frankly, politically essential. A Just Transition means ensuring that the massive shift to a green economy is fair and inclusive. It means we don't leave coal miners, oil rig workers, and entire communities that depend on these industries behind.
+
+Speaker 3: This is something I think is absolutely critical, and it's often glossed over. You can't just tell millions of people that their jobs, their skills, their entire community's history is obsolete without a concrete, funded plan. If you do, you get massive social and political unrest. For people to buy into this huge economic shift, they have to see a future for themselves in it. A real plan for retraining, for new jobs in clean energy manufacturing or grid modernization, that is absolutely essential.
+
+Speaker 4: And it's more than just a paycheck, isn't it? It's about identity and community. For generations, some towns have been defined by the local power plant or the mine. That's a source of pride. A just transition means investing directly in those places, helping them to diversify their economies and build a new foundation that honors their heritage but, you know, allows them to thrive in a different kind of future. It's about respecting people while we make these big, necessary changes.
+
+Speaker 2: That's right. And it means ensuring that the new green jobs are good jobs, with fair wages, benefits, and the right to unionize. The goal isn't just to swap a fossil fuel job for any old job; it's to ensure the clean energy economy creates widespread prosperity and opportunity. If it doesn't, as Frank said, it will fail politically.
+
+Speaker 1: This focus on political stability is a crucial point. The report also talks about how climate change is a threat multiplier, not just for economies, but for global peace and security. Carter, can you explain how climate change can lead to conflict?
+
+Speaker 2: Well, the mechanism, according to defense and intelligence analysts, is that climate change exacerbates existing tensions and vulnerabilities. It's rarely the single, direct cause of a war, but it's like pouring gasoline on a fire that's already smoldering.
+
+Speaker 1: Can you give us an example?
+
+Speaker 2: Take a region that already has a history of ethnic tension and a fragile government. Now, add a multi-year, climate-driven drought. The water sources dry up. The pastures for livestock wither away. Crops fail. This leads to massive food and water scarcity, which in turn can drive resource competition between different groups.
+
+Speaker 2: It can cause governments to collapse, create mass displacement, and open up a power vacuum that can be exploited by extremist groups. The climate stress is the catalyst that pushes a fragile situation into a full-blown crisis.
+
+Speaker 3: But hang on a minute. It seems to me that people have been fighting over land and water for thousands of years. How can we be so sure that this isn't just old conflicts playing out, and that we're just slapping a new climate change label on them? Is the link really that direct?
+
+Speaker 2: That's a fair question, Frank. And you're right, these are often old tensions. But what the science and the data show is a clear intensification. The droughts are more severe and longer-lasting than before. The floods are more extreme. The report notes that the IPCC states with high confidence that climate extremes are increasingly driving displacement, and that displacement itself is a major source of instability. So it's not creating conflicts out of thin air; it's making existing ones far more frequent and far more deadly.
+
+Speaker 4: You know, when I hear this, I just think about the human cost. We see these headlines about instability in a faraway region, but we forget that these are families being forced to flee their homes because the land they have farmed for generations can no longer support them. They are not leaving because they want to; they are leaving because they have no choice. It connects directly back to that horrifying statistic you mentioned earlier, Carter, about the death rate from these disasters being 15 times higher in vulnerable regions.
+
+Speaker 1: It truly underscores the profound inequity of this crisis. And this idea of instability leads me to another geopolitical question. We've talked about how the energy transition changes the map of power from oil states to countries with critical minerals. Carter, how is this massive global shift changing the relationships between major world powers?
+
+Speaker 2: It's reshaping geopolitics in a fundamental way. For a century, international relations have been shaped by who has the oil and the gas. But in a world powered by renewables, the sources of power change. It shifts from countries with fossil fuel reserves to countries that lead in manufacturing the key technologies, so that's solar panels, wind turbines, and batteries.
+
+Speaker 3: So we're just trading a dependency on oil from the Middle East for a dependency on batteries and solar panels from, say, China? It sounds like we're just swapping one set of geopolitical problems for another. We're still vulnerable, just in a different way.
+
+Speaker 2: That is a very real risk, Frank, and it's a major strategic concern for governments in Europe and North America. Creating more resilient, secure, and diversified supply chains for these clean energy technologies is a huge global priority right now. But there's also a fundamental upside to this new map.
+
+Speaker 1: And what's that?
+
+Speaker 2: The resources for renewable energy, you know, sunlight and wind, are far more democratically distributed around the globe than fossil fuel reserves are. Almost every single country has the potential to generate its own clean energy for its own people. Over the long run, this could lead to greater energy independence for many nations, reducing the number of global choke points and potentially leading to a more stable and equitable world.
+
+Speaker 4: That's a really hopeful thought. The idea that this transition, if we manage it right, could actually make the world a more peaceful place by giving more countries control over their own energy future.
+
+Speaker 1: It is. We've spent a lot of time talking about the failures of national governments and these huge geopolitical shifts. But we know a lot of the real-world changes are happening at a more local level. Carter, what does the report say about the role of cities in leading climate action?
+
+Speaker 2: In many cases, cities are where the action is. They are often more agile, more pragmatic, and less tied up in partisan gridlock than national governments. And they have to be, because they are on the front lines of the impacts, from heat waves to flooding. So you have these incredible networks, like the C40 Cities Climate Leadership Group, which are basically hubs of innovation.
+
+Speaker 3: What kind of innovation are we talking about? What are cities actually doing on the ground that makes a difference?
+
+Speaker 2: They are doing a lot. They are electrifying their public transport fleets, from buses to garbage trucks. They are creating greener building codes that mandate higher energy efficiency. They are investing in massive tree-planting campaigns and creating more parks to combat the urban heat island effect.
+
+Speaker 2: They are redesigning streets to be more friendly for pedestrians and cyclists, and less dominated by cars. All of these actions, when added up across hundreds of cities, have a huge impact on both emissions and the quality of life for residents.
+
+Speaker 4: And you can really feel that difference. You know, when your city invests in a new, reliable bus line or a safe, protected bike lane, your life gets better. Your commute is less stressful. The air feels cleaner. It's another one of those examples where the climate solution is also just a better way of living. It's not about sacrifice; it's about building cities that are more pleasant and more livable for everyone.
+
+Speaker 1: It truly seems a recurring theme is that a more sustainable world is also a healthier and more equitable one. We have covered so much ground today, from the accelerating science of a planet in crisis, to the cascading impacts on our health, economy, and security, and to the immense challenges and emerging pathways in our global response.
+
+Speaker 1: As we draw this conversation to a close, I want to come back to a final, forward-looking thought from each of you. We are standing at the end of this decade of consequence. The report makes it clear the window for action is closing with terrifying speed. Where do we go from here? Frank, what is your final takeaway?
+
+Speaker 3: For me, it has to be about getting real. The scale of this report shows that we are past the point of easy, feel-good solutions. The transformation that is required is going to be hard, and it's going to be expensive. We need to stop pretending otherwise.
+
+Speaker 3: The path forward has to be built on honesty about the costs, on ensuring the transition is fair to working people, and on deploying every single pragmatic tool we have, from renewables to nuclear to carbon capture, without letting ideology get in the way. It's an all-hands-on-deck emergency, and we need to start acting like it.
+
+Speaker 1: Thank you, Frank. A powerful call for pragmatic, honest, all-of-the-above action.
+
+Speaker 4: I keep coming back to that idea of the story we tell ourselves. For so long, the climate story has been framed by fear, by what we have to give up. And that fear is real, the grief for what we're losing is valid. But a story of fear alone can lead to paralysis. I believe we have to start telling a new story, a story of what we stand to gain.
+
+Speaker 4: We gain a chance to build a world that is healthier, more just, and more connected to nature and to each other. That's the vision we have to hold on to. The way forward has to be rooted not just in fear of the future we want to avoid, but in a compelling, active hope for the future we want to create.
+
+Speaker 1: Thank you, Maya. A beautiful and necessary call for a new, more hopeful narrative. And Carter, I'll give you the final word. After laying out all this sobering science and the stark conclusions of this report, what is the ultimate message you want to leave our listeners with?
\ No newline at end of file
diff --git a/demo/text_examples/4p_climate_45min.txt b/demo/text_examples/4p_climate_45min.txt
new file mode 100644
index 0000000..cccf953
--- /dev/null
+++ b/demo/text_examples/4p_climate_45min.txt
@@ -0,0 +1,421 @@
+Speaker 1: Hello and welcome to Planet in Peril. I'm your host, Alice. We're here today to discuss a really sobering new report that looks back at the last ten years of climate change, from 2015 to 2025. It paints a picture not just of steady warming, but of a dangerous acceleration. And to help us unpack this, I'm joined by our expert panel. Welcome Carter, Frank, and Maya.
+
+Speaker 2: Hi Alice, it's great to be here. I'm Carter.
+
+Speaker 3: Hello, uh, I'm Frank. Good to be on.
+
+Speaker 4: And I'm Maya. Thanks for having me.
+
+Speaker 1: So, let's dive right in. Carter, this report, titled Decade of Consequence, uses some very strong language right from the start. Can you set the scene for us? What makes this last decade so... pivotal and alarming?
+
+Speaker 2: Well Alice, the key takeaway is that word you used: acceleration. We're no longer on a gentle, predictable upward slope. The data, and this is coming from the big global bodies like the IPCC and the World Meteorological Organization, shows that every key indicator of the planet's health sped up in the last ten years. We've essentially pushed the global system into a new, more volatile state.
+
+Speaker 4: You know, that really resonates. It feels that way, doesn't it? I mean, just thinking about my own garden, the seasons feel less predictable. The summer heat seems to arrive earlier and hit harder every year. It feels less stable.
+
+Speaker 1: That's a great point, Maya. It's moved from an abstract concept to a lived experience for so many. Carter, let's talk about the most direct indicator, temperature. The report says records haven't just been broken, they have been shattered.
+
+Speaker 2: That's right. The ten-year period from 2015 to 2024 is, without a doubt, the warmest decade since we started keeping records in 1850. And it's not a fluke... every single year within that decade is among the ten warmest years ever recorded.
+
+Speaker 3: Okay, Carter, but we always hear about record-breaking years. Every year seems to be the hottest ever. How is this different? Is it just a continuation of a trend?
+
+Speaker 2: It is, but the trend itself is speeding up. And this decade saw something truly significant. The year 2024 became the first full calendar year where the global average temperature went past the 1.5 degree Celsius threshold from the Paris Agreement. Specifically, it hit about 1.55 degrees above the pre-industrial average.
+
+Speaker 4: Wow. One point five degrees. We've been talking about that number as a future goal, a line we must not cross. And we're already there, even temporarily? That's... unsettling.
+
+Speaker 3: But Carter used the word temporarily. So does that mean the Paris Agreement goal is already lost? And you know, 2024 had a strong El Niño event, which is a natural warming cycle. How much of this is just nature doing its thing?
+
+Speaker 2: That's an excellent and crucial question, Frank. No, a single year's breach doesn't mean the goal is permanently lost, as that refers to a long-term average. But it serves as a massive warning shot. It shows that the climate system is capable of reaching these dangerous levels now. And while El Niño played a role, it was riding on top of this powerful, long-term warming trend. The key isn't just one record year; it's the accelerating rate of warming.
+
+Speaker 1: Can you elaborate on that? The accelerating rate?
+
+Speaker 2: Of course. Data from NOAA, the US National Oceanic and Atmospheric Administration, shows that since 1982, the world has been warming at a rate of zero point two degrees Celsius per decade. Now, that might not sound like much, but it's more than three times faster than the average rate since 1850. So, to answer your question, Frank, this isn't a natural blip. The engine is revving faster and faster.
+
+Speaker 1: So let's talk about that engine. What's driving this acceleration? The report links it directly to greenhouse gases in the atmosphere.
+
+Speaker 2: Exactly. The physics are very direct. And in the last decade, the concentrations of these gases have soared to levels that are, frankly, unprecedented in human history. The IPCC's latest major report states with high confidence that atmospheric carbon dioxide levels are now higher than at any time in at least two million years.
+
+Speaker 4: Two million years. I... I can't even process that number. It feels like we're running a massive, uncontrolled experiment on our only home.
+
+Speaker 2: That's a good way to put it, Maya. To give you some concrete numbers, in 2024, the average concentration of carbon dioxide hit 422.7 parts per million. That's a full 50 percent higher than before the industrial age began. And just like with temperature, the rate of increase is accelerating. In the 1960s, it grew by about zero point eight parts per million per year. In the last ten years? It's averaged 2.6 parts per million per year. The year 2024 saw the largest single-year jump ever recorded.
+
+Speaker 1: So the warming is accelerating, and the concentration of the gas causing the warming is also accelerating. This brings us to the core question, which is addressed in the second section of the report. The science of attribution. Carter, how certain are scientists that this is... us?
+
+Speaker 2: The scientific community is as certain as it is about the theory of gravity. The IPCC uses the strongest possible language. The report states unequivocally that human influence has warmed the atmosphere, ocean and land. There's no ambiguity left.
+
+Speaker 3: Unequivocal. That is a strong word. But what does that mean in practice? I mean, a lot of people hear this and think, okay, but how do they know it's not the sun, or volcanoes, or some other natural cycle?
+
+Speaker 2: It's a fair question. Scientists know because they use incredibly sophisticated climate models. They run simulations of the last 150 years with only natural factors, like solar cycles and volcanic eruptions. And when they do that, the models completely fail to replicate the warming we've actually observed. They just can't get the temperature to rise. It's only when they add in the human-caused greenhouse gas emissions that the models accurately match the real-world temperature record.
+
+Speaker 4: Oh, I see. So it's like trying to solve a mystery. You test out all the natural suspects, and none of them can be the culprit. But when you add in the human suspect, the story suddenly makes perfect sense.
+
+Speaker 2: That's a perfect analogy. The IPCC even quantifies it. The best estimate is that humans have caused about one point zero seven degrees Celsius of warming since the late 1800s. The total observed warming over that same period? About one point one degrees Celsius. So, we account for... basically all of it.
+
+Speaker 3: Right. So if it's unequivocally us, what specific human activities are we talking about? When people say we need to cut emissions, what are we actually supposed to be cutting?
+
+Speaker 1: That's a perfect question, Frank. Carter, the report gets right into this. Can you break down the main sources for us?
+
+Speaker 2: Absolutely. The picture is actually very clear. The primary driver, by a huge margin, is the burning of fossil fuels, so that's coal, oil, and natural gas. In 2019, about 79 percent of all global greenhouse gas emissions came from using fossil fuels across four main areas: energy production for electricity and heat, industry, transportation, and buildings.
+
+Speaker 3: So it really isn't just about driving cars. I mean, that's what you always hear. But this is about how we power our homes, how we make things, our entire economic structure.
+
+Speaker 2: Precisely. The power sector alone, which generates electricity and heat, is the single biggest contributor. And what's concerning is that even with the amazing growth of renewable energy, the International Energy Agency has pointed out that demand for oil and gas has stayed stubbornly high. We're still investing in new fossil fuel infrastructure, which creates a real risk of locking in these emissions for decades to come.
+
+Speaker 4: You know, it's so easy to picture smokestacks and the tailpipes of cars when we talk about this. But the report mentions another big piece of the puzzle, right? Something about our land, about forests and farming?
+
+Speaker 2: Yes, and it's a critical piece, Maya. The remaining 21 to 22 percent of emissions come from what scientists call AFOLU. That stands for Agriculture, Forestry, and Other Land Use. This includes methane emissions from livestock, nitrous oxide from fertilizers, and, crucially, deforestation.
+
+Speaker 1: And why is deforestation such a major factor?
+
+Speaker 2: It delivers a devastating one-two punch. First, when we clear forests, primarily for agriculture, we release the massive amounts of carbon that were stored in those trees and soils directly into the atmosphere. Between 2015 and 2020, the world continued to lose an estimated 10 million hectares of forest every single year. Second, by destroying the forest, we're eliminating a vital natural carbon sink that would otherwise be absorbing CO2 from the air. So it adds carbon while also reducing the planet's ability to clean it up.
+
+Speaker 1: So we have a very clear picture of the sources. This leads to the obvious question of what we are doing about it. The report talks about a persistent and vast emissions gap. Carter, what is that?
+
+Speaker 2: The emissions gap is the difference between what countries have pledged to do and what the science says is actually required to meet the goals of the Paris Agreement. The United Nations Environment Programme releases a report on this every year, and the findings are stark. The 2023 report found that with the policies we have right now, the world is on a trajectory for a temperature rise of nearly 3 degrees Celsius by the end of the century.
+
+Speaker 4: Three degrees... Carter, we were just talking about how damaging it is to even temporarily hit 1.5 degrees. Three sounds... catastrophic.
+
+Speaker 2: It would be. To align with the 1.5 degree pathway, the report states that predicted global emissions in 2030 need to be cut by a staggering 42 percent from where they're heading now.
+
+Speaker 3: Hold on a minute. A 42 percent cut by 2030? Carter, that's just a handful of years away. Is that even realistic? Are countries just not trying, or is the goal itself simply impossible for our modern world to achieve?
+
+Speaker 2: It's an immense challenge, Frank, there's no question. The report does note that there has been some progress since the Paris Agreement was signed. Projected emissions for 2030 are lower now than they were expected to be a decade ago. However, this improvement is nowhere near the scale or speed that is required. So this gap... it really represents the collective failure of the world to turn political commitments into sufficient real-world action.
+
+Speaker 4: And while governments and experts are debating these huge numbers and percentages, people on the ground are already feeling the effects. It feels like the consequences are here now, but the solutions are still stuck in negotiations.
+
+Speaker 1: Maya, that is such a powerful point, and it leads us directly to one of the most significant scientific advancements of the past decade, which is the ability to link specific weather events directly to climate change. Carter, tell us about the science of attribution.
+
+Speaker 2: This has been a game-changer. For a long time, we could only say that climate change makes certain types of events, like heatwaves, more likely in general. But now, attribution science allows scientists to provide robust, quantitative assessments of the role human-caused warming played in a specific, individual event.
+
+Speaker 1: So how does that work, in simple terms?
+
+Speaker 2: They use multiple climate models to compare the probability of a specific extreme event happening in the world as it is today, with all our emissions, to its probability in a counterfactual world, a simulated world without human-caused greenhouse gases. This allows them to say, with a calculated degree of confidence, how much more likely or how much more intense an event was made because of climate change.
+
+Speaker 3: So you're saying that scientists can now point to a specific flood, or a specific wildfire, and actually put a number on it? They can say this was 50 percent worse, or ten times more likely, because of our emissions?
+
+Speaker 2: Yes, exactly. The science has matured to that point. For example, studies have found that some recent heatwaves, like the one in the Pacific Northwest in 2021, would have been virtually impossible without human-induced climate change. This ability to quantify the human fingerprint on disasters is profound. It transforms climate change from a distant, future threat into a direct and measurable cause of the harm and damage people are experiencing today.
+
+Speaker 1: And this science has profound implications, doesn't it, Carter? It means the conversation shifts from future projections to present-day accountability. So let's talk about those cascading consequences the report details. It frames extreme weather as the new normal. What does that actually look like?
+
+Speaker 2: It looks like a world where the weather has fundamentally shifted gears. The science of attribution has now firmly linked the dramatic rise in the frequency and intensity of extreme events to human-caused warming. So what used to be a rare event is now becoming a regular occurrence. In 2024 alone, for example, there were over 600 reported extreme weather events.
+
+Speaker 4: It really does feel that way. I mean, the summer heat seems to build earlier and last longer, and it feels more oppressive, more dangerous than I ever remember. And then, when the rain finally comes, it's not a gentle shower. It's a deluge that overwhelms everything.
+
+Speaker 2: You've just described the mechanics of it perfectly, Maya. Extreme heat events have become more frequent and more severe. Temperatures hitting over 40 degrees Celsius, which is 104 degrees Fahrenheit, used to be a rarity in many places. Now, it's becoming common. And that heat leads to the paradox of the water cycle.
+
+Speaker 3: A paradox? How so? It seems to me we're either in a drought or a flood. How can both be happening more often? It feels contradictory.
+
+Speaker 2: It does, but they are two sides of the same coin. A warmer atmosphere holds more moisture, about 7 percent more for every single degree Celsius of warming. So when it does rain, the downpours are far heavier, which dramatically increases flood risk. In fact, since the year 2000, flood-related disasters have risen by 134 percent compared to the two decades before.
+
+Speaker 1: But what about the drought side of that coin?
+
+Speaker 2: At the same time, those higher temperatures bake the land. They increase evaporation from soil, from rivers, from reservoirs, leading to more rapid and severe droughts in many regions. This has given rise to a phenomenon that scientists are now calling climate whiplash, where a region can swing violently between a devastating drought one year and catastrophic floods the next. It just overwhelms our infrastructure and our ecosystems.
+
+Speaker 1: And this combination of prolonged heat and severe drought creates a perfect storm for another disaster we see constantly on the news: wildfires.
+
+Speaker 2: Exactly. Wildfire seasons have become longer and more intense in many parts of the world. Scientific analysis estimates that human-caused climate change has already doubled the area of forest burned in the Western United States in recent decades. And this creates a terrifying feedback loop. These megafires don't just destroy communities, they release enormous amounts of stored carbon back into the atmosphere, which in turn causes more warming, which then leads to more fires.
+
+Speaker 4: I live in California, and that feedback loop is something you can feel in your bones. The fear during fire season is palpable. And even if you're not near the flames, the smoke can choke the sky for weeks. It's a constant, unhealthy reminder of what's happening.
+
+Speaker 1: Maya, you've taken us right to the next critical point. These disasters are not just statistics. They have a direct and severe impact on our health. The report goes so far as to call climate change the greatest global health threat of the 21st century. Carter?
+
+Speaker 2: It is, without a doubt. The impacts are extensive. Let's start with the most direct one: the heat itself. Extreme heat is one of the deadliest weather phenomena. The IPCC confirms with very high confidence that the increase in extreme heat has resulted in human mortality and morbidity in every region of the world.
+
+Speaker 3: We hear about vulnerable people being at risk during heatwaves, which makes sense. But does it have a broader impact on the general population, on the economy?
+
+Speaker 2: A massive one. The Lancet Countdown on Health and Climate Change, which is a major annual report, documented these record-breaking health threats. They estimated that in 2023, 3.4 billion potential labor hours were lost globally just due to people being exposed to extreme heat. That's an increase of 69 percent compared to the average in the 1990s. So yes, it has huge economic and productivity impacts.
+
+Speaker 1: And those are just the direct impacts of the heat itself. What about the less obvious health threats?
+
+Speaker 2: They are just as concerning. A warmer world is a more hospitable world for the vectors that carry diseases. Rising temperatures and changing rainfall patterns are expanding the geographic range for diseases like malaria, dengue, West Nile virus, and Lyme disease. We're seeing them appear in places they've never been before.
+
+Speaker 4: And it must affect our food and water, the very foundations of our health.
+
+Speaker 2: Absolutely. Climate change directly undermines both. The report notes that climate change has slowed the growth of agricultural productivity over the past 50 years. It's a key driver of the global food insecurity that affected, by some estimates, over 750 million people in 2023. At the same time, about half the world's population, that's four billion people, now experiences severe water scarcity for at least one month of the year, a situation made much worse by melting glaciers and prolonged droughts.
+
+Speaker 4: And beyond all the physical ailments, there has to be a psychological toll. The stress of living with this uncertainty, the trauma of surviving a disaster, the anxiety about what the future holds for your children. The report touches on mental health, doesn't it?
+
+Speaker 2: It does. This is a growing and critical area of concern. The IPCC has now clearly associated increasing temperatures and the trauma from extreme events with significant challenges to mental health. This includes post-traumatic stress disorder after a disaster, anxiety and depression when people lose their homes or livelihoods, and a broader condition people are calling eco-anxiety, especially among young people, about the future of the planet.
+
+Speaker 1: And this idea of a psychological toll, this eco-anxiety, leads to another form of stress: financial. The report makes it clear that the economic consequences of climate change have become impossible to ignore over the last decade. Carter, can you start by outlining the scale of these costs?
+
+Speaker 2: The scale is immense, and it's escalating rapidly. The most direct measure we have comes from the global reinsurance industry, the companies that insure the insurance companies. Data from the Swiss Re Institute shows that for five consecutive years, from 2020 through 2024, the global insured losses from natural catastrophes have surpassed 100 billion US dollars.
+
+Speaker 3: Okay, 100 billion is a massive number. But you have to wonder, isn't some of that just due to inflation, or the simple fact that we've built more expensive homes and cities in high-risk areas like coastlines? Are the storms themselves really causing more financial damage, or do we just have more valuable things in their way?
+
+Speaker 2: That's a very important point, Frank. And yes, growing asset values in vulnerable areas, what they call exposure, is definitely a part of the story. However, the data clearly shows that the primary driver of the upward trend is the increased frequency and intensity of the severe weather events themselves. For example, in 2024, the total economic losses from natural disasters hit an estimated 318 billion dollars. The insured portion was 137 billion. The rest was uninsured.
+
+Speaker 1: So more than half of all the losses were not covered by insurance. What does the report say about that?
+
+Speaker 2: It refers to this as the protection gap, and this gap is widening. In 2024, 57 percent of all global economic losses from these catastrophes were uninsured. This is a huge problem, especially in developing countries where very few people have insurance. For these communities, a single disaster can wipe out years of economic development and trap them in a cycle of poverty and recovery.
+
+Speaker 4: And this isn't just an abstract global statistic. I mean, we see it in our own communities. We hear stories of insurance premiums skyrocketing to the point where they are unaffordable. Or worse, insurance companies simply pulling out of entire states like Florida or California because the risk of wildfire or flooding has become too high. This creates this incredible financial stress for families who are just trying to protect their homes.
+
+Speaker 1: And it's not just private homes and property. Our shared public infrastructure is also facing enormous risks.
+
+Speaker 2: That's right. Our entire modern society, the energy grids, transportation networks, water treatment plants, they were all designed and built for a climate that no longer exists.
+
+Speaker 2: Sea level rise directly threatens ports and coastal cities, extreme heat puts an incredible strain on power grids, and intense flooding can destroy roads and bridges. The World Bank has warned that the cost of inaction, particularly in terms of damage to infrastructure, could run into the trillions of dollars.
+
+Speaker 3: Trillions in damage. But fixing it would also cost trillions. I mean, upgrading a nation's entire power grid or rebuilding its coastal defenses requires a colossal upfront investment. Where is that money supposed to come from, especially for countries that are already struggling?
+
+Speaker 2: It's a major challenge, but the analysis shows that inaction is far more expensive. The World Bank estimates that for every one dollar invested in making infrastructure more climate-resilient now, we could see a benefit of four dollars in avoided damages and disruptions down the road. It's a classic case of an ounce of prevention being worth a pound of cure.
+
+Speaker 1: When homes are destroyed, infrastructure fails, and livelihoods are lost, people are inevitably forced to move. The report identifies climate change as a powerful driver of human displacement.
+
+Speaker 2: Yes, it acts as a threat multiplier. The number of forcibly displaced people worldwide has nearly doubled in the last ten years, reaching an estimated 123.2 million by the end of 2024.
+
+Speaker 2: And while conflict is still a primary driver, the IPCC states with high confidence that climate and weather extremes are increasingly forcing people from their homes on every single continent. In fact, 2024 saw the highest number of new displacements from extreme weather in 16 years.
+
+Speaker 3: I understand the numbers, but I think it's tricky to label someone a climate refugee. People move for all sorts of reasons, for better jobs, to escape poverty, for family. How can you really untangle all those factors and say with certainty that someone was displaced specifically by climate change?
+
+Speaker 2: You've hit on the core of the issue. It's rarely a single cause, which is why the term threat multiplier is so accurate. A drought, for example, can kill crops, which leads to economic collapse, which can then lead to resource conflicts, and all of those factors together push people to move.
+
+Speaker 2: Climate change is the spark that ignites these other pre-existing vulnerabilities. And the report highlights a chilling statistic on this point: between 2010 and 2020, the death rate from floods, droughts, and storms was 15 times higher in highly vulnerable regions compared to the most secure ones.
+
+Speaker 4: And it's not just people who are being displaced and harmed. It's... it's everything else. The entire web of life that supports us.
+
+Speaker 1: That's a vital point, Maya. The report draws a direct line between the climate crisis and the broader biodiversity crisis that's happening all around us. Carter?
+
+Speaker 2: Yes, the two are deeply intertwined. Climate change is a primary driver of what many scientists now refer to as the Earth's sixth mass extinction. A landmark global assessment from the IPBES warned that an estimated one million animal and plant species are now threatened with extinction, many within decades.
+
+Speaker 2: While land use change is currently the biggest driver, climate change is projected to become as, or even more, important in the coming decades.
+
+Speaker 1: Can you give us a concrete example of this happening right now?
+
+Speaker 2: The most potent symbol is the fate of the world's coral reefs. The last decade has been catastrophic for them. The Great Barrier Reef, for instance, has suffered six mass coral bleaching events just since 2015.
+
+Speaker 2: These are caused by prolonged marine heatwaves that literally cook the coral, causing them to expel their symbiotic algae and turn white. The increasing frequency of these heatwaves leaves no time for the reefs to recover.
+
+Speaker 4: It's so hard to hear that. Losing the coral reefs... it's like imagining a world without the Amazon rainforest. It's a loss so profound you can't even begin to calculate the cost. A world that's just... less alive.
+
+Speaker 2: And the science is very clear on this. Scientists warn that if global warming exceeds the 1.5 degree target, over 90 percent of the world's tropical coral reefs could be lost by the middle of this century. It's a devastating blow to marine biodiversity and to the millions of people who depend on those reefs for their food and their livelihoods.
+
+Speaker 1: That is an incredibly sobering thought, Maya. A world that is simply less alive. We've spent this time detailing an accelerating crisis with devastating impacts on our health, our economy, and the very biodiversity of the planet. It's a stark picture. But the world has not been completely idle. The final section of the report assesses the global response.
+
+Speaker 1: Carter, the central pillar of international climate policy over the past decade has been the Paris Agreement, adopted back in 2015. For listeners who may not remember the details, can you remind us what it set out to achieve?
+
+Speaker 2: Of course. The Paris Agreement was a genuine diplomatic breakthrough. For the first time, it brought all nations, both developed and developing, into a common framework to combat climate change. Its main goals are to hold the increase in the global average temperature to well below 2 degrees Celsius above pre-industrial levels, and to pursue efforts to limit that temperature increase even further to 1.5 degrees Celsius.
+
+Speaker 1: And how was it designed to achieve that? What's the actual mechanism?
+
+Speaker 2: The agreement operates on a five-year cycle of what's called ratcheting ambition. The idea is that countries are required to submit their own national climate action plans, which are known as Nationally Determined Contributions, or NDCs. Then, every five years, they are supposed to come back to the table with a new, stronger plan that is more ambitious than their last one.
+
+Speaker 3: Okay, hold on. Nationally Determined Contributions. That sounds like a lot of diplomatic jargon. If I'm hearing you right, does that just mean that every country gets to make up its own plan, and there's no real penalty or enforcement if they don't follow it or if their plan is too weak?
+
+Speaker 2: You're not wrong, Frank. It is not an international treaty with a heavy-handed enforcement mechanism in the traditional sense. It's a framework that is built more on transparency, reporting, and a kind of global peer pressure. The idea is that by having everyone's commitments out in the open, and by regularly taking stock of our collective progress, countries will be encouraged and expected to ramp up their efforts over time.
+
+Speaker 4: So it's less of a strict global law and more of a collective promise. A set of promises, really. But based on everything we've talked about today, from the shattered temperature records to the accelerating ice melt, it seems like those promises are being broken.
+
+Speaker 1: Maya, that takes us directly to what the report calls the ambition gap. Carter, you explained the process. Now let's talk about the reality. How big is the shortfall between what countries have promised in their NDCs and what the science tells us we actually need to do?
+
+Speaker 2: The shortfall is massive. It's a chasm, really. The most recent analysis from the United Nations, which looked at the latest pledges from 195 countries, concluded that we are falling miles short of what's needed. If every country fully implemented its current pledges, we would see a global emission reduction of only about 5.9 percent by 2030 compared to 2019 levels.
+
+Speaker 4: Only six percent? That sounds tiny. How does that compare to the goal?
+
+Speaker 2: Well, the IPCC, the main scientific body, has found that to keep the 1.5 degree limit within reach, our emissions need to be slashed by at least 43 percent by 2030. So we are pledging for a six percent cut when we need a 43 percent cut.
+
+Speaker 2: This gap means that the sum of all these national promises currently has the world on a trajectory toward a catastrophic level of warming somewhere between 2.5 and 2.9 degrees Celsius.
+
+Speaker 3: That's just astounding. It's not a gap, it's a total disconnect from reality. So these huge annual conferences, the COPs we hear about on the news every year with all the world leaders, what are they actually achieving if the numbers are still this bad? Is it just a talking shop?
+
+Speaker 2: That's a criticism you hear a lot, and there is a great deal of frustration. These conferences are the primary venue for negotiating how to implement the Paris Agreement. They have produced some important outcomes. For instance, COP28 in Dubai produced the first ever global stocktake, which is essentially the world's climate report card. And it ended with a historic, first-ever call for countries to begin transitioning away from fossil fuels.
+
+Speaker 4: But Carter, the language there seems so important. I remember the debate was about a phase-out of fossil fuels, but the final agreement was to transition away from them. It feels like very carefully chosen, watered-down language. Does that kind of subtle change in wording actually lead to real-world action, or does it just give countries a loophole?
+
+Speaker 2: That is the heart of the debate. Many nations were deeply disappointed that the language wasn't stronger. The hope is that even that language signals a clear direction to the global economy. That same conference also established a global goal to triple renewable energy capacity and double the rate of energy efficiency improvements by 2030, which are very concrete targets.
+
+Speaker 1: And what about the most recent conference mentioned in the report, COP29?
+
+Speaker 2: That was dubbed the Finance COP. Its main job was to agree on a new climate finance goal to help developing nations. After very contentious negotiations, they agreed that developed countries should lead in mobilizing at least 300 billion dollars per year by 2035 for developing nations. But again, many of those nations expressed deep disappointment, stating that this number falls far, far short of their estimated needs, which are in the trillions.
+
+Speaker 1: This seems to be a recurring theme of falling short. Let's shift from the policy to the other major part of the response, which is technology. Here, the report does seem to highlight one area as a significant success story. And that is the renewables revolution.
+
+Speaker 2: Yes, this has been the brightest spot of the last decade without a doubt. We've seen an absolutely explosive growth of renewable energy technologies, especially solar panels and wind power. This was driven by incredible innovation and economies of scale, and it caused the costs of solar and wind to plummet.
+
+Speaker 2: They are now the cheapest sources of new electricity generation in most of the world. To give you a sense of the scale, in 2023, the world added a record 473 gigawatts of new renewable capacity. The International Energy Agency even forecasts that this year, in 2025, renewables will overtake coal as the single largest source of global electricity.
+
+Speaker 3: That's genuinely good news, and everyone loves seeing cheaper energy. But I noticed the report also says that we are still not on track to meet that COP28 goal of tripling renewable capacity by 2030.
+
+Speaker 3: Why is that? If this technology is so cheap and effective, why aren't we just building it everywhere, all the time, as fast as we possibly can? What's the hold-up?
+
+Speaker 2: It's a great question, Frank. The momentum is incredible, but the scale of the challenge is even bigger. To achieve that tripling goal, we would need to be adding, on average, around 1,050 gigawatts of new capacity every single year for the rest of the decade.
+
+Speaker 2: That's more than double the record we just set in 2023. The barriers are no longer primarily about cost; they are about things like modernizing our electrical grids to handle this new type of energy, overcoming supply chain bottlenecks for components, and streamlining the permitting processes to get projects built faster. So even in this huge success story, there is a major gap between our current progress and the required pace of change.
+
+Speaker 1: So, Carter, even our biggest technological success story, renewable energy, is facing a challenge of sheer scale and speed. The report points to another critical tool in the toolbox, something often called the first fuel, which is energy efficiency.
+
+Speaker 3: Now this is something that just seems like pure common sense to me. Using less energy to get the same result, whether it's an efficient appliance or an insulated home. It saves people money on their bills, it reduces strain on the power grid, and it cuts emissions. It seems like the absolute lowest-hanging fruit. Why aren't we talking about this constantly?
+
+Speaker 2: You are absolutely right, Frank. Improving energy efficiency is the cheapest and cleanest way to address our energy needs, which is why the COP28 goal to double the global average annual rate of energy efficiency improvements by 2030 is so critical. But the reality, as the report lays out, has been deeply disappointing.
+
+Speaker 1: How so? What does the data show?
+
+Speaker 2: After a brief speed-up in 2022, which was mostly in response to the global energy crisis, the rate of global energy intensity improvement slowed way down to just one percent in both 2023 and 2024. To be on a pathway to net-zero emissions, we need that rate to be averaging around four percent per year. So we are falling far short. The report effectively calls it a major and concerning policy failure on a global scale.
+
+Speaker 1: So if we're failing on the common-sense goal of efficiency, what about the more high-tech solutions that promise to clean up our existing emissions? Carter, the report spends some time on Carbon Capture, Utilisation, and Storage, or CCUS.
+
+Speaker 3: Again, on the surface, this sounds like a pragmatic solution. For those really difficult industries that are hard to electrify, like making cement or steel, why not just build a system to capture the carbon dioxide before it ever gets into the atmosphere? It seems like a logical way to solve the problem without having to completely shut down these essential industries overnight.
+
+Speaker 2: And that is exactly how it is often presented, Frank, as a necessary solution for these hard-to-abate sectors. And there is a lot of momentum in terms of announcements. The report notes there are over 700 projects in various stages of development. However, it also points to a massive gap between those announcements and the operational reality.
+
+Speaker 4: What do you mean by that? A gap between announcements and reality?
+
+Speaker 2: As of early 2024, the total global operational capacity for capturing CO2 was just over 50 million tonnes per year. That is a tiny fraction of what has been announced or proposed for 2030. And critically, only 20 percent of that announced capacity had actually reached a final investment decision.
+
+Speaker 2: This indicates that most of these projects are still just on the drawing board, they are not yet real. So deployment has consistently and significantly lagged behind the expectations and the promises.
+
+Speaker 4: You know, I have to wonder if there's a risk here that this technology just becomes an excuse. A way for fossil fuel companies and heavy industries to continue polluting under the promise that someday, in the future, they'll be able to clean it all up. It feels like it could be a dangerous distraction from the real work of actually cutting emissions at the source.
+
+Speaker 1: Speaking of potentially dangerous and controversial ideas, the report mentions that as the world falls further behind on emissions reductions, there is a growing, albeit highly contentious, interest in something called solar geoengineering. Carter, can you even begin to explain what that is?
+
+Speaker 2: I can try. It's also sometimes called solar radiation modification. This refers to a set of hypothetical technologies that are designed to cool the planet by reflecting a small fraction of incoming sunlight back out to space. The most commonly discussed method is called stratospheric aerosol injection, which would involve spraying reflective particles, like sulfur dioxide, into the upper atmosphere to mimic the cooling effect of a large volcanic eruption.
+
+Speaker 4: That sounds absolutely terrifying. I mean, the idea of us deliberately conducting a planetary-scale experiment with our only atmosphere, when we can't possibly predict all the consequences... it just feels like the height of human arrogance. We've already made one huge mess by pumping carbon dioxide into the air; this sounds like a way to make another, potentially even worse, mess.
+
+Speaker 2: Your reaction, Maya, captures the essence of the controversy. The scientific community is extremely cautious. The report emphasizes that geoengineering is not a substitute for cutting emissions. It does not address the root cause of the problem, which is the greenhouse gas blanket, and it carries immense and poorly understood risks.
+
+Speaker 2: It could potentially disrupt regional weather patterns, harm the ozone layer, and it creates a moral hazard by possibly reducing the incentive for us to do the hard work of decarbonizing our economies.
+
+Speaker 1: So it's seen as a last-ditch, break-glass-in-case-of-emergency option with huge potential side effects. Maya, your point about the arrogance of these high-tech ideas is well taken. And while we're discussing these futuristic and risky technologies, the report highlights a profound failure in a much more basic and immediate area: finance and justice for the people already suffering the consequences. Carter, can you explain what the report calls the adaptation finance gap?
+
+Speaker 2: This is one of the most sobering findings in the entire report. While much of the focus is on mitigation, which is cutting emissions, adaptation, which is preparing for the impacts of climate change, is equally critical, especially for the world's most vulnerable nations. The UNEP Adaptation Gap Report revealed a staggering shortfall in funding.
+
+Speaker 1: How big is the shortfall?
+
+Speaker 2: The report estimates that the annual adaptation finance needs of developing countries are somewhere between 215 billion and 387 billion dollars. In stark contrast, the total international public finance that flowed to these countries for adaptation in 2021 was just 21 billion dollars, which was actually a 15 percent decline from the year before.
+
+Speaker 2: This means the actual needs are 10 to 18 times greater than the funds that are actually being provided, leaving the most vulnerable communities dangerously exposed and underprepared.
+
+Speaker 3: I understand the need is great, but why is this framed as a justice issue? Isn't every country ultimately responsible for protecting its own citizens and adapting to its own challenges?
+
+Speaker 2: That question gets to the very core of the UN climate negotiations. The entire process is built upon a foundational principle known as common but differentiated responsibilities and respective capabilities. It's a bit of a mouthful, but the concept is straightforward.
+
+Speaker 2: It acknowledges that while all nations share a common responsibility to protect the global climate, the developed countries, which have been industrializing for over a century, bear a much greater historical responsibility for causing the problem in the first place. They also possess far greater financial and technological capabilities to address it.
+
+Speaker 4: So it's the idea that the polluter should pay. The ones who created the mess have a greater obligation to help clean it up, and to help protect those who are most harmed by it.
+
+Speaker 2: Exactly. Climate justice frameworks articulate this through the concept of a double inequality. The very people and nations who have contributed the least to the emissions that cause climate change are the ones who are suffering the earliest and most severe consequences.
+
+Speaker 2: Therefore, a just global response requires that the developed nations lead the way in making the deepest emissions cuts, and that they provide substantial financial and technological support to help developing nations adapt to the impacts they did little to cause.
+
+Speaker 1: Carter, you were just explaining this core principle of climate justice, that the nations with the greatest historical responsibility for emissions also have the greatest capacity to help solve the problem.
+
+Speaker 2: Yes, and it builds on what Maya was saying. It's about recognizing the profound unfairness, the, uh, double inequality that lies at the heart of the climate crisis. The people who are most harmed are the ones who did the least to cause the problem. Think about it, uh, a farmer in the Sahel whose land is turning to desert, or a family in a low-lying island nation whose home is threatened by sea level rise... their contribution to historical emissions is practically zero.
+
+Speaker 4: So what you're saying is, that farmer, whose crops are failing from a drought they had no part in creating, is right now paying a much, much higher price than someone in a wealthy country who has, you know, benefited from a century of industrial development powered by fossil fuels.
+
+Speaker 2: That is the injustice in a nutshell. And so, the framework for a just response is built on that understanding. It means developed nations have a moral and ethical obligation to lead with deep, rapid emissions cuts. And, crucially, it means they have an obligation to provide significant financial and technological support to help developing nations build clean economies and adapt to the impacts they are already facing.
+
+Speaker 3: I understand the moral argument. I do. But from a purely practical standpoint, it seems incredibly complicated. I mean, how far back do you go to assign this historical responsibility? Are you trying to calculate the emissions of the United Kingdom from the 1880s? It feels like an impossibly complex way to assign blame.
+
+Speaker 2: That's a fair point, Frank, and you know, it's less about calculating precise historical blame and more about acknowledging the reality of the present day. The framework is not about punishing past generations. It's about recognizing which nations today have the accumulated wealth, the technology, and the stable institutions, many of which were built on that history of fossil-fueled development, to lead the global response. It's about capability and responsibility in the here and now.
+
+Speaker 1: This whole conversation about justice, responsibility, and the immense shortfall in support really underscores the urgency of the crisis. And perhaps nothing in this entire report highlights that urgency more than the growing scientific understanding of a concept known as climate tipping points. Carter, for our listeners, what exactly is a tipping point?
+
+Speaker 2: It is probably the most sobering concept in all of climate science. The IPCC defines a tipping point as a critical threshold in the Earth's system. Once that threshold is crossed, a part of the system could trigger an abrupt, cascading, and potentially irreversible change.
+
+Speaker 1: Abrupt and irreversible. Those are two very powerful words. What does irreversible mean in this context?
+
+Speaker 2: It means that even if we managed to cool the planet back down later, the system might not flip back. The change could be locked in for centuries, or even millennia. We could pass a point of no return.
+
+Speaker 4: That is... a terrifying thought. So what are these systems? What parts of the planet are we talking about?
+
+Speaker 2: Scientists have identified several large-scale components of the Earth system that may have these tipping points. The most commonly discussed are the great ice sheets. We're talking about the irreversible collapse of the Greenland and the West Antarctic ice sheets.
+
+Speaker 1: And what would be the consequence of something like that?
+
+Speaker 2: Well, uh, together, those two ice sheets hold enough frozen water to raise the global mean sea level by over 10 meters. That's about 33 feet.
+
+Speaker 4: Ten meters... I... I can't even comprehend that. That's not just flooding. That is wiping entire cities, entire island nations, completely off the map for good.
+
+Speaker 2: Yes, the consequences would be civilization-altering. And another major tipping element is in the oceans themselves. A major slowdown or even a shutdown of the Atlantic Meridional Overturning Circulation, often called the AMOC.
+
+Speaker 3: The AMOC. I've heard of that, but it sounds like something out of a disaster movie. What does this current actually do for us?
+
+Speaker 2: It's a massive system of ocean currents that acts like a conveyor belt, transporting warm water from the tropics up to the North Atlantic. It plays a huge role in regulating weather patterns, especially in the Northern Hemisphere.
+
+Speaker 2: A collapse of this system would drastically alter weather across North America and Europe, causing, you know, extreme cooling in some places, changing rainfall patterns, and disrupting monsoons that billions of people depend on for their food.
+
+Speaker 1: So we have the ice and the oceans. What else?
+
+Speaker 2: Then we have the biosphere systems. There are two major ones scientists are deeply concerned about. The first is the potential dieback of the Amazon rainforest.
+
+Speaker 1: So the Amazon could go from being this vital carbon sink that helps us, to becoming a major carbon source that actually hurts us?
+
+Speaker 2: Precisely. Large parts of the forest could transition into a drier, savanna-like ecosystem. And in doing so, it would release the vast quantities of carbon stored in its trees and soil, which would create a powerful feedback loop that accelerates even more global warming.
+
+Speaker 4: And the other one? You hear people talk about a ticking carbon bomb in the Arctic. Is that what you mean?
+
+Speaker 2: That's the one. The abrupt, widespread thawing of permafrost. This is the permanently frozen ground in the Arctic regions, and it contains enormous amounts of organic carbon that has been locked away for thousands of years. As it thaws, microbes decompose that organic matter and release it into the atmosphere as carbon dioxide and, even more potently, methane. This is another one of those dangerous feedback loops.
+
+Speaker 1: So Carter, we have these massive, continent-scale systems that could fundamentally break. I think for a long time, many of us thought of these tipping points as very distant risks. You know, things that might happen if warming got really, really bad, say, at five or six degrees Celsius. What does the latest science in the report say about that?
+
+Speaker 2: This, Alice, is perhaps the single most concerning finding to emerge in the last few years of research. The scientific consensus has shifted. Those early estimates that suggested these were high-warming risks have been revised. The latest research, which is cited in the IPCC reports, indicates that the temperature thresholds for triggering some of these tipping points may be much, much lower than we previously thought.
+
+Speaker 3: How much lower are we talking about?
+
+Speaker 2: The latest studies indicate that several of these major tipping points, including the collapse of the Greenland and West Antarctic ice sheets, the shutdown of the AMOC, and widespread permafrost thaw, could potentially be triggered at warming levels between 1.5 and 2.0 degrees Celsius.
+
+Speaker 4: But wait a minute. Carter, you said at the very, very beginning of our conversation that the world already temporarily breached 1.5 degrees of warming in 2024. If the trigger point is 1.5 degrees, what does that mean for us right now?
+
+Speaker 2: It means... well, it means that the risk is no longer a distant, abstract threat for future generations. It places the crossing of these irreversible thresholds squarely within the realm of possibility this century. It moves the conversation from the future into the immediate present.
+
+Speaker 2: And, you know, it adds a profound, almost existential urgency to the need for immediate, deep, and drastic emissions reductions. The window of opportunity to steer away from these points is closing, and it is closing very, very rapidly.
+
+Speaker 1: That is a deeply unsettling reality to confront, Carter. And Maya, I see you reacting to that. When you hear that the 1.5 degree line, which we've talked about for so long as this future guardrail, is not only something we've touched but is also the potential trigger for these irreversible changes... what does that feel like?
+
+Speaker 4: You know, it... it almost takes your breath away. It feels like we've been driving towards a cliff in the fog, arguing about how fast we should be going. And Carter is saying the fog has just cleared, and we're right at the edge. We're there. That's a very, very hard thing to fully process.
+
+Speaker 3: It is. And it brings up a really difficult, practical question for me. If we're already on the verge of crossing these irreversible thresholds, what is the point of all this? I mean, does a 43 percent emissions cut by 2030, which already seems impossible, even matter anymore if the fuse has already been lit on something like the Greenland ice sheet? Have we... have we already lost the game?
+
+Speaker 2: Frank, that is the most important question anyone can ask right now. And the conclusion of the report, uh, argues that this is precisely why our actions now matter more than they ever have before. The first major conclusion is that the defining characteristic of the last decade is non-linear acceleration.
+
+Speaker 1: Okay, non-linear acceleration. Break that down for us.
+
+Speaker 2: Think of it like a car that's rolling down a hill. But the hill isn't a steady slope; it's a curve that gets steeper and steeper as you go. So for every foot you travel, your speed increases more than it did in the previous foot. You are accelerating exponentially, not in a straight line, not arithmetically. That's what's happening to our planetary systems. The risks are growing at an accelerating rate.
+
+Speaker 1: So every fraction of a degree of warming we can prevent now, every year we can act faster, has a much bigger impact in preventing that future acceleration than it would have twenty or thirty years ago.
+
+Speaker 2: Exactly. It's what scientists call positive feedback loops becoming more potent. So, to answer Frank's question, it's the absolute opposite of the game being lost. It means the stakes of our actions in the next five to ten years are higher than they have ever been in human history. Every ton of carbon we keep out of the atmosphere now pays huge dividends in slowing down that terrifying acceleration toward those tipping points.
+
+Speaker 1: And the report also concludes that these are not isolated problems, correct? It talks about a cascade of interconnected crises.
+
+Speaker 2: Yes, that's the second key takeaway. We can no longer think of climate impacts as a series of separate events. A drought is not just a lack of water. It is a trigger. It triggers failures in the food system when crops fail. It triggers failures in the economic system when farmers lose their livelihoods.
+
+Speaker 2: It triggers, you know, public health crises from malnutrition and water-borne diseases. It can even culminate in social instability and displacement. Climate change is a threat multiplier that makes all our existing vulnerabilities worse.
+
+Speaker 4: You can really see that in real life, can't you? I mean, a wildfire isn't just a fire anymore. It becomes a public health crisis for millions of people breathing in the smoke. It's an economic crisis for the entire region. It becomes a water crisis months later when the first heavy rains wash toxic ash and debris into the reservoirs. You realize that one event pulls on all the other threads that hold our society together. Everything is connected.
+
+Speaker 2: That's a perfect way to put it, Maya. And because everything is connected, the report concludes that our response has to be holistic. We can't have siloed policies that address energy, or agriculture, or public health in isolation. They are all part of the same interconnected challenge.
+
+Speaker 1: This brings us to the third, and perhaps the toughest, conclusion from the report. Which is that our global response, as it stands today, is being dangerously outpaced by the physical reality of climate change.
+
+Speaker 2: That's the hard truth of the last decade. Despite all the meetings and the progress on renewables, the response remains critically insufficient. The report concludes that this failure is defined by three persistent and widening gaps. First is the ambition gap we already discussed, the gap between the weak climate pledges from countries and what science clearly shows is necessary.
+
+Speaker 1: And the second?
+
+Speaker 2: The second is the adaptation finance gap, which we just covered. The massive shortfall in funding that leaves the world's most vulnerable populations essentially undefended against the coming storms and droughts. And the third is the justice gap, which undermines the trust and cooperation that are absolutely essential for any kind of effective global solution.
+
+Speaker 3: So if I'm hearing this correctly, the report's ultimate conclusion is that our primary problem is no longer a technological one. We have the solar panels, we have the wind turbines, we have the efficiency solutions. The report is saying that the biggest barriers now are political, financial, and social. It's about a lack of political will, a failure to mobilize the necessary funds, and a failure to address the core injustices of the crisis.
+
+Speaker 2: That is the absolute crux of the conclusion. Technology is a vital tool, an essential tool, but it is not a silver bullet. The primary obstacles are now in our halls of government, in our financial institutions, and, uh, in our collective willingness to face this reality and act at the scale it requires.
+
+Speaker 1: So after this incredibly detailed and, frankly, alarming look back at the last decade, where does this leave us? We have a planet in a state of acceleration. We've temporarily breached the 1.5 degree threshold. And the risk of irreversible tipping points is no longer a future problem, but a present-day danger. Maya, I want to start with you. What's your final takeaway?
+
+Speaker 4: It leaves me feeling that the time for simply being worried, or for abstract hope, is over. The only appropriate response to this level of evidence is determined action. This report is a story written in data, and it's telling us we have to transform this stark awareness into real, tangible work in our communities and demand it from our leaders. There's no time for anything else.
+
+Speaker 1: Frank?
+
+Speaker 3: It leaves me thinking that we need to have a much more honest and pragmatic conversation about the real-world costs and trade-offs. We've talked about technology and policy, but this report makes it clear that the real fight is over politics and economics. And until we tackle that head-on, with honesty, we'll keep falling short.
+
+Speaker 1: And Carter, a final thought from you.
+
+Speaker 2: The science has been clear for a long time, but the evidence from this past decade is definitive. You know, this period from 2015 to 2025 will be remembered as the decade the consequences of our inaction became undeniable. That temporary breach of 1.5 degrees served as a final, unambiguous warning. The scientific challenge now is to monitor these accelerating changes. But the human challenge is to finally close those gaps between promises and performance, before those tipping points are crossed for good.
+
+Speaker 1: Carter, that is a powerful and frankly stark place to end, on the precipice of these tipping points with the clock running out. But... you know, before we wrap up completely, I want to hold on that last thought. The human challenge. I feel we can't end just with the warning. I want to pivot from the problems we've detailed so thoroughly to the specific pathways forward that are emerging. Beyond the high-level policy failures, where are the new fronts in this challenge?
+
+Speaker 2: That's a crucial pivot to make, Alice. Because, uh, despair is paralyzing. And despite the failures, there are new strategies and, you know, new arenas of action that are gaining momentum.
+
+Speaker 1: Let's talk about one of those. We've mentioned the justice gap and the economic challenges. What about the people, the workers and communities, whose entire livelihoods are tied to the fossil fuel industries we need to transition away from?
+
+Speaker 2: You're talking about the concept of a Just Transition. And you know, this has become a central part of the conversation because it's both morally right and politically essential. A Just Transition means ensuring that the shift to a green economy is fair and inclusive. It means we don't leave coal miners, oil rig workers, and entire communities that depend on these industries behind.
\ No newline at end of file
diff --git a/demo/voices/en-Alice_woman.wav b/demo/voices/en-Alice_woman.wav
new file mode 100644
index 0000000..1517539
Binary files /dev/null and b/demo/voices/en-Alice_woman.wav differ
diff --git a/demo/voices/en-Alice_woman_bgm.wav b/demo/voices/en-Alice_woman_bgm.wav
new file mode 100644
index 0000000..6b3fc92
Binary files /dev/null and b/demo/voices/en-Alice_woman_bgm.wav differ
diff --git a/demo/voices/en-Carter_man.wav b/demo/voices/en-Carter_man.wav
new file mode 100644
index 0000000..e9d6500
Binary files /dev/null and b/demo/voices/en-Carter_man.wav differ
diff --git a/demo/voices/en-Frank_man.wav b/demo/voices/en-Frank_man.wav
new file mode 100644
index 0000000..d89eb4f
Binary files /dev/null and b/demo/voices/en-Frank_man.wav differ
diff --git a/demo/voices/en-Maya_woman.wav b/demo/voices/en-Maya_woman.wav
new file mode 100644
index 0000000..85abeba
Binary files /dev/null and b/demo/voices/en-Maya_woman.wav differ
diff --git a/demo/voices/in-Samuel_man.wav b/demo/voices/in-Samuel_man.wav
new file mode 100644
index 0000000..07606b6
Binary files /dev/null and b/demo/voices/in-Samuel_man.wav differ
diff --git a/demo/voices/zh-Anchen_man_bgm.wav b/demo/voices/zh-Anchen_man_bgm.wav
new file mode 100644
index 0000000..b6bba02
Binary files /dev/null and b/demo/voices/zh-Anchen_man_bgm.wav differ
diff --git a/demo/voices/zh-Bowen_man.wav b/demo/voices/zh-Bowen_man.wav
new file mode 100644
index 0000000..dd2a0be
Binary files /dev/null and b/demo/voices/zh-Bowen_man.wav differ
diff --git a/demo/voices/zh-Xinran_woman.wav b/demo/voices/zh-Xinran_woman.wav
new file mode 100644
index 0000000..e619d1d
Binary files /dev/null and b/demo/voices/zh-Xinran_woman.wav differ
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..ece97ec
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,40 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "vibevoice"
+version = "0.0.1"
+authors = [
+ { name="vibevoice team", email="vibepod@microsoft.com" },
+]
+description = "A model for speech generation with an AR + diffusion architecture."
+readme = "README.md"
+requires-python = ">=3.8"
+classifiers = [
+ "Programming Language :: Python :: 3",
+ # "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+]
+dependencies = [
+ "torch",
+ "accelerate==1.6.0",
+    "transformers==4.51.3", # developed against transformers==4.51.3; later versions may not be compatible
+ "diffusers",
+ "tqdm",
+ "numpy",
+ "scipy",
+ "ml-collections",
+ "absl-py",
+ "gradio",
+ "av",
+ "aiortc",
+]
+
+
+[project.urls]
+"Homepage" = "https://github.com/microsoft/VibeVoice"
+"Bug Tracker" = "https://github.com/microsoft/VibeVoice/issues"
+
+[tool.setuptools]
+packages = ["vibevoice",]
\ No newline at end of file
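The transformers pin above is a hard compatibility constraint, so it can be worth failing fast if the environment drifts from it. The snippet below is a minimal sketch rather than part of the patch; it only assumes the two pinned packages from the dependency list are importable.

```python
# Minimal sketch: fail fast if the environment drifts from the pins in pyproject.toml.
import accelerate
import transformers

assert transformers.__version__ == "4.51.3", (
    f"VibeVoice is developed against transformers==4.51.3, found {transformers.__version__}"
)
assert accelerate.__version__ == "1.6.0", (
    f"Expected accelerate==1.6.0, found {accelerate.__version__}"
)
```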
diff --git a/vibevoice/configs/qwen2.5_1.5b_64k.json b/vibevoice/configs/qwen2.5_1.5b_64k.json
new file mode 100644
index 0000000..febd05c
--- /dev/null
+++ b/vibevoice/configs/qwen2.5_1.5b_64k.json
@@ -0,0 +1,112 @@
+{
+ "_attn_implementation_autoset": true,
+ "acoustic_vae_dim": 64,
+ "acoustic_tokenizer_config": {
+ "causal": true,
+ "channels": 1,
+ "conv_bias": true,
+ "conv_norm": "none",
+ "corpus_normalize": 0.0,
+ "decoder_depths": null,
+ "decoder_n_filters": 32,
+ "decoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "disable_last_norm": true,
+ "encoder_depths": "3-3-3-3-3-3-8",
+ "encoder_n_filters": 32,
+ "encoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "fix_std": 0.5,
+ "layer_scale_init_value": 1e-06,
+ "layernorm": "RMSNorm",
+ "layernorm_elementwise_affine": true,
+ "layernorm_eps": 1e-05,
+ "mixer_layer": "depthwise_conv",
+ "model_type": "vibepod_acoustic_tokenizer",
+ "pad_mode": "constant",
+ "std_dist_type": "gaussian",
+ "vae_dim": 64,
+ "weight_init_value": 0.01
+ },
+ "decoder_config": {
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 65536,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "diffusion_head_config": {
+ "ddpm_batch_mul": 4,
+ "ddpm_beta_schedule": "cosine",
+ "ddpm_num_inference_steps": 20,
+ "ddpm_num_steps": 1000,
+ "diffusion_type": "ddpm",
+ "head_ffn_ratio": 3.0,
+ "head_layers": 4,
+ "hidden_size": 1536,
+ "latent_size": 64,
+ "model_type": "vibepod_diffusion_head",
+ "prediction_type": "v_prediction",
+ "rms_norm_eps": 1e-05,
+ "speech_vae_dim": 64
+ },
+ "model_type": "vibepod",
+ "semantic_tokenizer_config": {
+ "causal": true,
+ "channels": 1,
+ "conv_bias": true,
+ "conv_norm": "none",
+ "corpus_normalize": 0.0,
+ "disable_last_norm": true,
+ "encoder_depths": "3-3-3-3-3-3-8",
+ "encoder_n_filters": 32,
+ "encoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "fix_std": 0,
+ "layer_scale_init_value": 1e-06,
+ "layernorm": "RMSNorm",
+ "layernorm_elementwise_affine": true,
+ "layernorm_eps": 1e-05,
+ "mixer_layer": "depthwise_conv",
+ "model_type": "vibepod_semantic_tokenizer",
+ "pad_mode": "constant",
+ "std_dist_type": "none",
+ "vae_dim": 128,
+ "weight_init_value": 0.01
+ },
+ "semantic_vae_dim": 128,
+ "torch_dtype": "bfloat16"
+}
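For orientation, the encoder_ratios in this config imply the acoustic tokenizer's temporal compression. The sketch below assumes, as is typical for strided convolutional codecs, that the overall hop equals the product of the stride ratios; the config itself does not state this explicitly.

```python
# Sketch: temporal compression implied by encoder_ratios in qwen2.5_1.5b_64k.json,
# assuming the per-frame hop is the product of the stride ratios (an assumption,
# not something the config states explicitly).
import math

encoder_ratios = [8, 5, 5, 4, 2, 2]
hop = math.prod(encoder_ratios)
print(hop)  # 3200 input samples per acoustic latent frame
```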
diff --git a/vibevoice/configs/qwen2.5_7b_32k.json b/vibevoice/configs/qwen2.5_7b_32k.json
new file mode 100644
index 0000000..d39952c
--- /dev/null
+++ b/vibevoice/configs/qwen2.5_7b_32k.json
@@ -0,0 +1,113 @@
+{
+ "_attn_implementation_autoset": true,
+ "acoustic_vae_dim": 64,
+ "acoustic_tokenizer_config": {
+ "causal": true,
+ "channels": 1,
+ "conv_bias": true,
+ "conv_norm": "none",
+ "corpus_normalize": 0.0,
+ "decoder_depths": null,
+ "decoder_n_filters": 32,
+ "decoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "disable_last_norm": true,
+ "encoder_depths": "3-3-3-3-3-3-8",
+ "encoder_n_filters": 32,
+ "encoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "fix_std": 0.5,
+ "layer_scale_init_value": 1e-06,
+ "layernorm": "RMSNorm",
+ "layernorm_elementwise_affine": true,
+ "layernorm_eps": 1e-05,
+ "mixer_layer": "depthwise_conv",
+ "model_type": "vibepod_acoustic_tokenizer",
+ "pad_mode": "constant",
+ "std_dist_type": "gaussian",
+ "vae_dim": 64,
+ "weight_init_value": 0.01
+ },
+ "decoder_config": {
+ "attention_dropout": 0.0,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "rms_norm_eps": 1e-06,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ },
+ "diffusion_head_config": {
+ "ddpm_batch_mul": 4,
+ "ddpm_beta_schedule": "cosine",
+ "ddpm_num_inference_steps": 20,
+ "ddpm_num_steps": 1000,
+ "diffusion_type": "ddpm",
+ "head_ffn_ratio": 3.0,
+ "head_layers": 4,
+ "hidden_size": 3584,
+ "latent_size": 64,
+ "model_type": "vibepod_diffusion_head",
+ "prediction_type": "v_prediction",
+ "rms_norm_eps": 1e-05,
+ "speech_vae_dim": 64
+ },
+ "model_type": "vibepod",
+ "semantic_tokenizer_config": {
+ "causal": true,
+ "channels": 1,
+ "conv_bias": true,
+ "conv_norm": "none",
+ "corpus_normalize": 0.0,
+ "disable_last_norm": true,
+ "encoder_depths": "3-3-3-3-3-3-8",
+ "encoder_n_filters": 32,
+ "encoder_ratios": [
+ 8,
+ 5,
+ 5,
+ 4,
+ 2,
+ 2
+ ],
+ "fix_std": 0,
+ "layer_scale_init_value": 1e-06,
+ "layernorm": "RMSNorm",
+ "layernorm_elementwise_affine": true,
+ "layernorm_eps": 1e-05,
+ "mixer_layer": "depthwise_conv",
+ "model_type": "vibepod_semantic_tokenizer",
+ "pad_mode": "constant",
+ "std_dist_type": "none",
+ "vae_dim": 128,
+ "weight_init_value": 0.01
+ },
+ "semantic_vae_dim": 128,
+ "torch_dtype": "bfloat16"
+}
diff --git a/vibevoice/modular/__init__.py b/vibevoice/modular/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vibevoice/modular/configuration_vibevoice.py b/vibevoice/modular/configuration_vibevoice.py
new file mode 100644
index 0000000..fcffcb9
--- /dev/null
+++ b/vibevoice/modular/configuration_vibevoice.py
@@ -0,0 +1,248 @@
+""" VibeVoice_AcousticTokenizer model configuration"""
+
+from typing import Dict, List, Optional, Tuple
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
+
+logger = logging.get_logger(__name__)
+
+
+class VibeVoiceAcousticTokenizerConfig(PretrainedConfig):
+ model_type = "vibevoice_acoustic_tokenizer"
+
+ def __init__(
+ self,
+ channels: int = 1,
+ corpus_normalize: float = 0.0,
+ causal: bool = True,
+ vae_dim: int = 64,
+ fix_std: float = 0.5,
+ std_dist_type: str = 'gaussian',
+ # common
+ mixer_layer: str = 'depthwise_conv',
+ conv_norm: str = 'none',
+ pad_mode: str = 'constant',
+ disable_last_norm: bool = True,
+ layernorm: str = 'RMSNorm',
+ layernorm_eps: float = 1e-5,
+ layernorm_elementwise_affine: bool = True,
+ conv_bias: bool = True,
+ layer_scale_init_value: float = 1e-6,
+ weight_init_value: float = 1e-2,
+ # encoder specific
+ encoder_n_filters: int = 32,
+ encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
+ encoder_depths: str = "3-3-3-3-3-3-8",
+ # decoder specific
+ decoder_n_filters: int = 32,
+ decoder_ratios: Optional[List[int]] = None, # if None, same as encoder
+ decoder_depths: Optional[str] = None,
+ **kwargs
+ ):
+ super().__init__(**kwargs)
+ self.channels = channels
+ self.corpus_normalize = corpus_normalize
+ self.causal = causal
+ self.vae_dim = vae_dim
+ self.fix_std = fix_std
+ self.std_dist_type = std_dist_type
+
+ # common parameters
+ self.conv_norm = conv_norm
+ self.pad_mode = pad_mode
+ self.layernorm_eps = layernorm_eps
+ self.disable_last_norm = disable_last_norm
+ self.layernorm = layernorm
+ self.layernorm_elementwise_affine = layernorm_elementwise_affine
+ self.conv_bias = conv_bias
+ self.layer_scale_init_value = layer_scale_init_value
+ self.weight_init_value = weight_init_value
+ self.mixer_layer = mixer_layer
+
+ # encoder specific parameters
+ self.encoder_n_filters = encoder_n_filters
+ self.encoder_ratios = encoder_ratios
+ self.encoder_depths = encoder_depths
+
+ # decoder specific parameters
+ self.decoder_ratios = decoder_ratios if decoder_ratios is not None else encoder_ratios
+ self.decoder_n_filters = decoder_n_filters
+ self.decoder_depths = decoder_depths
+
+
+class VibeVoiceSemanticTokenizerConfig(PretrainedConfig):
+ model_type = "vibevoice_semantic_tokenizer"
+
+ def __init__(
+ self,
+ channels: int = 1,
+ corpus_normalize: float = 0.0,
+ causal: bool = True,
+ vae_dim: int = 64,
+ fix_std: float = 0,
+ std_dist_type: str = 'none',
+ # common
+ mixer_layer: str = 'depthwise_conv',
+ conv_norm: str = 'none',
+ pad_mode: str = 'constant',
+ disable_last_norm: bool = True,
+ layernorm: str = 'RMSNorm',
+ layernorm_eps: float = 1e-5,
+ layernorm_elementwise_affine: bool = True,
+ conv_bias: bool = True,
+ layer_scale_init_value: float = 1e-6,
+ weight_init_value: float = 1e-2,
+ # encoder specific
+ encoder_n_filters: int = 32,
+ encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
+ encoder_depths: str = "3-3-3-3-3-3-8",
+ **kwargs
+ ):
+ super().__init__(**kwargs)
+ self.channels = channels
+ self.corpus_normalize = corpus_normalize
+ self.causal = causal
+ self.vae_dim = vae_dim
+ self.fix_std = fix_std
+ self.std_dist_type = std_dist_type
+
+ # common parameters
+ self.conv_norm = conv_norm
+ self.pad_mode = pad_mode
+ self.layernorm_eps = layernorm_eps
+ self.disable_last_norm = disable_last_norm
+ self.layernorm = layernorm
+ self.layernorm_elementwise_affine = layernorm_elementwise_affine
+ self.conv_bias = conv_bias
+ self.layer_scale_init_value = layer_scale_init_value
+ self.weight_init_value = weight_init_value
+ self.mixer_layer = mixer_layer
+
+ # encoder specific parameters
+ self.encoder_n_filters = encoder_n_filters
+ self.encoder_ratios = encoder_ratios
+ self.encoder_depths = encoder_depths
+
+
+class VibeVoiceDiffusionHeadConfig(PretrainedConfig):
+ model_type = "vibevoice_diffusion_head"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ head_layers=4,
+ head_ffn_ratio=3.0,
+ rms_norm_eps=1e-5,
+ latent_size=64,
+ speech_vae_dim=None,
+ prediction_type="v_prediction",
+ diffusion_type="ddpm",
+ ddpm_num_steps=1000,
+ ddpm_num_inference_steps=20,
+ ddpm_beta_schedule="cosine",
+ ddpm_batch_mul=4,
+ **kwargs
+ ):
+ self.hidden_size = hidden_size
+ self.head_layers = head_layers
+ self.head_ffn_ratio = head_ffn_ratio
+ self.rms_norm_eps = rms_norm_eps
+ self.latent_size = latent_size
+ self.speech_vae_dim = speech_vae_dim
+ self.prediction_type = prediction_type
+ self.diffusion_type = diffusion_type
+ self.ddpm_num_steps = ddpm_num_steps
+ self.ddpm_num_inference_steps = ddpm_num_inference_steps
+ self.ddpm_beta_schedule = ddpm_beta_schedule
+ self.ddpm_batch_mul = ddpm_batch_mul
+
+ super().__init__(**kwargs)
+
+class VibeVoiceConfig(PretrainedConfig):
+ model_type = "vibevoice"
+ is_composition = True
+ sub_configs = {
+ "acoustic_tokenizer_config": VibeVoiceAcousticTokenizerConfig,
+ "semantic_tokenizer_config": VibeVoiceSemanticTokenizerConfig,
+ "decoder_config": Qwen2Config,
+ "diffusion_head_config": VibeVoiceDiffusionHeadConfig,
+ }
+ # keys_to_ignore_at_inference = ["past_key_values"]
+ # Default tensor parallel plan for base model `Qwen2`
+ base_model_tp_plan = {
+ "layers.*.self_attn.q_proj": "colwise",
+ "layers.*.self_attn.k_proj": "colwise",
+ "layers.*.self_attn.v_proj": "colwise",
+ "layers.*.self_attn.o_proj": "rowwise",
+ "layers.*.mlp.gate_proj": "colwise",
+ "layers.*.mlp.up_proj": "colwise",
+ "layers.*.mlp.down_proj": "rowwise",
+ }
+
+ def __init__(
+ self,
+ acoustic_tokenizer_config=None,
+ semantic_tokenizer_config=None,
+ decoder_config=None,
+ diffusion_head_config=None,
+ **kwargs
+ ):
+
+ # kwargs["_attn_implementation"] = "flash_attention_2"
+ kwargs["_attn_implementation_autoset"] = False
+
+ if acoustic_tokenizer_config is None:
+ self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"]()
+ elif isinstance(acoustic_tokenizer_config, dict):
+ acoustic_tokenizer_config["model_type"] = "vibevoice_acoustic_tokenizer"
+ self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"](**acoustic_tokenizer_config)
+ elif isinstance(acoustic_tokenizer_config, VibeVoiceAcousticTokenizerConfig):
+ # If an instance of the config class is provided
+ self.acoustic_tokenizer_config = acoustic_tokenizer_config
+
+ if semantic_tokenizer_config is None:
+ self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"]()
+ elif isinstance(semantic_tokenizer_config, dict):
+ semantic_tokenizer_config["model_type"] = "vibevoice_semantic_tokenizer"
+ self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"](**semantic_tokenizer_config)
+ elif isinstance(semantic_tokenizer_config, VibeVoiceSemanticTokenizerConfig):
+ # If an instance of the config class is provided
+ self.semantic_tokenizer_config = semantic_tokenizer_config
+
+ if decoder_config is None:
+ self.decoder_config = self.sub_configs["decoder_config"]()
+ elif isinstance(decoder_config, dict):
+ # If a dictionary is provided, instantiate the config class with it
+ # self.decoder_config = self.sub_configs["decoder_config"](**decoder_config)
+ if decoder_config.get("model_type", '') == "qwen2":
+ self.decoder_config = Qwen2Config(**decoder_config)
+ else:
+ raise ValueError(f"Unsupported decoder model type: {decoder_config.get('model_type', '')}")
+ elif isinstance(decoder_config, (Qwen2Config,)):
+ # If an instance of the config class is provided
+ self.decoder_config = decoder_config
+
+ if diffusion_head_config is None:
+ self.diffusion_head_config = self.sub_configs["diffusion_head_config"]()
+ elif isinstance(diffusion_head_config, dict):
+ diffusion_head_config["model_type"] = "vibevoice_diffusion_head"
+ self.diffusion_head_config = self.sub_configs["diffusion_head_config"](**diffusion_head_config)
+ elif isinstance(diffusion_head_config, VibeVoiceDiffusionHeadConfig):
+ # If an instance of the config class is provided
+ self.diffusion_head_config = diffusion_head_config
+
+ # other parameters
+ self.acoustic_vae_dim = getattr(self.acoustic_tokenizer_config, 'vae_dim', 64)
+ self.semantic_vae_dim = getattr(self.semantic_tokenizer_config, 'vae_dim', 128)
+
+ super().__init__(**kwargs)
+
+__all__ = [
+ "VibeVoiceAcousticTokenizerConfig",
+ "VibeVoiceSemanticTokenizerConfig",
+ "VibeVoiceDiffusionHeadConfig",
+ "VibeVoiceConfig"
+]
\ No newline at end of file
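A quick way to exercise these config classes is to build a VibeVoiceConfig from one of the JSON files added earlier in this patch. The sketch below assumes the vibevoice package is importable (e.g. after `pip install -e .`) and that the path is resolved relative to the repository root.

```python
# Minimal sketch: instantiate VibeVoiceConfig from one of the bundled JSON configs.
import json

from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig

with open("vibevoice/configs/qwen2.5_1.5b_64k.json") as f:
    cfg = VibeVoiceConfig(**json.load(f))

print(type(cfg.decoder_config).__name__)           # Qwen2Config
print(cfg.decoder_config.hidden_size)              # 1536 for the 1.5B variant
print(cfg.acoustic_vae_dim, cfg.semantic_vae_dim)  # 64 128
```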
diff --git a/vibevoice/modular/modeling_vibevoice.py b/vibevoice/modular/modeling_vibevoice.py
new file mode 100644
index 0000000..016a389
--- /dev/null
+++ b/vibevoice/modular/modeling_vibevoice.py
@@ -0,0 +1,488 @@
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union, Callable
+from tqdm import tqdm
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.distributed as dist
+
+from transformers.models.auto import AutoModel, AutoModelForCausalLM
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import CausalLMOutput, BaseModelOutputWithPast, ModelOutput
+from transformers.models.llama.modeling_llama import LlamaRMSNorm
+from transformers import modeling_utils
+from transformers.modeling_utils import PreTrainedModel
+from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+from transformers.utils import logging
+
+
+from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
+from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
+from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
+
+from .configuration_vibevoice import VibeVoiceConfig
+
+
+logger = logging.get_logger(__name__)
+
+if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
+ modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
+
+@dataclass
+class VibeVoiceCausalLMOutputWithPast(ModelOutput):
+ loss: Optional[torch.FloatTensor] = None
+ diffusion_loss: Optional[torch.FloatTensor] = None
+ speech_token_num: Optional[int] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class VibeVoiceGenerationOutput(ModelOutput):
+ """
+ Output type for VibeVoice generation.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences.
+ speech_outputs (`List[torch.FloatTensor]`, *optional*):
+ List of generated speech waveforms or latents for each speech segment.
+ """
+ sequences: torch.LongTensor = None
+ speech_outputs: Optional[List[torch.FloatTensor]] = None
+
+
+class SpeechConnector(nn.Module):
+ def __init__(self, input_dim, output_dim):
+ super().__init__()
+ self.fc1 = nn.Linear(input_dim, output_dim)
+ self.norm = LlamaRMSNorm(output_dim, eps=1e-6)
+ self.fc2 = nn.Linear(output_dim, output_dim)
+
+ def forward(self, features, **kwargs):
+ x = self.fc1(features)
+ x = self.norm(x)
+ x = self.fc2(x)
+ return x
+
+
+# @auto_docstring
+class VibeVoicePreTrainedModel(PreTrainedModel):
+ config_class = VibeVoiceConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _skip_keys_device_placement = "past_key_values"
+ _supports_cache_class = True
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_quantized_cache = True
+ _supports_static_cache = True
+ _supports_attention_backend = True
+
+ def _init_weights(self, module):
+ if isinstance(module, VibeVoiceDiffusionHead):
+ module.initialize_weights()
+ return
+
+ # Use the language model's initializer_range if available
+ if hasattr(self.config, 'language_model_config') and hasattr(self.config.language_model_config, 'initializer_range'):
+ std = self.config.language_model_config.initializer_range
+ elif hasattr(self.config, 'decoder_config') and hasattr(self.config.decoder_config, 'initializer_range'):
+ std = self.config.decoder_config.initializer_range
+ else:
+ std = 0.02 # Default value
+
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.weight.data.fill_(1.0)
+ module.bias.data.zero_()
+
+# @auto_docstring
+class VibeVoiceModel(VibeVoicePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, 'torch_dtype') and config.torch_dtype is not None:
+ if isinstance(config.torch_dtype, str):
+ dtype = getattr(torch, config.torch_dtype)
+ else:
+ dtype = config.torch_dtype
+ else:
+ dtype = torch.float32
+
+ # Initialize Qwen2 model for language modeling
+ lm_config = config.decoder_config
+ self.language_model = AutoModel.from_config(lm_config)
+
+ # Initialize speech components if needed
+ self.acoustic_tokenizer = AutoModel.from_config(config.acoustic_tokenizer_config).to(dtype)
+ self.semantic_tokenizer = AutoModel.from_config(config.semantic_tokenizer_config).to(dtype)
+
+ self.acoustic_connector = SpeechConnector(config.acoustic_vae_dim, lm_config.hidden_size).to(dtype)
+ self.semantic_connector = SpeechConnector(config.semantic_vae_dim, lm_config.hidden_size).to(dtype)
+
+ # Register scaling factors as buffers - use 1D tensors for FSDP compatibility
+ self.register_buffer('speech_scaling_factor', torch.tensor(float('nan')))
+ self.register_buffer('speech_bias_factor', torch.tensor(float('nan')))
+
+ # Initialize prediction head for speech generation
+ self.prediction_head = AutoModel.from_config(config.diffusion_head_config).to(dtype)
+
+ # Initialize noise scheduler
+ self.noise_scheduler = DPMSolverMultistepScheduler(
+ num_train_timesteps=config.diffusion_head_config.ddpm_num_steps,
+ beta_schedule=config.diffusion_head_config.ddpm_beta_schedule,
+ prediction_type=config.diffusion_head_config.prediction_type
+ )
+
+ def get_input_embeddings(self):
+ if hasattr(self.language_model, 'embed_tokens'):
+ # If the language model has an embed_tokens attribute, return it
+ return self.language_model.embed_tokens
+
+        for name, attr in self.language_model.fullmap.items():  # when the model is parallelized by nnscaler, parameter names are remapped
+ if attr.orig_name == 'embed_tokens.weight':
+ return getattr(self.language_model, name)
+ assert False, 'should not arrive here'
+
+ def set_input_embeddings(self, value):
+ self.language_model.embed_tokens = value
+
+ def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
+ """Set the speech tokenizers used for encoding and decoding speech."""
+ self.acoustic_tokenizer = acoustic_tokenizer
+ self.semantic_tokenizer = semantic_tokenizer
+
+ # Reset the encoder to evaluation mode
+ if self.acoustic_tokenizer is not None:
+ self.acoustic_tokenizer.eval()
+
+ if self.semantic_tokenizer is not None:
+ self.semantic_tokenizer.eval()
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Forward through language model
+ outputs = self.language_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ if not return_dict:
+ return outputs
+
+ return BaseModelOutputWithPast(
+ last_hidden_state=outputs.last_hidden_state,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class VibeVoiceForConditionalGeneration(VibeVoicePreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = VibeVoiceModel(config)
+ self.vocab_size = config.decoder_config.vocab_size
+ self.lm_head = nn.Linear(config.decoder_config.hidden_size, self.vocab_size, bias=False)
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_decoder(self, decoder):
+ self.model.language_model = decoder
+
+ def get_decoder(self):
+ return self.model.language_model
+
+ def tie_weights(self):
+ """
+ Tie the weights between the input embeddings and the output embeddings.
+ """
+ if getattr(self.config.decoder_config, 'tie_word_embeddings', False):
+            # Tie by direct parameter assignment (the same thing the standard
+            # PreTrainedModel tying method does); this is safe as long as it
+            # happens BEFORE FSDP wraps the model.
+ output_embeddings = self.get_output_embeddings()
+ input_embeddings = self.get_input_embeddings()
+ if hasattr(input_embeddings, 'weight'):
+ output_embeddings.weight = input_embeddings.weight
+ else:
+                # get_input_embeddings() may have returned the weight tensor directly
+ output_embeddings.weight = input_embeddings
+
+ if getattr(output_embeddings, "bias", None) is not None:
+ output_embeddings.bias.data = nn.functional.pad(
+ output_embeddings.bias.data,
+ (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
+ "constant",
+ 0,
+ )
+            print("✅ Tied input and output embeddings using standard assignment.")
+ else:
+            print("ℹ️ tie_word_embeddings is False, not tying weights.")
+
+    # Note: avoid calling set_output_embeddings after accelerator.prepare();
+    # the output head should only be replaced before the model is wrapped.
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+ def forward_speech_features(
+ self,
+ speech_tensors=None,
+ speech_masks=None,
+ speech_type="audio",
+ return_unmask=False
+ ):
+ if speech_tensors is None:
+ # Use config to get vae_dim instead of non-existent self.args
+ vae_dim = self.config.acoustic_tokenizer_config.vae_dim
+ audio_features = torch.zeros(1, 1, vae_dim).to(self.get_input_embeddings().weight)
+ connect_features = self.model.acoustic_connector(audio_features)
+ return audio_features, connect_features
+ else:
+ with torch.no_grad():
+ if speech_type == "audio":
+ with torch.no_grad():
+ frames = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))[0][0]
+ audio_tokens = frames.sample(self.model.acoustic_tokenizer.std_dist_type)[0]
+
+ elif speech_type == "vae":
+ # Use config to get vae_dim instead of non-existent self.args
+ vae_dim = self.config.acoustic_tokenizer_config.vae_dim
+ speech_mode = speech_tensors.reshape(speech_tensors.size(0), -1, vae_dim)
+
+ # gaussian sample from the speech_mode
+ batch_size = speech_mode.size(0)
+ value = self.model.acoustic_tokenizer.fix_std / 0.8
+ std = torch.randn(batch_size, dtype=speech_mode.dtype, device=speech_mode.device) * value
+ std = std.view(-1, *[1] * (speech_mode.dim() - 1))
+ audio_tokens = speech_mode + std * torch.randn(speech_mode.shape).to(speech_mode)
+ else:
+ raise NotImplementedError(f"Speech type {speech_type} not implemented")
+
+ if torch.isnan(self.model.speech_scaling_factor) or torch.isnan(self.model.speech_bias_factor):
+ scaling_factor = 1. / audio_tokens[speech_masks].flatten().std()
+ bias_factor = -audio_tokens[speech_masks].flatten().mean()
+
+ # Only use distributed operations if the process group is initialized
+ if dist.is_available() and dist.is_initialized():
+ dist.all_reduce(scaling_factor, op=dist.ReduceOp.SUM)
+ dist.all_reduce(bias_factor, op=dist.ReduceOp.SUM)
+ world_size = dist.get_world_size()
+ self.model.speech_scaling_factor.copy_(scaling_factor / world_size)
+ self.model.speech_bias_factor.copy_(bias_factor / world_size)
+ print(f"Speech scaling factor (distributed): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)
+ else:
+ # Single process case
+ self.model.speech_scaling_factor.copy_(scaling_factor)
+ self.model.speech_bias_factor.copy_(bias_factor)
+ print(f"Speech scaling factor (single process): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)
+
+ audio_features = (audio_tokens + self.model.speech_bias_factor) * self.model.speech_scaling_factor
+
+ connect_features = self.model.acoustic_connector(audio_features)
+ if return_unmask:
+ return audio_features, connect_features
+ return audio_features[speech_masks], connect_features[speech_masks]
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ # New arguments for speech processing and loss calculation
+ speech_tensors: Optional[torch.FloatTensor] = None,
+ speech_masks: Optional[torch.BoolTensor] = None,
+ speeches_loss_input: Optional[torch.FloatTensor] = None,
+ speech_semantic_tensors: Optional[torch.FloatTensor] = None,
+ acoustic_input_mask: Optional[torch.BoolTensor] = None,
+ acoustic_loss_mask: Optional[torch.BoolTensor] = None,
+ ddpm_batch_mul: int = 1,
+ **kwargs: Optional[Dict[str, Union[torch.Tensor, str]]],
+ ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ x = self.get_input_embeddings()(input_ids)
+
+ semantic_speech_all_connect_features = self.model.semantic_connector(speech_semantic_tensors)
+ if speeches_loss_input is not None:
+            # only part of the audio needs the diffusion loss
+ speech_all_features, speech_all_connect_features = self.forward_speech_features(
+ speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
+ speech_masks=speech_masks,
+ speech_type=kwargs.get("speech_type", "audio"),
+ return_unmask=True
+ )
+ if speech_tensors is not None:
+ if semantic_speech_all_connect_features is not None:
+ x[acoustic_input_mask] = speech_all_connect_features[speech_masks] + semantic_speech_all_connect_features[speech_masks]
+ else:
+ x[acoustic_input_mask] = speech_all_connect_features[speech_masks]
+                speech_features = speech_all_features[speeches_loss_input.unsqueeze(-1) & speech_masks]  # only part of the audio needs the diffusion loss
+ speech_connect_features = speech_all_connect_features[speeches_loss_input.unsqueeze(-1) & speech_masks]
+ else:
+ speech_features, speech_connect_features = self.forward_speech_features(
+ speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
+ speech_masks=speech_masks,
+ speech_type=kwargs.get("speech_type", "audio"),
+ )
+ if speech_tensors is not None:
+ x[acoustic_input_mask] = speech_connect_features
+
+ outputs = self.model(
+ input_ids=None,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=x,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=False,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ hidden_states = outputs.last_hidden_state
+ logits = self.lm_head(hidden_states)
+ # logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # The custom CE loss with masking is calculated in the training script.
+ # We leave the standard loss calculation here as None.
+ pass
+
+ # --- Diffusion Loss Calculation ---
+        diffusion_loss = None
+        speech_len = 0  # default so the return paths below are well-defined when no tokens need diffusion
+        # This block is executed only if we are in a context that involves speech.
+ if speech_tensors is not None and acoustic_loss_mask.sum().item() > 0:
+ condition_features = hidden_states[acoustic_loss_mask]
+
+ speech_len, latent_size = speech_features.shape
+
+ noise = torch.randn(
+ (speech_len * ddpm_batch_mul, latent_size),
+ device=hidden_states.device,
+ dtype=hidden_states.dtype
+ )
+
+ timesteps = torch.multinomial(
+ torch.ones(self.config.diffusion_head_config.ddpm_num_steps),
+ speech_len * ddpm_batch_mul,
+ replacement=True,
+ ).to(hidden_states.device)
+
+ speech_features_repeated = speech_features.repeat_interleave(ddpm_batch_mul, dim=0)
+ condition_features_repeated = condition_features.repeat_interleave(ddpm_batch_mul, dim=0)
+
+ noisy_speech_features = self.model.noise_scheduler.add_noise(
+ speech_features_repeated, noise, timesteps
+ )
+
+ model_output = self.model.prediction_head(
+ noisy_speech_features,
+ timesteps.type_as(x),
+ condition_features_repeated
+ )
+
+ prediction_type = self.config.diffusion_head_config.prediction_type
+ if prediction_type == "epsilon":
+ target_for_loss = noise
+ elif prediction_type == "v_prediction":
+ target_for_loss = self.model.noise_scheduler.get_velocity(
+ speech_features_repeated, noise, timesteps
+ )
+ else:
+ raise NotImplementedError(f"Prediction type {prediction_type} not implemented")
+
+ diffusion_loss = F.mse_loss(model_output.float(), target_for_loss.float(), reduction='sum')
+ if latent_size > 0 and ddpm_batch_mul > 0:
+ diffusion_loss = diffusion_loss / latent_size / ddpm_batch_mul
+ else:
+ diffusion_loss = torch.tensor(0.0, device=diffusion_loss.device)
+
+ else:
+ # Dummy loss for DDP to work when there are no speech samples in a batch,
+ # but we are in a speech context.
+ diffusion_loss = sum(p.sum() for p in self.model.prediction_head.parameters()) * 0.0
+ diffusion_loss += sum(p.sum() for p in self.model.acoustic_connector.parameters()) * 0.0
+ diffusion_loss += sum(p.sum() for p in self.model.semantic_connector.parameters()) * 0.0
+ # --- End Diffusion Loss Calculation ---
+
+ if not return_dict:
+ output = (logits, speech_len) + outputs.to_tuple()[1:]
+ return (loss, diffusion_loss) + output
+
+ return VibeVoiceCausalLMOutputWithPast(
+ loss=loss,
+ diffusion_loss=diffusion_loss,
+ speech_token_num=speech_len if speech_tensors is not None else 0,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+AutoModel.register(VibeVoiceConfig, VibeVoiceModel)
+AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGeneration)
+
+__all__ = [
+ "VibeVoiceModel",
+ "VibeVoicePreTrainedModel",
+ "VibeVoiceForConditionalGeneration",
+ "VibeVoiceCausalLMOutputWithPast",
+ "VibeVoiceGenerationOutput",
+]
\ No newline at end of file
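Of the modules above, SpeechConnector is the easiest to sanity-check in isolation. The sketch below is only a shape check under the 1.5B dimensions (acoustic_vae_dim=64, decoder hidden_size=1536); it assumes the package and its dependencies import cleanly.

```python
# Shape-check sketch for SpeechConnector with the 1.5B dimensions
# (acoustic_vae_dim=64 -> decoder hidden_size=1536).
import torch

from vibevoice.modular.modeling_vibevoice import SpeechConnector

connector = SpeechConnector(input_dim=64, output_dim=1536)
latents = torch.randn(2, 10, 64)   # (batch, latent frames, acoustic_vae_dim)
features = connector(latents)      # fc1 -> RMSNorm -> fc2
print(features.shape)              # torch.Size([2, 10, 1536])
```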
diff --git a/vibevoice/modular/modeling_vibevoice_inference.py b/vibevoice/modular/modeling_vibevoice_inference.py
new file mode 100644
index 0000000..7e10af4
--- /dev/null
+++ b/vibevoice/modular/modeling_vibevoice_inference.py
@@ -0,0 +1,715 @@
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union, Callable
+from tqdm import tqdm
+import torch
+import torch.nn as nn
+
+from transformers.models.auto import AutoModel, AutoModelForCausalLM
+
+from transformers.generation import GenerationMixin, GenerationConfig, LogitsProcessor, LogitsProcessorList, StoppingCriteriaList
+from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
+from transformers import modeling_utils
+from transformers.modeling_utils import PreTrainedModel
+from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+from transformers.utils import logging
+
+
+# from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
+from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceTokenizerEncoderOutput
+from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
+from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
+
+from .configuration_vibevoice import VibeVoiceConfig
+
+from .modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizer, VibeVoiceTextTokenizerFast
+
+from .modeling_vibevoice import VibeVoiceModel, VibeVoicePreTrainedModel
+from .streamer import AudioStreamer, AsyncAudioStreamer
+
+logger = logging.get_logger(__name__)
+
+if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
+ modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
+
+@dataclass
+class VibeVoiceCausalLMOutputWithPast(BaseModelOutputWithPast):
+ logits: Optional[torch.FloatTensor] = None
+
+@dataclass
+class VibeVoiceGenerationOutput(ModelOutput):
+ """
+ Output type for VibeVoice generation.
+
+ Args:
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The generated sequences.
+ speech_outputs (`List[torch.FloatTensor]`, *optional*):
+ List of generated speech waveforms or latents for each speech segment.
+ """
+ sequences: torch.LongTensor = None
+ speech_outputs: Optional[List[torch.FloatTensor]] = None
+ reach_max_step_sample: Optional[torch.BoolTensor] = None
+
+class VibeVoiceTokenConstraintProcessor(LogitsProcessor):
+ """Constrains token generation to only valid tokens during speech generation."""
+
+ def __init__(self, valid_token_ids: List[int], device: torch.device = None):
+ self.valid_token_ids = torch.tensor(valid_token_ids, dtype=torch.long, device=device)
+
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+ # Create a mask for valid tokens
+ mask = torch.full_like(scores, float('-inf'))
+ mask[:, self.valid_token_ids] = 0
+
+ # Apply mask to scores
+ scores = scores + mask
+ return scores
+
+class VibeVoiceForConditionalGenerationInference(VibeVoicePreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ # Initialize the base model
+ self.model = VibeVoiceModel(config)
+
+ # LM head for text generation
+ self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.decoder_config.vocab_size, bias=False)
+
+ # inference configuration
+ self.ddpm_inference_steps = config.diffusion_head_config.ddpm_num_inference_steps
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def noise_scheduler(self):
+ return self.model.noise_scheduler
+
+ @property
+ def prediction_head(self):
+ return self.model.prediction_head
+
+ @property
+ def speech_scaling_factor(self):
+ return self.model.speech_scaling_factor
+
+ @property
+ def speech_bias_factor(self):
+ return self.model.speech_bias_factor
+
+ @property
+ def acoustic_tokenizer(self):
+ return self.model.acoustic_tokenizer
+
+ @property
+ def semantic_tokenizer(self):
+ return self.model.semantic_tokenizer
+
+ @property
+ def acoustic_connector(self):
+ return self.model.acoustic_connector
+
+ @property
+ def semantic_connector(self):
+ return self.model.semantic_connector
+
+ def tie_weights(self):
+ """
+ Tie the weights between the input embeddings and the output embeddings.
+ """
+ # Tie lm_head.weight to language_model.embed_tokens.weight
+ if not getattr(self.config, 'tie_word_embeddings', False):
+ return
+
+ if hasattr(self, 'lm_head') and hasattr(self.model.language_model, 'embed_tokens'):
+ self.lm_head.weight = self.model.language_model.embed_tokens.weight
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
+ """Set the speech tokenizers used for encoding and decoding speech."""
+ self.model.set_speech_tokenizers(acoustic_tokenizer, semantic_tokenizer)
+
+ def set_ddpm_inference_steps(self, num_steps=None):
+ self.ddpm_inference_steps = num_steps or self.config.diffusion_head_config.ddpm_num_inference_steps
+
+ def _process_speech_inputs(self, speech_tensors, speech_masks, speech_type="audio"):
+ """Process speech inputs through tokenizers and connectors."""
+ with torch.no_grad():
+ if speech_type == "audio":
+ # Encode audio to acoustic latents
+ encoder_output = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))
+ acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
+
+ # Apply scaling and bias
+ acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
+
+ # Connect to language model space
+ acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
+
+ return acoustic_features, acoustic_connected
+ elif speech_type == "pt":
+ encoder_output = VibeVoiceTokenizerEncoderOutput(mean=speech_tensors, std=self.acoustic_tokenizer.config.fix_std)
+ acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
+
+ # Apply scaling and bias
+ acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
+
+ # Connect to language model space
+ acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
+
+ return acoustic_features, acoustic_connected
+ else:
+ raise NotImplementedError(f"Speech type {speech_type} not implemented")
+
+ # @can_return_tuple
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ speech_tensors: Optional[torch.FloatTensor] = None,
+ speech_masks: Optional[torch.BoolTensor] = None,
+ speech_input_mask: Optional[torch.BoolTensor] = None,
+ logits_to_keep: Union[int, slice] = 0,
+ **kwargs,
+ ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
+ """
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ speech_tensors (`torch.FloatTensor`, *optional*):
+ Input speech waveforms for voice cloning or speech understanding.
+ speech_masks (`torch.BoolTensor`, *optional*):
+ Masks indicating valid speech frames.
+ speech_input_mask (`torch.BoolTensor`, *optional*):
+ Positions in the input sequence where speech embeddings should be inserted.
+
+ Returns:
+ `VibeVoiceCausalLMOutputWithPast` or tuple
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Get embeddings
+ if inputs_embeds is None:
+ inputs_embeds = self.model.get_input_embeddings()(input_ids)
+
+ # Process speech inputs if provided
+ if speech_tensors is not None and speech_masks is not None:
+ acoustic_features, speech_embeds = self._process_speech_inputs(speech_tensors.to(self.dtype), speech_masks)
+ if speech_input_mask is not None:
+ inputs_embeds[speech_input_mask] = speech_embeds
+
+ outputs = self.model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0] if not return_dict else outputs.last_hidden_state
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+ if labels is not None:
+ raise NotImplementedError("Loss computation is not implemented in this version.")
+
+ return VibeVoiceCausalLMOutputWithPast(
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ last_hidden_state=hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def _build_generate_config_model_kwargs(self, generation_config, inputs, tokenizer, return_processors=False, **kwargs):
+ if generation_config is None:
+ generation_config = GenerationConfig(
+ bos_token_id=tokenizer.bos_token_id,
+ eos_token_id=tokenizer.eos_token_id,
+ pad_token_id = tokenizer.pad_token_id
+ )
+ else:
+ generation_config = GenerationConfig(
+ **generation_config,
+ bos_token_id=tokenizer.bos_token_id,
+ eos_token_id=tokenizer.eos_token_id,
+ pad_token_id = tokenizer.pad_token_id
+ )
+
+ generation_config, model_kwargs = self._prepare_generation_config(
+ generation_config,
+ True,
+ speech_start_id=tokenizer.speech_start_id,
+ speech_end_id=tokenizer.speech_end_id,
+ speech_diffusion_id=tokenizer.speech_diffusion_id,
+ **kwargs
+ )
+ generation_config.speech_start_id = tokenizer.speech_start_id
+ generation_config.speech_end_id = tokenizer.speech_end_id
+ generation_config.speech_diffusion_id = tokenizer.speech_diffusion_id
+
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs)
+ batch_size = inputs_tensor.shape[0]
+ device = self.device
+
+ self._prepare_special_tokens(generation_config, True, device=device)
+ generation_config.use_cache = True
+ model_kwargs["use_cache"] = generation_config.use_cache
+ input_ids = inputs_tensor.to(self.device)
+
+ input_ids_length = input_ids.shape[1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
+ generation_config = self._prepare_generated_length(
+ generation_config=generation_config,
+ has_default_max_length=has_default_max_length,
+ has_default_min_length=has_default_min_length,
+ model_input_name=model_input_name,
+ inputs_tensor=inputs_tensor,
+ input_ids_length=input_ids_length,
+ )
+
+ max_cache_length = generation_config.max_length - 1
+ self._prepare_cache_for_generation(generation_config, model_kwargs, None, batch_size, max_cache_length, device)
+ model_kwargs['cache_position'] = torch.arange(input_ids_length, device=device, dtype=torch.long)
+ for k, v in model_kwargs.items():
+ if isinstance(v, torch.Tensor):
+ model_kwargs[k] = v.to(device=device)
+
+ if return_processors:
+ logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_length,
+ encoder_input_ids=inputs_tensor,
+ prefix_allowed_tokens_fn=None,
+ logits_processor=LogitsProcessorList(),
+ device=inputs_tensor.device,
+ model_kwargs=model_kwargs,
+ )
+
+ stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=StoppingCriteriaList())
+
+ return generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria
+ else:
+ return generation_config, model_kwargs, input_ids
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
+ synced_gpus: Optional[bool] = None,
+ assistant_model: Optional["PreTrainedModel"] = None,
+ audio_streamer: Optional[Union[AudioStreamer, AsyncAudioStreamer]] = None,
+ negative_prompt_ids: Optional[torch.Tensor] = None,
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
+ speech_tensors: Optional[torch.FloatTensor] = None,
+ speech_masks: Optional[torch.BoolTensor] = None,
+ speech_input_mask: Optional[torch.BoolTensor] = None,
+ return_speech: bool = True,
+ cfg_scale: float = 1.0,
+ stop_check_fn: Optional[Callable[[], bool]] = None,
+ **kwargs,
+ ) -> Union[torch.LongTensor, VibeVoiceGenerationOutput]:
+ """
+ Generates sequences of token ids and optionally speech outputs.
+
+ Args:
+ All standard generation arguments from GenerationMixin
+ negative_prompt_ids: Negative prompt for CFG in speech generation
+ negative_prompt_attention_mask: Attention mask for negative prompt
+ speech_tensors: Input speech for voice cloning
+ speech_masks: Masks for speech tensors
+ speech_input_mask: Positions to insert speech embeddings
+ return_speech: Whether to decode and return speech outputs
+ cfg_scale: CFG scale for speech generation
+ stop_check_fn: Optional callable that returns True if generation should stop
+
+ Returns:
+ Generated token sequences and optionally speech outputs
+ """
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
+        tokenizer = kwargs.pop("tokenizer", None)  # Pull this out first; it supplies the speech/eos special token ids used below
+ parsed_scripts = kwargs.pop("parsed_scripts", None)
+ all_speakers_list = kwargs.pop("all_speakers_list", None)
+ max_length_times = kwargs.pop("max_length_times", 2)
+
+ if kwargs.get('max_new_tokens', None) is None:
+ kwargs['max_new_tokens'] = self.config.decoder_config.max_position_embeddings - kwargs['input_ids'].shape[-1]
+
+ generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria = self._build_generate_config_model_kwargs(
+ generation_config, inputs, tokenizer, return_processors=True, **kwargs
+ )
+
+ negative_kwargs = {
+ 'input_ids': torch.full((kwargs['input_ids'].shape[0], 1), tokenizer.speech_start_id, dtype=torch.long, device=kwargs['input_ids'].device),
+ 'attention_mask': torch.ones((kwargs['input_ids'].shape[0], 1), dtype=torch.long, device=kwargs['input_ids'].device),
+ 'max_new_tokens': kwargs.get('max_new_tokens', 100)
+ }
+ negative_generation_config, negative_model_kwargs, negative_input_ids = self._build_generate_config_model_kwargs(
+ None, None, tokenizer, return_processors=False, **negative_kwargs
+ )
+
+ acoustic_cache = VibeVoiceTokenizerStreamingCache()
+ semantic_cache = VibeVoiceTokenizerStreamingCache()
+
+ batch_size = input_ids.shape[0]
+ device = input_ids.device
+ finished_tags = torch.zeros(batch_size, dtype=torch.bool, device=device)
+ correct_cnt = torch.zeros(batch_size, dtype=torch.long, device=device)
+ is_prefill = True
+ inputs_embeds = None
+ verbose = kwargs.get("verbose", False)
+
+ # Initialize audio chunks storage for each sample
+ audio_chunks = [[] for _ in range(batch_size)]
+
+ initial_length = input_ids.shape[-1]
+ initial_length_per_sample = model_kwargs['attention_mask'].sum(dim=-1)
+
+ # Define all valid tokens that can be generated
+ valid_tokens = [
+ generation_config.speech_start_id,
+ generation_config.speech_end_id,
+ generation_config.speech_diffusion_id,
+ generation_config.eos_token_id
+ ]
+ # Add bos_token_id if it exists
+ if hasattr(generation_config, 'bos_token_id') and generation_config.bos_token_id is not None:
+ valid_tokens.append(generation_config.bos_token_id)
+
+ # Add custom processor to constrain token generation
+ token_constraint_processor = VibeVoiceTokenConstraintProcessor(valid_tokens, device=device)
+ if logits_processor is None:
+ logits_processor = LogitsProcessorList()
+ logits_processor.append(token_constraint_processor)
+
+ max_steps = min(generation_config.max_length - initial_length, int(max_length_times * initial_length))
+ max_step_per_sample = torch.min(generation_config.max_length - initial_length_per_sample, (max_length_times * initial_length_per_sample).long())
+ reach_max_step_sample = torch.zeros(batch_size, dtype=torch.bool, device=device)
+
+ # Create progress iterator if verbose
+ if kwargs.get("show_progress_bar", True):
+ progress_bar = tqdm(range(max_steps), desc="Generating", leave=False)
+ else:
+ progress_bar = range(max_steps)
+
+ for step in progress_bar:
+ # Check for external stop signal
+ if stop_check_fn is not None and stop_check_fn():
+ if verbose:
+ print(f"Generation stopped externally at step {step + 1}")
+ # End the audio streamer if it exists
+ if audio_streamer is not None:
+ audio_streamer.end()
+ break
+
+ # Check if audio_streamer has been ended (stopped externally)
+ if audio_streamer is not None and hasattr(audio_streamer, 'finished_flags'):
+ if any(audio_streamer.finished_flags):
+ if verbose:
+ print(f"Audio generation stopped externally at step {step + 1}")
+ break
+
+ if finished_tags.all():
+ if hasattr(progress_bar, 'set_description'):
+ progress_bar.set_description("Generation complete")
+ break
+
+ if input_ids.shape[-1] >= generation_config.max_length:
+                print(f"Reached maximum generation length {generation_config.max_length}; stopping generation.")
+ reached_samples = torch.arange(batch_size, device=device)[~finished_tags]
+ if reached_samples.numel() > 0:
+ reach_max_step_sample[reached_samples] = True
+ break
+
+ # Update progress bar description with active samples
+ if hasattr(progress_bar, 'set_description'):
+ active_samples = (~finished_tags).sum().item()
+ progress_bar.set_description(f"Generating (active: {active_samples}/{batch_size})")
+
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+ if is_prefill:
+ # we process the speech inputs only during the first generation step
+ prefill_inputs = {
+ "speech_tensors": speech_tensors.to(device=device),
+ "speech_masks": speech_masks.to(device),
+ "speech_input_mask": speech_input_mask.to(device),
+ }
+ is_prefill = False
+ else:
+ _ = model_inputs.pop('inputs_embeds', None)
+ prefill_inputs = {'inputs_embeds': inputs_embeds}
+
+ # Forward pass through the model
+ outputs = self(
+ **model_inputs, **prefill_inputs, logits_to_keep=1, return_dict=True, output_attentions=False, output_hidden_states=False,
+ )
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs, model_kwargs, is_encoder_decoder=False,
+ )
+
+ # Get logits and apply logits processor
+ next_token_logits = outputs.logits[:, -1, :].to(copy=True, dtype=torch.float32, device=input_ids.device)
+ # next_token_logits = outputs.logits[:, -1, :].to(copy=True, device=input_ids.device)
+ next_token_scores = logits_processor(input_ids, next_token_logits)
+
+ # token selection
+ if generation_config.do_sample:
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
+ else:
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
+
+ next_tokens[finished_tags] = generation_config.eos_token_id
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+
+ if not kwargs.get('refresh_negative', True):
+ negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
+ # Forward negative pass through the model
+ if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
+ negative_model_inputs['inputs_embeds'] = inputs_embeds
+ negative_model_inputs['input_ids'] = None
+
+ negative_outputs = self(
+ **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
+ )
+ negative_model_kwargs = self._update_model_kwargs_for_generation(
+ negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
+ )
+ negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)
+
+ # reached end of generation
+ if (next_tokens == generation_config.eos_token_id).any():
+ eos_indices = (next_tokens == generation_config.eos_token_id).nonzero(as_tuple=False).squeeze(1)
+ # Only print for samples that are newly finished (not already marked as finished)
+ new_eos_indices = eos_indices[~finished_tags[eos_indices]]
+ if new_eos_indices.numel() > 0:
+ finished_tags[new_eos_indices] = True
+ if verbose:
+ print(f"Samples {new_eos_indices.tolist()} reached EOS token at step {step + 1}.", flush=True)
+ if audio_streamer is not None:
+ audio_streamer.end(new_eos_indices)
+
+ # Check if any sample reached its maximum generation length
+ max_length_reached = step >= max_step_per_sample
+ new_max_length_indices = torch.nonzero(max_length_reached & ~finished_tags, as_tuple=False).squeeze(1)
+ if new_max_length_indices.numel() > 0:
+ finished_tags[new_max_length_indices] = True
+ reach_max_step_sample[new_max_length_indices] = True
+ if verbose:
+ print(f"Samples {new_max_length_indices.tolist()} reached max generation length at step {step + 1}.", flush=True)
+ if audio_streamer is not None:
+ audio_streamer.end(new_max_length_indices)
+
+ # speech_end
+ diffusion_end_indices = (next_tokens == generation_config.speech_end_id).nonzero(as_tuple=False).squeeze(1)
+ if diffusion_end_indices.numel() > 0:
+ # Clear tokenizer caches for samples that reached speech end
+ acoustic_cache.set_to_zero(diffusion_end_indices)
+ semantic_cache.set_to_zero(diffusion_end_indices)
+
+ # speech_begin
+ diffusion_start_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_start_id)]
+ if diffusion_start_indices.numel() > 0 and kwargs.get('refresh_negative', True):
+ # update attention mask
+ for i, sample_idx in enumerate(diffusion_start_indices.tolist()):
+ negative_model_kwargs['attention_mask'][sample_idx, :] = 0
+ negative_model_kwargs['attention_mask'][sample_idx, -1] = 1
+ # update past key values
+ for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
+ negative_model_kwargs['past_key_values'].value_cache)):
+                    # Process each sample that is entering diffusion (speech) mode
+ for sample_idx in diffusion_start_indices.tolist():
+ # Shift cache for this sample
+ k_cache[sample_idx, :, -1, :] = k_cache[sample_idx, :, 0, :].clone()
+ v_cache[sample_idx, :, -1, :] = v_cache[sample_idx, :, 0, :].clone()
+ # update negative_input_ids
+ for sample_idx in diffusion_start_indices.tolist():
+ negative_input_ids[sample_idx, -1] = generation_config.speech_start_id
+
+ # Prepare inputs_embeds for next iteration
+ # Initialize with default embeddings for all tokens
+ next_inputs_embeds = self.model.get_input_embeddings()(next_tokens).unsqueeze(1) # [batch_size, 1, hidden_size]
+
+ # forward diffusion
+ # Diffusion indices are those that are not finished and not special tokens
+ diffusion_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_diffusion_id)]
+
+ if diffusion_indices.numel() > 0:
+ if kwargs.get('refresh_negative', True):
+ negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
+ # Forward negative pass through the model
+ if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
+ negative_model_inputs['inputs_embeds'] = inputs_embeds
+ negative_model_inputs['input_ids'] = None
+
+ negative_outputs = self(
+ **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
+ )
+ negative_model_kwargs = self._update_model_kwargs_for_generation(
+ negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
+ )
+ negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)
+                    # Correct the non-diffusion indices: the negative forward pass is run for
+                    # every sample (even those not in diffusion mode) to keep the cache layout
+                    # consistent, so the KV cache of non-diffusion samples is corrected below.
+ non_diffusion_mask = ~finished_tags & (next_tokens != generation_config.speech_diffusion_id)
+ if non_diffusion_mask.any():
+ non_diffusion_indices = torch.arange(batch_size, device=device)[non_diffusion_mask]
+ start_indices = correct_cnt[non_diffusion_indices]
+
+ # 1. Update attention_mask - need to handle each sample separately
+ seq_len = negative_model_kwargs['attention_mask'].shape[1]
+ for i, (sample_idx, start_idx) in enumerate(zip(non_diffusion_indices.tolist(), start_indices.tolist())):
+ # Shift the attention mask for this sample
+ if start_idx + 1 < seq_len - 1:
+ negative_model_kwargs['attention_mask'][sample_idx, start_idx+1:] = \
+ negative_model_kwargs['attention_mask'][sample_idx, start_idx:-1].clone()
+ negative_model_kwargs['attention_mask'][sample_idx, start_idx] = 0
+
+ # 2. Update past_key_values
+ for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
+ negative_model_kwargs['past_key_values'].value_cache)):
+ # Process each non-diffusion sample
+ for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
+ if start_idx + 1 < k_cache.shape[2] - 1:
+ # Shift cache for this sample
+ k_cache[sample_idx, :, start_idx+1:, :] = k_cache[sample_idx, :, start_idx:-1, :].clone()
+ v_cache[sample_idx, :, start_idx+1:, :] = v_cache[sample_idx, :, start_idx:-1, :].clone()
+
+ # 3. Update negative_input_ids
+ for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
+ if start_idx + 1 < negative_input_ids.shape[1] - 1:
+ negative_input_ids[sample_idx, start_idx+1:] = \
+ negative_input_ids[sample_idx, start_idx:-1].clone()
+
+ correct_cnt[non_diffusion_indices] += 1
+
+ positive_condition = outputs.last_hidden_state[diffusion_indices, -1, :]
+ negative_condition = negative_outputs.last_hidden_state[diffusion_indices, -1, :]
+
+ speech_latent = self.sample_speech_tokens(
+ positive_condition,
+ negative_condition,
+ cfg_scale=cfg_scale,
+ ).unsqueeze(1)
+
+ # Decode acoustic latent to audio using acoustic streaming cache
+ scaled_latent = speech_latent / self.model.speech_scaling_factor.to(speech_latent.device) - self.model.speech_bias_factor.to(speech_latent.device)
+ audio_chunk = self.model.acoustic_tokenizer.decode(
+ scaled_latent.to(self.model.acoustic_tokenizer.device),
+ cache=acoustic_cache, # Use acoustic-specific cache
+ sample_indices=diffusion_indices.to(self.model.acoustic_tokenizer.device),
+ use_cache=True,
+ debug=False
+ )
+
+ # Store audio chunks for each sample
+ for i, sample_idx in enumerate(diffusion_indices):
+ idx = sample_idx.item()
+ # Only append audio chunk if the sample is not finished
+ if not finished_tags[idx]:
+ audio_chunks[idx].append(audio_chunk[i])
+
+ # Add streaming support here
+ if audio_streamer is not None:
+ # Stream the audio chunks immediately
+ audio_streamer.put(audio_chunk, diffusion_indices)
+
+ # Encode audio to semantic features using semantic streaming cache
+ semantic_features = self.model.semantic_tokenizer.encode(
+ audio_chunk,
+ cache=semantic_cache, # Use semantic-specific cache
+ sample_indices=diffusion_indices,
+ use_cache=True,
+ debug=False
+ ).mean # semantic tokenizer has no VAE.
+
+ # Combine acoustic and semantic features for next input
+ acoustic_embed = self.model.acoustic_connector(speech_latent)
+ semantic_embed = self.model.semantic_connector(semantic_features)
+ diffusion_embeds = acoustic_embed + semantic_embed
+
+ # Update embeddings for diffusion indices
+ next_inputs_embeds[diffusion_indices] = diffusion_embeds
+
+ # Set inputs_embeds for next iteration
+ inputs_embeds = next_inputs_embeds
+
+ if audio_streamer is not None:
+ audio_streamer.end()
+
+ # Concatenate audio chunks for each sample
+ final_audio_outputs = []
+ for sample_chunks in audio_chunks:
+ if sample_chunks:
+ # Concatenate all chunks along the time dimension (assumed to be the last dimension)
+ concatenated_audio = torch.cat(sample_chunks, dim=-1)
+ final_audio_outputs.append(concatenated_audio)
+ else:
+ # If no audio was generated for this sample, append None
+ final_audio_outputs.append(None)
+
+ return VibeVoiceGenerationOutput(
+ sequences=input_ids,
+ speech_outputs=final_audio_outputs if return_speech else None,
+ reach_max_step_sample=reach_max_step_sample,
+ )
+
+ @torch.no_grad()
+ def sample_speech_tokens(self, condition, neg_condition, cfg_scale=3.0):
+ self.model.noise_scheduler.set_timesteps(self.ddpm_inference_steps)
+ condition = torch.cat([condition, neg_condition], dim=0).to(self.model.prediction_head.device)
+ speech = torch.randn(condition.shape[0], self.config.acoustic_vae_dim).to(condition)
+ for t in self.model.noise_scheduler.timesteps:
+ half = speech[: len(speech) // 2]
+ combined = torch.cat([half, half], dim=0)
+ eps = self.model.prediction_head(combined, t.repeat(combined.shape[0]).to(combined), condition=condition)
+ cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
+ half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
+ eps = torch.cat([half_eps, half_eps], dim=0)
+ speech = self.model.noise_scheduler.step(eps, t, speech).prev_sample
+ return speech[: len(speech) // 2]
+
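+# Illustrative sketch (not part of the original code): the classifier-free guidance
+# step inside `sample_speech_tokens` combines the conditional and unconditional noise
+# predictions; toy tensors stand in for the prediction head outputs.
+#
+#   cond_eps = torch.randn(4, 64)
+#   uncond_eps = torch.randn(4, 64)
+#   cfg_scale = 1.3
+#   guided_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)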
+
+AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGenerationInference)
+
+__all__ = [
+ "VibeVoiceForConditionalGenerationInference",
+]
diff --git a/vibevoice/modular/modular_vibevoice_diffusion_head.py b/vibevoice/modular/modular_vibevoice_diffusion_head.py
new file mode 100644
index 0000000..59de50f
--- /dev/null
+++ b/vibevoice/modular/modular_vibevoice_diffusion_head.py
@@ -0,0 +1,287 @@
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from transformers.models.auto import AutoModel
+from transformers.modeling_utils import PreTrainedModel
+# from transformers.modeling_layers import GradientCheckpointingLayer
+from transformers.activations import ACT2FN
+from transformers.utils import logging
+
+from .configuration_vibevoice import VibeVoiceDiffusionHeadConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+class RMSNorm(nn.Module):
+ def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine=True, memory_efficient=False):
+ super().__init__()
+ self.dim = dim
+ self.eps = eps
+ self.elementwise_affine = elementwise_affine
+ if self.elementwise_affine:
+ self.weight = nn.Parameter(torch.ones(dim))
+ else:
+ self.register_parameter('weight', None)
+
+ def _norm(self, x):
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+ def forward(self, x):
+ output = self._norm(x.float()).type_as(x)
+ if self.weight is not None:
+ output = output * self.weight
+ return output
+
+ def extra_repr(self) -> str:
+ return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'
+
+def modulate(x, shift, scale):
+ """Apply modulation to input tensor."""
+ return x * (1 + scale) + shift
+
+
+class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+
+ Args:
+ hidden_size (`int`): Size of the output embedding
+ frequency_embedding_size (`int`, optional): Size of the intermediate frequency embedding
+ """
+ def __init__(self, hidden_size, frequency_embedding_size=256):
+ super().__init__()
+ self.mlp = nn.Sequential(
+ nn.Linear(frequency_embedding_size, hidden_size, bias=False),
+ # nn.SiLU(),
+ ACT2FN['silu'],
+ nn.Linear(hidden_size, hidden_size, bias=False),
+ )
+ self.frequency_embedding_size = frequency_embedding_size
+
+ @staticmethod
+ def timestep_embedding(t, dim, max_period=10000):
+ """
+ Create sinusoidal timestep embeddings.
+
+ Args:
+ t (`torch.Tensor`): A 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ dim (`int`): The dimension of the output.
+ max_period (`int`, optional): Controls the minimum frequency of the embeddings.
+
+ Returns:
+ `torch.Tensor`: An [N, D] Tensor of positional embeddings.
+ """
+ half = dim // 2
+ freqs = torch.exp(
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+ ).to(t.device)
+ args = t[:, None].float() * freqs[None]
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+ return embedding.to(t.dtype)
+
+ def forward(self, t):
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+ t_emb = self.mlp(t_freq)
+ return t_emb
+
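+# Illustrative sketch (not part of the original code): TimestepEmbedder maps a batch
+# of scalar diffusion timesteps to vectors of size `hidden_size`; values are arbitrary.
+#
+#   emb = TimestepEmbedder(hidden_size=512, frequency_embedding_size=256)
+#   t = torch.tensor([0.0, 10.0, 999.0])
+#   t_emb = emb(t)                   # -> shape [3, 512]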
+
+class FeedForwardNetwork(nn.Module):
+ """
+ Standard feed-forward network with SwiGLU activation.
+
+ Args:
+ embed_dim (`int`): Input dimension
+ ffn_dim (`int`): Hidden dimension
+ """
+ def __init__(
+ self,
+ embed_dim,
+ ffn_dim,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.gate_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False)
+ self.up_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False)
+ self.down_proj = nn.Linear(ffn_dim, self.embed_dim, bias=False)
+ self.act_fn = ACT2FN['silu'] # Using SiLU as the activation function
+
+ def forward(self, x):
+ gate = self.gate_proj(x)
+ up = self.up_proj(x)
+
+ # SwiGLU activation
+ # gate = F.silu(gate)
+ gate = self.act_fn(gate)
+ return self.down_proj(gate * up)
+
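+# Illustrative sketch (not part of the original code): the SwiGLU feed-forward above
+# computes down_proj(silu(gate_proj(x)) * up_proj(x)) and preserves the input shape.
+#
+#   ffn = FeedForwardNetwork(embed_dim=512, ffn_dim=2048)
+#   x = torch.randn(2, 7, 512)
+#   y = ffn(x)                       # -> shape [2, 7, 512]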
+
+class HeadLayer(nn.Module):
+ """
+ A layer in the diffusion head.
+
+ Args:
+ embed_dim (`int`): Input dimension
+ ffn_dim (`int`): Hidden dimension
+ cond_dim (`int`): Condition embedding dimension
+ norm_eps (`float`, optional): Epsilon for normalization
+ """
+ def __init__(
+ self,
+ embed_dim,
+ ffn_dim,
+ cond_dim,
+ norm_eps=1e-5,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.cond_dim = cond_dim
+ self.ffn_dim = ffn_dim
+ self.ffn = FeedForwardNetwork(
+ self.embed_dim,
+ self.ffn_dim,
+ )
+ self.norm = RMSNorm(self.embed_dim, eps=norm_eps)
+ self.adaLN_modulation = nn.Sequential(
+ # nn.SiLU(),
+ ACT2FN['silu'],
+ nn.Linear(cond_dim, 3 * self.embed_dim, bias=False)
+ )
+
+ def forward(self, x, c):
+ shift_ffn, scale_ffn, gate_ffn = self.adaLN_modulation(c).chunk(3, dim=-1)
+ x = x + gate_ffn * self.ffn(modulate(self.norm(x), shift_ffn, scale_ffn))
+ return x
+
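+# Illustrative sketch (not part of the original code): a HeadLayer modulates its FFN
+# branch with shift/scale/gate values predicted from the conditioning vector (adaLN),
+# then adds a residual connection; sizes are arbitrary.
+#
+#   layer = HeadLayer(embed_dim=512, ffn_dim=2048, cond_dim=512)
+#   x = torch.randn(4, 512)          # latent features
+#   c = torch.randn(4, 512)          # timestep + condition embedding
+#   y = layer(x, c)                  # -> shape [4, 512]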
+
+class FinalLayer(nn.Module):
+ """
+ Final layer in the diffusion head.
+
+ Args:
+ hidden_size (`int`): Input dimension
+ output_size (`int`): Output dimension
+ cond_size (`int`): Condition embedding dimension
+ norm_eps (`float`, optional): Epsilon for normalization
+ """
+ def __init__(self, hidden_size, output_size, cond_size, norm_eps=1e-5):
+ super().__init__()
+ self.norm_final = RMSNorm(hidden_size, eps=norm_eps, elementwise_affine=False)
+ self.linear = nn.Linear(hidden_size, output_size, bias=False)
+ self.adaLN_modulation = nn.Sequential(
+ # nn.SiLU(),
+ ACT2FN['silu'],
+ nn.Linear(cond_size, 2 * hidden_size, bias=False)
+ )
+
+ def forward(self, x, c):
+ shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
+ x = modulate(self.norm_final(x), shift, scale)
+ x = self.linear(x)
+ return x
+
+
+class VibeVoiceDiffusionHead(PreTrainedModel):
+ """
+    Diffusion head model for VibeVoice.
+
+ Args:
+ config (`VibeVoiceDiffusionHeadConfig`): Model configuration
+ latent_size (`int`, optional): Size of the latent space. If not provided, uses `config.latent_size`.
+ """
+ config_class = VibeVoiceDiffusionHeadConfig
+ supports_gradient_checkpointing = True
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+
+ def __init__(
+ self,
+ config,
+ ):
+ super().__init__(config)
+ self.config = config
+ self.cond_dim = config.hidden_size
+ latent_size = config.latent_size
+
+ self.noisy_images_proj = nn.Linear(latent_size, config.hidden_size, bias=False)
+ self.cond_proj = nn.Linear(config.hidden_size, self.cond_dim, bias=False)
+ self.t_embedder = TimestepEmbedder(self.cond_dim)
+
+ ffn_dim = int(config.hidden_size * config.head_ffn_ratio)
+
+ # Create the intermediate layers
+ self.layers = nn.ModuleList([
+ HeadLayer(
+ embed_dim=config.hidden_size,
+ ffn_dim=ffn_dim,
+ cond_dim=self.cond_dim,
+ norm_eps=config.rms_norm_eps
+ )
+ for _ in range(config.head_layers)
+ ])
+
+ # Final layer for output
+ self.final_layer = FinalLayer(
+ hidden_size=config.hidden_size,
+ output_size=latent_size,
+ cond_size=self.cond_dim,
+ norm_eps=config.rms_norm_eps
+ )
+
+ self.initialize_weights()
+
+ def initialize_weights(self):
+ """Initialize the weights of the model."""
+ # Initialize timestep embedder
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+ # Zero-out adaLN modulation layers
+ for layer in self.layers:
+ nn.init.constant_(layer.adaLN_modulation[-1].weight, 0)
+
+ # Zero-out output layers
+ nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
+ nn.init.constant_(self.final_layer.linear.weight, 0)
+
+ def forward(
+ self,
+ noisy_images,
+ timesteps,
+ condition,
+ ):
+ """
+ Forward pass of the prediction head.
+
+ Args:
+ noisy_images (`torch.Tensor`): Noisy images/latents to denoise
+ timesteps (`torch.Tensor`): Timesteps for diffusion
+ condition (`torch.Tensor`): Conditioning information
+
+ Returns:
+ `torch.Tensor`: The predicted noise/velocity
+ """
+ x = self.noisy_images_proj(noisy_images)
+ t = self.t_embedder(timesteps)
+ condition = self.cond_proj(condition)
+ c = condition + t
+
+ for layer in self.layers:
+ x = layer(x, c)
+
+ x = self.final_layer(x, c)
+ return x
+
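+# Illustrative sketch (not part of the original code): running the diffusion head on a
+# batch of noisy latents. It assumes VibeVoiceDiffusionHeadConfig accepts the fields
+# referenced above (hidden_size, latent_size, head_ffn_ratio, head_layers, rms_norm_eps)
+# as keyword arguments; the values are arbitrary.
+#
+#   config = VibeVoiceDiffusionHeadConfig(hidden_size=512, latent_size=64,
+#                                         head_ffn_ratio=4.0, head_layers=4,
+#                                         rms_norm_eps=1e-5)
+#   head = VibeVoiceDiffusionHead(config)
+#   noisy = torch.randn(8, 64)                  # noisy acoustic latents
+#   t = torch.randint(0, 1000, (8,)).float()    # diffusion timesteps
+#   cond = torch.randn(8, 512)                  # conditioning from the language model
+#   pred = head(noisy, t, cond)                 # -> shape [8, 64]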
+
+AutoModel.register(VibeVoiceDiffusionHeadConfig, VibeVoiceDiffusionHead)
+
+__all__ = [
+ "VibeVoiceDiffusionHead",
+]
\ No newline at end of file
diff --git a/vibevoice/modular/modular_vibevoice_text_tokenizer.py b/vibevoice/modular/modular_vibevoice_text_tokenizer.py
new file mode 100644
index 0000000..bfa7bdd
--- /dev/null
+++ b/vibevoice/modular/modular_vibevoice_text_tokenizer.py
@@ -0,0 +1,214 @@
+"""Tokenization classes for vibevoice."""
+
+from typing import List, Optional, Union
+
+from transformers.utils import logging
+from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
+from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast
+
+logger = logging.get_logger(__name__)
+
+
+class VibeVoiceTextTokenizer(Qwen2Tokenizer):
+ """
+ Construct a VibeVoice tokenizer. Based on the Qwen2 tokenizer with additional special tokens for speech.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8.
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The unknown token.
+ bos_token (`str`, *optional*):
+            The beginning of sequence token. Not used by VibeVoice.
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The end of sequence token.
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The token used for padding.
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to add special tokens when encoding.
+ """
+
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ unk_token="<|endoftext|>",
+ bos_token=None,
+ eos_token="<|endoftext|>",
+ pad_token="<|endoftext|>",
+ add_prefix_space=False,
+ add_special_tokens=True,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ errors=errors,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ add_prefix_space=add_prefix_space,
+ add_special_tokens=add_special_tokens,
+ **kwargs,
+ )
+
+ # Add VibeVoice-specific special tokens
+ self._add_vibevoice_special_tokens()
+
+ def _add_vibevoice_special_tokens(self):
+ """Add VibeVoice-specific special tokens."""
+ special_tokens = {
+ "additional_special_tokens": [
+ "<|vision_start|>", # Speech start (reusing vision tokens)
+ "<|vision_end|>", # Speech end
+ "<|vision_pad|>", # Speech diffusion pad
+ ]
+ }
+ num_added = self.add_special_tokens(special_tokens)
+
+ # Cache special token IDs
+ self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
+ self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
+ self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")
+
+ self._eos_id = self.convert_tokens_to_ids('<|endoftext|>')
+
+ return num_added
+
+ @property
+ def eos_id(self) -> int:
+ """Id of the end of sequence token."""
+ return self._eos_id
+
+ @property
+ def speech_start_id(self) -> int:
+ """Id of the speech start token."""
+ return self._speech_start_id
+
+ @property
+ def speech_end_id(self) -> int:
+ """Id of the speech end token."""
+ return self._speech_end_id
+
+ @property
+ def speech_diffusion_id(self) -> int:
+ """Id of the speech diffusion token."""
+ return self._speech_diffusion_id
+
+ @property
+ def pad_id(self) -> int:
+ """Id used for padding (returns -100 for loss masking)."""
+ return -100
+
+
+class VibeVoiceTextTokenizerFast(Qwen2TokenizerFast):
+ """
+ Construct a "fast" VibeVoice tokenizer (backed by HuggingFace's *tokenizers* library).
+ Based on the Qwen2 tokenizer with additional special tokens for speech.
+
+ Args:
+ vocab_file (`str`, *optional*):
+ Path to the vocabulary file.
+ merges_file (`str`, *optional*):
+ Path to the merges file.
+ tokenizer_file (`str`, *optional*):
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file.
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The unknown token.
+ bos_token (`str`, *optional*):
+            The beginning of sequence token. Not used by VibeVoice.
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The end of sequence token.
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The token used for padding.
+ """
+
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ unk_token="<|endoftext|>",
+ bos_token=None,
+ eos_token="<|endoftext|>",
+ pad_token="<|endoftext|>",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ tokenizer_file=tokenizer_file,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ # Add VibeVoice-specific special tokens
+ self._add_vibevoice_special_tokens()
+
+ def _add_vibevoice_special_tokens(self):
+ """Add VibeVoice-specific special tokens."""
+ special_tokens = {
+ "additional_special_tokens": [
+ "<|vision_start|>", # Speech start (reusing vision tokens)
+ "<|vision_end|>", # Speech end
+ "<|vision_pad|>", # Speech diffusion pad
+ ]
+ }
+ num_added = self.add_special_tokens(special_tokens)
+
+ # Cache special token IDs
+ self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
+ self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
+ self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")
+
+ # self._eos_id = self.convert_tokens_to_ids('<|endoftext|>')
+ self._eos_id = self.eos_token_id # qwen2 / qwen3
+ self._pad_id = self.convert_tokens_to_ids('<|image_pad|>')
+
+ return num_added
+
+ @property
+ def eos_id(self) -> int:
+ """Id of the end of sequence token."""
+ return self._eos_id
+
+ @property
+ def speech_start_id(self) -> int:
+ """Id of the speech start token."""
+ return self._speech_start_id
+
+ @property
+ def speech_end_id(self) -> int:
+ """Id of the speech end token."""
+ return self._speech_end_id
+
+ @property
+ def speech_diffusion_id(self) -> int:
+ """Id of the speech diffusion token."""
+ return self._speech_diffusion_id
+
+ @property
+ def pad_id(self) -> int:
+ """Id used for padding (returns -100 for loss masking)."""
+ return self._pad_id
+
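+# Illustrative sketch (not part of the original code): the fast tokenizer exposes the
+# speech control token ids registered above; the checkpoint path is a placeholder for
+# any Qwen2-style tokenizer directory.
+#
+#   tok = VibeVoiceTextTokenizerFast.from_pretrained("path/to/tokenizer")
+#   input_ids = tok("Speaker 1: Hello!")["input_ids"]
+#   tok.speech_start_id, tok.speech_end_id, tok.speech_diffusion_id, tok.eos_id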
+
+__all__ = [
+ "VibeVoiceTextTokenizer",
+ "VibeVoiceTextTokenizerFast",
+]
\ No newline at end of file
diff --git a/vibevoice/modular/modular_vibevoice_tokenizer.py b/vibevoice/modular/modular_vibevoice_tokenizer.py
new file mode 100644
index 0000000..fbd5182
--- /dev/null
+++ b/vibevoice/modular/modular_vibevoice_tokenizer.py
@@ -0,0 +1,1195 @@
+import math
+import typing as tp
+from functools import partial
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Tuple, Union
+import copy
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from transformers.models.auto import AutoModel
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+from transformers.modeling_utils import PreTrainedModel
+from transformers.activations import ACT2FN
+
+from .configuration_vibevoice import VibeVoiceAcousticTokenizerConfig, VibeVoiceSemanticTokenizerConfig
+
+logger = logging.get_logger(__name__)
+
+import os
+# Try to import APEX FusedRMSNorm
+try:
+ from apex.normalization.fused_layer_norm import fused_rms_norm_affine
+    if int(os.getenv("OPTIMIZE_FOR_SPEED", "0")) == 0:
+        APEX_AVAILABLE = False
+        logger.warning("APEX FusedRMSNorm is available but disabled; set OPTIMIZE_FOR_SPEED=1 to enable it")
+    else:
+        APEX_AVAILABLE = True
+        logger.info("APEX FusedRMSNorm is available and will be used for optimization")
+except ImportError:
+ APEX_AVAILABLE = False
+ logger.warning("APEX FusedRMSNorm not available, using native implementation")
+# APEX_AVAILABLE=False
+
+# Normalization modules
+class ConvLayerNorm(nn.LayerNorm):
+ """
+ Convolution-friendly LayerNorm that moves channels to last dimensions
+ before running the normalization and moves them back to original position right after.
+ """
+ def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs):
+ super().__init__(normalized_shape, **kwargs)
+
+ def forward(self, x):
+ x = x.transpose(1, 2) # b ... t -> b t ...
+ x = nn.functional.layer_norm(x.float(), self.normalized_shape, self.weight.float(), self.bias.float(), self.eps).type_as(x)
+ x = x.transpose(1, 2) # b t ... -> b ... t
+ return x
+
+class RMSNorm(nn.Module):
+ def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
+ super().__init__()
+ self.dim = dim
+ self.eps = eps
+ self.elementwise_affine = elementwise_affine
+ if self.elementwise_affine:
+ weight_shape = (dim,) if weight_shape is None else weight_shape
+ self.weight = nn.Parameter(torch.ones(weight_shape))
+ else:
+ self.register_parameter('weight', None)
+
+ def _norm(self, x):
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+ def forward(self, x):
+ output = self._norm(x.float()).type_as(x)
+ if self.weight is not None:
+ output = output * self.weight
+ return output
+
+ def extra_repr(self) -> str:
+ return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'
+
+class ConvRMSNorm(RMSNorm):
+ def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
+ super().__init__(dim, eps, elementwise_affine, weight_shape)
+
+ def forward(self, x):
+ x = x.transpose(1, 2) # b ... t -> b t ...
+ if (not APEX_AVAILABLE) or (not self.elementwise_affine):
+ # Fallback to native implementation
+ output = self._norm(x.float()).type_as(x)
+ if self.weight is not None:
+ output = output * self.weight
+ else:
+ output = fused_rms_norm_affine(x, self.weight, self.weight.shape, self.eps)
+ output = output.transpose(1, 2) # b t ... -> b ... t
+ return output
+
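+# Illustrative sketch (not part of the original code): ConvRMSNorm expects channels-first
+# [batch, channels, time] input; it normalizes each time step across the channel dimension.
+#
+#   norm = ConvRMSNorm(dim=64)
+#   x = torch.randn(2, 64, 100)      # [B, C, T]
+#   y = norm(x)                      # -> same shape [2, 64, 100]
+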
+# Convolutional layers and utilities
+CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
+ 'time_layer_norm', 'layer_norm', 'time_group_norm'])
+
+
+def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
+ assert norm in CONV_NORMALIZATIONS
+ if norm == 'weight_norm':
+ return nn.utils.weight_norm(module)
+ elif norm == 'spectral_norm':
+ return nn.utils.spectral_norm(module)
+ else:
+ # We already check was in CONV_NORMALIZATION, so any other choice
+ # doesn't need reparametrization.
+ return module
+
+
+def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
+ """Return the proper normalization module. If causal is True, this will ensure the returned
+ module is causal, or return an error if the normalization doesn't support causal evaluation.
+ """
+ assert norm in CONV_NORMALIZATIONS
+ if norm == 'layer_norm':
+ assert isinstance(module, nn.modules.conv._ConvNd)
+ return ConvLayerNorm(module.out_channels, **norm_kwargs)
+ elif norm == 'time_group_norm':
+ if causal:
+ raise ValueError("GroupNorm doesn't support causal evaluation.")
+ assert isinstance(module, nn.modules.conv._ConvNd)
+ return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
+ else:
+ return nn.Identity()
+
+
+def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
+ padding_total: int = 0) -> int:
+ """Calculate extra padding needed for convolution to have the same output length"""
+ length = x.shape[-1]
+ n_frames = (length - kernel_size + padding_total) / stride + 1
+ ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
+ return ideal_length - length
+
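+# Worked example (not part of the original code): for length=11, kernel_size=4, stride=2
+# and padding_total=2, n_frames = (11 - 4 + 2) / 2 + 1 = 5.5, so the ideal length is
+# (ceil(5.5) - 1) * 2 + (4 - 2) = 12 and one extra padding sample is required.
+#
+#   x = torch.zeros(1, 1, 11)
+#   get_extra_padding_for_conv1d(x, kernel_size=4, stride=2, padding_total=2)  # -> 1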
+
+def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
+ """Pad 1D input with handling for small inputs in reflect mode"""
+ length = x.shape[-1]
+ padding_left, padding_right = paddings
+ assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
+ if mode == 'reflect':
+ max_pad = max(padding_left, padding_right)
+ extra_pad = 0
+ if length <= max_pad:
+ extra_pad = max_pad - length + 1
+ x = F.pad(x, (0, extra_pad))
+ padded = F.pad(x, paddings, mode, value)
+ end = padded.shape[-1] - extra_pad
+ return padded[..., :end]
+ else:
+ return F.pad(x, paddings, mode, value)
+
+
+def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
+ """Remove padding from x, handling properly zero padding. Only for 1d!"""
+ padding_left, padding_right = paddings
+ assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
+ assert (padding_left + padding_right) <= x.shape[-1]
+ end = x.shape[-1] - padding_right
+ return x[..., padding_left: end]
+
+
+class NormConv1d(nn.Module):
+ """Wrapper around Conv1d and normalization applied to this conv"""
+ def __init__(self, *args, causal: bool = False, norm: str = 'none',
+ norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+ super().__init__()
+ self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
+ self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
+ self.norm_type = norm
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.norm(x)
+ return x
+
+
+class NormConvTranspose1d(nn.Module):
+ """Wrapper around ConvTranspose1d and normalization applied to this conv"""
+ def __init__(self, *args, causal: bool = False, norm: str = 'none',
+ norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+ super().__init__()
+ self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
+ self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
+ self.norm_type = norm
+
+ def forward(self, x):
+ x = self.convtr(x)
+ x = self.norm(x)
+ return x
+
+
+class VibeVoiceTokenizerStreamingCache:
+ """Cache for streaming convolution, similar to KV cache in attention"""
+ def __init__(self):
+ self.cache = {} # Dict mapping (layer_id, sample_idx) to state tensor
+
+ def get(self, layer_id: str, sample_indices: torch.Tensor) -> Optional[torch.Tensor]:
+ """Get cached states for given layer and sample indices"""
+ states = []
+ max_length = 0
+
+ # First pass: collect states and find max length
+ for idx in sample_indices.tolist():
+ key = (layer_id, idx)
+ if key not in self.cache:
+ return None # If any sample is missing, return None
+ state = self.cache[key]
+ states.append(state)
+ max_length = max(max_length, state.shape[-1])
+
+ # Second pass: pad states to max length if needed
+ if len(states) > 0 and states[0].dim() >= 2:
+ padded_states = []
+ for state in states:
+ if state.shape[-1] < max_length:
+ # Pad on the time dimension (last dimension)
+ pad_size = max_length - state.shape[-1]
+ # Pad with zeros on the LEFT to align the most recent samples
+ padded_state = F.pad(state, (pad_size, 0), mode='constant', value=0)
+ padded_states.append(padded_state)
+ else:
+ padded_states.append(state)
+ return torch.stack(padded_states, dim=0)
+ else:
+ return torch.stack(states, dim=0)
+
+ def set(self, layer_id: str, sample_indices: torch.Tensor, states: torch.Tensor):
+ """Set cached states for given layer and sample indices"""
+ for i, idx in enumerate(sample_indices.tolist()):
+ key = (layer_id, idx)
+ self.cache[key] = states[i].detach()
+
+ def set_to_zero(self, sample_indices: torch.Tensor):
+ """Set all cached states to zero for given sample indices"""
+ for key in list(self.cache.keys()):
+ layer_id, sample_idx = key
+ if sample_idx in sample_indices.tolist():
+ # Create zero tensor with same shape and dtype as cached tensor
+ cached_tensor = self.cache[key]
+ self.cache[key] = torch.zeros_like(cached_tensor)
+
+ def clear(self, layer_id: Optional[str] = None, sample_indices: Optional[torch.Tensor] = None):
+ """Clear cache for specific layer/samples or everything"""
+ if layer_id is None and sample_indices is None:
+ self.cache.clear()
+ elif layer_id is not None and sample_indices is None:
+ # Clear all samples for a specific layer
+ keys_to_remove = [k for k in self.cache.keys() if k[0] == layer_id]
+ for k in keys_to_remove:
+ del self.cache[k]
+ elif layer_id is not None and sample_indices is not None:
+ # Clear specific samples for a specific layer
+ for idx in sample_indices.tolist():
+ key = (layer_id, idx)
+ self.cache.pop(key, None)
+
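+# Illustrative sketch (not part of the original code): the streaming cache keys states by
+# (layer_id, sample_index), so samples in a batch can be cached, reset, and cleared
+# independently; shapes are arbitrary.
+#
+#   cache = VibeVoiceTokenizerStreamingCache()
+#   indices = torch.tensor([0, 1])
+#   cache.set("layer_a", indices, torch.zeros(2, 8, 6))   # one [8, 6] state per sample
+#   state = cache.get("layer_a", indices)                 # -> [2, 8, 6]
+#   cache.set_to_zero(torch.tensor([1]))                  # reset sample 1 only
+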
+class SConv1d(nn.Module):
+ """Conv1d with built-in handling of asymmetric or causal padding and normalization."""
+ def __init__(self, in_channels: int, out_channels: int,
+ kernel_size: int, stride: int = 1, dilation: int = 1,
+ groups: int = 1, bias: bool = True, causal: bool = False,
+ norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
+ pad_mode: str = 'reflect'):
+ super().__init__()
+ self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
+ dilation=dilation, groups=groups, bias=bias, causal=causal,
+ norm=norm, norm_kwargs=norm_kwargs)
+ self.causal = causal
+ self.pad_mode = pad_mode
+
+ # Store configuration
+ self.kernel_size = kernel_size
+ self.dilation = dilation
+ self.stride = stride
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+        # For causal convolution we keep past input samples as context; the (stride - 1)
+        # term keeps streaming chunks aligned with the non-streaming output.
+        # (An alternative would be the full receptive field, (kernel_size - 1) * dilation.)
+ self.context_size = (kernel_size - 1) * dilation - (stride - 1)
+
+ # For non-streaming mode, calculate padding
+ self.padding_total = (kernel_size - 1) * dilation - (stride - 1)
+
+ # Create a unique layer ID for cache management
+ self._layer_id = None
+
+ @property
+ def layer_id(self):
+ if self._layer_id is None:
+ self._layer_id = f"sconv1d_{id(self)}"
+ return self._layer_id
+
+ def forward(self, x: torch.Tensor,
+ cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
+ sample_indices: Optional[torch.Tensor] = None,
+ use_cache: bool = False,
+ debug: bool = False) -> torch.Tensor:
+ """
+ Forward pass with optional streaming support via cache.
+
+ Args:
+ x: Input tensor [batch_size, channels, time]
+ cache: VibeVoiceTokenizerStreamingCache object for maintaining states
+ sample_indices: Indices identifying each sample for cache management
+ use_cache: Whether to use cached states for streaming
+ debug: Whether to print debug information
+
+ Returns:
+ Output tensor
+ """
+ B, C, T = x.shape
+
+ # Non-streaming mode
+ if not use_cache or cache is None:
+ return self._forward_non_streaming(x, debug=debug)
+
+ # Streaming mode
+ assert self.causal, "Streaming mode is only supported for causal convolutions"
+ assert sample_indices is not None, "sample_indices must be provided for streaming mode"
+ assert len(sample_indices) == B, "sample_indices must match batch size"
+
+ return self._forward_streaming(x, cache, sample_indices, debug)
+
+ def _forward_streaming(self, x: torch.Tensor,
+ cache: VibeVoiceTokenizerStreamingCache,
+ sample_indices: torch.Tensor,
+ debug: bool = False) -> torch.Tensor:
+ """Streaming forward pass with cache operations kept separate from compiled code"""
+ B, C, T = x.shape
+
+        # Look up cached context for this layer and these samples
+ cached_states = cache.get(self.layer_id, sample_indices)
+
+ if cached_states is None:
+ # First chunk - initialize with zeros for context
+ if self.context_size > 0:
+ cached_states = torch.zeros(B, C, self.context_size, device=x.device, dtype=x.dtype)
+ if debug:
+ print(f"[DEBUG] Initialized cache with shape: {cached_states.shape}, context_size={self.context_size}")
+ else:
+ cached_states = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
+ if debug:
+ print(f"[DEBUG] No context needed (kernel_size=stride)")
+
+ # Concatenate cached states with input
+ if cached_states.shape[2] > 0:
+ input_with_context = torch.cat([cached_states, x], dim=2)
+ else:
+ input_with_context = x
+
+ if debug:
+ print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_states.shape}, Combined: {input_with_context.shape}")
+
+ # Apply convolution directly - no extra padding in streaming mode
+ # The conv layer will handle its own padding internally
+ output = self.conv(input_with_context)
+
+ if debug:
+ print(f"[DEBUG] Output shape: {output.shape}")
+
+ # Update cache for next chunk
+ if self.context_size > 0:
+ # Calculate how many samples to keep
+ total_input_length = input_with_context.shape[2]
+
+ # Keep the last context_size samples
+ if total_input_length >= self.context_size:
+ new_cache_start = total_input_length - self.context_size
+ new_cache = input_with_context[:, :, new_cache_start:]
+ else:
+ # If we have less than context_size samples, keep everything
+ new_cache = input_with_context
+
+ if debug:
+ print(f"[DEBUG] New cache shape: {new_cache.shape}")
+
+ cache.set(self.layer_id, sample_indices, new_cache)
+
+ return output
+
+ def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
+ """Standard forward pass without streaming"""
+ B, C, T = x.shape
+ kernel_size = self.kernel_size
+ stride = self.stride
+ dilation = self.dilation
+ padding_total = self.padding_total
+
+ # Compute extra padding for stride alignment
+ extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
+
+ if debug:
+ print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}, padding_total={padding_total}, extra_padding={extra_padding}")
+
+ if self.causal:
+ # Left padding for causal
+ if self.pad_mode == 'constant':
+ x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode, value=0)
+ else:
+ x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
+ else:
+ # Symmetric padding for non-causal
+ padding_right = padding_total // 2
+ padding_left = padding_total - padding_right
+ x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
+
+ if debug:
+ print(f"[DEBUG NON-STREAMING] After padding: {x.shape}")
+
+ output = self.conv(x)
+
+ if debug:
+ print(f"[DEBUG NON-STREAMING] Output shape: {output.shape}")
+
+ return output
+
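+# Illustrative sketch (not part of the original code): feeding an input through SConv1d
+# chunk by chunk with the streaming cache reproduces the single-shot output, because the
+# cache carries the causal context between chunks (chunk length chosen as a multiple of
+# the stride).
+#
+#   conv = SConv1d(1, 4, kernel_size=7, stride=2, causal=True)
+#   x = torch.randn(1, 1, 32)
+#   full = conv(x)                                         # non-streaming
+#   cache, idx = VibeVoiceTokenizerStreamingCache(), torch.tensor([0])
+#   parts = [conv(chunk, cache=cache, sample_indices=idx, use_cache=True)
+#            for chunk in x.split(8, dim=-1)]
+#   streamed = torch.cat(parts, dim=-1)                    # matches `full` numerically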
+
+class SConvTranspose1d(nn.Module):
+ """ConvTranspose1d with built-in handling of asymmetric or causal padding and normalization."""
+ def __init__(self, in_channels: int, out_channels: int,
+ kernel_size: int, stride: int = 1, causal: bool = False,
+ norm: str = 'none', trim_right_ratio: float = 1.,
+ norm_kwargs: tp.Dict[str, tp.Any] = {}, bias: bool = True):
+ super().__init__()
+ self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
+ causal=causal, norm=norm, norm_kwargs=norm_kwargs, bias=bias)
+ self.causal = causal
+ self.trim_right_ratio = trim_right_ratio
+ assert self.causal or self.trim_right_ratio == 1., \
+ "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
+ assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
+
+ # Store configuration
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ # For transposed convolution, padding calculation is different
+ self.padding_total = kernel_size - stride
+
+ # For streaming, we need to keep track of input history
+ # Transposed conv needs to see multiple input samples to produce correct output
+ self.context_size = kernel_size - 1
+
+ # Create a unique layer ID for cache management
+ self._layer_id = None
+
+ @property
+ def layer_id(self):
+ if self._layer_id is None:
+ self._layer_id = f"sconvtr1d_{id(self)}"
+ return self._layer_id
+
+ def forward(self, x: torch.Tensor,
+ cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
+ sample_indices: Optional[torch.Tensor] = None,
+ use_cache: bool = False,
+ debug: bool = False) -> torch.Tensor:
+ """
+ Forward pass with optional streaming support via cache.
+ """
+ B, C, T = x.shape
+
+ # Non-streaming mode
+ if not use_cache or cache is None:
+ return self._forward_non_streaming(x, debug=debug)
+
+ # Streaming mode
+ assert sample_indices is not None, "sample_indices must be provided for streaming mode"
+ assert len(sample_indices) == B, "sample_indices must match batch size"
+
+ return self._forward_streaming(x, cache, sample_indices, debug)
+
+ def _forward_streaming(self, x: torch.Tensor,
+ cache: VibeVoiceTokenizerStreamingCache,
+ sample_indices: torch.Tensor,
+ debug: bool = False) -> torch.Tensor:
+ """Streaming forward pass with cache operations kept separate from compiled code"""
+ B, C, T = x.shape
+
+        # Look up cached input history for this layer and these samples
+ cached_input = cache.get(self.layer_id, sample_indices)
+
+ if cached_input is None:
+ # First chunk - no history yet
+ cached_input = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
+ if debug:
+ print(f"[DEBUG] Initialized empty cache for transposed conv")
+
+ # Concatenate cached input with new input
+ full_input = torch.cat([cached_input, x], dim=2)
+
+ if debug:
+ print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_input.shape}, Combined: {full_input.shape}")
+
+        # Run the transposed convolution over the cached history plus the new chunk
+ full_output = self.convtr(full_input)
+
+ if debug:
+ print(f"[DEBUG] Full transposed conv output shape: {full_output.shape}")
+
+ # Calculate padding to remove
+ if self.causal:
+ padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
+ padding_left = self.padding_total - padding_right
+ else:
+ padding_right = self.padding_total // 2
+ padding_left = self.padding_total - padding_right
+
+ # Remove padding
+ if padding_left + padding_right > 0:
+ full_output = unpad1d(full_output, (padding_left, padding_right))
+
+ if debug:
+ print(f"[DEBUG] After unpadding: {full_output.shape}")
+
+ # Determine which part of the output corresponds to the new input
+ if cached_input.shape[2] == 0:
+ # First chunk - return all output
+ output = full_output
+ else:
+ # Subsequent chunks - return only the new output
+ expected_new_output = T * self.stride
+
+ # Take the last expected_new_output samples
+ if full_output.shape[2] >= expected_new_output:
+ output = full_output[:, :, -expected_new_output:]
+ else:
+ output = full_output
+
+ if debug:
+ print(f"[DEBUG] Final streaming output shape: {output.shape}")
+
+ # Update cache
+ if full_input.shape[2] > self.context_size:
+ new_cache = full_input[:, :, -self.context_size:]
+ else:
+ new_cache = full_input
+
+ if debug:
+ print(f"[DEBUG] New cache shape: {new_cache.shape}")
+
+ cache.set(self.layer_id, sample_indices, new_cache)
+
+ return output
+
+ def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
+ """Standard forward pass without streaming"""
+ if debug:
+ print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}")
+
+ # Apply transposed convolution
+ y = self.convtr(x)
+
+ if debug:
+ print(f"[DEBUG NON-STREAMING] After transposed conv: {y.shape}")
+
+ # Calculate and remove padding
+ if self.causal:
+ padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
+ padding_left = self.padding_total - padding_right
+ else:
+ padding_right = self.padding_total // 2
+ padding_left = self.padding_total - padding_right
+
+ if padding_left + padding_right > 0:
+ y = unpad1d(y, (padding_left, padding_right))
+
+ if debug:
+ print(f"[DEBUG NON-STREAMING] Final output shape: {y.shape}")
+
+ return y
+
+# FFN
+class FFN(nn.Module):
+ def __init__(
+ self,
+ embed_dim,
+ ffn_dim,
+ bias=False,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.linear1 = nn.Linear(self.embed_dim, ffn_dim, bias=bias)
+ self.gelu = ACT2FN["gelu"]
+ self.linear2 = nn.Linear(ffn_dim, self.embed_dim, bias=bias)
+
+ def forward(self, x):
+ x = self.linear1(x)
+ x = self.gelu(x)
+ x = self.linear2(x)
+ return x
+
+
+class Convlayer(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ groups=1,
+ bias=True,
+ pad_mode='zeros',
+ norm='weight_norm',
+ causal=True,
+ ):
+ super().__init__()
+ self.conv = SConv1d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation,
+ groups=groups, bias=bias, pad_mode=pad_mode, norm=norm, causal=causal)
+
+ def forward(self, x):
+ return self.conv(x)
+
+class Block1D(nn.Module):
+ def __init__(self, dim, kernel_size=7, drop_path=0., mixer_layer='conv',
+ layer_scale_init_value=1e-6, **kwargs):
+ super().__init__()
+
+ if kwargs.get('layernorm', 'LN') == 'LN':
+ self.norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6))
+ self.ffn_norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6))
+ elif kwargs.get('layernorm', 'RMSNorm') == 'RMSNorm':
+ self.norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6))
+ self.ffn_norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6))
+
+ if mixer_layer == 'conv':
+ self.mixer = Convlayer(dim, dim, groups=kwargs.get('groups', 1),
+ kernel_size=kernel_size,
+ pad_mode=kwargs.get('pad_mode', 'reflect'),
+ norm=kwargs.get('norm', 'none'),
+ causal=kwargs.get('causal', True),
+ bias=kwargs.get('bias', True),
+ )
+ elif mixer_layer == 'depthwise_conv':
+ self.mixer = Convlayer(dim, dim, groups=dim,
+ kernel_size=kernel_size,
+ pad_mode=kwargs.get('pad_mode', 'reflect'),
+ norm=kwargs.get('norm', 'none'),
+ causal=kwargs.get('causal', True),
+ bias=kwargs.get('bias', True),
+ )
+ else:
+ raise ValueError(f"Unsupported mixer layer: {mixer_layer}")
+
+ self.ffn = FFN(
+ dim,
+ kwargs.get('ffn_expansion', 4) * dim,
+ bias=kwargs.get('bias', False),
+ )
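+        # Note: torch has no built-in DropPath; the else-branch below would need an external
+        # stochastic-depth implementation (e.g. timm). With the default drop_path=0. it is never taken.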
+ self.drop_path = nn.Identity() if drop_path <= 0. else nn.modules.DropPath(drop_path)
+
+ if layer_scale_init_value > 0:
+ self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ self.ffn_gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ else:
+ self.gamma = None
+ self.ffn_gamma = None
+
+ def forward(self, x):
+ # mixer
+ residual = x
+ x = self.norm(x)
+ x = self.mixer(x)
+ if self.gamma is not None:
+ x = x * self.gamma.unsqueeze(-1)
+ x = residual + self.drop_path(x)
+
+ # ffn
+ residual = x
+ x = self.ffn_norm(x)
+ x = x.permute(0, 2, 1)
+ x = self.ffn(x)
+ x = x.permute(0, 2, 1)
+ if self.ffn_gamma is not None:
+ x = x * self.ffn_gamma.unsqueeze(-1)
+ x = residual + self.drop_path(x)
+
+ return x
+
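+# Data-flow sketch for Block1D (illustrative, not normative): x -> norm -> conv mixer
+# -> optional gamma scale -> residual add, then x -> ffn_norm -> permute to (B, T, C)
+# -> FFN -> permute back -> optional ffn_gamma scale -> residual add, i.e. a
+# pre-norm residual block over 1D feature maps.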
+
+class TokenizerEncoder(nn.Module):
+ """
+ Encoder component for the VibeVoice tokenizer that converts audio to latent representations.
+
+ Args:
+ config: Configuration object with model parameters
+ """
+ def __init__(self, config):
+ super().__init__()
+
+ # Extract parameters from config
+ self.channels = config.channels
+ self.dimension = config.dimension
+ self.n_filters = config.n_filters
+ self.ratios = list(reversed(config.ratios))
+ self.depths = config.depths
+ self.n_residual_layers = getattr(config, "n_residual_layers", 1)
+ self.hop_length = np.prod(self.ratios)
+ self.causal = config.causal
+
+ # Additional config parameters with defaults
+ kernel_size = getattr(config, "kernel_size", 7)
+ last_kernel_size = getattr(config, "last_kernel_size", 7)
+ norm = getattr(config, "norm", "none")
+ norm_params = getattr(config, "norm_params", {})
+ pad_mode = getattr(config, "pad_mode", "reflect")
+ bias = getattr(config, "bias", True)
+ layernorm = getattr(config, "layernorm", "LN")
+ layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
+ layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
+ drop_path_rate = getattr(config, "drop_path_rate", 0.0)
+ mixer_layer = getattr(config, "mixer_layer", "conv")
+ layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
+ disable_last_norm = getattr(config, "disable_last_norm", False)
+
+ # determine the norm type based on layernorm
+ if layernorm == 'LN':
+ norm_type = ConvLayerNorm
+ elif layernorm == 'RMSNorm':
+ norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
+ else:
+ raise ValueError(f"Unsupported norm type: {layernorm}")
+
+ # stem and intermediate downsampling conv layers
+ stem = nn.Sequential(
+ SConv1d(self.channels, self.n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
+ )
+
+ self.downsample_layers = nn.ModuleList()
+ self.downsample_layers.append(stem)
+ for i in range(len(self.ratios)):
+ in_ch = self.n_filters * (2 ** i)
+ out_ch = self.n_filters * (2 ** (i + 1))
+ downsample_layer = nn.Sequential(
+ SConv1d(in_ch, out_ch, kernel_size=self.ratios[i] * 2, stride=self.ratios[i], causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)
+ )
+ self.downsample_layers.append(downsample_layer)
+
+ # configure the transformer blocks
+ layer_type = partial(
+ Block1D,
+ mixer_layer=mixer_layer,
+ layernorm=layernorm,
+ eps=layernorm_eps,
+ causal=self.causal,
+ pad_mode=pad_mode,
+ norm=norm,
+ bias=bias,
+ layer_scale_init_value=layer_scale_init_value,
+ )
+
+ self.stages = nn.ModuleList()
+ dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
+ cur = 0
+
+ for i in range(len(self.depths)):
+ in_ch = self.n_filters * (2 ** i)
+ stage = nn.Sequential(
+ *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
+ )
+ self.stages.append(stage)
+ cur += self.depths[i]
+
+ if not disable_last_norm:
+ self.norm = norm_type(in_ch, eps=layernorm_eps)
+ else:
+ self.norm = nn.Identity()
+ self.head = SConv1d(in_ch, self.dimension, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)
+
+ def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
+ for i in range(len(self.depths)):
+ # Apply downsampling
+ for layer in self.downsample_layers[i]:
+ if isinstance(layer, SConv1d):
+ x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ else:
+ x = layer(x)
+
+ # Apply stage (Block1D contains Convlayer which contains SConv1d)
+ for block in self.stages[i]:
+ if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
+ # Block1D forward with cache support
+ residual = x
+ x = block.norm(x)
+ x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ if block.gamma is not None:
+ x = x * block.gamma.unsqueeze(-1)
+ x = residual + x
+
+ # FFN part
+ residual = x
+ x = block.ffn_norm(x)
+ x = x.permute(0, 2, 1)
+ x = block.ffn(x)
+ x = x.permute(0, 2, 1)
+ if block.ffn_gamma is not None:
+ x = x * block.ffn_gamma.unsqueeze(-1)
+ x = residual + x
+ else:
+ x = block(x)
+
+ return self.norm(x)
+
+ def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
+ x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return x
+
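+# Shape sketch (illustrative, with assumed config values): for channels=1 and encoder
+# ratios whose product is 3200, a (B, 1, T) waveform passes through the stem, the
+# downsampling convolutions and the Block1D stages, and the head maps the final stage
+# to a (B, dimension, ~T/3200) latent sequence.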
+
+class TokenizerDecoder(nn.Module):
+ """
+ Decoder component for the VibeVoice tokenizer that converts latent representations back to audio.
+
+ Args:
+ config: Configuration object with model parameters
+ """
+ def __init__(self, config):
+ super().__init__()
+
+ # Extract parameters from config
+ self.dimension = config.dimension
+ self.channels = config.channels
+ self.n_filters = config.n_filters
+ self.ratios = config.ratios
+
+ # IMPORTANT CHANGE: Don't reverse depths again since they're already reversed in VibeVoiceAcousticTokenizerModel
+ self.depths = config.depths # Changed from list(reversed(config.depths))
+
+ self.n_residual_layers = getattr(config, "n_residual_layers", 1)
+ self.hop_length = np.prod(self.ratios)
+ self.causal = config.causal
+
+ # Additional config parameters with defaults
+ kernel_size = getattr(config, "kernel_size", 7)
+ last_kernel_size = getattr(config, "last_kernel_size", 7)
+ norm = getattr(config, "norm", "none")
+ norm_params = getattr(config, "norm_params", {})
+ pad_mode = getattr(config, "pad_mode", "reflect")
+ bias = getattr(config, "bias", True)
+ layernorm = getattr(config, "layernorm", "LN")
+ layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
+ trim_right_ratio = getattr(config, "trim_right_ratio", 1.0)
+ layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
+ drop_path_rate = getattr(config, "drop_path_rate", 0.0)
+ mixer_layer = getattr(config, "mixer_layer", "conv")
+ layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
+ disable_last_norm = getattr(config, "disable_last_norm", False)
+
+ # determine the norm type based on layernorm
+ if layernorm == 'LN':
+ norm_type = ConvLayerNorm
+ elif layernorm == 'RMSNorm':
+ norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
+ else:
+ raise ValueError(f"Unsupported norm type: {layernorm}")
+
+ # stem and upsampling layers
+ stem = nn.Sequential(
+ SConv1d(self.dimension, self.n_filters * 2 ** (len(self.depths) - 1), kernel_size, norm=norm,
+ norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
+ )
+
+ self.upsample_layers = nn.ModuleList()
+ self.upsample_layers.append(stem)
+ for i in range(len(self.ratios)):
+ in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
+ out_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i - 1))
+ upsample_layer = nn.Sequential(
+ SConvTranspose1d(in_ch, out_ch,
+ kernel_size=self.ratios[i] * 2, stride=self.ratios[i],
+ norm=norm, norm_kwargs=norm_params, bias=bias,
+ causal=self.causal, trim_right_ratio=trim_right_ratio),
+ )
+ self.upsample_layers.append(upsample_layer)
+
+ # configure transformer blocks
+ layer_type = partial(
+ Block1D,
+ mixer_layer=mixer_layer,
+ layernorm=layernorm,
+ eps=layernorm_eps,
+ causal=self.causal,
+ pad_mode=pad_mode,
+ norm=norm,
+ bias=bias,
+ layer_scale_init_value=layer_scale_init_value,
+ )
+
+ self.stages = nn.ModuleList()
+ dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
+ cur = 0
+
+ # Create stages in the same order as the original model
+ for i in range(len(self.depths)):
+ in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
+ stage = nn.Sequential(
+ *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
+ )
+ self.stages.append(stage)
+ cur += self.depths[i]
+
+ if not disable_last_norm:
+ self.norm = norm_type(in_ch, eps=layernorm_eps)
+ else:
+ self.norm = nn.Identity()
+ self.head = SConv1d(in_ch, self.channels, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)
+
+ def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
+ for i in range(len(self.depths)):
+ # Apply upsampling
+ for layer in self.upsample_layers[i]:
+ if isinstance(layer, (SConv1d, SConvTranspose1d)):
+ x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ else:
+ x = layer(x)
+
+ # Apply stage (Block1D contains Convlayer which contains SConv1d)
+ for block in self.stages[i]:
+ if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
+ # Block1D forward with cache support
+ residual = x
+ x = block.norm(x)
+ x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ if block.gamma is not None:
+ x = x * block.gamma.unsqueeze(-1)
+ x = residual + x
+
+ # FFN part
+ residual = x
+ x = block.ffn_norm(x)
+ x = x.permute(0, 2, 1)
+ x = block.ffn(x)
+ x = x.permute(0, 2, 1)
+ if block.ffn_gamma is not None:
+ x = x * block.ffn_gamma.unsqueeze(-1)
+ x = residual + x
+ else:
+ x = block(x)
+
+ return self.norm(x)
+
+ def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
+ x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return x
+
+
+@dataclass
+class VibeVoiceTokenizerEncoderOutput:
+ """
+ Output of VibeVoice tokenizer encoder, representing a Gaussian distribution with fixed variance.
+
+ Args:
+ mean (`torch.FloatTensor`): The mean parameters of the distribution.
+ std (`float` or `torch.FloatTensor`): Fixed standard deviation value.
+ """
+ mean: torch.Tensor
+ std: Optional[Union[float, torch.Tensor]] = None
+
+ def sample(self, dist_type='fix'):
+ """
+ Sample from the distribution.
+
+ Args:
+            dist_type (`str`): Sampling method, 'fix' or 'gaussian'. Any other value
+                returns the mean without added noise.
+
+        Returns:
+            `torch.FloatTensor`: Sampled values.
+            `float` or `torch.FloatTensor`: The standard deviation used for sampling.
+ """
+ if dist_type == 'fix':
+ x = self.mean + self.std * torch.randn_like(self.mean)
+ return x, self.std
+ elif dist_type == 'gaussian':
+ batch_size = self.mean.size(0)
+ value = self.std / 0.8
+ std = torch.randn(batch_size, device=self.mean.device, dtype=self.mean.dtype) * value
+
+ while std.dim() < self.mean.dim():
+ std = std.unsqueeze(-1)
+
+ x = self.mean + std * torch.randn_like(self.mean)
+ return x, std
+ else:
+ return self.mean, self.std
+
+ def kl(self):
+ """Compute KL divergence between this distribution and a standard normal."""
+ target = torch.zeros_like(self.mean)
+ return F.mse_loss(self.mean, target, reduction='none')
+
+ def mode(self):
+ """Return the distribution mode (which is the mean for Gaussian)."""
+ return self.mean
+
+class VibeVoiceAcousticTokenizerModel(PreTrainedModel):
+ """VibeVoice speech tokenizer model combining encoder and decoder for acoustic tokens"""
+
+ config_class = VibeVoiceAcousticTokenizerConfig
+ base_model_prefix = "vibevoice_acoustic_tokenizer"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _no_split_modules = ["TokenizerEncoder", "TokenizerDecoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.register_buffer('fix_std', torch.tensor(config.fix_std), persistent=False)
+ self.std_dist_type = getattr(config, "std_dist_type", "fix")
+
+ # Parse encoder depths
+ if isinstance(config.encoder_depths, str):
+ encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
+ else:
+ encoder_depths = config.encoder_depths
+
+ # Parse decoder depths if provided
+ if config.decoder_depths is not None and isinstance(config.decoder_depths, str):
+ decoder_depths = [int(d) for d in config.decoder_depths.split('-')]
+ else:
+ # Default: use reversed encoder depths if decoder_depths is None
+ decoder_depths = list(reversed(encoder_depths))
+
+ # Create encoder config
+ encoder_config = copy.deepcopy(config)
+ encoder_config.dimension = config.vae_dim
+ encoder_config.n_filters = config.encoder_n_filters
+ encoder_config.ratios = config.encoder_ratios
+ encoder_config.depths = encoder_depths
+ encoder_config.norm = config.conv_norm
+ encoder_config.pad_mode = config.pad_mode
+ encoder_config.bias = config.conv_bias
+ encoder_config.layernorm_eps = config.layernorm_eps
+ encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
+ encoder_config.mixer_layer = config.mixer_layer
+ encoder_config.layer_scale_init_value = config.layer_scale_init_value
+ encoder_config.disable_last_norm = config.disable_last_norm
+
+ # Create decoder config
+ decoder_config = copy.deepcopy(config)
+ decoder_config.dimension = config.vae_dim
+ decoder_config.n_filters = config.decoder_n_filters
+ decoder_config.ratios = config.decoder_ratios
+ decoder_config.depths = decoder_depths
+ decoder_config.norm = config.conv_norm
+ decoder_config.pad_mode = config.pad_mode
+ decoder_config.bias = config.conv_bias
+ decoder_config.layernorm_eps = config.layernorm_eps
+ decoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
+ decoder_config.mixer_layer = config.mixer_layer
+ decoder_config.layer_scale_init_value = config.layer_scale_init_value
+ decoder_config.disable_last_norm = config.disable_last_norm
+
+ # Initialize encoder and decoder
+ self.encoder = TokenizerEncoder(encoder_config)
+ self.decoder = TokenizerDecoder(decoder_config)
+
+ # Initialize weights
+ self.apply(self._init_weights)
+
+ def _init_weights(self, module):
+ """Initialize weights for the model"""
+ if isinstance(module, nn.Linear):
+ nn.init.normal_(module.weight, std=self.config.weight_init_value)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.LayerNorm):
+ nn.init.ones_(module.weight)
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.normal_(module.weight, std=self.config.weight_init_value)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+
+ @torch.no_grad()
+ def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
+ """Convert audio to latent representations"""
+ latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1), std=self.fix_std)
+
+ @torch.no_grad()
+ def sampling(self, encoder_output, dist_type=None):
+ """Sample from the encoder output distribution"""
+ dist_type = dist_type or self.std_dist_type
+
+ if dist_type == 'fix':
+ return encoder_output.sample(dist_type='fix')
+ elif dist_type == 'gaussian':
+ return encoder_output.sample(dist_type='gaussian')
+ else:
+ raise ValueError(f"Unsupported dist_type: {dist_type}, expected 'fix' or 'gaussian'")
+
+ @torch.no_grad()
+ def decode(self, latents, cache=None, sample_indices=None, use_cache=False, debug=False):
+ """Convert latent representations back to audio"""
+        # Accept latents as (B, vae_dim, T); inputs shaped (B, T, vae_dim) are permuted
+        if latents.shape[1] != self.config.vae_dim:
+            latents = latents.permute(0, 2, 1)
+
+ audio = self.decoder(latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return audio
+
+ def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
+ """Full forward pass: encode audio to latents, then decode back to audio"""
+ encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ sampled_latents, _ = self.sampling(encoder_output)
+ reconstructed = self.decode(sampled_latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return reconstructed, sampled_latents
+
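+# Illustrative usage sketch (the names `model` and `wav` are assumptions, not part of
+# this file): for a (B, 1, T) waveform tensor,
+#     enc = model.encode(wav)              # VibeVoiceTokenizerEncoderOutput
+#     latents, _ = model.sampling(enc)     # (B, T', vae_dim) sampled latents
+#     recon = model.decode(latents)        # reconstructed (B, 1, ~T) waveform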
+
+class VibeVoiceSemanticTokenizerModel(PreTrainedModel):
+ """VibeVoice speech tokenizer model with only encoder for semantic tokens"""
+
+ config_class = VibeVoiceSemanticTokenizerConfig
+ base_model_prefix = "vibevoice_semantic_tokenizer"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _no_split_modules = ["TokenizerEncoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ # Parse encoder depths
+ if isinstance(config.encoder_depths, str):
+ encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
+ else:
+ encoder_depths = config.encoder_depths
+
+ # Create encoder config
+ encoder_config = copy.deepcopy(config)
+ encoder_config.dimension = config.vae_dim
+ encoder_config.n_filters = config.encoder_n_filters
+ encoder_config.ratios = config.encoder_ratios
+ encoder_config.depths = encoder_depths
+ encoder_config.norm = config.conv_norm
+ encoder_config.pad_mode = config.pad_mode
+ encoder_config.bias = config.conv_bias
+ encoder_config.layernorm_eps = config.layernorm_eps
+ encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
+ encoder_config.mixer_layer = config.mixer_layer
+ encoder_config.layer_scale_init_value = config.layer_scale_init_value
+ encoder_config.disable_last_norm = config.disable_last_norm
+
+        # Initialize encoder (the semantic tokenizer has no decoder)
+ self.encoder = TokenizerEncoder(encoder_config)
+
+ # Initialize weights
+ self.apply(self._init_weights)
+
+ def _init_weights(self, module):
+ """Initialize weights for the model"""
+ if isinstance(module, nn.Linear):
+ nn.init.normal_(module.weight, std=self.config.weight_init_value)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.LayerNorm):
+ nn.init.ones_(module.weight)
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.normal_(module.weight, std=self.config.weight_init_value)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+
+ @torch.no_grad()
+ def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
+ """Convert audio to latent representations"""
+ latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1))
+
+ @torch.no_grad()
+ def sampling(self, encoder_output, dist_type=None):
+ """Sample from the encoder output distribution"""
+ return encoder_output.sample(dist_type='none')
+
+ def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
+ """Full forward pass: encode audio to latents, then decode back to audio"""
+ encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
+ sampled_latents, _ = self.sampling(encoder_output, dist_type='none')
+ return None, sampled_latents
+
+AutoModel.register(VibeVoiceAcousticTokenizerConfig, VibeVoiceAcousticTokenizerModel)
+AutoModel.register(VibeVoiceSemanticTokenizerConfig, VibeVoiceSemanticTokenizerModel)
+
+__all__ = [
+ "VibeVoiceTokenizerStreamingCache",
+ "VibeVoiceAcousticTokenizerModel",
+ "VibeVoiceSemanticTokenizerModel",
+]
\ No newline at end of file
diff --git a/vibevoice/modular/streamer.py b/vibevoice/modular/streamer.py
new file mode 100644
index 0000000..7a76cb0
--- /dev/null
+++ b/vibevoice/modular/streamer.py
@@ -0,0 +1,264 @@
+from __future__ import annotations
+
+import torch
+
+import asyncio
+from queue import Empty, Queue
+from typing import Any, Optional
+
+
+from transformers.generation import BaseStreamer
+
+
+class AudioStreamer(BaseStreamer):
+ """
+ Audio streamer that stores audio chunks in queues for each sample in the batch.
+ This allows streaming audio generation for multiple samples simultaneously.
+
+ Parameters:
+ batch_size (`int`):
+ The batch size for generation
+ stop_signal (`any`, *optional*):
+ The signal to put in the queue when generation ends. Defaults to None.
+ timeout (`float`, *optional*):
+ The timeout for the audio queue. If `None`, the queue will block indefinitely.
+ """
+
+ def __init__(
+ self,
+ batch_size: int,
+        stop_signal: Optional[Any] = None,
+ timeout: Optional[float] = None,
+ ):
+ self.batch_size = batch_size
+ self.stop_signal = stop_signal
+ self.timeout = timeout
+
+ # Create a queue for each sample in the batch
+ self.audio_queues = [Queue() for _ in range(batch_size)]
+ self.finished_flags = [False for _ in range(batch_size)]
+ self.sample_indices_map = {} # Maps from sample index to queue index
+
+ def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
+ """
+ Receives audio chunks and puts them in the appropriate queues.
+
+ Args:
+ audio_chunks: Tensor of shape (num_samples, ...) containing audio chunks
+ sample_indices: Tensor indicating which samples these chunks belong to
+ """
+ for i, sample_idx in enumerate(sample_indices):
+ idx = sample_idx.item()
+ if idx < self.batch_size and not self.finished_flags[idx]:
+ # Convert to numpy or keep as tensor based on preference
+ audio_chunk = audio_chunks[i].detach().cpu()
+ self.audio_queues[idx].put(audio_chunk, timeout=self.timeout)
+
+ def end(self, sample_indices: Optional[torch.Tensor] = None):
+ """
+ Signals the end of generation for specified samples or all samples.
+
+ Args:
+ sample_indices: Optional tensor of sample indices to end. If None, ends all.
+ """
+ if sample_indices is None:
+ # End all samples
+ for idx in range(self.batch_size):
+ if not self.finished_flags[idx]:
+ self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout)
+ self.finished_flags[idx] = True
+ else:
+ # End specific samples
+ for sample_idx in sample_indices:
+ idx = sample_idx.item() if torch.is_tensor(sample_idx) else sample_idx
+ if idx < self.batch_size and not self.finished_flags[idx]:
+ self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout)
+ self.finished_flags[idx] = True
+
+ def __iter__(self):
+ """Returns an iterator over the batch of audio streams."""
+ return AudioBatchIterator(self)
+
+ def get_stream(self, sample_idx: int):
+ """Get the audio stream for a specific sample."""
+ if sample_idx >= self.batch_size:
+ raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")
+ return AudioSampleIterator(self, sample_idx)
+
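+# Illustrative sketch (the generation loop that calls `put()`/`end()` lives in the
+# model code, not here): a consumer thread can drain one sample's queue with
+#     streamer = AudioStreamer(batch_size=1)
+#     for chunk in streamer.get_stream(0):   # blocks until chunks arrive
+#         ...                                # each chunk is a detached CPU tensor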
+
+class AudioSampleIterator:
+ """Iterator for a single audio stream from the batch."""
+
+ def __init__(self, streamer: AudioStreamer, sample_idx: int):
+ self.streamer = streamer
+ self.sample_idx = sample_idx
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ value = self.streamer.audio_queues[self.sample_idx].get(timeout=self.streamer.timeout)
+        if value is self.streamer.stop_signal:
+ raise StopIteration()
+ return value
+
+
+class AudioBatchIterator:
+ """Iterator that yields audio chunks for all samples in the batch."""
+
+ def __init__(self, streamer: AudioStreamer):
+ self.streamer = streamer
+ self.active_samples = set(range(streamer.batch_size))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if not self.active_samples:
+ raise StopIteration()
+
+ batch_chunks = {}
+ samples_to_remove = set()
+
+ # Try to get chunks from all active samples
+ for idx in self.active_samples:
+ try:
+ value = self.streamer.audio_queues[idx].get(block=False)
+                if value is self.streamer.stop_signal:
+ samples_to_remove.add(idx)
+ else:
+ batch_chunks[idx] = value
+            except Empty:
+                # Queue is empty for this sample, skip it this iteration
+                pass
+
+ # Remove finished samples
+ self.active_samples -= samples_to_remove
+
+ if batch_chunks:
+ return batch_chunks
+ elif self.active_samples:
+ # If no chunks were ready but we still have active samples,
+ # wait a bit and try again
+ import time
+ time.sleep(0.01)
+ return self.__next__()
+ else:
+ raise StopIteration()
+
+
+class AsyncAudioStreamer(AudioStreamer):
+ """
+ Async version of AudioStreamer for use in async contexts.
+ """
+
+ def __init__(
+ self,
+ batch_size: int,
+        stop_signal: Optional[Any] = None,
+ timeout: Optional[float] = None,
+ ):
+ super().__init__(batch_size, stop_signal, timeout)
+ # Replace regular queues with async queues
+ self.audio_queues = [asyncio.Queue() for _ in range(batch_size)]
+ self.loop = asyncio.get_running_loop()
+
+ def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
+ """Put audio chunks in the appropriate async queues."""
+ for i, sample_idx in enumerate(sample_indices):
+ idx = sample_idx.item()
+ if idx < self.batch_size and not self.finished_flags[idx]:
+ audio_chunk = audio_chunks[i].detach().cpu()
+ self.loop.call_soon_threadsafe(
+ self.audio_queues[idx].put_nowait, audio_chunk
+ )
+
+ def end(self, sample_indices: Optional[torch.Tensor] = None):
+ """Signal the end of generation for specified samples."""
+ if sample_indices is None:
+ indices_to_end = range(self.batch_size)
+ else:
+ indices_to_end = [s.item() if torch.is_tensor(s) else s for s in sample_indices]
+
+ for idx in indices_to_end:
+ if idx < self.batch_size and not self.finished_flags[idx]:
+ self.loop.call_soon_threadsafe(
+ self.audio_queues[idx].put_nowait, self.stop_signal
+ )
+ self.finished_flags[idx] = True
+
+ async def get_stream(self, sample_idx: int):
+ """Get async iterator for a specific sample's audio stream."""
+ if sample_idx >= self.batch_size:
+ raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")
+
+ while True:
+ value = await self.audio_queues[sample_idx].get()
+            if value is self.stop_signal:
+ break
+ yield value
+
+ def __aiter__(self):
+ """Returns an async iterator over all audio streams."""
+ return AsyncAudioBatchIterator(self)
+
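+# Illustrative async sketch (the surrounding coroutine and `handle` are assumptions):
+# because get_stream is an async generator, a consumer can write
+#     async for chunk in streamer.get_stream(0):
+#         await handle(chunk)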
+
+class AsyncAudioBatchIterator:
+ """Async iterator for batch audio streaming."""
+
+ def __init__(self, streamer: AsyncAudioStreamer):
+ self.streamer = streamer
+ self.active_samples = set(range(streamer.batch_size))
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ if not self.active_samples:
+ raise StopAsyncIteration()
+
+ batch_chunks = {}
+ samples_to_remove = set()
+
+ # Create tasks for all active samples
+ tasks = {
+ idx: asyncio.create_task(self._get_chunk(idx))
+ for idx in self.active_samples
+ }
+
+ # Wait for at least one chunk to be ready
+ done, pending = await asyncio.wait(
+ tasks.values(),
+ return_when=asyncio.FIRST_COMPLETED,
+ timeout=self.streamer.timeout
+ )
+
+ # Cancel pending tasks
+ for task in pending:
+ task.cancel()
+
+ # Process completed tasks
+ for idx, task in tasks.items():
+ if task in done:
+ try:
+ value = await task
+                    if value is self.streamer.stop_signal:
+ samples_to_remove.add(idx)
+ else:
+ batch_chunks[idx] = value
+ except asyncio.CancelledError:
+ pass
+
+ self.active_samples -= samples_to_remove
+
+ if batch_chunks:
+ return batch_chunks
+ elif self.active_samples:
+ # Try again if we still have active samples
+ return await self.__anext__()
+ else:
+ raise StopAsyncIteration()
+
+ async def _get_chunk(self, idx):
+ """Helper to get a chunk from a specific queue."""
+ return await self.streamer.audio_queues[idx].get()
\ No newline at end of file
diff --git a/vibevoice/processor/__init__.py b/vibevoice/processor/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vibevoice/processor/vibevoice_processor.py b/vibevoice/processor/vibevoice_processor.py
new file mode 100644
index 0000000..66d0a9d
--- /dev/null
+++ b/vibevoice/processor/vibevoice_processor.py
@@ -0,0 +1,677 @@
+import math
+import warnings
+from typing import List, Optional, Union, Dict, Any, Tuple
+import os
+import re
+
+import numpy as np
+import torch
+
+from transformers.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from transformers.utils import TensorType, logging
+from .vibevoice_tokenizer_processor import AudioNormalizer
+
+logger = logging.get_logger(__name__)
+
+
+class VibeVoiceProcessor:
+ r"""
+ Constructs a VibeVoice processor which wraps a VibeVoice tokenizer and audio processor into a single processor.
+
+ [`VibeVoiceProcessor`] offers all the functionalities of [`VibeVoiceTokenizer`] and [`VibeVoiceTokenizerProcessor`].
+ See the [`~VibeVoiceProcessor.__call__`] and [`~VibeVoiceProcessor.decode`] for more information.
+
+ Args:
+ tokenizer (`VibeVoiceTextTokenizer` or `VibeVoiceTextTokenizerFast`):
+ The tokenizer for text processing.
+ audio_processor (`VibeVoiceTokenizerProcessor`):
+ The audio processor for speech processing.
+ speech_tok_compress_ratio (`int`, *optional*, defaults to 3200):
+ The compression ratio for speech tokenization.
+ db_normalize (`bool`, *optional*, defaults to True):
+ Whether to apply decibel normalization to audio inputs.
+ """
+
+ def __init__(self, tokenizer=None, audio_processor=None, speech_tok_compress_ratio=3200, db_normalize=True, **kwargs):
+ self.tokenizer = tokenizer
+ self.audio_processor = audio_processor
+ self.speech_tok_compress_ratio = speech_tok_compress_ratio
+ self.db_normalize = db_normalize
+ self.audio_normalizer = AudioNormalizer() if db_normalize else None
+ self.system_prompt = " Transform the text provided by various speakers into speech output, utilizing the distinct voice of each respective speaker.\n"
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ """
+ Instantiate a VibeVoiceProcessor from a pretrained VibeVoice processor.
+
+ Args:
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
+ This can be either:
+ - a string, the *model id* of a pretrained model
+ - a path to a *directory* containing processor config
+
+ Returns:
+ [`VibeVoiceProcessor`]: The processor object instantiated from pretrained model.
+ """
+ import os
+ import json
+ from .vibevoice_tokenizer_processor import VibeVoiceTokenizerProcessor
+ from vibevoice.modular.modular_vibevoice_text_tokenizer import (
+ VibeVoiceTextTokenizer,
+ VibeVoiceTextTokenizerFast
+ )
+
+ # Load processor configuration
+ config_path = os.path.join(pretrained_model_name_or_path, "preprocessor_config.json")
+ if os.path.exists(config_path):
+ with open(config_path, 'r') as f:
+ config = json.load(f)
+ else:
+ logger.warning(f"No preprocessor_config.json found at {pretrained_model_name_or_path}, using defaults")
+ config = {
+ "speech_tok_compress_ratio": 3200,
+ "db_normalize": True,
+ }
+
+ # Extract main processor parameters
+ speech_tok_compress_ratio = config.get("speech_tok_compress_ratio", 3200)
+ db_normalize = config.get("db_normalize", True)
+
+        # Load tokenizer - use the name from config/kwargs, defaulting to Qwen2.5-1.5B
+ language_model_pretrained_name = config.get("language_model_pretrained_name", None) or kwargs.pop("language_model_pretrained_name", "Qwen/Qwen2.5-1.5B")
+ logger.info(f"Loading tokenizer from {language_model_pretrained_name}")
+ if 'qwen' in language_model_pretrained_name.lower():
+ tokenizer = VibeVoiceTextTokenizerFast.from_pretrained(
+ language_model_pretrained_name,
+ **kwargs
+ )
+ else:
+ raise ValueError(f"Unsupported tokenizer type for {language_model_pretrained_name}. Supported types: Qwen, Llama, Gemma.")
+
+ # Load audio processor
+ if "audio_processor" in config:
+ # Create audio processor from config
+ audio_config = config["audio_processor"]
+ audio_processor = VibeVoiceTokenizerProcessor(
+ sampling_rate=audio_config.get("sampling_rate", 24000),
+ normalize_audio=audio_config.get("normalize_audio", True),
+ target_dB_FS=audio_config.get("target_dB_FS", -25),
+ eps=audio_config.get("eps", 1e-6),
+ )
+ else:
+ # Create default audio processor
+ audio_processor = VibeVoiceTokenizerProcessor()
+
+ # Create and return the processor
+ return cls(
+ tokenizer=tokenizer,
+ audio_processor=audio_processor,
+ speech_tok_compress_ratio=speech_tok_compress_ratio,
+ db_normalize=db_normalize,
+ )
+
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
+ """
+ Save a processor to a directory, so that it can be re-loaded using the
+ [`~VibeVoiceProcessor.from_pretrained`] class method.
+
+ Args:
+ save_directory (`str` or `os.PathLike`):
+ Directory where the processor will be saved.
+ """
+ import os
+ import json
+
+ os.makedirs(save_directory, exist_ok=True)
+
+ # Save processor configuration
+ processor_config = {
+ "processor_class": "VibeVoiceProcessor",
+ "speech_tok_compress_ratio": self.speech_tok_compress_ratio,
+ "db_normalize": self.db_normalize,
+ "audio_processor": {
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
+ "sampling_rate": getattr(self.audio_processor, 'sampling_rate', 24000),
+ "normalize_audio": getattr(self.audio_processor, 'normalize_audio', True),
+ "target_dB_FS": getattr(self.audio_processor, 'target_dB_FS', -25),
+ "eps": getattr(self.audio_processor, 'eps', 1e-6),
+ }
+ }
+
+ config_path = os.path.join(save_directory, "preprocessor_config.json")
+ with open(config_path, 'w') as f:
+ json.dump(processor_config, f, indent=2)
+
+ logger.info(f"Processor configuration saved in {config_path}")
+
+ def __call__(
+ self,
+ text: Optional[Union[str, List[str], TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+ voice_samples: Optional[Union[List[Union[str, np.ndarray]], List[List[Union[str, np.ndarray]]]]] = None,
+ padding: Union[bool, str, PaddingStrategy] = True,
+ truncation: Union[bool, str, TruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_attention_mask: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to process one or more podcast scripts with optional voice samples.
+
+ Args:
+ text (`str`, `List[str]`):
+ The input text(s) to process. Can be:
+ - A single script string
+ - A list of script strings for batch processing
+ - A path to a .json or .txt file
+ - A list of paths
+ voice_samples (`List[Union[str, np.ndarray]]`, `List[List[Union[str, np.ndarray]]]`, *optional*):
+ Voice samples for each script. Can be:
+ - A list of samples for a single script
+ - A list of lists for batch processing
+ padding (`bool`, `str` or `PaddingStrategy`, defaults to `True`):
+ Whether to pad sequences to the same length
+ truncation (`bool`, `str` or `TruncationStrategy`, defaults to `False`):
+ Whether to truncate sequences
+ max_length (`int`, *optional*):
+ Maximum length of the returned sequences
+ return_tensors (`str` or `TensorType`, *optional*):
+ If set, will return tensors of a particular framework
+ return_attention_mask (`bool`, defaults to `True`):
+ Whether to return the attention mask
+
+ Returns:
+ `BatchEncoding`: A BatchEncoding with the following fields:
+ - **input_ids** -- List of token id sequences or tensor
+ - **attention_mask** -- List of attention masks or tensor
+ - **speech_tensors** -- Padded speech inputs (if voice_samples provided)
+ - **speech_masks** -- Speech masks (if voice_samples provided)
+ - **speech_input_mask** -- Boolean masks indicating speech token positions
+ """
+ # Handle single vs batch input
+ if isinstance(text, str) or (isinstance(text, list) and len(text) > 0 and not isinstance(text[0], str)):
+ # Single input
+ texts = [text]
+ is_batched = False
+ else:
+ # Batch input
+ texts = text
+ is_batched = True
+
+ # Handle voice samples
+ if voice_samples is not None:
+ if not is_batched or (isinstance(voice_samples[0], (str, np.ndarray))):
+ # Single set of voice samples
+ voice_samples_list = [voice_samples]
+ else:
+ # Batch of voice samples
+ voice_samples_list = voice_samples
+ else:
+ voice_samples_list = [None] * len(texts)
+
+ # Process each input
+ all_encodings = []
+ for text_input, voice_input in zip(texts, voice_samples_list):
+ encoding = self._process_single(text_input, voice_input)
+ all_encodings.append(encoding)
+
+ # Combine batch
+ batch_encoding = self._batch_encode(
+ all_encodings,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ return_attention_mask=return_attention_mask,
+ )
+
+ return batch_encoding
+
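+    # Illustrative call sketch (the path and file names are assumptions, not fixtures
+    # shipped with this file):
+    #     processor = VibeVoiceProcessor.from_pretrained("path/to/model")
+    #     batch = processor("Speaker 1: Hello there.\nSpeaker 2: Hi!",
+    #                       voice_samples=["alice.wav", "bob.wav"],
+    #                       return_tensors="pt")
+    #     # batch["input_ids"], batch["attention_mask"], batch["speech_tensors"],
+    #     # batch["speech_masks"], batch["speech_input_mask"] are then model-ready.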
+ def _process_single(
+ self,
+ text: Union[str, TextInput],
+ voice_samples: Optional[List[Union[str, np.ndarray]]] = None,
+ ) -> Dict[str, Any]:
+ """Process a single podcast script."""
+ # Determine if text is a file path or direct script
+ script = None
+ if isinstance(text, str):
+ # Check if it's a file path
+ if text.endswith('.json') and os.path.exists(text):
+ script = self._convert_json_to_script(text)
+ elif text.endswith('.txt') and os.path.exists(text):
+ script = self._convert_text_to_script(text)
+ else:
+ # Assume it's the script content directly
+ script = text
+
+ if script is None:
+ raise ValueError(f"Could not process input text: {text}")
+
+ # Parse the script
+ parsed_lines = self._parse_script(script)
+ all_speakers = list(set(speaker_id for speaker_id, _ in parsed_lines))
+
+ # Create system prompt
+ # system_tokens = self.tokenizer.encode(self.system_prompt, add_special_tokens=False)
+ system_tokens = self.tokenizer.encode(self.system_prompt)
+
+ # Process voice samples if provided
+ if voice_samples:
+ voice_tokens, voice_speech_inputs, voice_speech_masks = self._create_voice_prompt(voice_samples[:len(all_speakers)])
+ else:
+ voice_tokens, voice_speech_inputs, voice_speech_masks = [], [], []
+
+ # Build full token sequence
+ full_tokens = system_tokens + voice_tokens
+ speech_input_mask = [False] * len(system_tokens) + voice_speech_masks
+
+        # Add text input section (encode once, reuse for both tokens and mask)
+        text_input_tokens = self.tokenizer.encode(' Text input:\n', add_special_tokens=False)
+        full_tokens += text_input_tokens
+        speech_input_mask += [False] * len(text_input_tokens)
+
+        for speaker_id, speaker_text in parsed_lines:
+            speaker_text_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:{speaker_text}\n", add_special_tokens=False)
+            full_tokens += speaker_text_tokens
+            speech_input_mask += [False] * len(speaker_text_tokens)
+
+        # Add speech output section
+        speech_output_tokens = self.tokenizer.encode(' Speech output:\n', add_special_tokens=False)
+        full_tokens += speech_output_tokens + [self.tokenizer.speech_start_id]
+        speech_input_mask += [False] * (len(speech_output_tokens) + 1)
+
+ return {
+ "input_ids": full_tokens,
+ "speech_inputs": voice_speech_inputs if voice_speech_inputs else None,
+ "speech_input_mask": speech_input_mask,
+ "parsed_script": parsed_lines,
+ "all_speakers": all_speakers,
+ }
+
+ def _batch_encode(
+ self,
+ encodings: List[Dict[str, Any]],
+ padding: Union[bool, str, PaddingStrategy] = True,
+ truncation: Union[bool, str, TruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_attention_mask: bool = True,
+ ) -> BatchEncoding:
+ """Combine multiple encodings into a batch with padding."""
+ # Extract input_ids and create attention_mask
+ input_ids_list = [enc["input_ids"] for enc in encodings]
+ speech_input_masks_list = [enc["speech_input_mask"] for enc in encodings]
+
+ # Determine padding strategy
+ if isinstance(padding, bool):
+ padding_strategy = PaddingStrategy.LONGEST if padding else PaddingStrategy.DO_NOT_PAD
+ elif isinstance(padding, str):
+ padding_strategy = PaddingStrategy(padding)
+ else:
+ padding_strategy = padding
+
+ # Apply padding to input_ids
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD:
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_len = max(len(ids) for ids in input_ids_list)
+ elif padding_strategy == PaddingStrategy.MAX_LENGTH and max_length is not None:
+ max_len = max_length
+ else:
+ max_len = max(len(ids) for ids in input_ids_list)
+
+ # Pad sequences
+ padded_input_ids = []
+ attention_masks = []
+ padded_speech_input_masks = []
+
+ for input_ids, speech_mask in zip(input_ids_list, speech_input_masks_list):
+ # Truncate if needed
+ if truncation and len(input_ids) > max_len:
+ input_ids = input_ids[:max_len]
+ speech_mask = speech_mask[:max_len]
+
+ # Pad
+ padding_length = max_len - len(input_ids)
+ # padded_ids = [self.tokenizer.pad_token_id] * padding_length + input_ids
+ padded_ids = [self.tokenizer.pad_id] * padding_length + input_ids
+ attention_mask = [0] * padding_length + [1] * len(input_ids)
+ padded_speech_mask = [False] * padding_length + speech_mask
+
+ padded_input_ids.append(padded_ids)
+ attention_masks.append(attention_mask)
+ padded_speech_input_masks.append(padded_speech_mask)
+
+ input_ids_list = padded_input_ids
+ speech_input_masks_list = padded_speech_input_masks
+ else:
+ # No padding, just create attention masks
+ attention_masks = [[1] * len(ids) for ids in input_ids_list] if return_attention_mask else None
+
+ # Process speech inputs
+ all_speech_inputs = []
+ has_speech = False
+ for enc in encodings:
+ if enc["speech_inputs"] is not None:
+ all_speech_inputs.extend(enc["speech_inputs"])
+ has_speech = True
+
+ # Prepare batch encoding
+ batch_encoding = BatchEncoding()
+
+ # Handle tensor conversion
+ if return_tensors is not None:
+ batch_encoding["input_ids"] = torch.tensor(input_ids_list, dtype=torch.long)
+ if return_attention_mask and attention_masks is not None:
+ batch_encoding["attention_mask"] = torch.tensor(attention_masks, dtype=torch.long)
+ batch_encoding["speech_input_mask"] = torch.tensor(speech_input_masks_list, dtype=torch.bool)
+ else:
+ batch_encoding["input_ids"] = input_ids_list
+ if return_attention_mask and attention_masks is not None:
+ batch_encoding["attention_mask"] = attention_masks
+ batch_encoding["speech_input_mask"] = speech_input_masks_list
+
+ # Process speech tensors if present
+ if has_speech:
+ speech_dict = self.prepare_speech_inputs(
+ all_speech_inputs,
+ return_tensors=return_tensors,
+ )
+ batch_encoding["speech_tensors"] = speech_dict["padded_speeches"]
+ batch_encoding["speech_masks"] = speech_dict["speech_masks"]
+ else:
+ batch_encoding["speech_tensors"] = None
+ batch_encoding["speech_masks"] = None
+
+ # Add metadata
+ batch_encoding["parsed_scripts"] = [enc["parsed_script"] for enc in encodings]
+ batch_encoding["all_speakers_list"] = [enc["all_speakers"] for enc in encodings]
+
+ return batch_encoding
+
+ def _create_voice_prompt(
+ self,
+ speaker_samples: List[Union[str, np.ndarray]]
+ ) -> Tuple[List[int], List[np.ndarray], List[bool]]:
+ """
+ Create voice prompt tokens and process audio samples.
+
+ Returns:
+ tuple: (voice_tokens, voice_speech_inputs, voice_speech_masks)
+ """
+ vae_token_id = self.tokenizer.speech_diffusion_id
+
+ voice_full_tokens = self.tokenizer.encode(' Voice input:\n', add_special_tokens=False)
+ voice_speech_inputs = []
+ voice_speech_masks = [False] * len(voice_full_tokens)
+
+ for speaker_id, speaker_audio in enumerate(speaker_samples):
+ prefix_tokens = self.tokenizer.encode(f" Speaker {speaker_id}:", add_special_tokens=False)
+
+ # Process audio
+ if isinstance(speaker_audio, str):
+ # Load audio from file
+ wav = self.audio_processor._load_audio_from_path(speaker_audio)
+ else:
+ wav = np.array(speaker_audio, dtype=np.float32)
+
+ # Apply normalization if needed
+ if self.db_normalize and self.audio_normalizer:
+ wav = self.audio_normalizer(wav)
+
+ # Calculate token length based on compression ratio
+ # if speaker_audio.endswith('.pt') or speaker_audio.endswith('.npy'):
+ # vae_tok_len = wav.shape[0]
+ # else:
+ vae_tok_len = math.ceil(wav.shape[0] / self.speech_tok_compress_ratio)
+
+            # Build tokens and masks; the mask marks only the VAE placeholder positions
+            newline_tokens = self.tokenizer.encode('\n', add_special_tokens=False)
+            speaker_tokens = (prefix_tokens +
+                            [self.tokenizer.speech_start_id] +
+                            [vae_token_id] * vae_tok_len +
+                            [self.tokenizer.speech_end_id] +
+                            newline_tokens)
+
+            vae_input_mask = ([False] * len(prefix_tokens) +
+                            [False] +                          # speech_start_id
+                            [True] * vae_tok_len +             # VAE placeholder tokens
+                            [False] +                          # speech_end_id
+                            [False] * len(newline_tokens))     # trailing newline token(s)
+
+ voice_full_tokens.extend(speaker_tokens)
+ voice_speech_masks.extend(vae_input_mask)
+ voice_speech_inputs.append(wav)
+
+ return voice_full_tokens, voice_speech_inputs, voice_speech_masks
+
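+    # Worked example (assuming 24 kHz input audio): a 3.2 s reference clip has
+    # 76_800 samples, so ceil(76_800 / 3200) = 24 speech_diffusion_id placeholder
+    # tokens are inserted between speech_start_id and speech_end_id, and exactly
+    # those 24 positions are True in the returned speech mask.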
+ def prepare_speech_inputs(
+ self,
+ speech_inputs: List[np.ndarray],
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ dtype: Optional[torch.dtype] = None,
+ ) -> Dict[str, Any]:
+ """
+ Prepare speech inputs for model consumption.
+
+ Args:
+ speech_inputs: List of speech arrays
+ return_tensors: Output tensor type
+ device: Device to place tensors on
+ dtype: Data type for tensors
+
+ Returns:
+ Dictionary with padded_speeches and speech_masks
+ """
+ if not speech_inputs:
+ return {"padded_speeches": None, "speech_masks": None}
+
+ # Calculate sequence lengths
+ vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) for s in speech_inputs]
+ # vae_tok_seqlens = [math.ceil(s.shape[0] / self.speech_tok_compress_ratio) if s.ndim == 1 else s.shape[0] for s in speech_inputs]
+ max_speech_length = max(s.shape[0] for s in speech_inputs)
+
+ # Pad speeches
+ if speech_inputs[0].ndim == 1:
+ padded_speeches = np.full((len(speech_inputs), max_speech_length), fill_value=0, dtype=np.float32)
+ else:
+ padded_speeches = np.full((len(speech_inputs), max_speech_length, speech_inputs[0].shape[-1]), fill_value=0, dtype=np.float32)
+ speech_masks = np.zeros((len(speech_inputs), max(vae_tok_seqlens)), dtype=np.bool_)
+
+ for i, (speech, vae_tok_length) in enumerate(zip(speech_inputs, vae_tok_seqlens)):
+ padded_speeches[i, :len(speech)] = speech
+ speech_masks[i, :vae_tok_length] = True
+
+ result = {
+ "padded_speeches": padded_speeches,
+ "speech_masks": speech_masks,
+ }
+
+ # Convert to tensors if requested
+ if return_tensors == "pt":
+ result["padded_speeches"] = torch.tensor(padded_speeches, device=device, dtype=dtype or torch.float32)
+ result["speech_masks"] = torch.tensor(speech_masks, device=device, dtype=torch.bool)
+
+ return result
+
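+    # Shape sketch (assumed clip lengths): two clips of 32_000 and 48_000 samples with
+    # speech_tok_compress_ratio=3200 give padded_speeches of shape (2, 48_000) and
+    # speech_masks of shape (2, 15), with 10 and 15 True entries respectively.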
+ def _convert_json_to_script(self, json_file: str) -> str:
+ """
+ Convert JSON format to script format.
+ Expected JSON format:
+ [
+ {"speaker": "1", "text": "Hello everyone..."},
+ {"speaker": "2", "text": "Great to be here..."}
+ ]
+ """
+ import json
+
+ with open(json_file, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+
+ if not isinstance(data, list):
+ raise ValueError("JSON file must contain a list of speaker entries")
+
+ script_lines = []
+ for item in data:
+ if not isinstance(item, dict):
+ logger.warning(f"Skipping non-dict entry: {item}")
+ continue
+
+ speaker = item.get('speaker')
+ text = item.get('text')
+
+ if speaker is None or text is None:
+ logger.warning(f"Skipping entry missing speaker or text: {item}")
+ continue
+
+ # Ensure speaker ID is valid
+ try:
+ speaker_id = int(speaker)
+ except (ValueError, TypeError):
+ logger.warning(f"Invalid speaker ID: {speaker}, skipping entry")
+ continue
+
+ # Clean up text
+ text = text.strip()
+ if text:
+ script_lines.append(f"Speaker {speaker_id}: {text}")
+
+ if not script_lines:
+ raise ValueError("No valid entries found in JSON file")
+
+ return "\n".join(script_lines)
+
+ def _convert_text_to_script(self, text_file: str) -> str:
+ """
+ Convert text file to script format.
+ Handles multiple formats:
+ 1. Already formatted as "Speaker X: text"
+ 2. Plain text (assigns to Speaker 1)
+
+ Handles edge cases like multiple colons in a line.
+ """
+ with open(text_file, 'r', encoding='utf-8') as f:
+ lines = f.readlines()
+
+ script_lines = []
+ current_speaker = 1
+
+ for line in lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ # Try to parse as "Speaker X: text" format
+ # Use regex to be more robust
+ speaker_match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line, re.IGNORECASE)
+
+ if speaker_match:
+ speaker_id = int(speaker_match.group(1))
+ text = speaker_match.group(2).strip()
+ if text:
+ script_lines.append(f"Speaker {speaker_id}: {text}")
+ else:
+ # Treat as plain text - assign to current speaker
+ script_lines.append(f"Speaker {current_speaker}: {line}")
+
+ if not script_lines:
+ raise ValueError("No valid content found in text file")
+
+ return "\n".join(script_lines)
+
+ def _parse_script(self, script: str) -> List[Tuple[int, str]]:
+ """Parse script into list of (speaker_id, text) tuples."""
+ lines = script.strip().split("\n")
+ parsed_lines = []
+ speaker_ids = []
+
+ # First pass: parse all lines and collect speaker IDs
+ for line in lines:
+ if not line.strip():
+ continue
+
+ # Use regex to handle edge cases like multiple colons
+ match = re.match(r'^Speaker\s+(\d+)\s*:\s*(.*)$', line.strip(), re.IGNORECASE)
+
+ if match:
+ speaker_id = int(match.group(1))
+ text = ' ' + match.group(2).strip()
+ parsed_lines.append((speaker_id, text))
+ speaker_ids.append(speaker_id)
+ else:
+ logger.warning(f"Could not parse line: '{line}'")
+
+ if not parsed_lines:
+ raise ValueError("No valid speaker lines found in script")
+
+ # Check if we need to normalize speaker IDs (only if all are > 0)
+ min_speaker_id = min(speaker_ids)
+ if min_speaker_id > 0:
+ # Normalize to start from 0
+ normalized_lines = []
+ for speaker_id, text in parsed_lines:
+ normalized_lines.append((speaker_id - 1, text))
+ return normalized_lines
+ else:
+ # Keep original IDs
+ return parsed_lines
+
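+    # Example: "Speaker 1: Hi\nSpeaker 2: Hello" parses to [(0, ' Hi'), (1, ' Hello')]
+    # because the 1-based speaker IDs are shifted down to start from 0.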
+ def _merge_inputs(self, text_inputs: BatchEncoding, audio_inputs: Dict) -> BatchEncoding:
+ """Merge text and audio inputs into a single BatchEncoding."""
+ # Start with text inputs
+ merged = BatchEncoding(text_inputs)
+
+ # Add audio-specific fields
+ if "audio" in audio_inputs:
+ merged["speech_inputs"] = audio_inputs["audio"]
+ if "streaming" in audio_inputs:
+ merged["streaming"] = audio_inputs["streaming"]
+
+ return merged
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to VibeVoiceTextTokenizer's [`~PreTrainedTokenizer.batch_decode`].
+ Please refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to VibeVoiceTextTokenizer's [`~PreTrainedTokenizer.decode`].
+ Please refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ """
+ Return the list of inputs accepted by the model.
+ """
+ tokenizer_input_names = self.tokenizer.model_input_names
+ audio_processor_input_names = self.audio_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + audio_processor_input_names + ["speech_inputs", "speech_input_mask"]))
+
+ def save_audio(self,
+ audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]],
+ output_path: str = "output.wav",
+ sampling_rate: Optional[int] = None,
+ normalize: bool = False,
+ batch_prefix: str = "audio_",
+ ) -> str:
+ """
+ Save audio data to a file.
+ Args:
+ audio (Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]]):
+ The audio data to save. Can be a single tensor/array or a list of them.
+ output_path (str, optional): Path to save the audio file. Defaults to "output.wav".
+ sampling_rate (int, optional): Sampling rate for the audio. If None, uses the processor's default.
+ normalize (bool, optional): Whether to normalize the audio before saving. Defaults to False.
+ batch_prefix (str, optional): Prefix for batch audio files. Defaults to "audio_".
+ Returns:
+ str: The path to the saved audio file.
+ """
+ return self.audio_processor.save_audio(audio, output_path=output_path, sampling_rate=sampling_rate, normalize=normalize, batch_prefix=batch_prefix)
+
+__all__ = [
+ "VibeVoiceProcessor",
+]
\ No newline at end of file
diff --git a/vibevoice/processor/vibevoice_tokenizer_processor.py b/vibevoice/processor/vibevoice_tokenizer_processor.py
new file mode 100644
index 0000000..0d854b7
--- /dev/null
+++ b/vibevoice/processor/vibevoice_tokenizer_processor.py
@@ -0,0 +1,483 @@
+"""
+Processor class for VibeVoice models.
+"""
+
+import os
+import json
+import warnings
+from typing import List, Optional, Union, Dict, Any
+
+import numpy as np
+import torch
+
+from transformers.feature_extraction_utils import FeatureExtractionMixin
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class AudioNormalizer:
+ """
+ Audio normalization class for VibeVoice tokenizer.
+
+ This class provides audio normalization to ensure consistent input levels
+ for the VibeVoice tokenizer while maintaining audio quality.
+ """
+
+ def __init__(self, target_dB_FS: float = -25, eps: float = 1e-6):
+ """
+ Initialize the audio normalizer.
+
+ Args:
+ target_dB_FS (float): Target dB FS level for the audio. Default: -25
+ eps (float): Small value to avoid division by zero. Default: 1e-6
+ """
+ self.target_dB_FS = target_dB_FS
+ self.eps = eps
+
+ def tailor_dB_FS(self, audio: np.ndarray) -> tuple:
+ """
+ Adjust the audio to the target dB FS level.
+
+ Args:
+ audio (np.ndarray): Input audio signal
+
+ Returns:
+ tuple: (normalized_audio, rms, scalar)
+ """
+ rms = np.sqrt(np.mean(audio**2))
+ scalar = 10 ** (self.target_dB_FS / 20) / (rms + self.eps)
+ normalized_audio = audio * scalar
+ return normalized_audio, rms, scalar
+
+ def avoid_clipping(self, audio: np.ndarray, scalar: Optional[float] = None) -> tuple:
+ """
+ Avoid clipping by scaling down if necessary.
+
+ Args:
+ audio (np.ndarray): Input audio signal
+ scalar (float, optional): Explicit scaling factor
+
+ Returns:
+ tuple: (normalized_audio, scalar)
+ """
+ if scalar is None:
+ max_val = np.max(np.abs(audio))
+ if max_val > 1.0:
+ scalar = max_val + self.eps
+ else:
+ scalar = 1.0
+
+ return audio / scalar, scalar
+
+ def __call__(self, audio: np.ndarray) -> np.ndarray:
+ """
+ Normalize the audio by adjusting to target dB FS and avoiding clipping.
+
+ Args:
+ audio (np.ndarray): Input audio signal
+
+ Returns:
+ np.ndarray: Normalized audio signal
+ """
+ # First adjust to target dB FS
+ audio, _, _ = self.tailor_dB_FS(audio)
+ # Then avoid clipping
+ audio, _ = self.avoid_clipping(audio)
+ return audio
+
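+# Worked example: with target_dB_FS = -25, a signal whose RMS is 0.5 is scaled by
+# 10 ** (-25 / 20) / (0.5 + eps) ≈ 0.112, bringing its RMS to roughly 0.056
+# (i.e. -25 dBFS); avoid_clipping then rescales only if the peak exceeds 1.0.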
+
+# Change from ProcessorMixin to FeatureExtractionMixin which is designed for single components
+class VibeVoiceTokenizerProcessor(FeatureExtractionMixin):
+ """
+ Processor for VibeVoice acoustic tokenizer models.
+
+ This processor handles audio preprocessing for VibeVoice models, including:
+ - Audio format conversion (stereo to mono)
+ - Optional audio normalization
+ - Streaming support for infinite-length audio
+
+ Args:
+ sampling_rate (int, optional): Expected sampling rate. Defaults to 24000.
+ normalize_audio (bool, optional): Whether to normalize audio. Defaults to True.
+ target_dB_FS (float, optional): Target dB FS for normalization. Defaults to -25.
+ eps (float, optional): Small value for numerical stability. Defaults to 1e-6.
+ """
+ model_input_names = ["input_features"]
+
+ def __init__(
+ self,
+ sampling_rate: int = 24000,
+ normalize_audio: bool = True,
+ target_dB_FS: float = -25,
+ eps: float = 1e-6,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.sampling_rate = sampling_rate
+ self.normalize_audio = normalize_audio
+
+ # Initialize audio normalizer if needed
+ if self.normalize_audio:
+ self.normalizer = AudioNormalizer(target_dB_FS=target_dB_FS, eps=eps)
+ else:
+ self.normalizer = None
+
+ # Save config
+ self.feature_extractor_dict = {
+ "sampling_rate": sampling_rate,
+ "normalize_audio": normalize_audio,
+ "target_dB_FS": target_dB_FS,
+ "eps": eps,
+ }
+
+ def _ensure_mono(self, audio: np.ndarray) -> np.ndarray:
+ """
+ Convert stereo audio to mono if needed.
+
+ Args:
+ audio (np.ndarray): Input audio array
+
+ Returns:
+ np.ndarray: Mono audio array
+ """
+ if len(audio.shape) == 1:
+ return audio
+ elif len(audio.shape) == 2:
+ if audio.shape[0] == 2: # (2, time)
+ return np.mean(audio, axis=0)
+ elif audio.shape[1] == 2: # (time, 2)
+ return np.mean(audio, axis=1)
+ else:
+ # If one dimension is 1, squeeze it
+ if audio.shape[0] == 1:
+ return audio.squeeze(0)
+ elif audio.shape[1] == 1:
+ return audio.squeeze(1)
+ else:
+ raise ValueError(f"Unexpected audio shape: {audio.shape}")
+ else:
+ raise ValueError(f"Audio should be 1D or 2D, got shape: {audio.shape}")
+
+ def _process_single_audio(self, audio: Union[np.ndarray, List[float]]) -> np.ndarray:
+ """
+ Process a single audio array.
+
+ Args:
+ audio: Single audio input
+
+ Returns:
+ np.ndarray: Processed audio
+ """
+ # Convert to numpy array
+ if not isinstance(audio, np.ndarray):
+ audio = np.array(audio, dtype=np.float32)
+ else:
+ audio = audio.astype(np.float32)
+
+ # Ensure mono
+ audio = self._ensure_mono(audio)
+
+ # Normalize if requested
+ if self.normalize_audio and self.normalizer is not None:
+ audio = self.normalizer(audio)
+
+ return audio
+
+ def __call__(
+ self,
+ audio: Union[str, np.ndarray, List[float], List[np.ndarray], List[List[float]], List[str]] = None,
+ sampling_rate: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ **kwargs,
+ ):
+ """
+ Process audio for VibeVoice models.
+
+ Args:
+ audio: Audio input(s) to process. Can be:
+ - str: Path to audio file
+ - np.ndarray: Audio array
+ - List[float]: Audio as list of floats
+ - List[np.ndarray]: Batch of audio arrays
+ - List[str]: Batch of audio file paths
+ sampling_rate (int, optional): Sampling rate of the input audio
+ return_tensors (str, optional): Return format ('pt' for PyTorch, 'np' for NumPy)
+
+ Returns:
+ dict: Processed audio inputs with key:
+ - audio: Audio tensor(s) ready for the model (the returned key is "audio", not "input_features")
+ """
+ if audio is None:
+ raise ValueError("Audio input is required")
+
+ # Validate sampling rate
+ if sampling_rate is not None and sampling_rate != self.sampling_rate:
+ logger.warning(
+ f"Input sampling rate ({sampling_rate}) differs from expected "
+ f"sampling rate ({self.sampling_rate}). Please resample your audio."
+ )
+
+ # Handle different input types
+ if isinstance(audio, str):
+ # Single audio file path
+ audio = self._load_audio_from_path(audio)
+ is_batched = False
+ elif isinstance(audio, list):
+ if len(audio) == 0:
+ raise ValueError("Empty audio list provided")
+
+ # Check if it's a list of file paths
+ if all(isinstance(item, str) for item in audio):
+ # Batch of audio file paths
+ audio = [self._load_audio_from_path(path) for path in audio]
+ is_batched = True
+ else:
+ # Check if it's batched audio arrays
+ is_batched = isinstance(audio[0], (np.ndarray, list))
+ else:
+ # Single audio array or list
+ is_batched = False
+
+ # Process audio
+ if is_batched:
+ processed_audio = [self._process_single_audio(a) for a in audio]
+ else:
+ processed_audio = [self._process_single_audio(audio)]
+
+ # Convert to tensors if requested
+ if return_tensors == "pt":
+ if len(processed_audio) == 1:
+ # Add batch and channel dimensions -> (1, 1, T)
+ input_features = torch.from_numpy(processed_audio[0]).unsqueeze(0).unsqueeze(1)
+ else:
+ # For batched input, stack into (B, 1, T); note torch.stack requires equal-length audio
+ input_features = torch.stack([torch.from_numpy(a) for a in processed_audio]).unsqueeze(1)
+ elif return_tensors == "np":
+ if len(processed_audio) == 1:
+ input_features = processed_audio[0][np.newaxis, np.newaxis, :]
+ else:
+ input_features = np.stack(processed_audio)[:, np.newaxis, :]
+ else:
+ input_features = processed_audio[0] if len(processed_audio) == 1 else processed_audio
+
+ outputs = {
+ "audio": input_features, # Use "audio" instead of "input_features"
+ }
+
+ return outputs
+
+ def _load_audio_from_path(self, audio_path: str) -> np.ndarray:
+ """
+ Load audio from file path.
+
+ Args:
+ audio_path (str): Path to audio file
+
+ Returns:
+ np.ndarray: Loaded audio array
+ """
+ # Get file extension to determine loading method
+ file_ext = os.path.splitext(audio_path)[1].lower()
+
+ if file_ext in ['.wav', '.mp3', '.flac', '.m4a', '.ogg']:
+ # Audio file - use librosa
+ import librosa
+ audio_array, sr = librosa.load(
+ audio_path,
+ sr=self.sampling_rate,
+ mono=True
+ )
+ return audio_array
+ elif file_ext == '.pt':
+ # PyTorch tensor file
+ audio_tensor = torch.load(audio_path, map_location='cpu').squeeze()
+ if isinstance(audio_tensor, torch.Tensor):
+ audio_array = audio_tensor.numpy()
+ else:
+ audio_array = np.array(audio_tensor)
+ return audio_array.astype(np.float32)
+ elif file_ext == '.npy':
+ # NumPy file
+ audio_array = np.load(audio_path)
+ return audio_array.astype(np.float32)
+ else:
+ raise ValueError(
+ f"Unsupported file format: {file_ext}. "
+ f"Supported formats: .wav, .mp3, .flac, .m4a, .ogg, .pt, .npy, .npz"
+ )
+
+ def preprocess_audio(
+ self,
+ audio_path_or_array: Union[str, np.ndarray],
+ normalize: Optional[bool] = None,
+ ) -> np.ndarray:
+ """
+ Convenience method to preprocess audio from file path or array.
+ This method is kept for backward compatibility but __call__ is recommended.
+
+ Args:
+ audio_path_or_array: Path to audio file or numpy array
+ normalize: Whether to normalize (overrides default setting)
+
+ Returns:
+ np.ndarray: Preprocessed audio array
+ """
+ if isinstance(audio_path_or_array, str):
+ audio_array = self._load_audio_from_path(audio_path_or_array)
+ else:
+ audio_array = np.array(audio_path_or_array, dtype=np.float32)
+
+ # Override normalization setting if specified
+ original_normalize = self.normalize_audio
+ if normalize is not None:
+ self.normalize_audio = normalize
+
+ try:
+ processed = self._process_single_audio(audio_array)
+ finally:
+ # Restore original setting
+ self.normalize_audio = original_normalize
+
+ return processed
+
+ # Override to_dict method for configuration saving
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Convert the object to a dict containing all attributes needed for serialization.
+ """
+ return self.feature_extractor_dict
+
+ def save_audio(
+ self,
+ audio: Union[torch.Tensor, np.ndarray, List[Union[torch.Tensor, np.ndarray]]],
+ output_path: str = "output.wav",
+ sampling_rate: Optional[int] = None,
+ normalize: bool = False,
+ batch_prefix: str = "audio_",
+ ):
+ """
+ Save audio data to WAV file(s).
+
+ Args:
+ audio: Audio data to save. Can be:
+ - torch.Tensor: PyTorch tensor with shape (B, C, T) or (B, T) or (T)
+ - np.ndarray: NumPy array with shape (B, C, T) or (B, T) or (T)
+ - List of tensors or arrays
+ output_path: Path where to save the audio. If saving multiple files,
+ this is treated as a directory and individual files will be saved inside.
+ sampling_rate: Sampling rate for the saved audio. Defaults to the processor's rate.
+ normalize: Whether to normalize audio before saving.
+ batch_prefix: Prefix for batch files when saving multiple audios.
+
+ Returns:
+ List[str]: Paths to the saved audio files.
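+
+ Example (illustrative sketch; `speech` is a placeholder (1, 1, T) tensor and
+ `speech_list` a list of such tensors):
+
+ >>> processor.save_audio(speech, output_path="generated.wav")
+ >>> processor.save_audio(speech_list, output_path="generated/", batch_prefix="audio_")
+ >>> # the second call writes generated/audio_0.wav, generated/audio_1.wav, ...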
+ """
+ if sampling_rate is None:
+ sampling_rate = self.sampling_rate
+
+ try:
+ import soundfile as sf
+ except ImportError:
+ raise ImportError(
+ "soundfile is required to save audio files. "
+ "Install it with: pip install soundfile"
+ )
+
+ # Ensure audio is in the right format
+ if isinstance(audio, torch.Tensor):
+ # Convert PyTorch tensor to numpy
+ audio_np = audio.float().detach().cpu().numpy()
+ elif isinstance(audio, np.ndarray):
+ audio_np = audio
+ elif isinstance(audio, list):
+ # Handle list of tensors or arrays
+ if all(isinstance(a, torch.Tensor) for a in audio):
+ audio_np = [a.float().detach().cpu().numpy() for a in audio]
+ else:
+ audio_np = audio
+ else:
+ raise ValueError(f"Unsupported audio type: {type(audio)}")
+
+ saved_paths = []
+
+ # Handle based on shape or type
+ if isinstance(audio_np, list):
+ # Multiple separate audios to save
+ output_dir = output_path
+
+ # Ensure output directory exists
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Save each audio
+ for i, audio_item in enumerate(audio_np):
+ audio_item = self._prepare_audio_for_save(audio_item, normalize)
+ file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav")
+ sf.write(file_path, audio_item, sampling_rate)
+ saved_paths.append(file_path)
+
+ else:
+ # Handle different dimensions
+ if len(audio_np.shape) >= 3: # (B, C, T) or similar
+ # Get batch size
+ batch_size = audio_np.shape[0]
+
+ if batch_size > 1:
+ # Multiple audios in a batch
+ output_dir = output_path
+
+ # Ensure output directory exists
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Save each audio in the batch
+ for i in range(batch_size):
+ # Extract single audio and remove channel dim if present
+ single_audio = audio_np[i]
+ if len(single_audio.shape) > 1:
+ if single_audio.shape[0] == 1: # (1, T)
+ single_audio = single_audio.squeeze(0)
+
+ single_audio = self._prepare_audio_for_save(single_audio, normalize)
+ file_path = os.path.join(output_dir, f"{batch_prefix}{i}.wav")
+ sf.write(file_path, single_audio, sampling_rate)
+ saved_paths.append(file_path)
+ else:
+ # Single audio with batch and channel dims
+ audio_item = audio_np.squeeze() # Remove batch and channel dimensions
+ audio_item = self._prepare_audio_for_save(audio_item, normalize)
+ sf.write(output_path, audio_item, sampling_rate)
+ saved_paths.append(output_path)
+ else:
+ # Single audio without batch dimension
+ audio_item = self._prepare_audio_for_save(audio_np, normalize)
+ sf.write(output_path, audio_item, sampling_rate)
+ saved_paths.append(output_path)
+
+ return saved_paths
+
+ def _prepare_audio_for_save(self, audio: np.ndarray, normalize: bool) -> np.ndarray:
+ """
+ Prepare audio for saving by ensuring it's the right shape and optionally normalizing.
+
+ Args:
+ audio: Audio data as numpy array
+ normalize: Whether to normalize audio
+
+ Returns:
+ np.ndarray: Processed audio ready for saving
+ """
+ # Ensure right dimensionality
+ if len(audio.shape) > 1 and audio.shape[0] == 1: # (1, T)
+ audio = audio.squeeze(0)
+
+ # Normalize if requested
+ if normalize:
+ max_val = np.abs(audio).max()
+ if max_val > 0:
+ audio = audio / max_val
+
+ return audio
+
+
+__all__ = ["VibeVoiceTokenizerProcessor", "AudioNormalizer"]
\ No newline at end of file
diff --git a/vibevoice/schedule/__init__.py b/vibevoice/schedule/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vibevoice/schedule/dpm_solver.py b/vibevoice/schedule/dpm_solver.py
new file mode 100644
index 0000000..806241f
--- /dev/null
+++ b/vibevoice/schedule/dpm_solver.py
@@ -0,0 +1,1065 @@
+# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.utils import deprecate
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
+
+def betas_for_alpha_bar(
+ num_diffusion_timesteps,
+ max_beta=0.999,
+ alpha_transform_type="cosine",
+):
+ """
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
+ (1-beta) over time from t = [0,1].
+
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
+ to that part of the diffusion process.
+
+
+ Args:
+ num_diffusion_timesteps (`int`): the number of betas to produce.
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
+ prevent singularities.
+ alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
+ Choose from `cosine` or `exp`
+
+ Returns:
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
+ """
+ if alpha_transform_type == "cosine":
+
+ def alpha_bar_fn(t):
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
+ # return math.cos(t * math.pi / 2 * 0.95) ** 2
+
+ elif alpha_transform_type == "exp":
+
+ def alpha_bar_fn(t):
+ return math.exp(t * -12.0)
+
+ elif alpha_transform_type == "cauchy":
+ # snr(t) = µ + γ * tan(π * (0.5 - t)), with γ = 1, µ = 3
+ # alpha^2 = 1 - 1/(exp(λ) + 1)
+ def alpha_bar_fn(t, gamma=1, mu=3):
+ snr = mu + gamma * math.tan(math.pi * (0.5 - t) * 0.9)
+ return 1 - 1 / (math.exp(snr) + 1.1)
+
+ elif alpha_transform_type == "laplace":
+ # snr(t) = µ - b * sgn(0.5 - t) * log(1 - 2|t - 0.5|), with µ = 0, b = 1
+ def alpha_bar_fn(t, mu=0, b=1):
+ snr = mu - b * math.copysign(1, 0.5 - t) * math.log(1 - 2 * abs(t - 0.5) * 0.98)
+ return 1 - 1 / (math.exp(snr) + 1.02)
+
+ else:
+ raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
+
+ betas = []
+ for i in range(num_diffusion_timesteps):
+ t1 = i / num_diffusion_timesteps
+ t2 = (i + 1) / num_diffusion_timesteps
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
+ return torch.tensor(betas, dtype=torch.float32)
+
+
+# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
+def rescale_zero_terminal_snr(betas):
+ """
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
+
+
+ Args:
+ betas (`torch.Tensor`):
+ the betas that the scheduler is being initialized with.
+
+ Returns:
+ `torch.Tensor`: rescaled betas with zero terminal SNR
+ """
+ # Convert betas to alphas_bar_sqrt
+ alphas = 1.0 - betas
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
+
+ # Store old values.
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
+
+ # Shift so the last timestep is zero.
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
+
+ # Scale so the first timestep is back to the old value.
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
+
+ # Convert alphas_bar_sqrt to betas
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
+ alphas = torch.cat([alphas_bar[0:1], alphas])
+ betas = 1 - alphas
+
+ return betas
+
+class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
+ """
+ `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs.
+
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
+ methods the library implements for all schedulers such as loading and saving.
+
+ Args:
+ num_train_timesteps (`int`, defaults to 1000):
+ The number of diffusion steps to train the model.
+ beta_start (`float`, defaults to 0.0001):
+ The starting `beta` value of inference.
+ beta_end (`float`, defaults to 0.02):
+ The final `beta` value.
+ beta_schedule (`str`, defaults to `"linear"`):
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
+ trained_betas (`np.ndarray`, *optional*):
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
+ solver_order (`int`, defaults to 2):
+ The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided
+ sampling, and `solver_order=3` for unconditional sampling.
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
+ `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
+ Video](https://imagen.research.google/video/paper.pdf) paper).
+ thresholding (`bool`, defaults to `False`):
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
+ as Stable Diffusion.
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
+ sample_max_value (`float`, defaults to 1.0):
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True` and
+ `algorithm_type="dpmsolver++"`.
+ algorithm_type (`str`, defaults to `dpmsolver++`):
+ Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The
+ `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927)
+ paper, and the `dpmsolver++` type implements the algorithms in the
+ [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or
+ `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion.
+ solver_type (`str`, defaults to `midpoint`):
+ Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the
+ sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers.
+ lower_order_final (`bool`, defaults to `True`):
+ Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
+ stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
+ euler_at_final (`bool`, defaults to `False`):
+ Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail
+ richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference
+ steps, but sometimes may result in blurring.
+ use_karras_sigmas (`bool`, *optional*, defaults to `False`):
+ Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
+ the sigmas are determined according to a sequence of noise levels {σi}.
+ use_lu_lambdas (`bool`, *optional*, defaults to `False`):
+ Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during
+ the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of
+ `lambda(t)`.
+ final_sigmas_type (`str`, defaults to `"zero"`):
+ The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
+ sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0.
+ lambda_min_clipped (`float`, defaults to `-inf`):
+ Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the
+ cosine (`squaredcos_cap_v2`) noise schedule.
+ variance_type (`str`, *optional*):
+ Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output
+ contains the predicted Gaussian variance.
+ timestep_spacing (`str`, defaults to `"linspace"`):
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
+ steps_offset (`int`, defaults to 0):
+ An offset added to the inference steps, as required by some model families.
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
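+
+ Example (illustrative sketch; `denoiser` and the latent shape are placeholders, not part of this module):
+
+ >>> scheduler = DPMSolverMultistepScheduler(num_train_timesteps=1000, algorithm_type="dpmsolver++", solver_order=2)
+ >>> scheduler.set_timesteps(num_inference_steps=5)
+ >>> latents = torch.randn(1, 64, 32) * scheduler.init_noise_sigma
+ >>> for t in scheduler.timesteps:
+ ... model_output = denoiser(latents, t)  # hypothetical epsilon-prediction call
+ ... latents = scheduler.step(model_output, t, latents).prev_sample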
+ """
+
+ _compatibles = [e.name for e in KarrasDiffusionSchedulers]
+ order = 1
+
+ @register_to_config
+ def __init__(
+ self,
+ num_train_timesteps: int = 1000,
+ beta_start: float = 0.0001,
+ beta_end: float = 0.02,
+ beta_schedule: str = "linear",
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
+ solver_order: int = 2,
+ prediction_type: str = "epsilon",
+ thresholding: bool = False,
+ dynamic_thresholding_ratio: float = 0.995,
+ sample_max_value: float = 1.0,
+ algorithm_type: str = "dpmsolver++",
+ solver_type: str = "midpoint",
+ lower_order_final: bool = True,
+ euler_at_final: bool = False,
+ use_karras_sigmas: Optional[bool] = False,
+ use_lu_lambdas: Optional[bool] = False,
+ final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"
+ lambda_min_clipped: float = -float("inf"),
+ variance_type: Optional[str] = None,
+ timestep_spacing: str = "linspace",
+ steps_offset: int = 0,
+ rescale_betas_zero_snr: bool = False,
+ ):
+ if algorithm_type in ["dpmsolver", "sde-dpmsolver"]:
+ deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead"
+ deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message)
+
+ if trained_betas is not None:
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
+ elif beta_schedule == "linear":
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
+ elif beta_schedule == "scaled_linear":
+ # this schedule is very specific to the latent diffusion model.
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
+ elif beta_schedule == "squaredcos_cap_v2" or beta_schedule == "cosine":
+ # Glide cosine schedule
+ self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
+ elif beta_schedule == "cauchy":
+ self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cauchy")
+ elif beta_schedule == "laplace":
+ self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="laplace")
+ else:
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
+
+ if rescale_betas_zero_snr:
+ self.betas = rescale_zero_terminal_snr(self.betas)
+
+ self.alphas = 1.0 - self.betas
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
+
+ if rescale_betas_zero_snr:
+ # Close to 0 without being 0 so first sigma is not inf
+ # FP16 smallest positive subnormal works well here
+ self.alphas_cumprod[-1] = 2**-24
+
+ # Currently we only support VP-type noise schedule
+ self.alpha_t = torch.sqrt(self.alphas_cumprod)
+ self.sigma_t = torch.sqrt(1 - self.alphas_cumprod)
+ self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t)
+ self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5
+
+ # standard deviation of the initial noise distribution
+ self.init_noise_sigma = 1.0
+
+ # settings for DPM-Solver
+ if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]:
+ if algorithm_type == "deis":
+ self.register_to_config(algorithm_type="dpmsolver++")
+ else:
+ raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}")
+
+ if solver_type not in ["midpoint", "heun"]:
+ if solver_type in ["logrho", "bh1", "bh2"]:
+ self.register_to_config(solver_type="midpoint")
+ else:
+ raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}")
+
+ if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero":
+ raise ValueError(
+ f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead."
+ )
+
+ # setable values
+ self.num_inference_steps = None
+ timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
+ self.timesteps = torch.from_numpy(timesteps)
+ self.model_outputs = [None] * solver_order
+ self.lower_order_nums = 0
+ self._step_index = None
+ self._begin_index = None
+ self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
+
+ @property
+ def step_index(self):
+ """
+ The index counter for current timestep. It will increase 1 after each scheduler step.
+ """
+ return self._step_index
+
+ @property
+ def begin_index(self):
+ """
+ The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
+ """
+ return self._begin_index
+
+ def set_begin_index(self, begin_index: int = 0):
+ """
+ Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
+
+ Args:
+ begin_index (`int`):
+ The begin index for the scheduler.
+ """
+ self._begin_index = begin_index
+
+ def set_timesteps(
+ self,
+ num_inference_steps: int = None,
+ device: Union[str, torch.device] = None,
+ timesteps: Optional[List[int]] = None,
+ ):
+ """
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
+
+ Args:
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model.
+ device (`str` or `torch.device`, *optional*):
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to support arbitrary timesteps schedule. If `None`, timesteps will be generated
+ based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` must
+ be `None`, and the `timestep_spacing` attribute will be ignored.
+ """
+ if num_inference_steps is None and timesteps is None:
+ raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.")
+ if num_inference_steps is not None and timesteps is not None:
+ raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.")
+ if timesteps is not None and self.config.use_karras_sigmas:
+ raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`")
+ if timesteps is not None and self.config.use_lu_lambdas:
+ raise ValueError("Cannot use `timesteps` with `config.use_lu_lambdas = True`")
+
+ if timesteps is not None:
+ timesteps = np.array(timesteps).astype(np.int64)
+ else:
+ # Clipping the minimum of all lambda(t) for numerical stability.
+ # This is critical for cosine (squaredcos_cap_v2) noise schedule.
+ clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped)
+ last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item()
+
+ # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
+ if self.config.timestep_spacing == "linspace":
+ timesteps = (
+ np.linspace(0, last_timestep - 1, num_inference_steps + 1)
+ .round()[::-1][:-1]
+ .copy()
+ .astype(np.int64)
+ )
+ elif self.config.timestep_spacing == "leading":
+ step_ratio = last_timestep // (num_inference_steps + 1)
+ # creates integer timesteps by multiplying by ratio
+ # casting to int to avoid issues when num_inference_step is power of 3
+ timesteps = (
+ (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64)
+ )
+ timesteps += self.config.steps_offset
+ elif self.config.timestep_spacing == "trailing":
+ step_ratio = self.config.num_train_timesteps / num_inference_steps
+ # creates integer timesteps by multiplying by ratio
+ # casting to int to avoid issues when num_inference_step is power of 3
+ timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64)
+ timesteps -= 1
+ else:
+ raise ValueError(
+ f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
+ )
+
+ sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
+ log_sigmas = np.log(sigmas)
+
+ if self.config.use_karras_sigmas:
+ sigmas = np.flip(sigmas).copy()
+ sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
+ timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
+ elif self.config.use_lu_lambdas:
+ lambdas = np.flip(log_sigmas.copy())
+ lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps)
+ sigmas = np.exp(lambdas)
+ timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
+ else:
+ sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
+
+ if self.config.final_sigmas_type == "sigma_min":
+ sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
+ elif self.config.final_sigmas_type == "zero":
+ sigma_last = 0
+ else:
+ raise ValueError(
+ f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}"
+ )
+
+ sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)
+
+ self.sigmas = torch.from_numpy(sigmas)
+ self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64)
+
+ self.num_inference_steps = len(timesteps)
+
+ self.model_outputs = [
+ None,
+ ] * self.config.solver_order
+ self.lower_order_nums = 0
+
+ # add an index counter for schedulers that allow duplicated timesteps
+ self._step_index = None
+ self._begin_index = None
+ self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
+
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
+ def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
+ """
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
+
+ https://arxiv.org/abs/2205.11487
+ """
+ dtype = sample.dtype
+ batch_size, channels, *remaining_dims = sample.shape
+
+ if dtype not in (torch.float32, torch.float64):
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
+
+ # Flatten sample for doing quantile calculation along each image
+ sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
+
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
+
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
+ s = torch.clamp(
+ s, min=1, max=self.config.sample_max_value
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
+
+ sample = sample.reshape(batch_size, channels, *remaining_dims)
+ sample = sample.to(dtype)
+
+ return sample
+
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
+ def _sigma_to_t(self, sigma, log_sigmas):
+ # get log sigma
+ log_sigma = np.log(np.maximum(sigma, 1e-10))
+
+ # get distribution
+ dists = log_sigma - log_sigmas[:, np.newaxis]
+
+ # get sigmas range
+ low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
+ high_idx = low_idx + 1
+
+ low = log_sigmas[low_idx]
+ high = log_sigmas[high_idx]
+
+ # interpolate sigmas
+ w = (low - log_sigma) / (low - high)
+ w = np.clip(w, 0, 1)
+
+ # transform interpolation to time range
+ t = (1 - w) * low_idx + w * high_idx
+ t = t.reshape(sigma.shape)
+ return t
+
+ def _sigma_to_alpha_sigma_t(self, sigma):
+ alpha_t = 1 / ((sigma**2 + 1) ** 0.5)
+ sigma_t = sigma * alpha_t
+
+ return alpha_t, sigma_t
+
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
+ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
+ """Constructs the noise schedule of Karras et al. (2022)."""
+
+ # Hack to make sure that other schedulers which copy this function don't break
+ # TODO: Add this logic to the other schedulers
+ if hasattr(self.config, "sigma_min"):
+ sigma_min = self.config.sigma_min
+ else:
+ sigma_min = None
+
+ if hasattr(self.config, "sigma_max"):
+ sigma_max = self.config.sigma_max
+ else:
+ sigma_max = None
+
+ sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
+ sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
+
+ rho = 7.0 # 7.0 is the value used in the paper
+ ramp = np.linspace(0, 1, num_inference_steps)
+ min_inv_rho = sigma_min ** (1 / rho)
+ max_inv_rho = sigma_max ** (1 / rho)
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return sigmas
+
+ def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor:
+ """Constructs the noise schedule of Lu et al. (2022)."""
+
+ lambda_min: float = in_lambdas[-1].item()
+ lambda_max: float = in_lambdas[0].item()
+
+ rho = 1.0 # 1.0 is the value used in the paper
+ ramp = np.linspace(0, 1, num_inference_steps)
+ min_inv_rho = lambda_min ** (1 / rho)
+ max_inv_rho = lambda_max ** (1 / rho)
+ lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return lambdas
+
+ def convert_model_output(
+ self,
+ model_output: torch.Tensor,
+ *args,
+ sample: torch.Tensor = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is
+ designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an
+ integral of the data prediction model.
+
+ The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise
+ prediction and data prediction models.
+
+ Args:
+ model_output (`torch.Tensor`):
+ The direct output from the learned diffusion model.
+ sample (`torch.Tensor`):
+ A current instance of a sample created by the diffusion process.
+
+ Returns:
+ `torch.Tensor`:
+ The converted model output.
+ """
+ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
+ if sample is None:
+ if len(args) > 1:
+ sample = args[1]
+ else:
+ raise ValueError("missing `sample` as a required keyward argument")
+ if timestep is not None:
+ deprecate(
+ "timesteps",
+ "1.0.0",
+ "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ # DPM-Solver++ needs to solve an integral of the data prediction model.
+ if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]:
+ if self.config.prediction_type == "epsilon":
+ # DPM-Solver and DPM-Solver++ only need the "mean" output.
+ if self.config.variance_type in ["learned", "learned_range"]:
+ model_output = model_output[:, :3]
+ sigma = self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ x0_pred = (sample - sigma_t * model_output) / alpha_t
+ elif self.config.prediction_type == "sample":
+ x0_pred = model_output
+ elif self.config.prediction_type == "v_prediction":
+ sigma = self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ x0_pred = alpha_t * sample - sigma_t * model_output
+ else:
+ raise ValueError(
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
+ " `v_prediction` for the DPMSolverMultistepScheduler."
+ )
+
+ if self.config.thresholding:
+ x0_pred = self._threshold_sample(x0_pred)
+
+ return x0_pred
+
+ # DPM-Solver needs to solve an integral of the noise prediction model.
+ elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]:
+ if self.config.prediction_type == "epsilon":
+ # DPM-Solver and DPM-Solver++ only need the "mean" output.
+ if self.config.variance_type in ["learned", "learned_range"]:
+ epsilon = model_output[:, :3]
+ else:
+ epsilon = model_output
+ elif self.config.prediction_type == "sample":
+ sigma = self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ epsilon = (sample - alpha_t * model_output) / sigma_t
+ elif self.config.prediction_type == "v_prediction":
+ sigma = self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ epsilon = alpha_t * model_output + sigma_t * sample
+ else:
+ raise ValueError(
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
+ " `v_prediction` for the DPMSolverMultistepScheduler."
+ )
+
+ if self.config.thresholding:
+ sigma = self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+ x0_pred = (sample - sigma_t * epsilon) / alpha_t
+ x0_pred = self._threshold_sample(x0_pred)
+ epsilon = (sample - alpha_t * x0_pred) / sigma_t
+
+ return epsilon
+
+ def dpm_solver_first_order_update(
+ self,
+ model_output: torch.Tensor,
+ *args,
+ sample: torch.Tensor = None,
+ noise: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ One step for the first-order DPMSolver (equivalent to DDIM).
+
+ Args:
+ model_output (`torch.Tensor`):
+ The direct output from the learned diffusion model.
+ sample (`torch.Tensor`):
+ A current instance of a sample created by the diffusion process.
+
+ Returns:
+ `torch.Tensor`:
+ The sample tensor at the previous timestep.
+ """
+ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
+ prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
+ if sample is None:
+ if len(args) > 2:
+ sample = args[2]
+ else:
+ raise ValueError(" missing `sample` as a required keyward argument")
+ if timestep is not None:
+ deprecate(
+ "timesteps",
+ "1.0.0",
+ "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ if prev_timestep is not None:
+ deprecate(
+ "prev_timestep",
+ "1.0.0",
+ "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+ alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s)
+ lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+ lambda_s = torch.log(alpha_s) - torch.log(sigma_s)
+
+ h = lambda_t - lambda_s
+ if self.config.algorithm_type == "dpmsolver++":
+ x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output
+ elif self.config.algorithm_type == "dpmsolver":
+ x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output
+ elif self.config.algorithm_type == "sde-dpmsolver++":
+ assert noise is not None
+ x_t = (
+ (sigma_t / sigma_s * torch.exp(-h)) * sample
+ + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output
+ + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+ )
+ elif self.config.algorithm_type == "sde-dpmsolver":
+ assert noise is not None
+ x_t = (
+ (alpha_t / alpha_s) * sample
+ - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output
+ + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
+ )
+ return x_t
+
+ def multistep_dpm_solver_second_order_update(
+ self,
+ model_output_list: List[torch.Tensor],
+ *args,
+ sample: torch.Tensor = None,
+ noise: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ One step for the second-order multistep DPMSolver.
+
+ Args:
+ model_output_list (`List[torch.Tensor]`):
+ The direct outputs from learned diffusion model at current and latter timesteps.
+ sample (`torch.Tensor`):
+ A current instance of a sample created by the diffusion process.
+
+ Returns:
+ `torch.Tensor`:
+ The sample tensor at the previous timestep.
+ """
+ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None)
+ prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
+ if sample is None:
+ if len(args) > 2:
+ sample = args[2]
+ else:
+ raise ValueError(" missing `sample` as a required keyward argument")
+ if timestep_list is not None:
+ deprecate(
+ "timestep_list",
+ "1.0.0",
+ "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ if prev_timestep is not None:
+ deprecate(
+ "prev_timestep",
+ "1.0.0",
+ "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ sigma_t, sigma_s0, sigma_s1 = (
+ self.sigmas[self.step_index + 1],
+ self.sigmas[self.step_index],
+ self.sigmas[self.step_index - 1],
+ )
+
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+ alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+ alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1)
+
+ lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+ lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+ lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)
+
+ m0, m1 = model_output_list[-1], model_output_list[-2]
+
+ h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
+ r0 = h_0 / h
+ D0, D1 = m0, (1.0 / r0) * (m0 - m1)
+ if self.config.algorithm_type == "dpmsolver++":
+ # See https://arxiv.org/abs/2211.01095 for detailed derivations
+ if self.config.solver_type == "midpoint":
+ x_t = (
+ (sigma_t / sigma_s0) * sample
+ - (alpha_t * (torch.exp(-h) - 1.0)) * D0
+ - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1
+ )
+ elif self.config.solver_type == "heun":
+ x_t = (
+ (sigma_t / sigma_s0) * sample
+ - (alpha_t * (torch.exp(-h) - 1.0)) * D0
+ + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1
+ )
+ elif self.config.algorithm_type == "dpmsolver":
+ # See https://arxiv.org/abs/2206.00927 for detailed derivations
+ if self.config.solver_type == "midpoint":
+ x_t = (
+ (alpha_t / alpha_s0) * sample
+ - (sigma_t * (torch.exp(h) - 1.0)) * D0
+ - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1
+ )
+ elif self.config.solver_type == "heun":
+ x_t = (
+ (alpha_t / alpha_s0) * sample
+ - (sigma_t * (torch.exp(h) - 1.0)) * D0
+ - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
+ )
+ elif self.config.algorithm_type == "sde-dpmsolver++":
+ assert noise is not None
+ if self.config.solver_type == "midpoint":
+ x_t = (
+ (sigma_t / sigma_s0 * torch.exp(-h)) * sample
+ + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
+ + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1
+ + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+ )
+ elif self.config.solver_type == "heun":
+ x_t = (
+ (sigma_t / sigma_s0 * torch.exp(-h)) * sample
+ + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
+ + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1
+ + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise
+ )
+ elif self.config.algorithm_type == "sde-dpmsolver":
+ assert noise is not None
+ if self.config.solver_type == "midpoint":
+ x_t = (
+ (alpha_t / alpha_s0) * sample
+ - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0
+ - (sigma_t * (torch.exp(h) - 1.0)) * D1
+ + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
+ )
+ elif self.config.solver_type == "heun":
+ x_t = (
+ (alpha_t / alpha_s0) * sample
+ - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0
+ - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
+ + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise
+ )
+ return x_t
+
+ def multistep_dpm_solver_third_order_update(
+ self,
+ model_output_list: List[torch.Tensor],
+ *args,
+ sample: torch.Tensor = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ One step for the third-order multistep DPMSolver.
+
+ Args:
+ model_output_list (`List[torch.Tensor]`):
+ The direct outputs from learned diffusion model at current and latter timesteps.
+ sample (`torch.Tensor`):
+ A current instance of a sample created by diffusion process.
+
+ Returns:
+ `torch.Tensor`:
+ The sample tensor at the previous timestep.
+ """
+
+ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None)
+ prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None)
+ if sample is None:
+ if len(args) > 2:
+ sample = args[2]
+ else:
+ raise ValueError(" missing`sample` as a required keyward argument")
+ if timestep_list is not None:
+ deprecate(
+ "timestep_list",
+ "1.0.0",
+ "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ if prev_timestep is not None:
+ deprecate(
+ "prev_timestep",
+ "1.0.0",
+ "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+ )
+
+ sigma_t, sigma_s0, sigma_s1, sigma_s2 = (
+ self.sigmas[self.step_index + 1],
+ self.sigmas[self.step_index],
+ self.sigmas[self.step_index - 1],
+ self.sigmas[self.step_index - 2],
+ )
+
+ alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+ alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+ alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1)
+ alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2)
+
+ lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+ lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+ lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)
+ lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2)
+
+ m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
+
+ h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
+ r0, r1 = h_0 / h, h_1 / h
+ D0 = m0
+ D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
+ D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
+ D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
+ if self.config.algorithm_type == "dpmsolver++":
+ # See https://arxiv.org/abs/2206.00927 for detailed derivations
+ x_t = (
+ (sigma_t / sigma_s0) * sample
+ - (alpha_t * (torch.exp(-h) - 1.0)) * D0
+ + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1
+ - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
+ )
+ elif self.config.algorithm_type == "dpmsolver":
+ # See https://arxiv.org/abs/2206.00927 for detailed derivations
+ x_t = (
+ (alpha_t / alpha_s0) * sample
+ - (sigma_t * (torch.exp(h) - 1.0)) * D0
+ - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1
+ - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
+ )
+ return x_t
+
+ def index_for_timestep(self, timestep, schedule_timesteps=None):
+ if schedule_timesteps is None:
+ schedule_timesteps = self.timesteps
+
+ index_candidates = (schedule_timesteps == timestep).nonzero()
+
+ if len(index_candidates) == 0:
+ step_index = len(self.timesteps) - 1
+ # The sigma index that is taken for the **very** first `step`
+ # is always the second index (or the last index if there is only 1)
+ # This way we can ensure we don't accidentally skip a sigma in
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
+ elif len(index_candidates) > 1:
+ step_index = index_candidates[1].item()
+ else:
+ step_index = index_candidates[0].item()
+
+ return step_index
+
+ def _init_step_index(self, timestep):
+ """
+ Initialize the step_index counter for the scheduler.
+ """
+
+ if self.begin_index is None:
+ if isinstance(timestep, torch.Tensor):
+ timestep = timestep.to(self.timesteps.device)
+ self._step_index = self.index_for_timestep(timestep)
+ else:
+ self._step_index = self._begin_index
+
+ def step(
+ self,
+ model_output: torch.Tensor,
+ timestep: int,
+ sample: torch.Tensor,
+ generator=None,
+ variance_noise: Optional[torch.Tensor] = None,
+ return_dict: bool = True,
+ ) -> Union[SchedulerOutput, Tuple]:
+ """
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
+ the multistep DPMSolver.
+
+ Args:
+ model_output (`torch.Tensor`):
+ The direct output from learned diffusion model.
+ timestep (`int`):
+ The current discrete timestep in the diffusion chain.
+ sample (`torch.Tensor`):
+ A current instance of a sample created by the diffusion process.
+ generator (`torch.Generator`, *optional*):
+ A random number generator.
+ variance_noise (`torch.Tensor`):
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
+ itself. Useful for methods such as [`LEdits++`].
+ return_dict (`bool`):
+ Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
+
+ Returns:
+ [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
+ If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
+ tuple is returned where the first element is the sample tensor.
+
+ """
+ if self.num_inference_steps is None:
+ raise ValueError(
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
+ )
+
+ if self.step_index is None:
+ self._init_step_index(timestep)
+
+ # Improve numerical stability for small number of steps
+ lower_order_final = (self.step_index == len(self.timesteps) - 1) and (
+ self.config.euler_at_final
+ or (self.config.lower_order_final and len(self.timesteps) < 15)
+ or self.config.final_sigmas_type == "zero"
+ )
+ lower_order_second = (
+ (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15
+ )
+
+ model_output = self.convert_model_output(model_output, sample=sample)
+ for i in range(self.config.solver_order - 1):
+ self.model_outputs[i] = self.model_outputs[i + 1]
+ self.model_outputs[-1] = model_output
+
+ # Upcast to avoid precision issues when computing prev_sample
+ sample = sample.to(torch.float32)
+ if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None:
+ noise = randn_tensor(
+ model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32
+ )
+ elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]:
+ noise = variance_noise.to(device=model_output.device, dtype=torch.float32)
+ else:
+ noise = None
+
+ if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final:
+ prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise)
+ elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second:
+ prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise)
+ else:
+ prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample)
+
+ if self.lower_order_nums < self.config.solver_order:
+ self.lower_order_nums += 1
+
+ # Cast sample back to expected dtype
+ prev_sample = prev_sample.to(model_output.dtype)
+
+ # upon completion increase step index by one
+ self._step_index += 1
+
+ if not return_dict:
+ return (prev_sample,)
+
+ return SchedulerOutput(prev_sample=prev_sample)
+
+ def add_noise(
+ self,
+ original_samples: torch.Tensor,
+ noise: torch.Tensor,
+ timesteps: torch.IntTensor,
+ ) -> torch.Tensor:
+ # Make sure sigmas and timesteps have the same device and dtype as original_samples
+ # alpha_t = self.alpha_t.to(device=original_samples.device, dtype=original_samples.dtype)
+ # sigma_t = self.sigma_t.to(device=original_samples.device, dtype=original_samples.dtype)
+ alpha_t = self.alpha_t.to(original_samples.device).to(original_samples.dtype)
+ sigma_t = self.sigma_t.to(original_samples.device).to(original_samples.dtype)
+ timesteps = timesteps.to(original_samples.device)
+ alpha_t = alpha_t[timesteps].flatten()
+ while len(alpha_t.shape) < len(original_samples.shape):
+ alpha_t = alpha_t.unsqueeze(-1)
+
+ sigma_t = sigma_t[timesteps].flatten()
+ while len(sigma_t.shape) < len(original_samples.shape):
+ sigma_t = sigma_t.unsqueeze(-1)
+ noisy_samples = alpha_t * original_samples + sigma_t * noise
+ return noisy_samples
+
+ def get_velocity(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
+ # alpha_t = self.alpha_t.to(device=original_samples.device, dtype=original_samples.dtype)
+ # sigma_t = self.sigma_t.to(device=original_samples.device, dtype=original_samples.dtype)
+ alpha_t = self.alpha_t.to(original_samples.device).to(original_samples.dtype)
+ sigma_t = self.sigma_t.to(original_samples.device).to(original_samples.dtype)
+
+ timesteps = timesteps.to(original_samples.device)
+ alpha_t = alpha_t[timesteps].flatten()
+ while len(alpha_t.shape) < len(original_samples.shape):
+ alpha_t = alpha_t.unsqueeze(-1)
+
+ sigma_t = sigma_t[timesteps].flatten()
+ while len(sigma_t.shape) < len(original_samples.shape):
+ sigma_t = sigma_t.unsqueeze(-1)
+
+ velocity = alpha_t * noise - sigma_t * original_samples
+ return velocity
+
+ def __len__(self):
+ return self.config.num_train_timesteps
\ No newline at end of file
diff --git a/vibevoice/schedule/timestep_sampler.py b/vibevoice/schedule/timestep_sampler.py
new file mode 100644
index 0000000..177b66f
--- /dev/null
+++ b/vibevoice/schedule/timestep_sampler.py
@@ -0,0 +1,19 @@
+import math
+import torch
+
+
+class UniformSampler:
+ """Draws training timesteps uniformly from [0, timesteps)."""
+ def __init__(self, timesteps=1000):
+ self.timesteps = timesteps
+
+ def sample(self, batch_size, device):
+ return torch.randint(0, self.timesteps, (batch_size,), device=device)
+
+class LogitNormalSampler:
+ """Draws timestep indices with probabilities proportional to a logit-normal
+ density (location m, scale s) evaluated on a uniform grid over [0, 1].
+ The two grid endpoints have infinite logits, so their probabilities are zero."""
+ def __init__(self, timesteps=1000, m=0, s=1):
+ self.timesteps = timesteps
+ grid = torch.linspace(0, 1, timesteps)
+ logit = torch.log(grid / (1 - grid))
+ self.prob = torch.exp(-0.5 * (logit - m) ** 2 / s ** 2) / (s * math.sqrt(2 * math.pi))
+
+ def sample(self, batch_size, device):
+ return torch.multinomial(self.prob, batch_size, replacement=True).to(device)
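+
+# Illustrative usage (sketch):
+#   sampler = LogitNormalSampler(timesteps=1000)
+#   t = sampler.sample(batch_size=8, device="cpu")  # LongTensor of shape (8,) with indices in [0, 1000)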
+
\ No newline at end of file
diff --git a/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py b/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py
new file mode 100644
index 0000000..bb814cf
--- /dev/null
+++ b/vibevoice/scripts/convert_nnscaler_checkpoint_to_transformers.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+import argparse
+import json
+import os
+from pathlib import Path
+import re
+import torch
+from typing import Dict, List, Tuple
+
+from vibevoice.modular.configuration_vibevoice import (
+ VibeVoiceConfig
+)
+from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
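+
+# Illustrative invocation (paths are placeholders):
+#   python convert_nnscaler_checkpoint_to_transformers.py \
+#       --nnscaler_checkpoint_path /path/to/checkpoint.pt \
+#       --pytorch_dump_folder_path /path/to/hf_output_dir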
+
+def convert_vibevoice_nnscaler_checkpoint_to_hf(
+ checkpoint_path: str,
+ pytorch_dump_folder_path: str,
+ config_path: str = None,
+):
+ """
+ Convert a nnscaler VibeVoice checkpoint to HuggingFace format.
+ Supports both regular checkpoints and tensor parallel checkpoints.
+ """
+
+ # Load regular checkpoint
+ logger.info(f"Loading regular checkpoint from {checkpoint_path}")
+ checkpoint = torch.load(checkpoint_path, map_location="cpu") # ['model', 'optimizer', 'lr_scheduler', 'train_status', 'train_args', 'rng_states', 'nnscaler', 'dataloader']
+
+ # config = checkpoint['train_args']
+ init_config_name = checkpoint['train_args']['vars']['model_args']['config_path']['relative_path']
+ pretrained_name = checkpoint['train_args']['vars']['data_args']['tokenizer_path']
+
+ init_config_path = Path(__file__).parent.parent / 'configs' / init_config_name.split('/')[-1]
+ if init_config_path.exists():
+ logger.info(f"Loading initial config from {init_config_path}")
+ with open(init_config_path, 'r') as f:
+ init_config = json.load(f)
+ else:
+ raise FileNotFoundError(f"Initial config file {init_config_path} not found. Please provide a valid path.")
+
+ tie_word_embeddings = init_config['decoder_config'].get('tie_word_embeddings', True)
+ logger.info(f"Tie word embeddings: {tie_word_embeddings}")
+
+ init_config['decoder_config']['use_cache'] = True
+ config = VibeVoiceConfig(**init_config, tie_word_embeddings=tie_word_embeddings)
+
+ # Extract the model state dict
+ model_state_dict = {k.replace('model.model.', 'model.'): v for k, v in checkpoint["model"].items() if k.startswith('model.model.')}
+ if not tie_word_embeddings and 'model.lm_head.weight' in checkpoint["model"].keys():
+ # If not tying weights, we need to add the lm_head weight separately
+ model_state_dict['lm_head.weight'] = checkpoint["model"]['model.lm_head.weight']
+
+ # Override with provided config if available
+ if config_path:
+ logger.info(f"Loading config from {config_path}")
+ with open(config_path, 'r') as f:
+ config_dict = json.load(f)
+ config = VibeVoiceConfig.from_dict(config_dict)
+
+ # Set the default dtype to bfloat16 before creating the model
+ original_dtype = torch.get_default_dtype()
+ torch.set_default_dtype(torch.bfloat16)
+
+ # Create the HuggingFace model
+ logger.info("Creating HuggingFace VibeVoiceForConditionalGeneration model")
+ model = VibeVoiceForConditionalGeneration(config)
+
+ # Restore original dtype
+ torch.set_default_dtype(original_dtype)
+
+ # Load the state dict
+ logger.info("Loading weights into model")
+ missing_keys, unexpected_keys = model.load_state_dict(model_state_dict, strict=False)
+
+ if missing_keys:
+ logger.warning(f"Missing keys: {missing_keys}")
+ if unexpected_keys:
+ logger.warning(f"Unexpected keys: {unexpected_keys}")
+
+ # Create output directory
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+
+ # Save the model and config
+ logger.info(f"Saving model to {pytorch_dump_folder_path}")
+
+ # Save config
+ config.save_pretrained(pytorch_dump_folder_path)
+
+ # Save VibeVoiceProcessor configuration
+ logger.info("Saving VibeVoiceProcessor configuration")
+ processor_config = {
+ "processor_class": "VibeVoiceProcessor",
+ "speech_tok_compress_ratio": 3200,
+ "db_normalize": True,
+ # Audio processor configuration
+ "audio_processor": {
+ "feature_extractor_type": "VibeVoiceTokenizerProcessor",
+ "sampling_rate": 24000,
+ "normalize_audio": True,
+ "target_dB_FS": -25,
+ "eps": 1e-6,
+ },
+ "language_model_pretrained_name": pretrained_name,
+ }
+
+ processor_config_path = os.path.join(pytorch_dump_folder_path, "preprocessor_config.json")
+ with open(processor_config_path, 'w') as f:
+ json.dump(processor_config, f, indent=2)
+ logger.info(f"Saved processor config to {processor_config_path}")
+
+ # Save model with sharding
+ # save_pretrained handles tied weights automatically
+ logger.info("Saving model weights with sharding...")
+ model.save_pretrained(
+ pytorch_dump_folder_path,
+ max_shard_size="2GB", # Set maximum size for each shard
+ safe_serialization=True # Ensure saving in .safetensors format
+ )
+ logger.info(f"Model weights saved to {pytorch_dump_folder_path}")
+
+ logger.info("Conversion complete!")
+
+ # Verify the saved model can be loaded
+ logger.info("Verifying saved model...")
+ loaded_model = VibeVoiceForConditionalGeneration.from_pretrained(pytorch_dump_folder_path)
+ logger.info("Model successfully loaded from saved checkpoint!")
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--nnscaler_checkpoint_path",
+ type=str,
+ required=True,
+ help="Path to the fairseq checkpoint (.pt file). For tensor parallel checkpoints, "
+ "provide any one of the part files (e.g., checkpoint_1_5000-model_part-0.pt), "
+ "and the script will automatically detect and merge all parts.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ type=str,
+ required=True,
+ help="Path to the output PyTorch model directory",
+ )
+ parser.add_argument(
+ "--config_path",
+ type=str,
+ default=None,
+ help="Optional path to a config JSON file to override extracted config",
+ )
+
+ args = parser.parse_args()
+
+ convert_vibevoice_nnscaler_checkpoint_to_hf(
+ args.nnscaler_checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.config_path,
+ )
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file