# File: AiVideo/configs/config.yaml
# Snapshot: 2026-03-18 17:36:07 +08:00 (66 lines, 1.8 KiB, YAML)
---
app:
  # ComfyUI base url (local)
  comfy_base_url: "http://127.0.0.1:8188"
  # ComfyUI output directory on the same machine running this code
  comfy_output_dir: "./ComfyUI/output"
openai:
  # Prefer environment variables in real deployments.
  # OPENAI_API_KEY must be set; OPENAI_BASE_URL is optional (for DeepSeek / other gateways).
  # These keys name the env vars to read, not the secrets themselves.
  api_key_env: "OPENAI_API_KEY"
  base_url_env: "OPENAI_BASE_URL"
  # Example: "gpt-4o-mini" / "gpt-4o" / gateway specific names
  model: "gpt-4o-mini"
script_gen:
  # Narration length constraint per scene (Chinese chars approx)
  narration_min_chars: 15
  narration_max_chars: 20
  # Number of scenes to generate per script
  scene_count: 3
tts:
  # NOTE(review): voice name follows the Azure/edge-tts naming scheme — confirm
  # against the TTS backend actually used by this project.
  voice: "zh-CN-XiaoxiaoNeural"
  # Rate/volume offsets; kept quoted so "+0%" stays a literal string.
  rate: "+0%"
  volume: "+0%"
  # Where generated narration audio files are written
  output_dir: "./assets/audio"
video:
  # Final output path
  final_output: "./final_poc.mp4"
  # If ComfyUI is not ready, generate mock clips with this size & fps
  mock_size: [1024, 576]
  mock_fps: 24
  # Cross-fade duration between scenes, in seconds
  transition_seconds: 0.25
comfy_workflow:
  # Workflow JSON file (exported for API usage)
  workflow_path: "./workflow_api.json"

  # --- Dynamic node-id config (preferred) ---
  # If any of these are null/empty, we will fall back to class_type search.
  prompt_node_id: null
  prompt_input_key: "text"  # usually "text" in CLIPTextEncode
  seed_node_id: null
  seed_input_key: "seed"  # usually "seed" in KSampler
  # Optional: some workflows also take a motion prompt. If you wire it, configure here.
  motion_node_id: null
  motion_input_key: "text"
  # Save/Combine node (optional; output filename will be discovered from history anyway)
  save_node_id: null

  # --- Fallback rules (robust) ---
  # Candidate class_type values searched when the explicit node ids above are unset.
  prompt_node_class_types:
    - "CLIPTextEncode"
    - "CLIPTextEncodeSDXL"
    - "CLIPTextEncodeFlux"
  seed_node_class_types:
    - "KSampler"
    - "KSamplerAdvanced"
  save_node_class_types:
    - "SaveVideo"
    - "VideoCombine"
    - "VHS_VideoCombine"