---
# Application configuration for the script → TTS → image/video PoC pipeline.
# Reconstructed from a whitespace-mangled paste; indentation is 2 spaces,
# duplicate top-level sections (tts, video) have been merged.

app:
  # ComfyUI base url (docker internal service)
  comfy_base_url: "http://comfyui:8188"
  # ComfyUI output directory on the same machine running this code
  comfy_output_dir: "./ComfyUI/output"

global:
  # Used by prompt_injector + adapters.
  style: ""
  character: ""
  negative_prompt: ""

llm:
  # Controls /script + /refine generation.
  provider: "mock"  # "openai" to enable OpenAI/DashScope calls

image:
  provider: "mock"  # "mock" | "comfy" | "replicate" | "openai"
  # Generic model name (used by some providers as fallback).
  model: ""
  # NOTE(review): the replicate/openai model stanzas are nested under `image`
  # here — their comments describe image-provider models, and leaving them at
  # top level would make `openai` a duplicate key. Confirm against the loader.
  replicate:
    # Example: "stability-ai/sdxl"
    model: "stability-ai/sdxl"
  openai:
    # Example: "gpt-image-1"
    model: "gpt-image-1"

image_fallback:
  provider: "mock"

openai:
  # Prefer environment variables in real deployments.
  # OPENAI_API_KEY must be set; OPENAI_BASE_URL optional (for DeepSeek / other gateways).
  #
  # SECURITY(review): these *_env keys hold environment-variable NAMES, not
  # literal values. A raw DashScope API key was previously committed in
  # api_key_env — treat that key as compromised and revoke it. Set the real
  # key via `export OPENAI_API_KEY=...` and, if needed, the gateway endpoint
  # via `export OPENAI_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1`.
  api_key_env: "OPENAI_API_KEY"
  base_url_env: "OPENAI_BASE_URL"
  # Example: "gpt-4o-mini" / "gpt-4o" / gateway specific names
  model: "qwen3.5-plus"

script_gen:
  # Narration length constraint per scene (Chinese chars approx)
  narration_min_chars: 15
  narration_max_chars: 20
  scene_count: 3

# Merged: the original file declared `tts:` twice (provider in one section,
# voice settings in another); the parser would keep only the last one and
# silently drop `provider: "edge"`.
tts:
  provider: "edge"
  voice: "zh-CN-XiaoxiaoNeural"
  rate: "+0%"
  volume: "+0%"
  output_dir: "./assets/audio"

# Merged: `video:` was also declared twice, dropping `provider: "moviepy"`.
video:
  provider: "moviepy"
  # Final output path
  final_output: "./outputs/final_poc.mp4"
  # If ComfyUI is not ready, generate mock clips with this size & fps
  mock_size: [1024, 576]
  mock_fps: 24
  transition_seconds: 0.25

comfy_workflow:
  # Workflow JSON file (exported for API usage)
  workflow_path: "./workflow_api.json"

  # --- Dynamic node-id config (preferred) ---
  # If any of these are null/empty, we will fallback to class_type search.
  prompt_node_id: null
  prompt_input_key: "text"  # usually "text" in CLIPTextEncode

  seed_node_id: null
  seed_input_key: "seed"  # usually "seed" in KSampler

  # Optional: some workflows also take a motion prompt. If you wire it, configure here.
  motion_node_id: null
  motion_input_key: "text"

  # Save/Combine node (optional; output filename will be discovered from history anyway)
  save_node_id: null

  # --- Fallback rules (robust) ---
  prompt_node_class_types:
    - "CLIPTextEncode"
    - "CLIPTextEncodeSDXL"
    - "CLIPTextEncodeFlux"
  seed_node_class_types:
    - "KSampler"
    - "KSamplerAdvanced"
  save_node_class_types:
    - "SaveVideo"
    - "VideoCombine"
    - "VHS_VideoCombine"