feat:优化架构

This commit is contained in:
Daniel
2026-03-25 13:43:00 +08:00
parent 8991f2a2d7
commit a2f224d01f
7 changed files with 509 additions and 5 deletions

27
engine/assembler.py Normal file
View File

@@ -0,0 +1,27 @@
from __future__ import annotations
from pathlib import Path
from moviepy import VideoFileClip, concatenate_videoclips
def assemble_clips(clips: list[str | Path], output_path: str | Path) -> Path:
    """Concatenate shot clips into a single video file.

    Args:
        clips: Ordered list of clip file paths; must be non-empty.
        output_path: Destination video path (parent dirs are created).

    Returns:
        The output path as a ``Path``.

    Raises:
        ValueError: If ``clips`` is empty.
    """
    out = Path(output_path)
    out.parent.mkdir(parents=True, exist_ok=True)
    if not clips:
        raise ValueError("clips must not be empty")
    vclips: list[VideoFileClip] = []
    final = None
    try:
        # Open inside the try so clips already opened are closed even if a
        # later VideoFileClip(...) raises (the original leaked them here).
        for c in clips:
            vclips.append(VideoFileClip(str(c)))
        final = concatenate_videoclips(vclips, method="compose")
        # Fall back to 24 fps when the first clip reports no fps.
        fps = vclips[0].fps if vclips[0].fps else 24
        final.write_videofile(
            str(out),
            codec="libx264",
            audio_codec="aac",
            fps=fps,
            preset="medium",
            threads=4,
        )
    finally:
        if final is not None:
            final.close()
        for c in vclips:
            c.close()
    return out

43
engine/director.py Normal file
View File

@@ -0,0 +1,43 @@
from __future__ import annotations
from typing import Any
def _read_scene(scene: Any) -> tuple[str, str, str]:
if hasattr(scene, "image_prompt") and hasattr(scene, "video_motion") and hasattr(scene, "narration"):
return (
str(getattr(scene, "image_prompt", "")).strip(),
str(getattr(scene, "video_motion", "")).strip(),
str(getattr(scene, "narration", "")).strip(),
)
if isinstance(scene, dict):
return (
str(scene.get("image_prompt", "")).strip(),
str(scene.get("video_motion", scene.get("motion", ""))).strip(),
str(scene.get("narration", scene.get("tts", ""))).strip(),
)
return ("", "", "")
def scenes_to_shots(scenes: list) -> list[dict[str, Any]]:
    """Expand parsed scenes into a flat list of shot dicts (one shot per scene)."""
    shots: list[dict[str, Any]] = []
    for idx, scene in enumerate(scenes, start=1):
        prompt, motion, narration = _read_scene(scene)
        sid = f"scene_{idx:02d}"
        shots.append(
            {
                "shot_id": f"{sid}_01",
                "scene_id": sid,
                # Fixed 3-second default keeps the MVP deterministic.
                "duration": 3,
                "image_prompt": prompt,
                "motion": motion,
                "camera": "",
                "tts": narration,
                "status": "pending",
            }
        )
    return shots

View File

@@ -13,9 +13,13 @@ from moviepy import ImageClip
from PIL import Image, ImageDraw, ImageFont
from engine.audio_gen import synthesize_scenes
from engine.assembler import assemble_clips
from engine.comfy_client import ComfyClient
from engine.config import AppConfig
from engine.director import scenes_to_shots
from engine.shot_executor import render_shot
from engine.script_gen import generate_scenes, refine_scene
from engine.task_store import create_task, update_shot_status, update_task_status
from engine.types import Scene
from engine.video_editor import Segment, render_final
@@ -65,6 +69,10 @@ def _prog(p: float, msg: str) -> None:
_emit("PROG " + json.dumps({"p": p2, "msg": msg}, ensure_ascii=False))
def _prog_shot(shot_id: str, status: str) -> None:
    # Emit a per-shot progress event ("PROG_SHOT <id> <status>") on the CLI protocol stream.
    _emit(f"PROG_SHOT {shot_id} {status}")
def _normalize_style(style: str | None) -> str:
s = (style or "").strip()
if not s:
@@ -308,13 +316,43 @@ def step_refine(
def step_render(prompt: str, cfg: AppConfig, mock: bool, *, style: str | None, character: str | None, out_dir: Path) -> int:
    """Render step: read scenes JSON from stdin, render shots, write final video.

    NOTE(review): this span is a rendered diff without +/- markers, so the body
    below contains BOTH the removed single-pass path and the added per-shot
    path; confirm against the repository before treating it as one function.
    """
    payload = _read_stdin_json()
    # NOTE(review): pre-change line, superseded by the scenes_raw pipeline below.
    scenes = _parse_scenes_from_obj(payload)
    scenes_raw = _parse_scenes_from_obj(payload)
    # Decorate each scene's image prompt with the requested style/character.
    scenes = [
        Scene(
            image_prompt=_decorate_image_prompt(s.image_prompt, style=style, character=character),
            video_motion=s.video_motion,
            narration=s.narration,
        )
        for s in scenes_raw
    ]
    shots = scenes_to_shots(scenes)
    out_dir.mkdir(parents=True, exist_ok=True)
    # The task id mirrors the output directory name.
    task_id = out_dir.name
    create_task(task_id, shots)
    update_task_status(task_id, "running")
    _prog(0.05, "Start render")
    # NOTE(review): the next four lines look like the removed pre-change path
    # (single asyncio render call); as rendered they make the code below unreachable.
    out = asyncio.run(_render_from_scenes(prompt, scenes, cfg, mock=mock, style=style, character=character, out_dir=out_dir))
    _prog(1.0, "Render finished")
    _emit("RENDER_DONE " + json.dumps({"output": str(out)}, ensure_ascii=False))
    return 0
    # NOTE(review): added per-shot rendering path begins here.
    clips: list[str] = []
    # Guard against division by zero in the progress computation.
    total = max(1, len(shots))
    try:
        for idx, shot in enumerate(shots, start=1):
            shot_id = str(shot.get("shot_id", f"shot_{idx:02d}"))
            update_shot_status(task_id, shot_id, "running")
            _prog_shot(shot_id, "running")
            clip_path = render_shot(shot, out_dir, cfg, mock=mock)
            clips.append(clip_path)
            update_shot_status(task_id, shot_id, "done")
            _prog_shot(shot_id, "done")
            # Progress spans 0.05 -> 0.85 across the shots.
            _prog(0.05 + 0.8 * idx / total, f"Rendered shot {idx}/{total}")
        final_out = out_dir / "final.mp4"
        out = assemble_clips(clips, final_out)
        update_task_status(task_id, "done")
        _prog(1.0, "Render finished")
        _emit("RENDER_DONE " + json.dumps({"output": str(out)}, ensure_ascii=False))
        return 0
    except Exception:
        # Mark the task failed in task.json before propagating the error.
        update_task_status(task_id, "failed")
        raise
def main() -> int:

113
engine/shot_executor.py Normal file
View File

@@ -0,0 +1,113 @@
from __future__ import annotations
import asyncio
import random
from pathlib import Path
from typing import Any
from moviepy import AudioFileClip, CompositeVideoClip, TextClip, VideoFileClip, vfx
from .audio_gen import synthesize_one
from .comfy_client import ComfyClient
from .config import AppConfig
def _fit_video_to_audio(video: VideoFileClip, audio: AudioFileClip) -> VideoFileClip:
    """Match the video's duration to the audio track, then attach the audio.

    If either duration is unknown the video is returned unchanged apart from
    the audio being attached.
    """
    video_len = video.duration
    audio_len = audio.duration
    if video_len is not None and audio_len is not None:
        if audio_len > video_len:
            # Narration outlasts the footage: loop the video to cover it.
            video = video.with_effects([vfx.Loop(duration=audio_len)])
        elif video_len > audio_len:
            # Footage outlasts the narration: trim to the audio length.
            video = video.subclipped(0, audio_len)
    return video.with_audio(audio)
def _subtitle_clip(text: str, size: tuple[int, int], duration: float) -> TextClip:
    """Build a bottom-centered caption clip sized to ~92% of the frame width."""
    caption_width = int(size[0] * 0.92)
    clip = TextClip(
        text=text,
        font_size=44,
        color="white",
        stroke_color="black",
        stroke_width=2,
        size=(caption_width, None),
        method="caption",
    )
    clip = clip.with_position(("center", "bottom"))
    clip = clip.with_duration(duration)
    return clip.with_opacity(0.95)
async def _render_shot_async(
    shot: dict[str, Any],
    output_dir: str | Path,
    cfg: AppConfig,
    *,
    mock: bool = False,
) -> str:
    """Render a single shot: synthesize narration, generate (or mock) the raw
    video, fit video to audio, burn in subtitles, and write the final clip.

    Returns the written clip path (as ``str``) under ``<output_dir>/clips/``.
    """
    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    clips_dir = out_dir / "clips"
    audio_dir = out_dir / "audio"
    clips_dir.mkdir(parents=True, exist_ok=True)
    audio_dir.mkdir(parents=True, exist_ok=True)
    # Pull shot fields defensively; missing keys fall back to benign defaults.
    shot_id = str(shot.get("shot_id", "unknown"))
    image_prompt = str(shot.get("image_prompt", "")).strip()
    motion = str(shot.get("motion", "")).strip()
    tts_text = str(shot.get("tts", "")).strip()
    # Clamp duration to at least one second.
    duration_s = max(1.0, float(shot.get("duration", 3)))
    voice = str(cfg.get("tts.voice", "zh-CN-XiaoxiaoNeural"))
    rate = str(cfg.get("tts.rate", "+0%"))
    volume = str(cfg.get("tts.volume", "+0%"))
    audio_path = audio_dir / f"shot_{shot_id}.mp3"
    # A single space avoids handing the TTS engine an empty string.
    audio_asset = await synthesize_one(tts_text or " ", audio_path, voice, rate, volume)
    if mock:
        from engine.main import _ensure_mock_image, _make_mock_video  # local import to avoid circular at module import
        mock_size = cfg.get("video.mock_size", [1024, 576])
        w, h = int(mock_size[0]), int(mock_size[1])
        mock_image = _ensure_mock_image(Path("./assets/mock.png"), (w, h))
        fps = int(cfg.get("video.mock_fps", 24))
        raw_video_path = out_dir / f"shot_raw_{shot_id}.mp4"
        # Mock video must cover whichever is longer: requested duration or narration.
        _make_mock_video(raw_video_path, mock_image, max(duration_s, audio_asset.duration_s), fps=fps)
    else:
        comfy = ComfyClient(cfg)
        wf = comfy.load_workflow()
        seed = random.randint(1, 2_147_483_647)
        wf_i = comfy.inject_params(wf, image_prompt=image_prompt, seed=seed, motion_prompt=motion or None)
        result = await comfy.run_workflow(wf_i)
        # Prefer a video container among the outputs; fall back to the first file.
        candidates = [p for p in result.output_files if p.suffix.lower() in {".mp4", ".mov", ".webm"}]
        raw_video_path = candidates[0] if candidates else result.output_files[0]
    clip_out = clips_dir / f"shot_{shot_id}.mp4"
    v = VideoFileClip(str(raw_video_path))
    a = AudioFileClip(str(audio_asset.path))
    try:
        v2 = _fit_video_to_audio(v, a)
        w2, h2 = v2.size
        # Subtitle duration falls back through video -> audio -> requested duration.
        subtitle = _subtitle_clip(tts_text, (w2, h2), v2.duration or a.duration or duration_s)
        comp = CompositeVideoClip([v2, subtitle])
        try:
            comp.write_videofile(str(clip_out), codec="libx264", audio_codec="aac", fps=v2.fps or 24, preset="veryfast")
        finally:
            comp.close()
    finally:
        # Always release file handles, even on write failure.
        v.close()
        a.close()
    return str(clip_out)
def render_shot(
    shot: dict[str, Any],
    output_dir: str | Path,
    cfg: AppConfig | None = None,
    *,
    mock: bool = False,
) -> str:
    """Synchronous entry point: render one shot and return the clip path.

    When *cfg* is falsy, the default configuration file at
    ./configs/config.yaml is loaded instead.
    """
    resolved_cfg = cfg or AppConfig.load("./configs/config.yaml")
    return asyncio.run(_render_shot_async(shot, output_dir, resolved_cfg, mock=mock))

68
engine/task_store.py Normal file
View File

@@ -0,0 +1,68 @@
from __future__ import annotations
import json
from pathlib import Path
from typing import Any
def _task_path(task_id: str, base_dir: str | Path = "./outputs") -> Path:
return Path(base_dir) / str(task_id) / "task.json"
def create_task(task_id: str, shots: list[dict[str, Any]], base_dir: str | Path = "./outputs") -> dict[str, Any]:
p = _task_path(task_id, base_dir=base_dir)
p.parent.mkdir(parents=True, exist_ok=True)
data = {
"task_id": str(task_id),
"status": "queued",
"shots": [
{
"shot_id": str(s.get("shot_id", "")),
"status": str(s.get("status", "pending") or "pending"),
}
for s in shots
],
}
p.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
return data
def load_task(task_id: str, base_dir: str | Path = "./outputs") -> dict[str, Any]:
p = _task_path(task_id, base_dir=base_dir)
if not p.exists():
raise FileNotFoundError(f"task file not found: {p}")
raw = json.loads(p.read_text(encoding="utf-8"))
if not isinstance(raw, dict):
raise ValueError("task.json must be an object")
return raw
def _save_task(task_id: str, data: dict[str, Any], base_dir: str | Path = "./outputs") -> None:
p = _task_path(task_id, base_dir=base_dir)
p.parent.mkdir(parents=True, exist_ok=True)
p.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
def update_shot_status(task_id: str, shot_id: str, status: str, base_dir: str | Path = "./outputs") -> dict[str, Any]:
data = load_task(task_id, base_dir=base_dir)
shots = data.get("shots")
if not isinstance(shots, list):
raise ValueError("task.json shots must be an array")
found = False
for s in shots:
if isinstance(s, dict) and str(s.get("shot_id", "")) == str(shot_id):
s["status"] = str(status)
found = True
break
if not found:
shots.append({"shot_id": str(shot_id), "status": str(status)})
_save_task(task_id, data, base_dir=base_dir)
return data
def update_task_status(task_id: str, status: str, base_dir: str | Path = "./outputs") -> dict[str, Any]:
data = load_task(task_id, base_dir=base_dir)
data["status"] = str(status)
_save_task(task_id, data, base_dir=base_dir)
return data