fix: 优化架构
This commit is contained in:
1
engine/adapters/llm/__init__.py
Normal file
1
engine/adapters/llm/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
12
engine/adapters/llm/base.py
Normal file
12
engine/adapters/llm/base.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
class BaseLLM:
    """Abstract interface for LLM backends in the adapter layer.

    Concrete adapters (mock, OpenAI, ...) override both hooks; invoking
    either one on this base class raises ``NotImplementedError``.
    """

    def generate_script(self, prompt: str, context: dict[str, Any] | None = None) -> Any:
        """Produce a script (a sequence of scenes) from *prompt*.

        ``context`` carries optional adapter-specific extras; subclasses
        decide its schema.
        """
        raise NotImplementedError

    def refine_scene(self, scene: Any, context: dict[str, Any] | None = None) -> Any:
        """Return a polished version of *scene*; ``context`` is adapter-defined."""
        raise NotImplementedError
|
||||
|
||||
25
engine/adapters/llm/mock_adapter.py
Normal file
25
engine/adapters/llm/mock_adapter.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from engine.types import Scene
|
||||
|
||||
from .base import BaseLLM
|
||||
|
||||
|
||||
class MockLLM(BaseLLM):
    """Deterministic, offline stand-in for a real LLM backend.

    Always yields the same three scenes for a given prompt, so the rest of
    the pipeline can be developed and tested without network access.
    """

    def generate_script(self, prompt: str, context: dict[str, Any] | None = None) -> list[Scene]:
        """Return three fixed scenes themed on *prompt*.

        A blank or ``None`` prompt falls back to ``"a warm city night"``.
        ``context`` is accepted for interface parity and ignored.
        """
        topic = (prompt or "").strip() or "a warm city night"
        prompt = topic
        scenes = [
            Scene(
                image_prompt=f"{prompt},城市夜景,霓虹灯,电影感",
                video_motion="缓慢推进镜头,轻微摇镜",
                narration="夜色温柔落在街灯上",
            ),
            Scene(
                image_prompt=f"{prompt},咖啡店窗边,暖光,细雨",
                video_motion="侧向平移,人物轻轻抬头",
                narration="雨声里藏着一段回忆",
            ),
            Scene(
                image_prompt=f"{prompt},桥上远景,车流光轨,温暖",
                video_motion="拉远全景,光轨流动",
                narration="我们在光里学会告别",
            ),
        ]
        return scenes

    def refine_scene(self, scene: Scene, context: dict[str, Any] | None = None) -> Scene:
        """Lightly polish *scene*: tag the narration and cap it at 30 chars."""
        polished = (scene.narration + "(更凝练)")[:30]
        return Scene(
            image_prompt=scene.image_prompt,
            video_motion=scene.video_motion,
            narration=polished,
        )
|
||||
|
||||
29
engine/adapters/llm/openai_adapter.py
Normal file
29
engine/adapters/llm/openai_adapter.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from engine.config import AppConfig
|
||||
from engine.script_gen import generate_scenes, refine_scene
|
||||
|
||||
from .base import BaseLLM
|
||||
|
||||
|
||||
class OpenAIAdapter(BaseLLM):
    """Bridge from the ``BaseLLM`` interface to ``engine.script_gen``.

    All heavy lifting (JSON-schema enforcement, length constraints) lives in
    ``script_gen``; this adapter only forwards calls and its ``AppConfig``.
    """

    def __init__(self, cfg: AppConfig):
        # Stored as-is; passed through to the script_gen helpers on each call.
        self.cfg = cfg

    def generate_script(self, prompt: str, context: dict[str, Any] | None = None):
        """Delegate to ``generate_scenes``; ``context`` is unused here."""
        return generate_scenes(prompt, self.cfg)

    def refine_scene(self, scene: Any, context: dict[str, Any] | None = None):
        """Refine one scene via ``script_gen.refine_scene``.

        ``context`` must supply ``scenes`` (full scene list), ``prompt`` (the
        original prompt) and ``target_index`` (which scene to refine).

        Raises:
            ValueError: if any of the three required context keys is missing.
        """
        ctx = {} if context is None else context
        scenes = ctx.get("scenes")
        original_prompt = ctx.get("prompt")
        index = ctx.get("target_index")
        if any(value is None for value in (scenes, original_prompt, index)):
            raise ValueError("OpenAIAdapter.refine_scene missing context: scenes/prompt/target_index")
        return refine_scene(
            prompt=original_prompt,
            scenes=scenes,
            target_index=int(index),
            cfg=self.cfg,
        )
|
||||
|
||||
Reference in New Issue
Block a user