Files
AI_A4000/video_worker/app/main.py
2026-04-07 00:37:39 +08:00

54 lines
1.6 KiB
Python

from contextlib import asynccontextmanager
import torch
from fastapi import FastAPI
from app.api import router
from app.backends.hunyuan_backend import HunyuanBackend
from app.backends.ltx_backend import LTXBackend
from app.gpu_worker import GPUWorker
from app.model_router import ModelRouter
from app.settings import settings
from app.task_manager import TaskManager
from app.task_store import TaskStore
from app.utils.files import ensure_dir
from app.utils.logger import build_logger
def build_app() -> FastAPI:
    """Assemble and return the fully wired FastAPI application.

    Prepares runtime directories, migrates the task store, constructs the
    two video backends plus the router that selects between them, wires a
    GPU worker into the app lifespan, and injects shared objects into the
    API router before mounting it.
    """
    log = build_logger("video_worker", settings.log_level)

    # Make sure every directory the worker writes into exists up front.
    for directory in (
        settings.output_dir,
        settings.runtime_dir,
        settings.runtime_dir / "logs",
    ):
        ensure_dir(directory)

    task_store = TaskStore(settings.sqlite_path)
    task_store.migrate()  # bring the SQLite schema up to date

    # Generation backends and the router that dispatches between them.
    ltx = LTXBackend(settings.ltx_model_dir)
    hunyuan = HunyuanBackend(settings.hunyuan_model_dir)
    routing = ModelRouter(ltx, hunyuan)

    manager = TaskManager(store=task_store, output_root=settings.output_dir)
    worker = GPUWorker(manager, routing, log_level=settings.log_level)

    @asynccontextmanager
    async def lifespan(_: FastAPI):
        # Run the GPU worker for the whole app lifetime; stop it on shutdown.
        log.info("Starting GPU worker")
        await worker.start()
        yield
        log.info("Stopping GPU worker")
        await worker.stop()

    application = FastAPI(
        title="Local Video Worker", version="0.1.0", lifespan=lifespan
    )

    # Module-attribute injection: the API router reads these shared objects
    # at request time rather than receiving them through FastAPI dependencies.
    router.task_manager = manager
    router.ltx_backend = ltx
    router.hunyuan_backend = hunyuan
    router.torch = torch

    application.include_router(router)
    return application
app = build_app()