"""Application settings for the Gig POC API, loaded via pydantic-settings."""
from functools import lru_cache
from pathlib import Path

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict

# Directory three levels above this file.
# NOTE(review): assumes this module sits at <BASE_DIR>/<pkg>/<subpkg>/<file>.py
# inside the service tree — confirm against the repository layout.
BASE_DIR = Path(__file__).resolve().parents[2]

# Monorepo root: two levels above BASE_DIR. The path defaults in Settings
# (prompt_dir, sample_data_dir, data_dir, ...) are all built from this, so it
# is expected to contain `packages/` and `data/` subdirectories.
ROOT_DIR = BASE_DIR.parents[1]
|
class Settings(BaseSettings):
    """Runtime configuration for the Gig POC API.

    Values are read from the process environment and from a local `.env` file
    (UTF-8); environment variables not matching a declared field are ignored
    (``extra="ignore"``). Field defaults below are the development fallbacks.
    """

    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")

    # --- Core application / HTTP server ---
    app_name: str = "Gig POC API"
    app_env: str = "development"
    # NOTE(review): binds on all interfaces by default — intended for
    # container use; confirm this is not exposed directly in production.
    app_host: str = "0.0.0.0"
    app_port: int = 8000
    log_level: str = "INFO"

    # --- App-level rate limiting and circuit breaker ---
    app_rate_limit_per_minute: int = 1200
    # Breaker trips when the observed error rate exceeds this fraction ...
    app_circuit_breaker_error_rate: float = 0.5
    # ... but only after at least this many requests in the window.
    app_circuit_breaker_min_requests: int = 50
    app_circuit_breaker_window_seconds: int = 60
    app_circuit_breaker_cooldown_seconds: int = 30
    # Optional webhook for alert notifications; disabled when None.
    alert_webhook_url: str | None = None

    # --- Relational database (SQLAlchemy URL + pool tuning) ---
    database_url: str = "postgresql+psycopg://gig:gig@postgres:5432/gig_poc"
    database_pool_size: int = 20
    database_max_overflow: int = 30
    database_pool_timeout: int = 30

    # --- Qdrant vector store ---
    qdrant_url: str = "http://qdrant:6333"
    qdrant_collection: str = "gig_poc_entities"
    # Dimensionality of vectors in the Qdrant collection (hash backend);
    # distinct from embedding_vector_size used by the embedding service below.
    vector_size: int = 64

    # --- LLM (extraction) service, OpenAI-compatible ---
    llm_enabled: bool = False
    llm_base_url: str | None = None
    # Tried in order when the primary base URL fails.
    llm_fallback_base_urls: list[str] = Field(default_factory=list)
    llm_api_key: str | None = None
    llm_model: str = "gpt-5.4"
    extraction_llm_max_retries: int = 2

    # --- Embedding service ---
    embedding_backend: str = "hash"  # hash | openai_compatible
    embedding_enabled: bool = False
    embedding_base_url: str | None = None
    embedding_fallback_base_urls: list[str] = Field(default_factory=list)
    embedding_api_key: str | None = None
    embedding_model: str = "text-embedding-3-small"
    # Dimensionality when using the openai_compatible backend.
    embedding_vector_size: int = 1536

    # --- Shared AI-client limits (apply to LLM and embedding calls) ---
    ai_request_timeout_seconds: float = 30.0
    ai_rate_limit_per_minute: int = 120
    ai_circuit_breaker_fail_threshold: int = 5
    ai_circuit_breaker_cooldown_seconds: int = 30

    # --- Bootstrap / matching defaults ---
    bootstrap_jobs: int = 100
    bootstrap_workers: int = 300
    # Candidates pulled from vector recall before scoring.
    default_recall_top_k: int = 30
    # Final number of matches returned after ranking.
    default_match_top_n: int = 10

    # --- Filesystem paths (derived from the monorepo ROOT_DIR) ---
    prompt_dir: Path = Field(default=ROOT_DIR / "packages" / "prompts")
    sample_data_dir: Path = Field(default=ROOT_DIR / "packages" / "sample-data")
    shared_types_dir: Path = Field(default=ROOT_DIR / "packages" / "shared-types")
    data_dir: Path = Field(default=ROOT_DIR / "data")
    match_weights_path: Path = Field(default=ROOT_DIR / "data" / "match_weights.json")

    # --- Match scoring weights (defaults sum to 1.00) ---
    score_skill_weight: float = 0.35
    score_region_weight: float = 0.20
    score_time_weight: float = 0.15
    score_experience_weight: float = 0.15
    score_reliability_weight: float = 0.15
    ranking_learning_enabled: bool = True
    ranking_learning_rate: float = 0.08

    # --- Caching ---
    cache_backend: str = "memory"  # memory | redis
    redis_url: str = "redis://redis:6379/0"
    # Key prefix to namespace entries when sharing a Redis instance.
    redis_prefix: str = "gig_poc"
    match_cache_enabled: bool = True
    match_cache_ttl_seconds: int = 30
    query_cache_enabled: bool = True
    query_cache_ttl_seconds: int = 20

    # --- Async ingest / match queues ---
    ingest_async_enabled: bool = True
    ingest_queue_max_size: int = 10000
    match_async_enabled: bool = True
    match_queue_max_size: int = 10000
|
|
|
|
|
@lru_cache
def get_settings() -> Settings:
    """Return the process-wide ``Settings`` instance.

    The first call constructs ``Settings`` (reading the environment and the
    `.env` file); ``lru_cache`` memoizes it, so every later call returns the
    same object for the lifetime of the process.
    """
    settings = Settings()
    return settings
|