fix: fix scraper issues
@@ -283,7 +283,7 @@ def _rss_to_gdelt_fallback() -> None:
 # ==========================
-# RSS news fetching (supplements situation_update + AI-extracted panel data)
+# RSS news fetching: store articles (dedup) → AI extraction → store panel data → notify frontend
 # ==========================
 LAST_FETCH = {"items": 0, "inserted": 0, "error": None}
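LAST_FETCH is an in-process snapshot of the most recent fetch, so scraper health can be checked without reading logs. As one illustration, a status route could simply return it; the Flask app and route path below are assumptions made for the sketch, not something this commit adds:

    # Hypothetical status endpoint; only the LAST_FETCH dict itself comes from the diff.
    from flask import Flask, jsonify

    app = Flask(__name__)
    LAST_FETCH = {"items": 0, "inserted": 0, "error": None}

    @app.route("/api/fetch-status")
    def fetch_status():
        # Expose the latest fetch counters (items seen, rows inserted, last error).
        return jsonify(LAST_FETCH)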
@@ -292,6 +292,7 @@ def fetch_news() -> None:
     try:
         from scrapers.rss_scraper import fetch_all
         from db_writer import write_updates
+        from news_storage import save_and_dedup
         from translate_utils import translate_to_chinese
         from cleaner_ai import clean_news_for_panel
         from cleaner_ai import ensure_category, ensure_severity
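The new import, news_storage.save_and_dedup, carries the main behavior change: articles are persisted to a dedicated news_content table, and only previously unseen ones are returned for further processing. The diff does not show its implementation; a minimal sketch of the contract, where the schema and the hash-based dedup key are assumptions:

    # Hypothetical sketch of news_storage.save_and_dedup. The diff implies only the
    # name, the news_content table, and the (new_items, inserted_count) return shape;
    # the schema and the dedup key are assumptions.
    import hashlib
    import sqlite3

    def save_and_dedup(items: list, db_path: str) -> tuple[list, int]:
        conn = sqlite3.connect(db_path)
        conn.execute(
            "CREATE TABLE IF NOT EXISTS news_content ("
            "hash TEXT PRIMARY KEY, title TEXT, summary TEXT, source TEXT)"
        )
        new_items = []
        for it in items:
            # Dedup on a content hash of title + summary (assumed key).
            key = hashlib.sha256(
                ((it.get("title") or "") + (it.get("summary") or "")).encode("utf-8")
            ).hexdigest()
            cur = conn.execute(
                "INSERT OR IGNORE INTO news_content (hash, title, summary, source) "
                "VALUES (?, ?, ?, ?)",
                (key, it.get("title"), it.get("summary"), it.get("source")),
            )
            if cur.rowcount == 1:  # 0 when the hash already existed
                new_items.append(it)
        conn.commit()
        conn.close()
        return new_items, len(new_items)

Returning the filtered list, rather than just a count, is what lets fetch_news feed only genuinely new articles into write_updates and the AI extractor below.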
@@ -304,36 +305,44 @@ def fetch_news() -> None:
             it["summary"] = clean_news_for_panel(raw_summary or raw_title, max_len=120)
             it["category"] = ensure_category(it.get("category", "other"))
             it["severity"] = ensure_severity(it.get("severity", "medium"))
-        n = write_updates(items) if items else 0
+            it["source"] = it.get("source") or "rss"
+        # 1. Historical dedup: store article content in news_content (separate table, convenient for downstream consumers)
+        new_items, n_news = save_and_dedup(items, db_path=DB_PATH)
+        # 2. Panel display: write new articles to situation_update (feeds the frontend's recentUpdates)
+        n_panel = write_updates(new_items) if new_items else 0
         LAST_FETCH["items"] = len(items)
-        LAST_FETCH["inserted"] = n
-        if items:
-            _extract_and_merge_panel_data(items)
+        LAST_FETCH["inserted"] = n_news
+        # 3. AI extraction + merge into combat_losses / key_location etc.
+        if new_items:
+            _extract_and_merge_panel_data(new_items)
         # When GDELT is disabled, fill gdelt_events from RSS so the map still has conflict points
         if GDELT_DISABLED:
             _rss_to_gdelt_fallback()
-        print(f"[{datetime.now().strftime('%H:%M:%S')}] RSS fetched {len(items)} items, {n} newly stored")
+        # Notify Node of the updated timestamp after every fetch, so the "live updates" display stays current
+        _notify_node()
+        print(f"[{datetime.now().strftime('%H:%M:%S')}] RSS fetched {len(items)} items, {n_news} new after dedup, {n_panel} to the panel")
     except Exception as e:
         LAST_FETCH["error"] = str(e)
         print(f"[{datetime.now().strftime('%H:%M:%S')}] News fetch failed: {e}")


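_notify_node() now runs after every fetch, successful insert or not, so the Node side can refresh its "last updated" timestamp even when nothing new arrived. Its body is outside this diff; a plausible best-effort version, with the URL and payload purely assumptions:

    # Hypothetical _notify_node; the diff only establishes that Python pings Node
    # after each fetch. Endpoint, port, and payload are assumptions.
    import json
    import urllib.request

    def _notify_node() -> None:
        try:
            req = urllib.request.Request(
                "http://127.0.0.1:3000/api/refresh",  # assumed Node endpoint
                data=json.dumps({"source": "rss"}).encode("utf-8"),
                headers={"Content-Type": "application/json"},
            )
            urllib.request.urlopen(req, timeout=3)
        except Exception:
            pass  # best-effort: never let a notification failure break the fetch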
 def _extract_and_merge_panel_data(items: list) -> None:
-    """Run AI/rule extraction over news and merge into combat_losses / retaliation / wall_street_trend etc."""
+    """AI analysis extracts panel-related data, cleans it, and stores it in the DB"""
     if not items or not os.path.exists(DB_PATH):
         return
     try:
         from db_merge import merge
-        if os.environ.get("CLEANER_AI_DISABLED", "0") == "1":
+        use_dashscope = bool(os.environ.get("DASHSCOPE_API_KEY", "").strip())
+        if use_dashscope:
+            from extractor_dashscope import extract_from_news
+            limit = 10
+        elif os.environ.get("CLEANER_AI_DISABLED", "0") == "1":
             from extractor_rules import extract_from_news
+            limit = 25
         else:
             from extractor_ai import extract_from_news
+            limit = 10
         from datetime import timezone
         merged_any = False
-        # Rules mode can process a few more items (no Ollama); AI mode limits to 5 to avoid excessive calls
-        limit = 25 if os.environ.get("CLEANER_AI_DISABLED", "0") == "1" else 10
         for it in items[:limit]:
             text = (it.get("title", "") or "") + " " + (it.get("summary", "") or "")
             if len(text.strip()) < 20:
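Read end to end, the extractor selection after this commit is a three-way fallback: DashScope when an API key is configured, the rule-based extractor when local AI is disabled, and the local AI extractor otherwise. Assembled here from the hunk above for readability (not new code, apart from the import and comments):

    import os

    # DashScope takes priority whenever a key is configured.
    use_dashscope = bool(os.environ.get("DASHSCOPE_API_KEY", "").strip())
    if use_dashscope:
        from extractor_dashscope import extract_from_news
        limit = 10   # remote API: keep the batch small
    elif os.environ.get("CLEANER_AI_DISABLED", "0") == "1":
        from extractor_rules import extract_from_news
        limit = 25   # pure rules, no Ollama: cheap, so process more items
    else:
        from extractor_ai import extract_from_news
        limit = 10   # local AI: cap calls per cycle

Setting limit inside each branch replaces the removed one-liner, which keyed only on CLEANER_AI_DISABLED and could not account for the new DashScope branch.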