Simplification for dummies
Some checks failed
CI / Lint (ruff + mypy) (push) Failing after 35s
CI / Run tests (push) Has been skipped
CI / Docker build test (push) Successful in 18s

2026-03-30 18:26:49 +07:00
parent bdae79db58
commit ea4a6fbe38
6 changed files with 548 additions and 452 deletions


@@ -1,4 +1,5 @@
import logging
from asyncio import Lock, wait_for
from collections import defaultdict
from dataclasses import dataclass
from datetime import UTC, datetime
@@ -13,6 +14,7 @@ from glitchup_bot.models.issues import IssueCache
from glitchup_bot.models.sync import SyncState

logger = logging.getLogger(__name__)

_sync_lock = Lock()


@dataclass(slots=True)
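The new `_sync_lock` is created at module import, outside any running event loop. A minimal sketch of why that is safe (assuming Python 3.10+, where asyncio synchronization primitives attach to the running loop lazily, on first use; `main` here is illustrative only):

    import asyncio

    _lock = asyncio.Lock()  # created at import time; no loop bound yet

    async def main() -> None:
        async with _lock:  # the running loop is picked up here, at first acquire
            ...

    asyncio.run(main())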
@@ -106,86 +108,109 @@ async def mark_sync_success(source: str) -> None:

async def sync_issues(project_slugs: list[str] | None = None) -> SyncSummary:
    async with _sync_lock:
        slugs = project_slugs or _configured_project_slugs()
        client = get_glitchtip_client()
        snapshots: list[IssueSnapshot] = []
        for slug in slugs:
            issues = await client.list_issues(slug)
            snapshots.extend(
                _normalize_issue(slug, issue) for issue in issues if issue.get("id") is not None
            )
        issue_ids_by_slug: dict[str, set[int]] = defaultdict(set)
        for snapshot in snapshots:
            issue_ids_by_slug[snapshot.project_slug].add(snapshot.issue_id)
        now = datetime.now(UTC)
        resolved_count = 0
        async with get_session_factory()() as session:
            existing_rows = (
                await session.execute(select(IssueCache).where(IssueCache.project_slug.in_(slugs)))
            ).scalars()
            existing_by_id = {row.glitchtip_issue_id: row for row in existing_rows}
            for snapshot in snapshots:
                row = existing_by_id.get(snapshot.issue_id)
                if row is None:
                    row = IssueCache(
                        glitchtip_issue_id=snapshot.issue_id,
                        project_slug=snapshot.project_slug,
                        title=snapshot.title,
                        culprit=snapshot.culprit,
                        level=snapshot.level,
                        status=snapshot.status,
                        first_seen=snapshot.first_seen,
                        last_seen=snapshot.last_seen,
                        event_count=snapshot.event_count,
                        is_regression=snapshot.is_regression,
                        link=snapshot.link,
                        release=snapshot.release,
                    )
                    session.add(row)
                    continue
                row.project_slug = snapshot.project_slug
                row.title = snapshot.title
                row.culprit = snapshot.culprit
                row.level = snapshot.level
                row.status = snapshot.status
                row.first_seen = snapshot.first_seen
                row.last_seen = snapshot.last_seen
                row.event_count = snapshot.event_count
                row.is_regression = snapshot.is_regression
                row.link = snapshot.link
                row.release = snapshot.release
                row.updated_at = now
            for row in existing_by_id.values():
                if row.glitchtip_issue_id in issue_ids_by_slug[row.project_slug]:
                    continue
                if row.status != "resolved":
                    row.status = "resolved"
                    row.updated_at = now
                    resolved_count += 1
            result = await session.execute(select(SyncState).where(SyncState.source == "api_sync"))
            state = result.scalar_one_or_none()
            if state is None:
                state = SyncState(source="api_sync", last_successful_at=now)
                session.add(state)
            else:
                state.last_successful_at = now
            await session.commit()
        return SyncSummary(
            project_count=len(slugs),
            issue_count=len(snapshots),
            resolved_count=resolved_count,
            synced_at=now,
        )
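With the whole body of `sync_issues` now under `_sync_lock`, overlapping callers (for example a scheduled sync racing the startup warmup) run one at a time instead of writing to the cache concurrently. A minimal sketch of that serialization, with a hypothetical stub standing in for the real sync:

    import asyncio

    _sync_lock = asyncio.Lock()

    async def fake_sync(name: str) -> None:
        async with _sync_lock:
            print(name, "start")
            await asyncio.sleep(0.1)  # stand-in for the API calls and DB writes
            print(name, "done")

    async def main() -> None:
        # Both callers start concurrently, but the lock forces the second
        # sync to wait for the first to finish.
        await asyncio.gather(fake_sync("scheduler"), fake_sync("warmup"))

    asyncio.run(main())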

async def warm_issue_cache_on_startup(timeout_seconds: int = 180) -> bool:
    logger.info("Starting startup cache warmup")
    try:
        summary = await wait_for(sync_issues(), timeout=timeout_seconds)
        logger.info(
            "Startup cache warmup finished: %s projects, %s issues, %s resolved",
            summary.project_count,
            summary.issue_count,
            summary.resolved_count,
        )
        return True
    except TimeoutError:
        logger.warning(
            "Startup cache warmup timed out after %s seconds; continuing with cached data",
            timeout_seconds,
        )
        return False
    except Exception:
        logger.exception("Startup cache warmup failed; continuing with cached data")
        return False
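The warmup bounds the first sync with `wait_for`, so a slow GlitchTip API cannot delay startup beyond `timeout_seconds`. On timeout, `wait_for` cancels the awaited coroutine and raises `TimeoutError` (the builtin, assuming Python 3.11+, consistent with the `datetime.UTC` import above), so a timed-out warmup also aborts the in-flight sync rather than leaving it running. A minimal sketch, with `slow_sync` as a hypothetical stand-in:

    import asyncio

    async def slow_sync() -> None:
        try:
            await asyncio.sleep(10)  # stand-in for a slow sync_issues()
        except asyncio.CancelledError:
            print("in-flight sync cancelled by the timeout")
            raise

    async def main() -> None:
        try:
            await asyncio.wait_for(slow_sync(), timeout=0.1)
        except TimeoutError:
            print("warmup timed out; continuing with cached data")

    asyncio.run(main())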
async def load_issue_snapshots(