feat(anidb_cache): add cache mechanism to fetch data efficiently

Added a caching mechanism that stores and retrieves AniDB API responses for efficient data fetching. This reduces redundant requests and improves performance by using cached data when it is available. Cache files are stored in a directory named 'anidb_cache', and each entry is identified by the anime ID (aid). The cache validity period is set to 24 hours, ensuring that outdated information is refreshed regularly.
This commit was made on:
2026-02-16 16:49:11 +07:00
parent 36100853df
commit 628875f676

View File

@@ -5,8 +5,13 @@ import sys
import requests
import xml.etree.ElementTree as ET
import argparse
import json
import time
from datetime import datetime, timedelta
HISTORY_FILE = "regex_history.txt"
CACHE_DIR = "anidb_cache"
CACHE_EXPIRY_SECONDS = 86400 # 24 hours in seconds
# ==============================================================================
# CORE LOGIC (GUI-agnostic)
@@ -27,6 +32,27 @@ def fetch_anidb_data_core(folder_path):
aid = match.group(1)
# Check if cache exists and is valid
cache_file = os.path.join(CACHE_DIR, f"{aid}.json")
cache_data = None
if os.path.exists(cache_file):
try:
with open(cache_file, 'r', encoding='utf-8') as f:
cache_data = json.load(f)
# Check if cache is still valid (not expired)
cache_timestamp = cache_data.get('timestamp', 0)
if cache_timestamp > time.time() - CACHE_EXPIRY_SECONDS:
episodes = cache_data.get('episodes', [])
if episodes:
# Sort episodes by number
episodes.sort(key=lambda x: x[0])
return episodes, None
except (IOError, json.JSONDecodeError, KeyError):
# If cache is corrupted, continue to fetch fresh data
pass
# Fetch XML from AniDB API
url = f"http://api.anidb.net:9001/httpapi?request=anime&client=testdesktop&clientver=1&protover=1&aid={aid}"
try:
@@ -85,6 +111,24 @@ def fetch_anidb_data_core(folder_path):
# Sort episodes by number
episodes.sort(key=lambda x: x[0])
# Save to cache
try:
# Create cache directory if it doesn't exist
os.makedirs(CACHE_DIR, exist_ok=True)
cache_info = {
'aid': aid,
'timestamp': time.time(),
'episodes': episodes
}
with open(cache_file, 'w', encoding='utf-8') as f:
json.dump(cache_info, f, indent=2)
except IOError:
# Cache save failure is not critical, continue with episodes
pass
return episodes, None
def format_titles_from_episodes(episodes):