Upload 303 files
hf_unified_server.py +193 -0
hf_unified_server.py
CHANGED
@@ -35,6 +35,7 @@ import json
 from pathlib import Path
 import httpx
 
+from database import CryptoDatabase
 from ai_models import (
     analyze_chart_points,
     analyze_crypto_sentiment,
@@ -85,6 +86,28 @@ WORKSPACE_ROOT = Path(__file__).parent
 PROVIDERS_CONFIG_PATH = settings.providers_config_path
 FALLBACK_RESOURCE_PATH = WORKSPACE_ROOT / "crypto_resources_unified_2025-11-11.json"
 
+LOG_DIR = WORKSPACE_ROOT / "logs"
+APL_REPORT_PATH = WORKSPACE_ROOT / "PROVIDER_AUTO_DISCOVERY_REPORT.json"
+
+# Ensure log directory exists
+LOG_DIR.mkdir(parents=True, exist_ok=True)
+
+# Initialize SQLite database for real market data persistence
+db = CryptoDatabase()
+
+def tail_log_file(path: Path, max_lines: int = 200) -> List[str]:
+    """Return the last max_lines from a log file, if it exists."""
+    if not path.exists():
+        return []
+    try:
+        with path.open("r", encoding="utf-8", errors="ignore") as f:
+            lines = f.readlines()
+        return lines[-max_lines:]
+    except Exception as e:
+        logger.error(f"Error reading log file {path}: {e}")
+        return []
+
+
 def load_providers_config():
     """Load providers from providers_config_extended.json"""
     try:
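The hunk above imports `CryptoDatabase` from a `database` module that is not part of this diff. Based only on how it is used in this file (a no-argument constructor, a `db_path` attribute read by /api/status, and calls to `save_price(payload)` and `get_price_history(symbol, limit=...)`), a minimal sketch of an interface that would satisfy those calls could look like the following; the actual implementation in database.py may differ.

# Hypothetical sketch only: database.py is not included in this commit, so table
# and column names here are assumptions chosen to match the payload built in /api/market.
import sqlite3
from datetime import datetime, timezone
from typing import Any, Dict, List


class CryptoDatabase:
    def __init__(self, db_path: str = "crypto_data.db"):
        self.db_path = db_path  # /api/status checks this path to report database_status
        with sqlite3.connect(self.db_path) as conn:
            conn.execute(
                """CREATE TABLE IF NOT EXISTS prices (
                       symbol TEXT, name TEXT, price_usd REAL, volume_24h REAL,
                       market_cap REAL, percent_change_1h REAL, percent_change_24h REAL,
                       percent_change_7d REAL, rank INTEGER, recorded_at TEXT)"""
            )

    def save_price(self, payload: Dict[str, Any]) -> None:
        # Called once per market item by /api/market.
        with sqlite3.connect(self.db_path) as conn:
            conn.execute(
                "INSERT INTO prices VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (
                    payload.get("symbol"),
                    payload.get("name"),
                    payload.get("price_usd"),
                    payload.get("volume_24h"),
                    payload.get("market_cap"),
                    payload.get("percent_change_1h"),
                    payload.get("percent_change_24h"),
                    payload.get("percent_change_7d"),
                    payload.get("rank"),
                    datetime.now(timezone.utc).isoformat(),
                ),
            )

    def get_price_history(self, symbol: str, limit: int = 10) -> List[Dict[str, Any]]:
        # Most recent snapshots first; consumed by /api/market/history.
        with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = sqlite3.Row
            rows = conn.execute(
                "SELECT * FROM prices WHERE symbol = ? ORDER BY recorded_at DESC LIMIT ?",
                (symbol, limit),
            ).fetchall()
        return [dict(row) for row in rows]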
@@ -636,6 +659,24 @@ async def get_market():
     if not prices:
         raise HTTPException(status_code=503, detail="Unable to fetch market data")
 
+    # Persist real market data into SQLite database (no mocks)
+    try:
+        for item in prices:
+            payload = {
+                "symbol": item.get("symbol", "").upper(),
+                "name": item.get("name"),
+                "price_usd": item.get("current_price") or item.get("price"),
+                "volume_24h": item.get("total_volume"),
+                "market_cap": item.get("market_cap"),
+                "percent_change_1h": item.get("price_change_1h") or item.get("price_change_percentage_1h_in_currency"),
+                "percent_change_24h": item.get("price_change_percentage_24h"),
+                "percent_change_7d": item.get("price_change_percentage_7d_in_currency"),
+                "rank": item.get("market_cap_rank"),
+            }
+            db.save_price(payload)
+    except Exception as db_err:
+        logger.warning(f"Failed to save market data to DB: {db_err}")
+
     return {
         "total_market_cap": overview.get("total_market_cap", 0),
         "btc_dominance": overview.get("btc_dominance", 0),
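The mapping in this hunk appears to assume CoinGecko-style keys on each `prices` item (`current_price`, `total_volume`, `market_cap_rank`, `price_change_percentage_*`); that assumption, and the sample values below, are illustrative only. For one such item, the loop would build and persist a payload like this:

# Illustrative input item (made-up values); field names follow the shape assumed above.
item = {
    "symbol": "btc",
    "name": "Bitcoin",
    "current_price": 65000.0,
    "total_volume": 35_000_000_000,
    "market_cap": 1_280_000_000_000,
    "price_change_percentage_1h_in_currency": 0.1,
    "price_change_percentage_24h": -1.2,
    "price_change_percentage_7d_in_currency": 3.4,
    "market_cap_rank": 1,
}

# Payload produced by the loop and handed to db.save_price(payload):
payload = {
    "symbol": "BTC",
    "name": "Bitcoin",
    "price_usd": 65000.0,
    "volume_24h": 35_000_000_000,
    "market_cap": 1_280_000_000_000,
    "percent_change_1h": 0.1,
    "percent_change_24h": -1.2,
    "percent_change_7d": 3.4,
    "rank": 1,
}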
@@ -2406,3 +2447,155 @@ async def websocket_endpoint(websocket: WebSocket):
             ws_manager.disconnect(websocket)
         except:
             pass
+
+
+@app.get("/api/market/history")
+async def get_market_history(symbol: str = "BTC", limit: int = 10):
+    """
+    Get historical prices from the real SQLite database.
+
+    This uses only stored real market data inserted by /api/market
+    and never generates synthetic or mock values.
+    """
+    symbol = symbol.upper()
+    try:
+        history = db.get_price_history(symbol, limit=limit)
+    except Exception as e:
+        logger.error(f"Error reading history for {symbol}: {e}")
+        raise HTTPException(status_code=500, detail="Error reading market history")
+
+    if not history:
+        return {
+            "symbol": symbol,
+            "history": [],
+            "count": 0,
+            "message": "No history available",
+        }
+
+    return {
+        "symbol": symbol,
+        "history": history,
+        "count": len(history),
+        "source": "SQLite database (real data)",
+    }
+
+
+@app.get("/api/status")
+async def get_status():
+    """
+    System status endpoint used by the admin UI.
+
+    This reports real-time information about providers and the database,
+    without fabricating any market data.
+    """
+    providers_cfg = load_providers_config()
+    providers = providers_cfg or {}
+    validated_count = sum(1 for p in providers.values() if p.get("validated"))
+
+    db_path = Path(db.db_path)
+    db_status = "connected" if db_path.exists() else "initializing"
+
+    return {
+        "system_health": "healthy",
+        "timestamp": datetime.now().isoformat(),
+        "total_providers": len(providers),
+        "validated_providers": validated_count,
+        "database_status": db_status,
+        "apl_available": APL_REPORT_PATH.exists(),
+        "use_mock_data": False,
+    }
+
+
+@app.get("/api/logs/recent")
+async def get_recent_logs():
+    """
+    Return recent log lines for the admin UI.
+
+    We read from the main server log file if available.
+    This does not fabricate content; if there are no logs,
+    an empty list is returned.
+    """
+    log_file = LOG_DIR / "server.log"
+    lines = tail_log_file(log_file, max_lines=200)
+    # Wrap plain text lines as structured entries
+    logs = [{"line": line.rstrip("\n")} for line in lines]
+    return {"logs": logs, "count": len(logs)}
+
+
+@app.get("/api/logs/errors")
+async def get_error_logs():
+    """
+    Return recent error log lines from the same log file.
+
+    This is a best-effort filter based on typical ERROR prefixes.
+    """
+    log_file = LOG_DIR / "server.log"
+    lines = tail_log_file(log_file, max_lines=400)
+    error_lines = [line for line in lines if "ERROR" in line or "WARNING" in line]
+    logs = [{"line": line.rstrip("\n")} for line in error_lines[-200:]]
+    return {"errors": logs, "count": len(logs)}
+
+
+def _load_apl_report() -> Optional[Dict[str, Any]]:
+    """Load the APL (Auto Provider Loader) validation report if available."""
+    if not APL_REPORT_PATH.exists():
+        return None
+    try:
+        with APL_REPORT_PATH.open("r", encoding="utf-8") as f:
+            return json.load(f)
+    except Exception as e:
+        logger.error(f"Error reading APL report: {e}")
+        return None
+
+
+@app.get("/api/apl/summary")
+async def get_apl_summary():
+    """
+    Summary of the Auto Provider Loader (APL) report.
+
+    If the report is missing, we return a clear not_available status
+    instead of fabricating metrics.
+    """
+    report = _load_apl_report()
+    if not report or "stats" not in report:
+        return {
+            "status": "not_available",
+            "message": "APL report not found",
+        }
+
+    stats = report.get("stats", {})
+    return {
+        "status": "ok",
+        "http_candidates": stats.get("total_http_candidates", 0),
+        "http_valid": stats.get("http_valid", 0),
+        "http_invalid": stats.get("http_invalid", 0),
+        "http_conditional": stats.get("http_conditional", 0),
+        "hf_candidates": stats.get("total_hf_candidates", 0),
+        "hf_valid": stats.get("hf_valid", 0),
+        "hf_invalid": stats.get("hf_invalid", 0),
+        "hf_conditional": stats.get("hf_conditional", 0),
+        "timestamp": datetime.now().isoformat(),
+    }
+
+
+@app.get("/api/hf/models")
+async def get_hf_models_from_apl():
+    """
+    Return the list of Hugging Face models recorded in the APL report.
+
+    This is used by the admin UI. The data comes from the real
+    PROVIDER_AUTO_DISCOVERY_REPORT.json file if present.
+    """
+    report = _load_apl_report()
+    if not report:
+        return {"models": [], "count": 0, "source": "none"}
+
+    hf_models = report.get("hf_models", {}).get("results", [])
+    return {
+        "models": hf_models,
+        "count": len(hf_models),
+        "source": "APL report",
+    }
+
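A quick way to exercise the endpoints added in this commit is a few GET requests with httpx, which this server already imports. The base URL below is an assumption; adjust the host and port to wherever the app is actually served.

# Smoke test for the new read-only endpoints; BASE_URL is an assumption.
import asyncio
import httpx

BASE_URL = "http://localhost:7860"

async def main() -> None:
    async with httpx.AsyncClient(base_url=BASE_URL, timeout=30.0) as client:
        print("status:", (await client.get("/api/status")).json())
        history = await client.get("/api/market/history", params={"symbol": "BTC", "limit": 5})
        print("history count:", history.json().get("count"))
        print("apl:", (await client.get("/api/apl/summary")).json().get("status"))
        print("recent logs:", (await client.get("/api/logs/recent")).json().get("count"))

asyncio.run(main())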