| import sys |
| from pathlib import Path |
| import logging |
| from contextlib import asynccontextmanager |
|
|
| from fastapi import FastAPI |
| from fastapi.middleware.cors import CORSMiddleware |
| import uvicorn |
|
|
| |
# Root logging configuration: INFO level, timestamped records tagged with
# the emitting logger's name.
_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger(__name__)

# Make sibling modules (config, services, routes) importable no matter which
# working directory the app is launched from.
app_dir = Path(__file__).parent
sys.path.insert(0, str(app_dir))
|
|
| |
| from config import ( |
| API_TITLE, API_DESCRIPTION, API_VERSION, |
| HUGGINGFACE_API_KEY, HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_LABEL_MODEL_ID, |
| HOST, PORT, RELOAD, |
| CORS_ORIGINS, CORS_METHODS, CORS_HEADERS, CORS_CREDENTIALS, |
| PRELOAD_MODELS_ON_STARTUP, LOAD_STANCE_MODEL, LOAD_KPA_MODEL |
| ) |
|
|
| |
| from services.stance_model_manager import stance_model_manager |
| from services.label_model_manager import kpa_model_manager |
|
|
| from services.stt_service import speech_to_text |
| from services.tts_service import text_to_speech |
|
|
| |
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: optionally preload HuggingFace models at startup.

    Each model load is wrapped in its own try/except so that one failing
    model does not prevent the API from serving the routes that still work.
    Control is yielded to the running application; shutdown is logged after.

    Args:
        app: The FastAPI application instance (unused here, required by the
            lifespan protocol).
    """
    logger.info("=" * 60)
    logger.info("π API STARTUP - Loading HuggingFace models...")
    logger.info("=" * 60)

    if PRELOAD_MODELS_ON_STARTUP:
        if LOAD_STANCE_MODEL:
            try:
                stance_model_manager.load_model(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
                logger.info("β Stance model loaded")
            except Exception as e:
                # logger.exception keeps the original message AND appends the
                # full traceback, which logger.error(f"... {e}") was discarding.
                logger.exception("β Failed loading stance model: %s", e)

        if LOAD_KPA_MODEL:
            try:
                kpa_model_manager.load_model(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
                logger.info("β KPA model loaded")
            except Exception as e:
                logger.exception("β Failed loading KPA model: %s", e)

    logger.info("β Startup complete. API ready.")
    yield
    logger.info("π Shutting down...")
|
|
| |
# Build the ASGI application, wiring in the startup/shutdown lifespan hook.
app = FastAPI(
    title=API_TITLE,
    version=API_VERSION,
    description=API_DESCRIPTION,
    lifespan=lifespan,
)

# The cross-origin policy is driven entirely by config values so deployments
# can tighten or relax it without touching code.
_cors_policy = {
    "allow_origins": CORS_ORIGINS,
    "allow_credentials": CORS_CREDENTIALS,
    "allow_methods": CORS_METHODS,
    "allow_headers": CORS_HEADERS,
}
app.add_middleware(CORSMiddleware, **_cors_policy)
|
|
| |
# Mount the optional route groups one at a time; a broken import disables
# just that feature set instead of taking down the whole API.
try:
    import routes.stt_routes as _stt
    app.include_router(_stt.router, prefix="/api/v1/stt", tags=["Speech To Text"])
    logger.info("β STT route loaded (Groq Whisper)")
except Exception as exc:
    logger.warning(f"β Failed loading STT route: {exc}")

try:
    import routes.tts_routes as _tts
    app.include_router(_tts.router, prefix="/api/v1/tts", tags=["Text To Speech"])
    logger.info("β TTS route loaded (Groq PlayAI TTS)")
except Exception as exc:
    logger.warning(f"β Failed loading TTS route: {exc}")

try:
    import routes as _routes
    app.include_router(_routes.api_router)
    logger.info("β Main API routes loaded")
except Exception as exc:
    logger.warning(f"β Failed loading main API routes: {exc}")
|
|
| |
@app.get("/health")
async def health():
    """Liveness endpoint for load balancers and uptime monitors."""
    payload = {"status": "healthy", "service": "NLP Debater + Groq Voice"}
    return payload
|
|
@app.get("/")
async def root():
    """Landing endpoint: points callers at the docs and the voice routes."""
    info = {
        "message": "NLP Debater API with Groq Voice Support",
        "docs": "/docs",
        "voice_stt": "/api/v1/stt",
        "voice_tts": "/api/v1/tts",
    }
    return info
|
|
| |
if __name__ == "__main__":
    # Dev entrypoint. Passing the "main:app" import string (instead of the
    # app object) is what lets uvicorn's reloader re-import on file changes.
    server_options = {"host": HOST, "port": PORT, "reload": RELOAD}
    uvicorn.run("main:app", **server_options)
|
|