# Import required libraries and modules
import time
import os
from flask import Blueprint, request, jsonify, send_file, current_app
from services.llm import get_llm
from services.tts import get_tts
from services.stt import get_stt
from azure.storage.blob import BlobServiceClient
from services.proctor import Proctoring
from io import BytesIO
from deepface import DeepFace
import traceback, base64, numpy as np, cv2
from pydantic import BaseModel, ValidationError
from typing import Optional, List
from datetime import datetime
from pytz import timezone
from services.proctor.models.models import AnalysisRequest, AnalysisResponse, HeadPose
from services.proctor.fraud_check import fraudCheck

routes = Blueprint('routes', __name__)

@routes.route("/", methods=["GET", "POST"])
def index():
    """Health-check endpoint: confirms the engine is up and responding."""
    payload = {
        "message": "Engine is running",
        "status": "success"
    }
    return jsonify(payload)
    
    

@routes.route("/api/llm", methods=["POST"])
def llm_api():
    """
    LLM (Language Learning Model) API endpoint.

    Purpose: Generates text responses using the specified LLM engine.
    Input: JSON body with a required 'prompt' field and an optional
        'engine' field (falls back to 'openai' when absent or falsy).
    Returns: JSON {"response": <text>} on success, or an error payload
        with HTTP 400 (bad input) / 500 (engine failure).
    """
    start = time.time()
    try:
        data = request.get_json()
        if not data:
            return jsonify({
                "error": "No JSON data provided"
            }), 400

        if "prompt" not in data:
            return jsonify({
                "error": "Missing required field: 'prompt'"
            }), 400

        # `or` covers both a missing key and an explicitly falsy value.
        engine = data.get("engine") or "openai"
        prompt = data["prompt"]

        llm_engine = get_llm(engine)
        result = llm_engine.generate_text(prompt)

        get_duration(start, 'llm')
        return jsonify({"response": result})

    except Exception as e:
        get_duration(start, 'llm')
        return jsonify({
            "error": f"An error occurred: {str(e)}",
            "status": "failed"
        }), 500


@routes.route("/api/tts", methods=["POST"])
def tts_api():
    """
    Text-to-Speech (TTS) API endpoint.

    Purpose: Converts text to speech audio and handles storage.
    Input: JSON with required fields:
        - text: Text to convert to speech
        - name: Filename for the audio
        - engine: TTS engine to use (falsy value falls back to 'elevenlabs')
        - voice: Voice to synthesize with
    Optional:
        - storage: Storage type ('local' or 'azure'), defaults to 'local'
    Returns: Audio file location (local path, or Azure blob URL when
        storage is 'azure'), or an error payload with HTTP 400/500.
    """
    start = time.time()
    local_path = None  # Defined up front so the error handlers can safely check it
    try:
        data = request.get_json()
        if not data:
            return jsonify({"error": "No JSON data provided"}), 400

        # Report exactly which required fields are absent, not the whole list.
        required_fields = ["text", "name", "engine", "voice"]
        missing = [k for k in required_fields if k not in data]
        if missing:
            return jsonify({
                "error": f"Missing required fields: {', '.join(missing)}"
            }), 400

        engine = data.get("engine") or "elevenlabs"
        storage = data.get("storage", "local").lower()

        # Prefix the filename with the deployment environment to avoid collisions.
        deploy_env = current_app.config.get('DEPLOYMENT_ENV', 'unknown')
        file_name = f"{deploy_env}_{data['name']}"
        local_path = f"./temp/questions_audios/{file_name}.mp3"

        os.makedirs("./temp/questions_audios", exist_ok=True)

        try:
            # Generate audio
            tts_engine = get_tts(engine)
            audio_io = tts_engine.text_to_speech(data)

            response = {
                "status": "success",
                "filename": f"{file_name}.mp3",
                "environment": deploy_env
            }

            if storage == "azure":
                # Docstring contract: azure storage returns the blob URL.
                blob_url, blob_name = upload_to_azure(audio_io, file_name)
                response["blob_url"] = blob_url
                response["blob_name"] = blob_name
            else:
                with open(local_path, 'wb') as f:
                    audio_io.seek(0)  # Reset buffer position before reading
                    f.write(audio_io.read())
                response["local_path"] = local_path

            get_duration(start, 'tts')
            return jsonify(response)

        except Exception:
            if local_path and os.path.exists(local_path):
                os.remove(local_path)  # Cleanup partial output on error
            raise  # Re-raise without losing the original traceback

    except Exception as e:
        get_duration(start, 'tts')
        # Clean up file if it exists
        if local_path and os.path.exists(local_path):
            os.remove(local_path)
        return jsonify({
            "error": str(e),
            "status": "failed"
        }), 500

@routes.route("/api/stt", methods=["POST"])
def stt_api():
    """
    Speech-to-Text (STT) API endpoint.

    Purpose: Converts the audio file at a given server path to text
    using the specified STT engine.
    Input: JSON with fields:
        - file_path: Path to the audio file on the server
        - engine: STT engine to use (default: 'whisper')
    Returns: JSON with the transcribed text, or an error payload
        with HTTP 400/500.
    """
    started_at = time.time()

    try:
        payload = request.get_json()
        if not payload:
            return jsonify({"error": "No JSON body provided"}), 400

        audio_path = payload.get("file_path")
        engine_name = payload.get("engine", "whisper").lower()

        # Reject missing paths and paths that are not regular files.
        if not audio_path or not os.path.isfile(audio_path):
            return jsonify({"error": "Invalid or missing file_path"}), 400

        try:
            # Hand the file path to the chosen STT engine for transcription.
            transcriber = get_stt(engine_name)
            text = transcriber.transcribe_audio(audio_path)

            get_duration(started_at, 'stt')
            return jsonify({
                "status": "success",
                "transcript": text
            })

        except Exception as e:
            get_duration(started_at, 'stt')
            return jsonify({
                "error": f"Failed to transcribe audio: {str(e)}",
                "status": "failed"
            }), 500

    except Exception as e:
        get_duration(started_at, 'stt')
        return jsonify({
            "error": f"Unexpected error: {str(e)}",
            "status": "failed"
        }), 500

def get_duration(start, fnName):
    """
    Measure and log the elapsed time since *start*.

    Parameters:
        - start: Start time (epoch seconds) captured by the caller
        - fnName: Name of the function being timed (used in the log line)
    Returns: Elapsed duration in seconds (float)
    """
    elapsed = time.time() - start
    print(f"Function {fnName} took {elapsed:.2f} seconds")
    return elapsed

def upload_to_azure(audio_io, name):
    """
    Azure Blob Storage upload handler.

    Purpose: Uploads audio files to Azure Blob Storage.
    Parameters:
        - audio_io: BytesIO object containing audio data
        - name: Base name for the blob file
    Returns: Tuple of (blob_url, blob_name)
    Raises: Exception (with the original cause chained) on any failure,
        including missing configuration.
    Required Environment Variables:
        - AZURE_STORAGE_CONNECTION_STRING
        - AZURE_CONTAINER_NAME
        - AZURE_STORAGE_ACCOUNT
    """
    try:
        # Azure Blob Storage configuration — fail fast with a clear message
        # instead of an opaque SDK error when configuration is absent.
        connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
        container_name = os.getenv('AZURE_CONTAINER_NAME')
        if not connection_string or not container_name:
            raise ValueError(
                "AZURE_STORAGE_CONNECTION_STRING and AZURE_CONTAINER_NAME must be set"
            )

        # Generate unique blob name (timestamped to avoid overwriting prior runs)
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        blob_name = f"tts/{name}-{timestamp}.mp3"

        # Upload to Azure Blob
        blob_service_client = BlobServiceClient.from_connection_string(connection_string)
        container_client = blob_service_client.get_container_client(container_name)

        # Reset buffer pointer and upload
        audio_io.seek(0)
        container_client.upload_blob(
            name=blob_name,
            data=audio_io,
            overwrite=True
        )

        # Public URL of the uploaded blob
        blob_url = f"https://{os.getenv('AZURE_STORAGE_ACCOUNT')}.blob.core.windows.net/{container_name}/{blob_name}"

        return blob_url, blob_name
    except Exception as e:
        # Chain the original exception so the underlying traceback survives.
        raise Exception(f"Azure upload failed: {str(e)}") from e
    

# Single shared proctoring service instance, reused across requests.
proctoring = Proctoring()
@routes.route("/api/proctor/analyze", methods=["POST"])
def analyze():
    """
    Proctoring frame-analysis endpoint.

    Purpose: Analyzes a base64-encoded webcam frame for face detection,
    people count, head pose and dominant emotion.
    Input: JSON matching AnalysisRequest (includes a base64 'image').
    Returns: AnalysisResponse payload plus a human-readable head
        orientation, or an error payload with HTTP 400/500.
    """
    print('calling analyze endpoint')
    try:
        requestData = AnalysisRequest(**request.json)
    except ValidationError as e:
        return jsonify({"error": "Invalid request format", "details": e.errors()}), 400

    try:
        # Decode the base64 payload into an OpenCV BGR frame.
        imageData = base64.b64decode(requestData.image)
        frame = cv2.imdecode(np.frombuffer(imageData, np.uint8), cv2.IMREAD_COLOR)
        if frame is None:
            raise ValueError("Decoded image is None")
    except Exception:
        return jsonify({"error": "Invalid base64 image"}), 400

    try:
        visionResults = proctoring.analyzeFrame(frame)
        headPoseData = HeadPose(**visionResults["head_pose"]) if visionResults["head_pose"] else None

        dominantEmotion = None
        try:
            analysis = DeepFace.analyze(
                img_path=frame,
                actions=['emotion'],
                enforce_detection=False,
                detector_backend='opencv'
            )
            if isinstance(analysis, list) and len(analysis) > 0:
                emotions = analysis[0]['emotion']
                dominantEmotion = proctoring.fuseEmotions(emotions)
        except Exception:
            # Emotion detection is best-effort; narrow the catch so
            # SystemExit/KeyboardInterrupt are not swallowed, log and continue.
            traceback.print_exc()

        headOrientationReadable = "Not detected"
        if headPoseData:
            headOrientationReadable = proctoring.describeHeadPose(
                headPoseData.yaw,
                headPoseData.pitch,
                headPoseData.roll
            )

        # Timestamp the analysis in IST (Asia/Kolkata).
        istTime = datetime.now(timezone('Asia/Kolkata')).isoformat()

        response = AnalysisResponse(
            face_detected=visionResults["face_detected"],
            people_count=visionResults["people_count"],
            head_pose=headPoseData,
            dominant_emotion=dominantEmotion,
            flags=[],
            timestamp=istTime
        )

        # Add flags here if needed
        payload = response.dict()
        payload['head_orientation_readable'] = headOrientationReadable
        return jsonify(payload)

    except Exception as e:
        traceback.print_exc()
        return jsonify({"error": "Internal server error", "details": str(e)}), 500
    
@routes.route('/api/proctor/verify_identity', methods=['POST'])
def verify_identity():
    """
    Identity verification endpoint.

    Purpose: Compares a base64-encoded ID photo against a base64-encoded
    selfie using the fraudCheck face-matching service.
    Input: JSON with required fields 'id_image' and 'selfie'.
    Returns: fraudCheck.match_faces result as JSON, or an error payload
        with HTTP 400 (missing input) / 500 (processing failure).
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({'error': 'No JSON data provided'}), 400

        id_image_b64 = data.get('id_image')
        selfie_b64 = data.get('selfie')

        # Validate up front so missing input yields a clear 400 instead of
        # an opaque 500 from inside the face-matching service.
        if not id_image_b64 or not selfie_b64:
            return jsonify({'error': "Missing required fields: 'id_image' and 'selfie'"}), 400

        result = fraudCheck.match_faces(id_image_b64, selfie_b64)

        return jsonify(result)

    except Exception as e:
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
    
