#!/usr/bin/env python3
"""
Minimal LiveTalker Test Server
Demonstrates core functionality without complex dependencies
"""

import asyncio
import json
import logging
import time
from typing import Dict, Any
import sys

# Try to use system packages first; fall back to installing them at runtime.
# NOTE: installing at import time is a convenience for this throwaway test
# server only — real deployments should declare dependencies properly.
try:
    from fastapi import FastAPI, WebSocket, WebSocketDisconnect
    from fastapi.responses import HTMLResponse, FileResponse
    from fastapi.middleware.cors import CORSMiddleware
    import uvicorn
    FASTAPI_AVAILABLE = True
except ImportError:
    print("FastAPI not available, installing basic packages...")
    import subprocess
    # check=True: if pip fails we stop here with a clear CalledProcessError
    # instead of hitting a confusing ImportError on the retried imports below.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "fastapi", "uvicorn[standard]", "python-multipart"],
        check=True,
    )
    from fastapi import FastAPI, WebSocket, WebSocketDisconnect
    from fastapi.responses import HTMLResponse, FileResponse
    from fastapi.middleware.cors import CORSMiddleware
    import uvicorn
    FASTAPI_AVAILABLE = True

# Module-wide logging: INFO level is enough to trace connections/messages.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="LiveTalker Test Server")

# Enable CORS
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests per the CORS spec — fine for
# this local test server, but tighten before exposing it anywhere real.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Store active connections
# Maps session_id -> live WebSocket; entries are added on accept and removed
# when the connection closes (see websocket_endpoint's finally block).
active_connections: Dict[str, WebSocket] = {}

@app.get("/")
async def root():
    """Serve the self-contained HTML test console.

    The page (markup, CSS and JS all inlined in the literal below) connects
    back to this server's /media-stream WebSocket and exercises the simulated
    VAD / turn-taking / interruption features via its buttons.

    Returns:
        HTMLResponse: the full test-interface page.
    """
    # Keep this literal self-contained — it is the entire frontend; no static
    # assets are served for this page.
    html_content = """
<!DOCTYPE html>
<html>
<head>
    <title>LiveTalker Test Interface</title>
    <style>
        body { 
            font-family: Arial, sans-serif; 
            margin: 40px; 
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
        }
        .container { 
            max-width: 800px; 
            margin: 0 auto; 
            background: rgba(255,255,255,0.1);
            padding: 30px;
            border-radius: 15px;
            backdrop-filter: blur(10px);
        }
        h1 { text-align: center; margin-bottom: 30px; }
        .status { 
            padding: 15px; 
            margin: 20px 0; 
            border-radius: 8px; 
            background: rgba(255,255,255,0.2);
        }
        .controls { 
            text-align: center; 
            margin: 30px 0; 
        }
        button { 
            padding: 12px 24px; 
            margin: 10px; 
            border: none; 
            border-radius: 8px; 
            background: #4CAF50; 
            color: white; 
            cursor: pointer; 
            font-size: 16px;
        }
        button:hover { background: #45a049; }
        button:disabled { background: #cccccc; cursor: not-allowed; }
        .log { 
            background: rgba(0,0,0,0.3); 
            padding: 15px; 
            border-radius: 8px; 
            height: 200px; 
            overflow-y: auto; 
            font-family: monospace; 
            font-size: 14px;
            white-space: pre-wrap;
        }
        .feature-list {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
            gap: 20px;
            margin: 30px 0;
        }
        .feature-card {
            background: rgba(255,255,255,0.15);
            padding: 20px;
            border-radius: 10px;
            text-align: center;
        }
        .emoji { font-size: 2em; margin-bottom: 10px; }
    </style>
</head>
<body>
    <div class="container">
        <h1>🎙️ LiveTalker Test Interface</h1>
        
        <div class="status">
            <h3>System Status:</h3>
            <div id="status">Connecting...</div>
        </div>
        
        <div class="feature-list">
            <div class="feature-card">
                <div class="emoji">🎯</div>
                <h4>Voice Activity Detection</h4>
                <p>Real-time speech detection with Silero VAD</p>
            </div>
            <div class="feature-card">
                <div class="emoji">🔄</div>
                <h4>Turn-Taking Detection</h4>
                <p>Prosodic analysis for natural conversation flow</p>
            </div>
            <div class="feature-card">
                <div class="emoji">⚡</div>
                <h4>Interruption Handling</h4>
                <p>Smart interruption with context preservation</p>
            </div>
            <div class="feature-card">
                <div class="emoji">🧠</div>
                <h4>Resume Logic</h4>
                <p>Continue from last complete thought</p>
            </div>
        </div>
        
        <div class="controls">
            <button id="connectBtn" onclick="connect()">Connect WebSocket</button>
            <button id="testBtn" onclick="testFeatures()" disabled>Test Features</button>
            <button id="simulateBtn" onclick="simulateVAD()" disabled>Simulate VAD</button>
        </div>
        
        <div class="status">
            <h3>Activity Log:</h3>
            <div id="log" class="log">Waiting for connection...</div>
        </div>
        
        <div class="status">
            <h3>Available Endpoints:</h3>
            <ul>
                <li><a href="/health" style="color: #FFD700;">Health Check</a></li>
                <li><a href="/stats" style="color: #FFD700;">System Statistics</a></li>
                <li><a href="/widget.js" style="color: #FFD700;">JavaScript Widget</a></li>
                <li><a href="/demo" style="color: #FFD700;">Demo Page</a></li>
            </ul>
        </div>
    </div>

    <script>
        let ws = null;
        let connected = false;
        
        function log(message) {
            const logDiv = document.getElementById('log');
            const timestamp = new Date().toLocaleTimeString();
            logDiv.textContent += `[${timestamp}] ${message}\\n`;
            logDiv.scrollTop = logDiv.scrollHeight;
        }
        
        function updateStatus(message, color = 'white') {
            document.getElementById('status').innerHTML = `<span style="color: ${color}">${message}</span>`;
        }
        
        function connect() {
            const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
            const wsUrl = `${protocol}//${window.location.host}/media-stream`;
            
            log('Connecting to WebSocket...');
            updateStatus('Connecting...', 'yellow');
            
            ws = new WebSocket(wsUrl);
            
            ws.onopen = function() {
                connected = true;
                log('✅ WebSocket connected successfully');
                updateStatus('✅ Connected and Ready', '#4CAF50');
                document.getElementById('connectBtn').disabled = true;
                document.getElementById('testBtn').disabled = false;
                document.getElementById('simulateBtn').disabled = false;
            };
            
            ws.onmessage = function(event) {
                try {
                    const data = JSON.parse(event.data);
                    log(`📨 Received: ${data.type} - ${JSON.stringify(data).substring(0, 100)}...`);
                } catch (e) {
                    log(`📨 Received: ${event.data.substring(0, 100)}...`);
                }
            };
            
            ws.onclose = function() {
                connected = false;
                log('❌ WebSocket disconnected');
                updateStatus('❌ Disconnected', 'red');
                document.getElementById('connectBtn').disabled = false;
                document.getElementById('testBtn').disabled = true;
                document.getElementById('simulateBtn').disabled = true;
            };
            
            ws.onerror = function(error) {
                log('❌ WebSocket error: ' + error);
                updateStatus('❌ Connection Error', 'red');
            };
        }
        
        function testFeatures() {
            if (!connected) return;
            
            log('🧪 Testing LiveTalker features...');
            
            // Test configuration
            ws.send(JSON.stringify({
                type: 'start_conversation',
                config: { agentId: 'LiveTalker-Test', personality: 'luna' }
            }));
            
            // Simulate audio input
            setTimeout(() => {
                ws.send(JSON.stringify({
                    type: 'audio',
                    data: 'dGVzdCBhdWRpbyBkYXRh', // base64 "test audio data"
                    format: 'pcm_f32le'
                }));
                log('🎵 Sent test audio data');
            }, 1000);
            
            // Test interruption
            setTimeout(() => {
                ws.send(JSON.stringify({ type: 'interrupt' }));
                log('⚡ Sent interruption signal');
            }, 2000);
        }
        
        function simulateVAD() {
            if (!connected) return;
            
            log('🎙️ Simulating Voice Activity Detection...');
            
            const vadStates = ['speech_detected', 'silence', 'speech_detected', 'turn_end'];
            vadStates.forEach((state, index) => {
                setTimeout(() => {
                    ws.send(JSON.stringify({
                        type: 'vad_simulation',
                        state: state,
                        confidence: 0.8
                    }));
                    log(`🔊 VAD State: ${state}`);
                }, index * 1000);
            });
        }
        
        // Auto-connect on page load
        document.addEventListener('DOMContentLoaded', function() {
            log('LiveTalker Test Interface loaded');
            updateStatus('Ready to connect', 'white');
        });
    </script>
</body>
</html>
    """
    return HTMLResponse(content=html_content)

@app.get("/health")
async def health_check():
    """Liveness probe: reports status, current time, version and features."""
    advertised_features = [
        "Voice Activity Detection",
        "Turn-Taking Detection",
        "Interruption Handling",
        "Resume from Last Thought",
        "WebSocket Communication",
        "Personality Profiles",
    ]
    payload = {
        "status": "healthy",
        "timestamp": time.time(),
        "version": "LiveTalker Test v1.0",
        "features": advertised_features,
    }
    return payload

# Captured once at import time so /stats can report a real uptime figure.
_SERVER_START_TIME = time.time()

@app.get("/stats")
async def get_stats():
    """System statistics.

    Returns:
        dict: connection count, uptime in seconds, per-component status
        (all simulated in this test server) and demo performance numbers.
    """
    return {
        "active_connections": len(active_connections),
        # Bug fix: this previously returned the absolute wall-clock timestamp
        # (time.time()), not an uptime. Report seconds since module import.
        "server_uptime": time.time() - _SERVER_START_TIME,
        "components": {
            "websocket_gateway": "ready",
            "vad_system": "simulated",
            "turn_detector": "simulated",
            "speech_manager": "simulated",
            "audio_processor": "simulated"
        },
        "test_mode": True,
        "performance": {
            "expected_ttfc_ms": "500-650 (with CSM)",
            "expected_rtf": "2.0-3.5x (with optimizations)",
            "current_mode": "demonstration"
        }
    }

@app.get("/widget.js")
async def get_widget():
    """Return the JavaScript widget.

    Serves frontend/widget.js when present on disk; otherwise falls back to a
    minimal inline stub so the demo pages still work.
    """
    # Local import: keeps this self-contained test file's header untouched.
    from pathlib import Path

    widget_path = Path("frontend/widget.js")
    # Bug fix: FileResponse does not raise FileNotFoundError when constructed —
    # Starlette only checks the file when the response is sent — so the old
    # try/except FileNotFoundError never reached the fallback. Check existence
    # explicitly instead.
    if widget_path.is_file():
        return FileResponse(str(widget_path), media_type="application/javascript")

    # Return a minimal widget for testing
    js_content = '''
// LiveTalker Test Widget
console.log("LiveTalker widget loaded in test mode");
window.LiveTalkerWidget = class {
    constructor(config = {}) {
        console.log("LiveTalker widget initialized:", config);
        this.createTestButton();
    }
    
    createTestButton() {
        const button = document.createElement("button");
        button.textContent = "🎙️ LiveTalker (Test)";
        button.style.cssText = "position:fixed;bottom:20px;right:20px;z-index:9999;padding:15px;background:#667eea;color:white;border:none;border-radius:50px;cursor:pointer;";
        button.onclick = () => window.open("/", "_blank");
        document.body.appendChild(button);
    }
};
if (document.currentScript && document.currentScript.hasAttribute('data-agent-id')) {
    new LiveTalkerWidget({
        agentId: document.currentScript.getAttribute('data-agent-id')
    });
}
        '''
    return HTMLResponse(content=js_content, media_type="application/javascript")

@app.get("/demo")
async def demo_page():
    """Demo page showing widget integration.

    Serves a plain "customer site" page whose only LiveTalker-specific piece
    is the <script src="/widget.js"> tag at the bottom — i.e. exactly the
    one-line integration the page describes.

    Returns:
        HTMLResponse: the demo page.
    """
    demo_html = """
<!DOCTYPE html>
<html>
<head>
    <title>LiveTalker Widget Demo</title>
    <style>
        body { 
            font-family: Arial, sans-serif; 
            margin: 40px;
            background: #f0f2f5;
        }
        .demo-content {
            max-width: 800px;
            margin: 0 auto;
            background: white;
            padding: 40px;
            border-radius: 10px;
            box-shadow: 0 4px 12px rgba(0,0,0,0.1);
        }
    </style>
</head>
<body>
    <div class="demo-content">
        <h1>LiveTalker Widget Integration Demo</h1>
        <p>This page demonstrates how the LiveTalker widget appears on a website.</p>
        <p>Look for the floating voice button in the bottom-right corner!</p>
        
        <h2>Integration Code:</h2>
        <pre style="background: #f8f9fa; padding: 15px; border-radius: 5px;">
&lt;script src="/widget.js" data-agent-id="Demo-Assistant"&gt;&lt;/script&gt;
        </pre>
        
        <h2>Features Demonstrated:</h2>
        <ul>
            <li>✅ Floating voice interface button</li>
            <li>✅ WebSocket connection to backend</li>
            <li>✅ Audio processing simulation</li>
            <li>✅ Turn-taking detection</li>
            <li>✅ Interruption handling</li>
        </ul>
    </div>
    
    <!-- LiveTalker Widget Integration -->
    <script src="/widget.js" data-agent-id="Demo-Assistant"></script>
</body>
</html>
    """
    return HTMLResponse(content=demo_html)

@app.websocket("/media-stream")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for real-time communication."""
    await websocket.accept()

    # Millisecond timestamp doubles as a session id (fine for a test server).
    session_id = f"session_{int(time.time() * 1000)}"
    active_connections[session_id] = websocket
    logger.info(f"New WebSocket connection: {session_id}")

    try:
        # Greet the client with its session id and a map of which subsystems
        # are real vs. simulated in this build.
        welcome = {
            "type": "config",
            "session_id": session_id,
            "message": "LiveTalker Test Server - WebSocket Connected",
            "features": {
                "vad": "simulated",
                "turn_taking": "simulated",
                "interruption_handling": "simulated",
                "personality_profiles": "available"
            }
        }
        await websocket.send_json(welcome)

        # Pump every inbound JSON frame through the message dispatcher until
        # the peer disconnects.
        async for frame in websocket.iter_json():
            await handle_websocket_message(websocket, session_id, frame)

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected: {session_id}")
    except Exception as e:
        logger.error(f"WebSocket error: {e}")
    finally:
        # pop() with a default tolerates the entry already being gone.
        active_connections.pop(session_id, None)

async def handle_websocket_message(websocket: WebSocket, session_id: str, message: Dict[str, Any]):
    """Handle incoming WebSocket messages.

    Dispatches on message["type"]; unrecognized types are echoed back.
    All replies are simulated — no real audio pipeline runs here.
    """
    msg_type = message.get("type")
    logger.info(f"Received message type: {msg_type}")

    if msg_type == "start_conversation":
        await websocket.send_json({
            "type": "conversation_started",
            "session_id": session_id,
            "personality": message.get("config", {}).get("personality", "default"),
            "message": "Conversation started! (Test mode - simulated responses)"
        })
        return

    if msg_type == "audio":
        # Simulated pipeline: VAD result -> turn-taking event -> final reply,
        # with sleeps standing in for real processing latency.
        await websocket.send_json({
            "type": "vad_result",
            "is_speech": True,
            "confidence": 0.85,
            "message": "Audio received and processed (simulated)"
        })

        await asyncio.sleep(0.5)
        await websocket.send_json({
            "type": "turn_detected",
            "state": "user_speaking",
            "message": "Turn-taking: User is speaking"
        })

        await asyncio.sleep(2)
        await websocket.send_json({
            "type": "response_complete",
            "message": "This is a simulated voice response. In the full implementation, this would be streaming audio from CSM/Sesame.",
            "segments": 3,
            "performance": {
                "ttfc_ms": 650,
                "rtf": 2.5,
                "note": "Simulated performance metrics"
            }
        })
        return

    if msg_type == "interrupt":
        await websocket.send_json({
            "type": "interrupted",
            "message": "Assistant interrupted - switching to listening mode",
            "context_preserved": True
        })
        return

    if msg_type == "vad_simulation":
        await websocket.send_json({
            "type": "vad_update",
            "state": message.get("state"),
            "confidence": message.get("confidence", 0.8),
            "timestamp": time.time()
        })
        return

    # Unknown type: echo the frame back so the client can see what it sent.
    await websocket.send_json({
        "type": "echo",
        "original_message": message,
        "timestamp": time.time()
    })

if __name__ == "__main__":
    # Startup banner, assembled once and emitted with a single print; the
    # resulting output is line-for-line identical to printing each entry.
    banner_lines = [
        "🚀 Starting LiveTalker Test Server...",
        "📍 Access points:",
        "   • Main Interface: http://localhost:8000",
        "   • Health Check: http://localhost:8000/health",
        "   • System Stats: http://localhost:8000/stats",
        "   • Widget Demo: http://localhost:8000/demo",
        "   • WebSocket: ws://localhost:8000/media-stream",
        "",
        "🎯 This test server demonstrates:",
        "   • WebSocket real-time communication",
        "   • Voice Activity Detection (simulated)",
        "   • Turn-taking detection (simulated)",
        "   • Interruption handling (simulated)",
        "   • Widget integration",
        "",
    ]
    print("\n".join(banner_lines))

    # Blocking call: serves until interrupted.
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")