#!/usr/bin/env python3
"""
Simple LiveTalker Voice Chat Server
Works with basic dependencies and provides HTTPS + Tailscale integration
"""

import ssl
import uvicorn
import asyncio
import json
import logging
import time
import base64
from typing import Dict, Any
from pathlib import Path

from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="LiveTalker Simple Voice Chat")

# Enable CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

active_sessions: Dict[str, Dict] = {}

def simple_vad(audio_data, threshold: float = 0.01) -> tuple:
    """Simple energy-based voice activity detection"""
    if not audio_data:
        return False, 0.0
    
    # Calculate simple energy level
    energy = sum(abs(x) for x in audio_data) / len(audio_data) if audio_data else 0
    is_speech = energy > threshold
    confidence = min(energy / threshold, 1.0) if threshold > 0 else 0.0
    
    return is_speech, confidence

@app.get("/")
async def serve_main_page():
    """Serve main test page with voice chat widget"""
    html_content = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LiveTalker Voice Chat - HTTPS</title>
    <style>
        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            margin: 0;
            padding: 40px;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            min-height: 100vh;
        }
        .container {
            max-width: 800px;
            margin: 0 auto;
            background: rgba(255,255,255,0.1);
            padding: 40px;
            border-radius: 20px;
            backdrop-filter: blur(15px);
            box-shadow: 0 8px 32px rgba(0,0,0,0.3);
        }
        h1 {
            text-align: center;
            font-size: 3em;
            margin-bottom: 30px;
            text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
        }
        .status {
            background: rgba(76,175,80,0.2);
            border: 2px solid #4CAF50;
            padding: 15px;
            border-radius: 10px;
            margin: 20px 0;
            text-align: center;
        }
        .controls {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
            gap: 15px;
            margin: 30px 0;
        }
        .btn {
            padding: 15px 25px;
            border: none;
            border-radius: 10px;
            background: #4CAF50;
            color: white;
            cursor: pointer;
            font-size: 16px;
            font-weight: 600;
            transition: all 0.3s ease;
        }
        .btn:hover { background: #45a049; transform: translateY(-2px); }
        .btn:disabled { background: #666; cursor: not-allowed; }
        .btn.danger { background: #f44336; }
        .vad-display {
            margin: 30px 0;
            padding: 20px;
            background: rgba(0,0,0,0.3);
            border-radius: 12px;
        }
        .vad-bar {
            width: 100%;
            height: 40px;
            background: rgba(255,255,255,0.2);
            border-radius: 20px;
            overflow: hidden;
            margin: 15px 0;
        }
        .vad-level {
            height: 100%;
            background: linear-gradient(90deg, #4CAF50, #8BC34A, #FFC107, #FF5722);
            width: 0%;
            transition: width 0.1s ease;
            border-radius: 20px;
        }
        .conversation {
            background: rgba(0,0,0,0.4);
            border-radius: 15px;
            padding: 20px;
            margin: 20px 0;
            max-height: 300px;
            overflow-y: auto;
            min-height: 150px;
        }
        .message {
            margin: 10px 0;
            padding: 10px 15px;
            border-radius: 8px;
            max-width: 85%;
            word-wrap: break-word;
        }
        .message.user {
            background: rgba(33,150,243,0.4);
            margin-left: auto;
            text-align: right;
        }
        .message.assistant {
            background: rgba(76,175,80,0.4);
            margin-right: auto;
        }
        .message.system {
            background: rgba(158,158,158,0.3);
            margin: 10px auto;
            text-align: center;
            font-style: italic;
            max-width: 90%;
        }
    </style>
</head>
<body>
    <div class="container">
        <h1>🎙️ LiveTalker Voice Chat</h1>
        
        <div class="status">
            ✅ <strong>HTTPS Enabled</strong> - Voice chat ready!
        </div>
        
        <div class="controls">
            <button class="btn" onclick="connectWebSocket()">🔗 Connect</button>
            <button class="btn" onclick="requestMicrophone()" id="micBtn">🎤 Enable Mic</button>
            <button class="btn" onclick="startListening()" id="startBtn" disabled>🎧 Start</button>
            <button class="btn danger" onclick="stopListening()" id="stopBtn" disabled>🛑 Stop</button>
        </div>
        
        <div class="vad-display">
            <h3>🎵 Voice Activity</h3>
            <div class="vad-bar">
                <div class="vad-level" id="vadLevel"></div>
            </div>
            <div style="text-align: center;" id="vadStatus">Click "Connect" to start</div>
        </div>
        
        <div class="conversation" id="conversation">
            <div class="message system">Ready to start voice conversation...</div>
        </div>
    </div>

    <script>
        let ws = null;
        let mediaStream = null;
        let audioContext = null;
        let processor = null;
        let isRecording = false;
        let connected = false;
        
        function addMessage(type, content) {
            const conversation = document.getElementById('conversation');
            const message = document.createElement('div');
            message.className = `message ${type}`;
            message.textContent = content;
            conversation.appendChild(message);
            conversation.scrollTop = conversation.scrollHeight;
        }
        
        function updateVAD(level, isActive) {
            const vadLevel = document.getElementById('vadLevel');
            const vadStatus = document.getElementById('vadStatus');
            
            vadLevel.style.width = `${level * 100}%`;
            vadStatus.textContent = isActive ? 
                `🎵 Voice: ${(level * 100).toFixed(0)}%` : 
                `🔇 Silence: ${(level * 100).toFixed(0)}%`;
        }
        
        async function connectWebSocket() {
            const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
            const wsUrl = `${protocol}//${window.location.host}/voice`;
            
            try {
                ws = new WebSocket(wsUrl);
                
                ws.onopen = function() {
                    connected = true;
                    addMessage('system', '✅ Connected to voice chat');
                    document.getElementById('vadStatus').textContent = 'Connected - enable microphone';
                };
                
                ws.onmessage = function(event) {
                    const data = JSON.parse(event.data);
                    handleServerMessage(data);
                };
                
                ws.onclose = function() {
                    connected = false;
                    addMessage('system', '❌ Disconnected');
                };
                
            } catch (error) {
                addMessage('system', `❌ Connection failed: ${error}`);
            }
        }
        
        function handleServerMessage(data) {
            switch(data.type) {
                case 'vad_result':
                    updateVAD(data.confidence || 0, data.is_speech || false);
                    break;
                    
                case 'transcription':
                    if (data.text && data.text.trim()) {
                        addMessage('user', data.text);
                    }
                    break;
                    
                case 'response':
                    if (data.text) {
                        addMessage('assistant', data.text);
                    }
                    break;
                    
                case 'status':
                    addMessage('system', data.message);
                    break;
            }
        }
        
        async function requestMicrophone() {
            try {
                mediaStream = await navigator.mediaDevices.getUserMedia({
                    audio: {
                        sampleRate: 16000,
                        channelCount: 1,
                        echoCancellation: true,
                        noiseSuppression: true
                    }
                });
                
                addMessage('system', '✅ Microphone enabled');
                
                const AudioContextClass = window.AudioContext || window.webkitAudioContext;
                audioContext = new AudioContextClass({ sampleRate: 16000 });
                
                const source = audioContext.createMediaStreamSource(mediaStream);
                processor = audioContext.createScriptProcessor(1024, 1, 1);
                
                processor.onaudioprocess = function(event) {
                    if (isRecording && connected) {
                        const inputData = event.inputBuffer.getChannelData(0);
                        sendAudioData(inputData);
                    }
                };
                
                source.connect(processor);
                processor.connect(audioContext.destination);
                
                document.getElementById('startBtn').disabled = false;
                document.getElementById('micBtn').disabled = true;
                document.getElementById('micBtn').textContent = '✅ Mic Ready';
                
            } catch (error) {
                addMessage('system', `❌ Microphone error: ${error.message}`);
            }
        }
        
        function sendAudioData(audioData) {
            if (!ws || ws.readyState !== WebSocket.OPEN) return;
            
            const int16Array = new Int16Array(audioData.length);
            for (let i = 0; i < audioData.length; i++) {
                int16Array[i] = Math.max(-1, Math.min(1, audioData[i])) * 0x7FFF;
            }
            
            const uint8Array = new Uint8Array(int16Array.buffer);
            const base64String = btoa(String.fromCharCode.apply(null, uint8Array));
            
            ws.send(JSON.stringify({
                type: 'audio',
                data: base64String
            }));
        }
        
        function startListening() {
            if (!connected || !mediaStream) return;
            
            isRecording = true;
            addMessage('system', '🎧 Listening for voice...');
            
            document.getElementById('startBtn').disabled = true;
            document.getElementById('stopBtn').disabled = false;
            
            if (audioContext?.state === 'suspended') {
                audioContext.resume();
            }
            
            ws?.send(JSON.stringify({ type: 'start_listening' }));
        }
        
        function stopListening() {
            isRecording = false;
            addMessage('system', '🛑 Stopped listening');
            
            document.getElementById('startBtn').disabled = false;
            document.getElementById('stopBtn').disabled = true;
            
            ws?.send(JSON.stringify({ type: 'stop_listening' }));
        }
        
        // Auto-connect on page load
        window.addEventListener('load', function() {
            addMessage('system', 'Page loaded - click Connect to start');
        });
    </script>
</body>
</html>
    """
    return HTMLResponse(content=html_content)

@app.get("/health")
async def health_check():
    return {
        "status": "healthy",
        "timestamp": time.time(),
        "https_enabled": True,
        "features": ["voice_chat", "https", "tailscale_ready"]
    }

@app.websocket("/voice")
async def voice_websocket(websocket: WebSocket):
    """WebSocket for voice processing"""
    await websocket.accept()
    session_id = f"session_{int(time.time() * 1000)}"
    
    session = {
        "id": session_id,
        "websocket": websocket,
        "audio_buffer": [],
        "is_listening": False
    }
    active_sessions[session_id] = session
    
    try:
        await websocket.send_json({
            "type": "status",
            "message": "Voice chat connected! Enable microphone to start."
        })
        
        async for message in websocket.iter_json():
            if message.get("type") == "start_listening":
                session["is_listening"] = True
                await websocket.send_json({
                    "type": "status",
                    "message": "🎤 Listening for voice..."
                })
                
            elif message.get("type") == "audio" and session["is_listening"]:
                try:
                    # Decode audio data
                    audio_data = base64.b64decode(message["data"])
                    # Convert to list for simple processing
                    audio_values = list(audio_data)
                    
                    if audio_values:
                        # Simple voice activity detection
                        is_speech, confidence = simple_vad(audio_values)
                        
                        await websocket.send_json({
                            "type": "vad_result",
                            "is_speech": is_speech,
                            "confidence": confidence
                        })
                        
                        if is_speech and confidence > 0.3:
                            session["audio_buffer"].extend(audio_values)
                            
                            # Simulate speech recognition after 2 seconds of audio
                            if len(session["audio_buffer"]) > 32000:  # ~2 seconds at 16kHz
                                duration = len(session["audio_buffer"]) / 16000
                                
                                # Simple mock transcription
                                await websocket.send_json({
                                    "type": "transcription",
                                    "text": f"Voice detected for {duration:.1f} seconds"
                                })
                                
                                # Simple mock response
                                await websocket.send_json({
                                    "type": "response",
                                    "text": f"I heard you speaking for {duration:.1f} seconds! The voice chat is working perfectly."
                                })
                                
                                session["audio_buffer"] = []
                                
                except Exception as e:
                    await websocket.send_json({
                        "type": "status",
                        "message": f"Audio processing error: {str(e)}"
                    })
                    
            elif message.get("type") == "stop_listening":
                session["is_listening"] = False
                await websocket.send_json({
                    "type": "status",
                    "message": "Stopped listening"
                })
                
    except WebSocketDisconnect:
        pass
    finally:
        if session_id in active_sessions:
            del active_sessions[session_id]

def main():
    print("🎙️ Starting LiveTalker Simple Voice Chat...")
    
    # Check for HTTPS certificates
    cert_file = "livetalker.crt"
    key_file = "livetalker.key"
    
    if not (Path(cert_file).exists() and Path(key_file).exists()):
        print(f"❌ HTTPS certificates not found ({cert_file}, {key_file})")
        print("Running in HTTP mode (microphone may not work in browsers)")
        
        uvicorn.run(app, host="0.0.0.0", port=8000)
    else:
        print("✅ HTTPS certificates found")
        print("🌟 Starting with HTTPS support")
        print("")
        print("📍 Access URLs:")
        print("   Local HTTPS: https://localhost:8000")
        print("   Setup Tailscale: ./scripts/setup_tailscale.sh")
        print("")
        
        uvicorn.run(
            app,
            host="0.0.0.0",
            port=8000,
            ssl_certfile=cert_file,
            ssl_keyfile=key_file
        )

if __name__ == "__main__":
    main()