#!/usr/bin/env python3
"""
LiveTalker HTTPS Voice Server
Proper SSL/TLS support for microphone access
"""

import asyncio
import json
import logging
import time
import base64
import numpy as np
import ssl
import os
from pathlib import Path
from typing import Dict, Any

from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

# Process-wide logging config; module logger used throughout this file.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="LiveTalker HTTPS Voice Server")

# Wide-open CORS: this demo server is meant to be reachable from any origin
# (localhost, LAN IP, tailnet hostname).  Tighten before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Per-session state dicts keyed by session id; entries are added/removed
# by the /media-stream WebSocket endpoint.
active_connections: Dict[str, Dict] = {}

def simple_vad(audio_data: np.ndarray, threshold: float = 0.01) -> tuple:
    """Energy-based voice activity detection.

    Args:
        audio_data: Mono audio samples, expected roughly in [-1.0, 1.0].
        threshold: RMS energy above which the frame counts as speech.

    Returns:
        ``(is_speech, confidence)`` — a plain ``bool`` and a ``float``
        confidence equal to RMS relative to the threshold, clipped to
        [0.0, 1.0] (0.0 when the threshold is non-positive or the input
        is empty).
    """
    if len(audio_data) == 0:
        return False, 0.0

    # Accumulate the mean of squares in float64 so long float32 buffers
    # don't lose precision; cast to a plain Python float so callers get
    # builtin bool/float rather than numpy scalar types.
    rms = float(np.sqrt(np.mean(np.square(audio_data, dtype=np.float64))))
    is_speech = rms > threshold
    # Guard against division by a non-positive threshold.
    confidence = min(rms / threshold, 1.0) if threshold > 0 else 0.0

    return is_speech, confidence

def create_ssl_certificate(cert_path: str = "livetalker.crt",
                           key_path: str = "livetalker.key"):
    """Create a self-signed SSL certificate for HTTPS.

    Generates a 2048-bit RSA key and a one-year self-signed certificate
    with SANs for localhost, the tailnet hostname, and the known LAN /
    tailnet IPs, then writes both as PEM files.

    Args:
        cert_path: Output path for the PEM certificate.
        key_path: Output path for the unencrypted PKCS#8 private key.

    Returns:
        ``(cert_path, key_path)`` on success, or ``(None, None)`` when
        the ``cryptography`` package is unavailable or anything fails.
    """
    try:
        from cryptography import x509
        from cryptography.x509.oid import NameOID
        from cryptography.hazmat.primitives import hashes, serialization
        from cryptography.hazmat.primitives.asymmetric import rsa
        import datetime
        import ipaddress

        # Generate private key
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
        )

        # Self-signed: subject and issuer are the same name.
        subject = issuer = x509.Name([
            x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "CA"),
            x509.NameAttribute(NameOID.LOCALITY_NAME, "LiveTalker"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "LiveTalker Voice Assistant"),
            x509.NameAttribute(NameOID.COMMON_NAME, "localhost"),
        ])

        # SANs cover every name/IP this demo server is reached through;
        # browsers validate against these, not the CN.
        cert = x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            issuer
        ).public_key(
            private_key.public_key()
        ).serial_number(
            x509.random_serial_number()
        ).not_valid_before(
            datetime.datetime.now(datetime.timezone.utc)
        ).not_valid_after(
            datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=365)
        ).add_extension(
            x509.SubjectAlternativeName([
                x509.DNSName("localhost"),
                x509.DNSName("*.localhost"),
                x509.DNSName("bitbots01.tail51b02f.ts.net"),
                x509.IPAddress(ipaddress.IPv4Address("127.0.0.1")),
                x509.IPAddress(ipaddress.IPv4Address("100.118.75.128")),
            ]),
            critical=False,
        ).sign(private_key, hashes.SHA256())

        # Write certificate and key files
        with open(cert_path, "wb") as f:
            f.write(cert.public_bytes(serialization.Encoding.PEM))

        with open(key_path, "wb") as f:
            f.write(private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.PKCS8,
                encryption_algorithm=serialization.NoEncryption()
            ))
        # The key is written unencrypted — restrict it to the owner.
        os.chmod(key_path, 0o600)

        print(f"✅ SSL certificate created: {cert_path}")
        print(f"✅ SSL private key created: {key_path}")
        return cert_path, key_path

    except Exception as e:
        print(f"❌ Failed to create SSL certificate: {e}")
        return None, None

@app.get("/")
async def root():
    """Serve the single-page HTTPS voice interface.

    The embedded page connects back to this server over a WebSocket whose
    scheme (wss/ws) follows the page protocol, so the UI also works in the
    plain-HTTP fallback mode used when certificate creation fails.
    """
    html_content = """<!DOCTYPE html>
<html>
<head>
    <title>LiveTalker HTTPS Voice Assistant</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <style>
        body { 
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            margin: 0; padding: 20px;
            background: linear-gradient(135deg, #059669 0%, #0d9488 50%, #0f766e 100%);
            color: white; min-height: 100vh;
        }
        .container { 
            max-width: 1000px; margin: 0 auto; 
            background: rgba(255,255,255,0.1);
            padding: 40px; border-radius: 24px;
            backdrop-filter: blur(20px);
            box-shadow: 0 20px 40px rgba(0,0,0,0.3);
        }
        h1 { 
            text-align: center; margin-bottom: 20px; 
            font-size: 3em; font-weight: 700;
            background: linear-gradient(45deg, #fbbf24, #f59e0b);
            -webkit-background-clip: text;
            background-clip: text;
            -webkit-text-fill-color: transparent;
            text-shadow: none;
        }
        .subtitle {
            text-align: center; margin-bottom: 40px;
            font-size: 1.2em; opacity: 0.9;
        }
        .card { 
            padding: 24px; margin: 24px 0; border-radius: 16px; 
            background: rgba(255,255,255,0.15); 
            border: 2px solid transparent;
            transition: all 0.3s ease;
        }
        .card:hover { transform: translateY(-2px); }
        .card.success { border-color: #10b981; background: rgba(16,185,129,0.2); }
        .card.warning { border-color: #f59e0b; background: rgba(245,158,11,0.2); }
        .card.error { border-color: #ef4444; background: rgba(239,68,68,0.2); }
        .card.active { border-color: #06b6d4; background: rgba(6,182,212,0.2); }
        
        .controls { 
            display: grid; 
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px; margin: 30px 0;
        }
        .btn { 
            padding: 16px 24px; border: none; border-radius: 12px; 
            background: linear-gradient(135deg, #3b82f6, #1d4ed8);
            color: white; cursor: pointer; 
            font-size: 16px; font-weight: 600;
            transition: all 0.3s ease;
            box-shadow: 0 4px 12px rgba(59,130,246,0.3);
        }
        .btn:hover { 
            transform: translateY(-3px); 
            box-shadow: 0 8px 20px rgba(59,130,246,0.4);
        }
        .btn:disabled { 
            background: #6b7280; cursor: not-allowed; 
            transform: none; box-shadow: none;
        }
        .btn.success { 
            background: linear-gradient(135deg, #10b981, #047857);
            box-shadow: 0 4px 12px rgba(16,185,129,0.3);
        }
        .btn.danger { 
            background: linear-gradient(135deg, #ef4444, #dc2626);
            box-shadow: 0 4px 12px rgba(239,68,68,0.3);
        }
        
        .https-notice {
            background: linear-gradient(135deg, #10b981, #059669);
            padding: 20px; border-radius: 12px; margin: 20px 0;
            text-align: center; border: 2px solid #34d399;
        }
        
        .mic-section {
            text-align: center; margin: 40px 0;
            padding: 30px; background: rgba(0,0,0,0.2);
            border-radius: 20px;
        }
        .mic-btn {
            width: 120px; height: 120px; border-radius: 50%;
            background: linear-gradient(135deg, #ef4444, #dc2626);
            border: none; cursor: pointer;
            font-size: 40px; color: white;
            transition: all 0.3s ease;
            box-shadow: 0 8px 24px rgba(239,68,68,0.4);
            margin: 20px auto; display: block;
        }
        .mic-btn:hover { transform: scale(1.05); }
        .mic-btn.active {
            background: linear-gradient(135deg, #10b981, #059669);
            animation: pulse 1.5s infinite;
            box-shadow: 0 0 40px rgba(16,185,129,0.6);
        }
        @keyframes pulse {
            0%, 100% { transform: scale(1); opacity: 1; }
            50% { transform: scale(1.1); opacity: 0.8; }
        }
        
        .vad-display {
            margin: 20px 0;
        }
        .vad-bar {
            width: 100%; height: 40px; margin: 20px 0;
            background: rgba(255,255,255,0.2); border-radius: 20px;
            overflow: hidden; position: relative;
        }
        .vad-level {
            height: 100%; 
            background: linear-gradient(90deg, #10b981, #f59e0b, #ef4444);
            width: 0%; transition: width 0.1s ease; 
            border-radius: 20px;
        }
        .vad-text {
            text-align: center; font-size: 18px; font-weight: 600;
            margin: 15px 0;
        }
        
        .conversation {
            background: rgba(0,0,0,0.3); border-radius: 16px; 
            padding: 24px; margin: 24px 0; height: 350px; 
            overflow-y: auto; border: 1px solid rgba(255,255,255,0.1);
        }
        .message {
            margin: 15px 0; padding: 12px 16px; border-radius: 12px; 
            max-width: 85%; word-wrap: break-word;
            animation: fadeIn 0.3s ease;
        }
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(10px); }
            to { opacity: 1; transform: translateY(0); }
        }
        .message.user { 
            background: linear-gradient(135deg, #3b82f6, #1e40af);
            margin-left: auto; text-align: right;
            box-shadow: 0 2px 8px rgba(59,130,246,0.3);
        }
        .message.assistant { 
            background: linear-gradient(135deg, #10b981, #047857);
            margin-right: auto;
            box-shadow: 0 2px 8px rgba(16,185,129,0.3);
        }
        .message.system { 
            background: rgba(107,114,128,0.5); text-align: center; 
            margin: 15px auto; font-style: italic; max-width: 90%;
        }
        
        .log {
            background: rgba(0,0,0,0.4); padding: 20px; border-radius: 12px;
            height: 200px; overflow-y: auto; font-family: 'SF Mono', Consolas, monospace;
            font-size: 14px; white-space: pre-wrap;
            border: 1px solid rgba(255,255,255,0.1);
        }
        
        .stats {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
            gap: 15px; margin: 20px 0;
        }
        .stat-card {
            background: rgba(255,255,255,0.1); padding: 20px;
            border-radius: 12px; text-align: center;
        }
        .stat-value {
            font-size: 2.5em; font-weight: bold; 
            background: linear-gradient(45deg, #fbbf24, #f59e0b);
            -webkit-background-clip: text; background-clip: text;
            -webkit-text-fill-color: transparent;
        }
        
        .feature-grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px; margin: 30px 0;
        }
        .feature {
            background: rgba(255,255,255,0.1); padding: 24px;
            border-radius: 16px; text-align: center;
            transition: transform 0.3s ease;
        }
        .feature:hover { transform: translateY(-5px); }
        .feature-icon { font-size: 3em; margin-bottom: 15px; }
        
        @media (max-width: 768px) {
            .container { padding: 20px; margin: 10px; }
            h1 { font-size: 2em; }
            .controls { grid-template-columns: 1fr; }
            .mic-btn { width: 100px; height: 100px; font-size: 30px; }
        }
    </style>
</head>
<body>
    <div class="container">
        <h1>🔒 LiveTalker HTTPS</h1>
        <div class="subtitle">Secure Voice Assistant with Real-time Processing</div>
        
        <div class="https-notice">
            <h3>🔒 HTTPS Enabled</h3>
            <p>Microphone access is now available with SSL/TLS encryption</p>
        </div>
        
        <div class="card" id="status">
            <h3>📡 Connection Status</h3>
            <div id="statusText">Ready for secure connection</div>
        </div>
        
        <div class="controls">
            <button class="btn" onclick="connect()">🔗 Connect Secure</button>
            <button class="btn" id="micBtn" onclick="setupMicrophone()" disabled>🎤 Enable Microphone</button>
            <button class="btn success" id="startBtn" onclick="startVoice()" disabled>🎧 Start Voice</button>
            <button class="btn danger" id="stopBtn" onclick="stopVoice()" disabled>⏹️ Stop Voice</button>
        </div>
        
        <div class="card" id="micStatus">
            <h3>🎤 Microphone Status</h3>
            <div id="micText">HTTPS required for microphone access</div>
        </div>
        
        <div class="mic-section">
            <h3>🎵 Voice Activity Detection</h3>
            <button class="mic-btn" id="micVisual" onclick="toggleVoice()">🎤</button>
            <div class="vad-display">
                <div class="vad-bar">
                    <div class="vad-level" id="vadBar"></div>
                </div>
                <div class="vad-text" id="vadText">Secure microphone access available</div>
            </div>
            
            <div class="stats">
                <div class="stat-card">
                    <div class="stat-value" id="audioCount">0</div>
                    <div>Audio Chunks</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="speechCount">0</div>
                    <div>Speech Events</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="confidenceAvg">0%</div>
                    <div>Avg Confidence</div>
                </div>
            </div>
        </div>
        
        <div class="conversation" id="chat">
            <div class="message system">🔒 Secure HTTPS connection ready! Enable microphone to start voice conversation.</div>
        </div>
        
        <div class="card">
            <h3>📊 Activity Log</h3>
            <div class="log" id="log">HTTPS Voice Server initialized...</div>
        </div>
        
        <div class="feature-grid">
            <div class="feature">
                <div class="feature-icon">🔒</div>
                <h4>HTTPS Secure</h4>
                <p>SSL/TLS encryption for microphone access</p>
            </div>
            <div class="feature">
                <div class="feature-icon">🎯</div>
                <h4>Real-time VAD</h4>
                <p>Energy-based voice activity detection</p>
            </div>
            <div class="feature">
                <div class="feature-icon">⚡</div>
                <h4>Low Latency</h4>
                <p>WebSocket streaming audio processing</p>
            </div>
            <div class="feature">
                <div class="feature-icon">🌐</div>
                <h4>Cross-platform</h4>
                <p>Works on all modern browsers</p>
            </div>
        </div>
    </div>

    <script>
        let ws = null;
        let stream = null;
        let context = null;
        let processor = null;
        let recording = false;
        let connected = false;
        let stats = { audioChunks: 0, speechEvents: 0, totalConfidence: 0 };
        
        function log(msg) {
            const el = document.getElementById('log');
            const timestamp = new Date().toLocaleTimeString();
            el.textContent += `[${timestamp}] ${msg}\\n`;
            el.scrollTop = el.scrollHeight;
        }
        
        function updateStatus(text, type = '') {
            const el = document.getElementById('status');
            document.getElementById('statusText').textContent = text;
            el.className = `card ${type}`;
        }
        
        function updateMic(text, type = '') {
            const el = document.getElementById('micStatus');
            document.getElementById('micText').textContent = text;
            el.className = `card ${type}`;
        }
        
        function addChat(role, text) {
            const chat = document.getElementById('chat');
            const msg = document.createElement('div');
            msg.className = `message ${role}`;
            msg.textContent = text;
            chat.appendChild(msg);
            chat.scrollTop = chat.scrollHeight;
        }
        
        function updateVAD(level, active) {
            document.getElementById('vadBar').style.width = `${level * 100}%`;
            document.getElementById('vadText').textContent = active ? 
                `🎵 Voice Activity: ${Math.round(level * 100)}%` : 
                `🔇 Silence: ${Math.round(level * 100)}%`;
            
            const visual = document.getElementById('micVisual');
            if (active) {
                visual.classList.add('active');
            } else {
                visual.classList.remove('active');
            }
        }
        
        function updateStats() {
            document.getElementById('audioCount').textContent = stats.audioChunks;
            document.getElementById('speechCount').textContent = stats.speechEvents;
            const avg = stats.audioChunks > 0 ? 
                Math.round((stats.totalConfidence / stats.audioChunks) * 100) : 0;
            document.getElementById('confidenceAvg').textContent = avg + '%';
        }
        
        async function connect() {
            try {
                // Follow the page protocol so the HTTP fallback mode still works
                const protocol = location.protocol === 'https:' ? 'wss:' : 'ws:';
                const url = `${protocol}//${location.host}/media-stream`;
                
                log('🔒 Connecting via secure WebSocket...');
                updateStatus('🔄 Connecting securely...', 'warning');
                
                ws = new WebSocket(url);
                
                ws.onopen = () => {
                    connected = true;
                    log('✅ Secure connection established');
                    updateStatus('✅ Secure connection active', 'success');
                    document.getElementById('micBtn').disabled = false;
                };
                
                ws.onmessage = (event) => {
                    const data = JSON.parse(event.data);
                    handleMessage(data);
                };
                
                ws.onclose = () => {
                    connected = false;
                    log('❌ Secure connection lost');
                    updateStatus('❌ Connection lost', 'error');
                };
                
                ws.onerror = () => {
                    log('❌ Secure connection error');
                    updateStatus('❌ Connection failed', 'error');
                };
                
            } catch (error) {
                log(`❌ Connection error: ${error.message}`);
                updateStatus('❌ Connection failed', 'error');
            }
        }
        
        function handleMessage(data) {
            log(`📨 ${data.type}`);
            
            switch(data.type) {
                case 'config':
                    addChat('system', '🔒 Secure voice processing ready!');
                    break;
                    
                case 'vad_result':
                    stats.audioChunks++;
                    stats.totalConfidence += data.confidence || 0;
                    if (data.is_speech) {
                        stats.speechEvents++;
                    }
                    updateVAD(data.confidence || 0, data.is_speech);
                    updateStats();
                    break;
                    
                case 'speech_to_text':
                    if (data.text) {
                        addChat('user', data.text);
                        log(`🗣️ Speech: ${data.text}`);
                    }
                    break;
                    
                case 'ai_response':
                    if (data.text) {
                        addChat('assistant', data.text);
                        log('🤖 AI response generated');
                    }
                    break;
                    
                case 'conversation_started':
                    addChat('system', '🎧 Listening for your voice...');
                    updateStatus('🎧 Voice detection active', 'active');
                    break;
                    
                case 'error':
                    log(`❌ Error: ${data.error}`);
                    addChat('system', `Error: ${data.error}`);
                    break;
            }
        }
        
        async function setupMicrophone() {
            try {
                log('🎤 Requesting microphone via HTTPS...');
                updateMic('🔄 Requesting secure microphone access...', 'warning');
                
                if (!navigator.mediaDevices?.getUserMedia) {
                    throw new Error('Microphone API not supported');
                }
                
                stream = await navigator.mediaDevices.getUserMedia({
                    audio: {
                        sampleRate: 16000,
                        channelCount: 1,
                        echoCancellation: true,
                        noiseSuppression: true,
                        autoGainControl: true
                    }
                });
                
                log('✅ Microphone access granted');
                updateMic('✅ Microphone ready for voice input', 'success');
                
                // Setup audio processing
                const AudioContext = window.AudioContext || window.webkitAudioContext;
                context = new AudioContext({ sampleRate: 16000 });
                
                const source = context.createMediaStreamSource(stream);
                processor = context.createScriptProcessor(1024, 1, 1);
                
                processor.onaudioprocess = (event) => {
                    if (recording && connected) {
                        const input = event.inputBuffer.getChannelData(0);
                        sendAudio(input);
                    }
                };
                
                source.connect(processor);
                processor.connect(context.destination);
                
                document.getElementById('startBtn').disabled = false;
                document.getElementById('micBtn').disabled = true;
                document.getElementById('micBtn').textContent = '✅ Microphone Ready';
                
            } catch (error) {
                log(`❌ Microphone error: ${error.message}`);
                updateMic(`❌ ${error.message}`, 'error');
                
                if (error.name === 'NotAllowedError') {
                    alert('🎤 Please allow microphone access for voice input');
                } else if (error.name === 'NotFoundError') {
                    alert('🎤 No microphone found - please check your audio devices');
                } else {
                    alert(`🎤 Microphone error: ${error.message}`);
                }
            }
        }
        
        function sendAudio(data) {
            if (!ws || ws.readyState !== WebSocket.OPEN) return;
            
            // Convert to 16-bit PCM
            const buffer = new Int16Array(data.length);
            for (let i = 0; i < data.length; i++) {
                buffer[i] = Math.max(-1, Math.min(1, data[i])) * 0x7FFF;
            }
            
            // Convert to base64
            const bytes = new Uint8Array(buffer.buffer);
            const base64 = btoa(String.fromCharCode(...bytes));
            
            ws.send(JSON.stringify({
                type: 'audio',
                data: base64,
                format: 'pcm_s16le',
                sample_rate: 16000,
                channels: 1
            }));
        }
        
        function startVoice() {
            if (!connected) { alert('Please connect first'); return; }
            if (!stream) { alert('Please enable microphone first'); return; }
            
            recording = true;
            log('🎧 Voice detection started');
            
            document.getElementById('startBtn').disabled = true;
            document.getElementById('stopBtn').disabled = false;
            
            if (context?.state === 'suspended') {
                context.resume();
            }
            
            ws?.send(JSON.stringify({
                type: 'start_conversation',
                config: { mode: 'secure_voice', https: true }
            }));
        }
        
        function stopVoice() {
            recording = false;
            log('🛑 Voice detection stopped');
            updateStatus('✅ Secure connection active', 'success');
            updateVAD(0, false);
            
            document.getElementById('startBtn').disabled = false;
            document.getElementById('stopBtn').disabled = true;
            
            ws?.send(JSON.stringify({ type: 'stop_listening' }));
        }
        
        function toggleVoice() {
            if (recording) {
                stopVoice();
            } else {
                startVoice();
            }
        }
        
        // Initialize
        document.addEventListener('DOMContentLoaded', () => {
            log('🔒 HTTPS LiveTalker Voice Interface loaded');
            log('✅ Secure microphone access available');
            log('🔧 Steps: Connect → Enable Microphone → Start Voice');
            
            // Check if we're actually on HTTPS
            if (location.protocol === 'https:') {
                log('✅ Running on HTTPS - microphone access enabled');
            } else {
                log('⚠️ Not HTTPS - microphone may not work');
            }
        });
        
        // Cleanup
        window.addEventListener('beforeunload', () => {
            if (recording) stopVoice();
            if (stream) {
                stream.getTracks().forEach(track => track.stop());
            }
            ws?.close();
        });
    </script>
</body>
</html>"""
    return HTMLResponse(content=html_content)

@app.get("/health")
async def health_check():
    """Report server health, SSL status, and the live connection count."""
    feature_flags = {
        "secure_microphone": True,
        "voice_activity_detection": True,
        "real_time_processing": True,
        "ssl_encryption": True,
    }
    return {
        "status": "healthy",
        "timestamp": time.time(),
        "protocol": "HTTPS",
        "active_connections": len(active_connections),
        "ssl_enabled": True,
        "features": feature_flags,
    }

@app.websocket("/media-stream")
async def websocket_endpoint(websocket: WebSocket):
    """Secure WebSocket for voice processing.

    Accepts the client, registers a per-session state dict in
    ``active_connections``, sends an initial ``config`` message, then
    forwards every incoming JSON message to ``handle_secure_message``
    until the client disconnects.  The session entry is always removed
    on exit so the connection count stays accurate.
    """
    await websocket.accept()
    # Millisecond timestamp is unique enough for this single-process demo.
    session_id = f"session_{int(time.time() * 1000)}"
    
    session = {
        "id": session_id,
        "websocket": websocket,
        "audio_buffer": [],     # accumulated float32 samples of detected speech
        "is_listening": False,  # toggled by start_conversation / stop_listening
        "last_speech": 0,       # time.time() of the most recent speech chunk
        "https_enabled": True
    }
    active_connections[session_id] = session
    
    logger.info(f"Secure voice session: {session_id}")
    
    try:
        await websocket.send_json({
            "type": "config",
            "session_id": session_id,
            "message": "Secure HTTPS voice processing ready",
            "ssl_enabled": True
        })
        
        async for message in websocket.iter_json():
            await handle_secure_message(session, message)
            
    except WebSocketDisconnect:
        logger.info(f"Secure session ended: {session_id}")
    except Exception as e:
        logger.error(f"Secure session error: {e}")
        try:
            await websocket.send_json({
                "type": "error",
                "error": str(e)
            })
        # Narrowed from a bare `except:` — a bare except also swallows
        # asyncio.CancelledError, breaking task cancellation.
        except Exception:
            pass
    finally:
        if session_id in active_connections:
            del active_connections[session_id]

async def handle_secure_message(session: Dict, message: Dict[str, Any]):
    """Handle secure WebSocket messages"""
    # Dispatch on the client's message type; unknown types are ignored.
    msg_type = message.get("type")
    
    if msg_type == "start_conversation":
        session["is_listening"] = True
        await session["websocket"].send_json({
            "type": "conversation_started",
            "message": "🔒 Secure voice conversation started!",
            "https_enabled": True
        })
        
    elif msg_type == "audio" and session["is_listening"]:
        try:
            # Client sends base64-encoded 16 kHz mono s16le PCM; normalize
            # to float32 in [-1, 1) for the energy-based VAD.
            audio_data = base64.b64decode(message["data"])
            audio_np = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32) / 32768.0
            
            if len(audio_np) > 0:
                # Run VAD
                is_speech, confidence = simple_vad(audio_np, threshold=0.015)
                
                # Echo the per-chunk VAD result so the UI can animate.
                await session["websocket"].send_json({
                    "type": "vad_result",
                    "is_speech": bool(is_speech),
                    "confidence": float(confidence),
                    "timestamp": time.time(),
                    "secure": True
                })
                
                # Accumulate speech
                if is_speech:
                    session["audio_buffer"].extend(audio_np.tolist())
                    session["last_speech"] = time.time()
                    
                    # Flush once roughly a second of speech has accumulated.
                    if len(session["audio_buffer"]) > 16000:  # ~1 second
                        await process_secure_speech(session)
                        
                # Silence branch: flush a shorter utterance (>0.5 s) once
                # the speaker has been quiet for 0.8 s.
                elif (len(session["audio_buffer"]) > 8000 and 
                      time.time() - session["last_speech"] > 0.8):
                    await process_secure_speech(session)
                    
        except Exception as e:
            # Report decode/processing failures to the client rather than
            # tearing down the whole session.
            await session["websocket"].send_json({
                "type": "error",
                "error": f"Secure audio processing error: {str(e)}"
            })
            
    elif msg_type == "stop_listening":
        session["is_listening"] = False
        # Flush whatever speech is still buffered before going idle.
        if session["audio_buffer"]:
            await process_secure_speech(session)

async def process_secure_speech(session: Dict):
    """Process speech with HTTPS security context"""
    # Discard segments shorter than ~0.25 s (below 4000 samples @16 kHz).
    if len(session["audio_buffer"]) < 4000:
        session["audio_buffer"] = []
        return
        
    try:
        seconds = len(session["audio_buffer"]) / 16000
        ws = session["websocket"]
        
        # Placeholder transcription message for the buffered segment.
        await ws.send_json({
            "type": "speech_to_text",
            "text": f"🔒 Secure speech: {seconds:.1f}s voice segment detected",
            "confidence": 0.9,
            "duration": seconds,
            "secure": True
        })
        
        # Placeholder assistant reply confirming secure receipt.
        await ws.send_json({
            "type": "ai_response",
            "text": (
                f"✅ Received {seconds:.1f}s of secure voice input! "
                "HTTPS encryption is working perfectly for real-time voice processing."
            ),
            "processing_info": {
                "duration": f"{seconds:.1f}s",
                "encryption": "HTTPS/TLS",
                "security": "enabled"
            }
        })
        
        session["audio_buffer"] = []
        logger.info(f"Processed {seconds:.1f}s secure speech in session {session['id']}")
        
    except Exception as e:
        logger.error(f"Secure speech processing error: {e}")
        session["audio_buffer"] = []

if __name__ == "__main__":
    print("🔒 LiveTalker HTTPS Voice Server")
    print("=" * 50)
    
    # Generate a throwaway self-signed certificate for this run; the
    # files are deleted again on shutdown (see the finally block below).
    cert_path, key_path = create_ssl_certificate()
    
    if cert_path and key_path and os.path.exists(cert_path) and os.path.exists(key_path):
        # NOTE: uvicorn builds its own SSL context from the file paths
        # passed below, so no separate ssl.SSLContext is needed here.
        print("✅ SSL certificate ready")
        print("✅ HTTPS encryption enabled")
        print("✅ Microphone access available")
        print("")
        print("🌐 HTTPS URLs:")
        print("   Local:   https://localhost:8000")
        print("   Network: https://100.118.75.128:8000")
        print("   Tailnet: https://bitbots01.tail51b02f.ts.net:8000")
        print("")
        print("🔧 Features:")
        print("   ✅ Secure microphone access")
        print("   ✅ Real-time voice processing")
        print("   ✅ SSL/TLS encryption")
        print("   ✅ Cross-platform compatibility")
        print("")
        print("⚠️  You may see browser security warnings for the self-signed certificate.")
        print("   Click 'Advanced' → 'Continue to site' to proceed.")
        
        try:
            uvicorn.run(
                app,
                host="0.0.0.0",
                port=8000,
                ssl_keyfile=key_path,
                ssl_certfile=cert_path,
                log_level="info"
            )
        except KeyboardInterrupt:
            print("\n🛑 Server stopped")
        finally:
            # Cleanup certificate files
            for file_path in [cert_path, key_path]:
                if os.path.exists(file_path):
                    os.remove(file_path)
                    print(f"🗑️  Cleaned up: {file_path}")
    else:
        # Certificate creation failed (e.g. `cryptography` not installed):
        # serve plain HTTP; the browser page adapts its WebSocket scheme.
        print("❌ Failed to create SSL certificate")
        print("Falling back to HTTP mode...")
        
        uvicorn.run(
            app,
            host="0.0.0.0",
            port=8000,
            log_level="info"
        )