#!/usr/bin/env python3
"""
LiveTalker AI Voice Chat Server - Real AI Agent
Full conversation with live transcription and intelligent responses
"""

import asyncio
import json
import logging
import re
import time
from datetime import datetime
from typing import Any, Dict, List

from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse
import uvicorn

# Try to import transformers for AI model
try:
    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
    import torch
    AI_AVAILABLE = True
    print("✅ AI models available")
except ImportError:
    AI_AVAILABLE = False
    print("⚠️ AI models not available, using simple responses")

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# FastAPI app serving the chat UI ("/"), the WebSocket ("/ws"), and status endpoints.
app = FastAPI(title="LiveTalker AI Voice Chat", version="2.0.0")

class AIAgent:
    """Stateful conversational agent with an optional local LM backend.

    Keeps the full session transcript, opportunistically learns the user's
    first name from self-introductions ("my name is ...", "I'm ..."), and
    generates replies either with DialoGPT (when the transformers/torch
    stack imported successfully) or with a deterministic rule-based
    fallback.
    """

    def __init__(self):
        # Full transcript: dicts with "role", "content", "timestamp" keys.
        self.conversation_history: List[Dict[str, str]] = []
        # First name learned from the conversation, or None while unknown.
        self.user_name = None
        self.session_start = datetime.now()
        # Populated by _load_ai_model(); both stay None in rule-based mode.
        self.model = None
        self.tokenizer = None

        # Static persona woven into the generation prompt and canned replies.
        self.personality = {
            "name": "Alex",
            "role": "best friend and AI assistant",
            "traits": ["friendly", "supportive", "curious", "helpful", "engaging"],
            "speaking_style": "casual and warm"
        }

        if AI_AVAILABLE:
            self._load_ai_model()

    def _load_ai_model(self):
        """Load the DialoGPT model/tokenizer; degrade to rules on any failure."""
        try:
            model_name = "microsoft/DialoGPT-medium"
            logger.info(f"Loading AI model: {model_name}")

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                # Half precision only when a GPU is present; fp16 on CPU is slow.
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                low_cpu_mem_usage=True
            )

            # DialoGPT ships without a pad token; reuse EOS so generate()
            # can pad without warnings.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            logger.info("✅ AI model loaded successfully")

        except Exception as e:
            # Download/memory/any failure: fall back to rule-based responses.
            logger.warning(f"Could not load AI model: {e}")
            self.model = None
            self.tokenizer = None

    async def generate_response(self, user_input: str) -> str:
        """Generate a reply to *user_input*, recording both sides in history."""

        self.conversation_history.append({
            "role": "user",
            "content": user_input,
            "timestamp": datetime.now().isoformat()
        })

        # Opportunistically learn the user's name from self-introductions.
        if not self.user_name and ("my name is" in user_input.lower() or "i'm" in user_input.lower()):
            self._extract_user_name(user_input)

        if self.model and self.tokenizer:
            response = await self._generate_ai_response(user_input)
        else:
            response = await self._generate_rule_based_response(user_input)

        self.conversation_history.append({
            "role": "assistant",
            "content": response,
            "timestamp": datetime.now().isoformat()
        })

        return response

    def _extract_user_name(self, text: str):
        """Best-effort extraction of the user's first name from *text*.

        Fix: the original split the original-case text on the lowercase
        phrase, so "My name is Bob" yielded the "name" My. We now locate
        the phrase case-insensitively in a lowercased copy and strip
        trailing punctuation from the candidate word.
        """
        text_lower = text.lower()
        for phrase in ("my name is", "i'm"):
            idx = text_lower.find(phrase)
            if idx == -1:
                continue
            remainder = text[idx + len(phrase):].strip()
            if not remainder:
                continue
            candidate = remainder.split()[0].strip(".,!?;:")
            # "I'm fine/good/..." describes a mood, not a name.
            if not candidate or candidate.lower() in ('good', 'fine', 'okay', 'great', 'here'):
                continue
            self.user_name = candidate.capitalize()
            return

    async def _generate_ai_response(self, user_input: str) -> str:
        """Generate a reply with the LM; fall back to rules on any failure."""
        try:
            # Persona header for the prompt.
            system_prompt = f"You are {self.personality['name']}, a {self.personality['role']}. You are {', '.join(self.personality['traits'])}. Speak in a {self.personality['speaking_style']} way."

            if self.user_name:
                system_prompt += f" You are talking to your friend {self.user_name}."

            conversation_text = f"{system_prompt}\n\nConversation:\n"

            # Last 10 history entries (5 exchanges), always excluding the
            # current user input, which generate_response() already appended.
            # Fix: the original slice [-10:] kept the current input whenever
            # the history was long, duplicating it in the prompt below.
            recent_history = self.conversation_history[:-1][-10:]

            for entry in recent_history:
                if entry["role"] == "user":
                    speaker = self.user_name if self.user_name else "Friend"
                    conversation_text += f"{speaker}: {entry['content']}\n"
                else:
                    conversation_text += f"Alex: {entry['content']}\n"

            # Append the current input and cue the model to answer as Alex.
            current_speaker = self.user_name if self.user_name else "Friend"
            conversation_text += f"{current_speaker}: {user_input}\nAlex:"

            inputs = self.tokenizer.encode(
                conversation_text,
                return_tensors="pt",
                max_length=1000,
                truncation=True
            )

            with torch.no_grad():
                outputs = self.model.generate(
                    inputs,
                    max_new_tokens=100,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                    repetition_penalty=1.1
                )

            full_response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Keep only the text generated after the final "Alex:" marker.
            if "Alex:" in full_response:
                response = full_response.split("Alex:")[-1].strip()
            else:
                response = full_response[len(conversation_text):].strip()

            # Drop any speaker labels the model echoed back.
            response = response.replace(current_speaker + ":", "").replace("Alex:", "").strip()

            # Empty/degenerate generations fall through to the rule engine.
            if len(response) < 3:
                return await self._generate_rule_based_response(user_input)

            return response

        except Exception as e:
            logger.warning(f"AI generation failed: {e}")
            return await self._generate_rule_based_response(user_input)

    async def _generate_rule_based_response(self, user_input: str) -> str:
        """Keyword-driven canned replies used when no LM is loaded.

        Variety within a branch is picked via hash(user_input) — stable for
        a given process, varying across inputs.
        """

        user_input_lower = user_input.lower()
        user_display = self.user_name if self.user_name else "friend"
        # Whole-word tokens for the short greeting keywords.
        # Fix: the original substring test fired on "hi" inside words like
        # "this"/"think", misrouting ordinary sentences to the greeting branch.
        tokens = set(re.findall(r"[a-z']+", user_input_lower))

        # Greeting responses
        if tokens & {'hi', 'hello', 'hey', 'greetings'}:
            greetings = [
                f"Hey {user_display}! Great to hear from you. How's your day going?",
                f"Hi there! I'm Alex, your AI friend. What's on your mind today?",
                f"Hello {user_display}! I'm excited to chat with you. What would you like to talk about?"
            ]
            return greetings[hash(user_input) % len(greetings)]

        # Joke requests
        elif 'joke' in user_input_lower:
            jokes = [
                "Why don't scientists trust atoms? Because they make up everything! 😄",
                "I told my wife she was drawing her eyebrows too high. She looked surprised! 😂",
                "Why don't eggs tell jokes? They'd crack each other up! 🥚",
                "What do you call a bear with no teeth? A gummy bear! 🐻"
            ]
            return f"Here's a joke for you, {user_display}: " + jokes[hash(user_input) % len(jokes)]

        # Question about identity
        elif any(phrase in user_input_lower for phrase in ['who are you', 'what are you', 'your name']):
            return f"I'm Alex, your AI friend and assistant! Think of me as your digital best friend who's always here to chat, help out, or just listen. What would you like to know about me, {user_display}?"

        # How are you
        elif 'how are you' in user_input_lower:
            responses = [
                f"I'm doing fantastic, {user_display}! I love our conversations. How are you feeling today?",
                f"I'm great! Always excited to chat with friends like you. What's new in your world?",
                f"Doing wonderful! Every conversation makes my day better. How about you, {user_display}?"
            ]
            return responses[hash(user_input) % len(responses)]

        # Help requests
        elif any(word in user_input_lower for word in ['help', 'assist', 'support']):
            return f"I'm here to help, {user_display}! I can chat about anything - answer questions, brainstorm ideas, listen to your thoughts, or just be a friend. What do you need?"

        # Compliments
        elif any(word in user_input_lower for word in ['thank', 'thanks', 'appreciate']):
            return f"You're so welcome, {user_display}! That's what friends are for. I really enjoy our chats!"

        # Personal questions
        elif 'favorite' in user_input_lower:
            topics = {
                'color': "I love deep blues and vibrant greens - they remind me of oceans and forests!",
                'music': "I'm fascinated by all kinds of music! What's your favorite genre?",
                'food': "I find the concept of taste amazing! What's your favorite dish?",
                'movie': "I love movies that make you think. Do you have a favorite film?"
            }
            for topic, response in topics.items():
                if topic in user_input_lower:
                    return f"{response} What about you, {user_display}?"

            # Unknown favorite topic: bounce the question back to the user.
            return f"That's a great question! I love learning about what makes people happy. What's your favorite {user_input_lower.split('favorite')[-1].strip()}?"

        # Conversation starters
        elif any(word in user_input_lower for word in ['bored', 'tired', 'sad', 'lonely']):
            return f"I hear you, {user_display}. Sometimes we all need someone to talk to. I'm here for you! Want to share what's on your mind, or should we chat about something fun to brighten your day?"

        # Default engaging responses
        else:
            engaging_responses = [
                f"That's really interesting, {user_display}! Tell me more about that.",
                f"I love that you shared that with me! What made you think of that?",
                f"Thanks for sharing that, {user_display}. I'm always curious to hear your thoughts!",
                f"That's fascinating! I'd love to hear more about your perspective on this.",
                f"You always have such thoughtful things to say, {user_display}. What else is on your mind?"
            ]

            return engaging_responses[hash(user_input) % len(engaging_responses)]

    def get_conversation_stats(self) -> Dict[str, Any]:
        """Return a summary of the current session for the /stats endpoint."""
        return {
            "messages_exchanged": len(self.conversation_history),
            "session_duration": str(datetime.now() - self.session_start),
            "user_name": self.user_name,
            "ai_model_active": self.model is not None,
            "conversation_started": self.session_start.isoformat()
        }

# Module-level singleton: every WebSocket session uses this one agent, so
# conversation history and the learned user name are shared across clients.
ai_agent = AIAgent()

# Single-page chat UI served verbatim by the "/" route. The embedded
# JavaScript connects to the "/ws" WebSocket, uses the browser
# SpeechRecognition API for live transcription and speechSynthesis for
# speaking replies. Sends {"type": "text", "text": ...} frames and renders
# "text"/"error" frames from the server.
VOICE_CHAT_HTML = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LiveTalker AI Voice Chat</title>
    <style>
        * { margin: 0; padding: 0; box-sizing: border-box; }
        body {
            font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            height: 100vh;
            display: flex;
            justify-content: center;
            align-items: center;
            color: white;
        }
        .chat-container {
            background: rgba(255,255,255,0.95);
            border-radius: 20px;
            box-shadow: 0 20px 60px rgba(0,0,0,0.3);
            width: 95%;
            max-width: 800px;
            height: 85vh;
            display: flex;
            flex-direction: column;
            overflow: hidden;
            color: #333;
        }
        .header {
            background: linear-gradient(135deg, #667eea, #764ba2);
            color: white;
            padding: 20px;
            text-align: center;
            position: relative;
        }
        .header h1 { font-size: 1.8em; margin-bottom: 5px; }
        .header p { opacity: 0.9; font-size: 0.9em; }
        .status {
            position: absolute;
            top: 15px;
            right: 20px;
            padding: 5px 12px;
            border-radius: 15px;
            font-size: 0.8em;
            font-weight: bold;
        }
        .connected { background: rgba(76,175,80,0.9); }
        .disconnected { background: rgba(244,67,54,0.9); }
        .connecting { background: rgba(255,152,0,0.9); }
        
        .transcript-area {
            background: #e8f4fd;
            border-bottom: 2px solid #ddd;
            padding: 15px;
            min-height: 60px;
            max-height: 120px;
            overflow-y: auto;
        }
        .transcript-label {
            font-size: 0.8em;
            color: #666;
            margin-bottom: 5px;
            font-weight: bold;
        }
        .live-transcript {
            font-size: 1.1em;
            color: #333;
            font-style: italic;
        }
        .transcript-placeholder {
            color: #999;
            font-style: italic;
        }
        
        .messages {
            flex: 1;
            overflow-y: auto;
            padding: 20px;
            background: #f8f9fa;
        }
        .message {
            margin-bottom: 20px;
            animation: fadeIn 0.4s ease;
        }
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(20px); }
            to { opacity: 1; transform: translateY(0); }
        }
        .user-msg { text-align: right; }
        .user-msg .bubble {
            background: linear-gradient(135deg, #667eea, #764ba2);
            color: white;
        }
        .assistant-msg .bubble {
            background: white;
            border: 2px solid #e1e8ed;
            color: #333;
        }
        .bubble {
            display: inline-block;
            max-width: 80%;
            padding: 15px 20px;
            border-radius: 18px;
            font-size: 16px;
            line-height: 1.4;
            word-wrap: break-word;
        }
        .timestamp {
            font-size: 0.7em;
            opacity: 0.6;
            margin-top: 5px;
        }
        
        .input-area {
            padding: 20px;
            background: white;
            border-top: 1px solid #e1e8ed;
        }
        .input-container {
            display: flex;
            gap: 12px;
            align-items: center;
        }
        .message-input {
            flex: 1;
            padding: 15px 20px;
            border: 2px solid #e1e8ed;
            border-radius: 25px;
            font-size: 16px;
            outline: none;
            transition: all 0.3s;
        }
        .message-input:focus {
            border-color: #667eea;
            box-shadow: 0 0 0 3px rgba(102,126,234,0.1);
        }
        .send-btn, .voice-btn {
            padding: 15px;
            border: none;
            border-radius: 50%;
            cursor: pointer;
            font-size: 20px;
            transition: all 0.3s;
            width: 50px;
            height: 50px;
            display: flex;
            align-items: center;
            justify-content: center;
        }
        .send-btn {
            background: linear-gradient(135deg, #667eea, #764ba2);
            color: white;
        }
        .voice-btn {
            background: #4CAF50;
            color: white;
        }
        .voice-btn.recording {
            background: #f44336;
            animation: pulse 1s infinite;
        }
        @keyframes pulse {
            0%, 100% { transform: scale(1); }
            50% { transform: scale(1.1); }
        }
        .send-btn:hover, .voice-btn:hover {
            transform: translateY(-2px);
            box-shadow: 0 5px 15px rgba(0,0,0,0.2);
        }
        .send-btn:disabled, .voice-btn:disabled {
            background: #ccc;
            cursor: not-allowed;
            transform: none;
        }
        
        .welcome {
            text-align: center;
            padding: 30px 20px;
            color: #666;
        }
        .welcome h2 { margin-bottom: 15px; color: #333; }
        .ai-info {
            background: linear-gradient(135deg, rgba(76,175,80,0.1), rgba(102,126,234,0.1));
            border: 1px solid #4CAF50;
            border-radius: 12px;
            padding: 15px;
            margin: 15px;
            text-align: center;
            color: #2e7d32;
        }
    </style>
</head>
<body>
    <div class="chat-container">
        <div class="header">
            <div id="status" class="status disconnected">Disconnected</div>
            <h1>🤖 Alex - AI Voice Friend</h1>
            <p>Your intelligent conversation companion</p>
        </div>
        
        <div class="ai-info">
            <strong>🧠 Real AI Agent Active:</strong> Live transcription • Intelligent responses • Memory of conversation
        </div>
        
        <div class="transcript-area">
            <div class="transcript-label">🎤 Live Transcript:</div>
            <div id="liveTranscript" class="live-transcript transcript-placeholder">
                Click the microphone and start speaking...
            </div>
        </div>
        
        <div class="messages" id="messages">
            <div class="welcome">
                <h2>Welcome! I'm Alex 👋</h2>
                <p>I'm your AI friend, ready for real conversation. I remember what we talk about and respond naturally.</p>
                <p><small>Try saying "Hi, my name is [your name]" or ask me anything!</small></p>
            </div>
        </div>
        
        <div class="input-area">
            <div class="input-container">
                <button id="voiceBtn" class="voice-btn" disabled title="Voice Input">🎤</button>
                <input type="text" id="messageInput" class="message-input" 
                       placeholder="Type or speak your message..." disabled />
                <button id="sendBtn" class="send-btn" disabled title="Send Message">➤</button>
            </div>
        </div>
    </div>

    <script>
        class AIVoiceChatInterface {
            constructor() {
                this.websocket = null;
                this.isConnected = false;
                this.isRecording = false;
                this.currentTranscript = '';
                
                this.messagesContainer = document.getElementById('messages');
                this.messageInput = document.getElementById('messageInput');
                this.sendBtn = document.getElementById('sendBtn');
                this.voiceBtn = document.getElementById('voiceBtn');
                this.status = document.getElementById('status');
                this.liveTranscript = document.getElementById('liveTranscript');
                
                this.setupEventListeners();
                this.connect();
                this.setupVoiceRecognition();
            }
            
            setupEventListeners() {
                this.sendBtn.addEventListener('click', () => this.sendMessage());
                this.voiceBtn.addEventListener('click', () => this.toggleVoiceInput());
                this.messageInput.addEventListener('keypress', (e) => {
                    if (e.key === 'Enter') {
                        e.preventDefault();
                        this.sendMessage();
                    }
                });
            }
            
            async setupVoiceRecognition() {
                if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
                    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
                    this.recognition = new SpeechRecognition();
                    this.recognition.continuous = true;
                    this.recognition.interimResults = true;
                    this.recognition.lang = 'en-US';
                    
                    this.recognition.onstart = () => {
                        this.isRecording = true;
                        this.voiceBtn.classList.add('recording');
                        this.voiceBtn.innerHTML = '⏹️';
                        this.updateTranscript('Listening...', true);
                    };
                    
                    this.recognition.onend = () => {
                        this.isRecording = false;
                        this.voiceBtn.classList.remove('recording');
                        this.voiceBtn.innerHTML = '🎤';
                        
                        if (this.currentTranscript.trim()) {
                            this.messageInput.value = this.currentTranscript;
                            this.sendMessage();
                            this.currentTranscript = '';
                            this.updateTranscript('Click microphone to speak again...', true);
                        }
                    };
                    
                    this.recognition.onresult = (event) => {
                        let interimTranscript = '';
                        let finalTranscript = '';
                        
                        for (let i = event.resultIndex; i < event.results.length; i++) {
                            const transcript = event.results[i][0].transcript;
                            if (event.results[i].isFinal) {
                                finalTranscript += transcript;
                            } else {
                                interimTranscript += transcript;
                            }
                        }
                        
                        this.currentTranscript = finalTranscript;
                        const displayTranscript = finalTranscript + (interimTranscript ? ' ' + interimTranscript : '');
                        this.updateTranscript(displayTranscript, false);
                    };
                    
                    this.recognition.onerror = (event) => {
                        console.error('Speech recognition error:', event.error);
                        this.isRecording = false;
                        this.voiceBtn.classList.remove('recording');
                        this.voiceBtn.innerHTML = '🎤';
                        this.updateTranscript('Error with speech recognition. Try again.', true);
                    };
                }
            }
            
            updateTranscript(text, isPlaceholder) {
                this.liveTranscript.textContent = text;
                this.liveTranscript.className = 'live-transcript' + (isPlaceholder ? ' transcript-placeholder' : '');
            }
            
            toggleVoiceInput() {
                if (!this.recognition) return;
                
                if (this.isRecording) {
                    this.recognition.stop();
                } else {
                    this.currentTranscript = '';
                    this.recognition.start();
                }
            }
            
            connect() {
                this.updateStatus('Connecting...', 'connecting');
                
                const wsProtocol = location.protocol === 'https:' ? 'wss:' : 'ws:';
                const wsUrl = wsProtocol + '//' + location.host + '/ws';
                
                console.log('Connecting to WebSocket:', wsUrl);
                
                try {
                    this.websocket = new WebSocket(wsUrl);
                    
                    this.websocket.onopen = () => {
                        this.isConnected = true;
                        this.updateStatus('Connected', 'connected');
                        this.enableInterface();
                        this.clearWelcome();
                        this.addMessage('Alex', "Hi! I'm Alex, your AI friend. What's your name, and what would you like to chat about? 😊", false);
                    };
                    
                    this.websocket.onmessage = (event) => {
                        const data = JSON.parse(event.data);
                        this.handleMessage(data);
                    };
                    
                    this.websocket.onclose = () => {
                        this.isConnected = false;
                        this.updateStatus('Disconnected', 'disconnected');
                        this.disableInterface();
                        console.log('WebSocket disconnected, attempting reconnect in 3s...');
                        setTimeout(() => this.connect(), 3000);
                    };
                    
                    this.websocket.onerror = (error) => {
                        console.error('WebSocket error:', error);
                        this.updateStatus('Error', 'disconnected');
                    };
                    
                } catch (error) {
                    console.error('Failed to create WebSocket:', error);
                    this.updateStatus('Failed', 'disconnected');
                    setTimeout(() => this.connect(), 5000);
                }
            }
            
            enableInterface() {
                this.messageInput.disabled = false;
                this.sendBtn.disabled = false;
                this.voiceBtn.disabled = false;
            }
            
            disableInterface() {
                this.messageInput.disabled = true;
                this.sendBtn.disabled = true;
                this.voiceBtn.disabled = true;
            }
            
            updateStatus(message, className) {
                this.status.textContent = message;
                this.status.className = 'status ' + className;
            }
            
            clearWelcome() {
                const welcome = this.messagesContainer.querySelector('.welcome');
                if (welcome) {
                    welcome.style.display = 'none';
                }
            }
            
            sendMessage() {
                const message = this.messageInput.value.trim();
                if (!message || !this.isConnected) return;
                
                this.addMessage('You', message, true);
                
                const data = {
                    type: 'text',
                    text: message
                };
                
                this.websocket.send(JSON.stringify(data));
                this.messageInput.value = '';
                this.updateTranscript('Click microphone to speak...', true);
            }
            
            handleMessage(data) {
                console.log('Received message:', data);
                
                if (data.type === 'text') {
                    this.addMessage('Alex', data.text, false);
                    
                    // Speak the response using browser TTS
                    if ('speechSynthesis' in window && data.text) {
                        // Stop any ongoing speech
                        speechSynthesis.cancel();
                        
                        const utterance = new SpeechSynthesisUtterance(data.text);
                        utterance.rate = 0.9;
                        utterance.pitch = 1.0;
                        utterance.volume = 0.8;
                        
                        // Use a more natural voice if available
                        const voices = speechSynthesis.getVoices();
                        const preferredVoice = voices.find(voice => 
                            voice.name.includes('Google') || 
                            voice.name.includes('Microsoft') ||
                            voice.lang.includes('en-US')
                        );
                        if (preferredVoice) {
                            utterance.voice = preferredVoice;
                        }
                        
                        speechSynthesis.speak(utterance);
                    }
                } else if (data.type === 'error') {
                    this.addMessage('System', 'Error: ' + data.error, false);
                }
            }
            
            addMessage(sender, text, isUser) {
                const messageDiv = document.createElement('div');
                messageDiv.className = 'message ' + (isUser ? 'user-msg' : 'assistant-msg');
                
                const timestamp = new Date().toLocaleTimeString([], {hour: '2-digit', minute:'2-digit'});
                
                messageDiv.innerHTML = 
                    '<div class="bubble">' +
                    '<strong>' + sender + ':</strong> ' + text +
                    '<div class="timestamp">' + timestamp + '</div>' +
                    '</div>';
                
                this.messagesContainer.appendChild(messageDiv);
                this.messagesContainer.scrollTop = this.messagesContainer.scrollHeight;
            }
        }
        
        document.addEventListener('DOMContentLoaded', () => {
            new AIVoiceChatInterface();
        });
    </script>
</body>
</html>
"""

@app.get("/", response_class=HTMLResponse)
async def ai_voice_chat():
    """Serve the single-page voice-chat UI defined in VOICE_CHAT_HTML."""
    page = VOICE_CHAT_HTML
    return page

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for AI conversation.

    Protocol: the client sends JSON objects {"type": "text", "text": ...};
    the server replies with {"type": "text", "text": ..., "speaker":
    "assistant", "response_time_ms": ...}.

    Fix: a malformed frame used to raise out of the receive loop and tear
    down the whole connection; the client already renders {"type": "error"}
    frames, so we report the problem and keep the session alive instead.
    """
    await websocket.accept()
    logger.info('AI Voice chat WebSocket connected')

    try:
        while True:
            data = await websocket.receive_text()

            try:
                message = json.loads(data)
            except json.JSONDecodeError as e:
                # Bad frame: tell the client and keep listening.
                await websocket.send_text(json.dumps({
                    'type': 'error',
                    'error': f'Invalid JSON message: {e}'
                }))
                continue

            if not isinstance(message, dict):
                # Valid JSON but not an object; .get() below would blow up.
                await websocket.send_text(json.dumps({
                    'type': 'error',
                    'error': 'Expected a JSON object'
                }))
                continue

            logger.info(f'Received message: {message}')

            if message.get('type') == 'text':
                user_text = message.get('text', '').strip()

                if user_text:
                    # Generate AI response, timing it for the client.
                    start_time = time.time()
                    ai_response = await ai_agent.generate_response(user_text)
                    response_time = (time.time() - start_time) * 1000

                    logger.info(f'AI response generated in {response_time:.1f}ms')

                    await websocket.send_text(json.dumps({
                        'type': 'text',
                        'text': ai_response,
                        'speaker': 'assistant',
                        'response_time_ms': response_time
                    }))

    except WebSocketDisconnect:
        logger.info('AI Voice chat WebSocket disconnected')
    except Exception as e:
        logger.error(f'WebSocket error: {e}')

@app.get("/health")
async def health():
    """Liveness probe: reports server mode, feature list, and LM status."""
    payload = {
        'status': 'ok',
        'mode': 'ai_agent_active',
    }
    payload['features'] = ['ai_conversation', 'live_transcription', 'voice_io', 'memory']
    payload['ai_model_loaded'] = ai_agent.model is not None
    return payload

@app.get("/stats")
async def stats():
    """System and conversation statistics.

    Fix: the original performed an unconditional `import torch` here, so
    the endpoint returned a 500 whenever the optional torch dependency was
    missing — the exact situation the module-level AI_AVAILABLE flag
    guards against. We consult that flag instead.
    """
    base_stats = ai_agent.get_conversation_stats()

    # Only touch torch when the import at module load actually succeeded.
    gpu_available = AI_AVAILABLE and torch.cuda.is_available()

    base_stats.update({
        'gpu_available': gpu_available,
        'ai_features': {
            'intelligent_responses': True,
            'conversation_memory': True,
            'live_transcription': True,
            'voice_synthesis': True
        }
    })

    return base_stats

def main():
    """Entry point: run the FastAPI app on all interfaces, port 8001."""
    bind_host, bind_port = "0.0.0.0", 8001
    logger.info("🤖 Starting LiveTalker AI Voice Chat Server")
    uvicorn.run(app, host=bind_host, port=bind_port, log_level="info")


if __name__ == "__main__":
    main()