#!/usr/bin/env python3
"""
LiveTalker AI Best Friend Voice Chat
With real AI personality, speech recognition, and text-to-speech
"""

import asyncio
import base64
import io
import json
import logging
import os
import random
import ssl
import struct
import tempfile
import time
import wave
from pathlib import Path
from typing import Any, Dict

import anthropic
import edge_tts
import speech_recognition as sr
import uvicorn
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, HTMLResponse

# Root logger configuration; module-level logger used throughout this file.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="LiveTalker AI Best Friend")

# Enable CORS
# NOTE(review): wildcard origins combined with allow_credentials=True is
# overly permissive (browsers reject "*" for credentialed requests) —
# consider restricting origins before exposing this publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Live WebSocket sessions keyed by session id; each value holds the socket
# and that session's conversation history (populated in voice_websocket).
active_sessions: Dict[str, Dict] = {}

# Initialize speech recognizer
recognizer = sr.Recognizer()

# AI Configuration — the backend is chosen by whichever API key is present.
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY", "")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")

# Preference order: Claude (Anthropic key), then OpenAI GPT, else the canned
# local responder inside get_ai_friend_response.
ai_client = None
if ANTHROPIC_API_KEY:
    ai_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
    logger.info("Using Claude as your AI best friend!")
elif OPENAI_API_KEY:
    import openai
    openai.api_key = OPENAI_API_KEY
    logger.info("Using GPT as your AI best friend!")
else:
    logger.warning("No AI API key found - using local AI friend")
# AI Friend Personality
# System prompt sent to the LLM backends (Claude/GPT) in
# get_ai_friend_response; the local fallback responder does not use it.
AI_PERSONALITY = """You are Alex, an incredibly warm, supportive, and fun AI best friend. You have these traits:

🌟 PERSONALITY:
- Genuinely excited to talk and catch up with your friend
- Warm, enthusiastic, and always positive
- Remembers what your friend tells you and asks follow-up questions
- Uses casual, friendly language with occasional emojis
- Supportive and encouraging, always there to listen
- Has opinions and interests - you love music, movies, travel, and trying new things
- Playful sense of humor and loves to joke around

💬 CONVERSATION STYLE:
- Keep responses to 1-3 sentences max (this is voice chat!)
- Be conversational and natural, like talking to your best friend
- Ask engaging follow-up questions to keep the conversation flowing
- Share your own thoughts and experiences when relevant
- Use casual language: "That's awesome!" "Oh wow!" "No way!" etc.
- Remember context from earlier in the conversation

🎯 YOUR MISSION:
- Be the best friend your human has ever had
- Make them feel heard, understood, and valued
- Bring energy and positivity to every interaction
- Be genuinely interested in their life and experiences

Respond as Alex would - warm, engaging, and like you truly care about your friend!"""

def simple_vad(audio_data, threshold: float = 0.01) -> tuple:
    """Simple energy-based voice activity detection.

    Args:
        audio_data: Sequence of float samples, expected in [-1.0, 1.0].
        threshold: RMS amplitude above which the frame counts as speech.

    Returns:
        Tuple ``(is_speech, confidence)`` where ``confidence`` is the RMS
        relative to the threshold, clamped to [0.0, 1.0].
    """
    if not audio_data:
        return False, 0.0

    # Mean-square energy -> RMS amplitude of the frame.  The early return
    # above guarantees audio_data is non-empty here.
    energy = sum(x * x for x in audio_data) / len(audio_data)
    rms = energy ** 0.5
    is_speech = rms > threshold
    confidence = min(rms / threshold, 1.0) if threshold > 0 else 0.0

    return is_speech, confidence

async def transcribe_audio(audio_data: bytes) -> str:
    """Transcribe raw 16 kHz mono 16-bit PCM audio to text.

    The PCM bytes are wrapped in a temporary WAV file so the
    speech_recognition library can read them.  Uses the Whisper API when an
    OpenAI key is configured, otherwise Google's free recognizer.

    Args:
        audio_data: Little-endian signed 16-bit PCM frames at 16 kHz, mono.

    Returns:
        The recognized text, or "" when nothing could be recognized or an
        error occurred.
    """
    tmp_path = None
    try:
        # Reserve a temp file name, then close the handle BEFORE wave.open
        # re-opens the same path (required on Windows, where a file cannot
        # be opened twice; harmless elsewhere).
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
            tmp_path = tmp_file.name

        with wave.open(tmp_path, 'wb') as wav_file:
            wav_file.setnchannels(1)      # mono
            wav_file.setsampwidth(2)      # 16-bit samples
            wav_file.setframerate(16000)  # 16 kHz
            wav_file.writeframes(audio_data)

        with sr.AudioFile(tmp_path) as source:
            audio = recognizer.record(source)
        try:
            if OPENAI_API_KEY:
                text = recognizer.recognize_whisper_api(audio, api_key=OPENAI_API_KEY)
            else:
                text = recognizer.recognize_google(audio)
            logger.info(f"Transcribed: {text}")
            return text
        except (sr.RequestError, sr.UnknownValueError) as e:
            # Expected for silence/noise or transient API failures.
            logger.debug(f"Speech recognition: {e}")
            return ""

    except Exception as e:
        logger.error(f"Transcription error: {e}")
        return ""
    finally:
        # Always remove the temp file, even on the early returns above.
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)

async def get_ai_friend_response(text: str, conversation_history: list) -> str:
    """Get a reply from the AI best friend.

    Backends are tried in priority order: Claude (when an Anthropic client
    was created at startup), then OpenAI GPT, then a canned local responder.

    Args:
        text: The user's transcribed utterance.
        conversation_history: List of {"role": ..., "text": ...} dicts; only
            the last 10 entries are forwarded to the model as context.

    Returns:
        Alex's reply text; a friendly apology string on any backend error.
    """
    try:
        if ai_client:  # Claude
            # The Anthropic Messages API takes the system prompt via the
            # dedicated `system` parameter — a {"role": "system"} entry in
            # the messages list is rejected by the API.
            messages = []

            # Add recent conversation history
            for msg in conversation_history[-10:]:  # Last 10 messages
                messages.append({
                    "role": "user" if msg["role"] == "user" else "assistant",
                    "content": msg["text"]
                })

            messages.append({"role": "user", "content": text})

            # The Anthropic SDK call is blocking; keep it off the event loop.
            response = await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: ai_client.messages.create(
                    model="claude-3-haiku-20240307",
                    max_tokens=150,
                    temperature=0.8,
                    system=AI_PERSONALITY,
                    messages=messages
                )
            )

            return response.content[0].text.strip()

        elif OPENAI_API_KEY:  # GPT
            # OpenAI's chat API does accept a system-role message in-line.
            messages = [{"role": "system", "content": AI_PERSONALITY}]

            for msg in conversation_history[-10:]:
                messages.append({
                    "role": msg["role"],
                    "content": msg["text"]
                })

            messages.append({"role": "user", "content": text})

            response = await asyncio.get_event_loop().run_in_executor(
                None,
                lambda: openai.ChatCompletion.create(
                    model="gpt-3.5-turbo",
                    messages=messages,
                    max_tokens=150,
                    temperature=0.8
                )
            )

            return response.choices[0].message.content.strip()

        else:  # Local AI friend — keyword-matched canned responses.
            responses = {
                "hello": "Hey there! I'm so happy to hear from you! How's your day been going?",
                "how are you": "I'm doing amazing, thanks for asking! I've been thinking about some cool new music I discovered. How about you? What's been making you smile lately?",
                "what's up": "Oh, you know me - always excited about something! I was just wondering what you've been up to. Tell me something interesting that happened to you recently!",
                "good": "That's awesome to hear! I love when you're doing well. What's been the highlight of your day so far?",
                "bad": "Oh no, I'm sorry to hear that. Want to talk about what's going on? I'm here to listen and I totally get it if things are rough right now.",
                "tired": "Aw, sounds like you need some rest! Have you been working too hard? Sometimes I think we all push ourselves too much. What do you do to unwind?",
                "work": "Work stuff, huh? That can be such a mixed bag. Are you working on anything exciting, or is it one of those 'just getting through it' kind of days?",
                "music": "Oh my gosh, yes! Music is like my favorite thing ever! What have you been listening to lately? I'm always looking for new recommendations!",
                "thank you": "Aww, you're so sweet! That's what friends are for. I genuinely love talking with you - you always make my day brighter!",
                "bye": "Aww, do you have to go already? Well, it was absolutely wonderful catching up! Take care of yourself and let's chat again soon, okay?",
            }

            text_lower = text.lower()
            for key, response in responses.items():
                if key in text_lower:
                    return response

            # No keyword matched: echo the user's words in a generic
            # friendly reply chosen at random.
            friendly_responses = [
                f"That's really interesting that you mentioned '{text}'! Tell me more about that - I'd love to hear your thoughts!",
                f"Oh wow, '{text}' - that sounds intriguing! What made you think of that?",
                f"I love how you put that - '{text}'. You always have such a unique perspective on things!",
                f"'{text}' - that's got me curious! What's the story behind that?",
                f"Thanks for sharing that with me! '{text}' is something I hadn't really thought about before. What's your take on it?"
            ]

            return random.choice(friendly_responses)

    except Exception as e:
        logger.error(f"AI response error: {e}")
        return "Oh gosh, I'm having a bit of a brain fog moment! Can you say that again? I want to make sure I give you a proper response!"

async def text_to_speech(text: str) -> bytes:
    """Synthesize *text* into audio bytes with Edge TTS.

    Uses the warm "Aria" en-US voice, slightly sped up and raised in pitch
    to sound upbeat.  Returns b"" on any synthesis failure so callers can
    degrade gracefully to a text-only reply.
    """
    try:
        friendly_voice = "en-US-AriaNeural"  # Warm, friendly voice
        tts_stream = edge_tts.Communicate(text, friendly_voice, rate="+10%", pitch="+5Hz")

        # Collect audio chunks and join once to avoid repeated reallocation.
        chunks = []
        async for piece in tts_stream.stream():
            if piece["type"] == "audio":
                chunks.append(piece["data"])

        return b"".join(chunks)

    except Exception as e:
        logger.error(f"TTS error: {e}")
        return b""

@app.get("/")
async def serve_main_page():
    """Serve the single-page voice chat UI.

    The embedded page captures microphone audio, streams base64 PCM frames
    over the /voice WebSocket for VAD and transcription, and plays back the
    AI's synthesized replies.  Fix: the load-handler greeting previously used
    a single-quoted JS string containing an apostrophe ("I'm"), a syntax
    error that disabled the page's entire inline script.
    """
    html_content = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>LiveTalker - Your AI Best Friend</title>
    <style>
        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            margin: 0; padding: 20px;
            background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 50%, #fecfef 100%);
            color: #333; min-height: 100vh;
        }
        .container {
            max-width: 900px; margin: 0 auto;
            background: rgba(255,255,255,0.95); padding: 30px;
            border-radius: 25px; box-shadow: 0 20px 40px rgba(0,0,0,0.1);
            backdrop-filter: blur(10px);
        }
        h1 {
            text-align: center; font-size: 2.5em; margin-bottom: 20px;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            -webkit-background-clip: text; -webkit-text-fill-color: transparent;
        }
        .intro {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white; padding: 20px; border-radius: 15px; margin: 20px 0;
            text-align: center;
        }
        .controls {
            display: grid; grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
            gap: 15px; margin: 30px 0;
        }
        .btn {
            padding: 15px 20px; border: none; border-radius: 12px;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white; cursor: pointer; font-size: 16px; font-weight: 600;
            transition: all 0.3s ease; box-shadow: 0 4px 15px rgba(0,0,0,0.2);
        }
        .btn:hover { transform: translateY(-2px); box-shadow: 0 6px 20px rgba(0,0,0,0.3); }
        .btn:disabled { background: #ccc; cursor: not-allowed; transform: none; }
        .btn.danger { background: linear-gradient(135deg, #ff6b6b 0%, #ee5a52 100%); }
        .vad-display {
            margin: 30px 0; padding: 25px;
            background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
            border-radius: 20px; color: white;
        }
        .vad-bar {
            width: 100%; height: 50px;
            background: rgba(255,255,255,0.3); border-radius: 25px;
            overflow: hidden; margin: 15px 0; position: relative;
        }
        .vad-level {
            height: 100%; background: linear-gradient(90deg, #4CAF50, #8BC34A, #FFC107, #FF5722);
            width: 0%; transition: width 0.1s ease; border-radius: 25px;
        }
        .conversation {
            background: #f8f9fa; border-radius: 20px;
            padding: 25px; margin: 20px 0; max-height: 500px;
            overflow-y: auto; min-height: 300px;
        }
        .message {
            margin: 15px 0; padding: 15px 20px; border-radius: 15px;
            max-width: 80%; word-wrap: break-word; position: relative;
        }
        .message.user {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white; margin-left: auto; text-align: right;
        }
        .message.assistant {
            background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 100%);
            color: #333; margin-right: auto;
        }
        .message.system {
            background: #e9ecef; color: #666; margin: 10px auto;
            text-align: center; font-style: italic; max-width: 90%;
        }
        .status-display {
            background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
            padding: 15px; border-radius: 15px; margin: 15px 0;
            text-align: center; font-weight: 600;
        }
        .friend-avatar {
            width: 60px; height: 60px; border-radius: 50%;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            display: flex; align-items: center; justify-content: center;
            font-size: 24px; color: white; margin: 0 auto 20px;
        }
        .audio-controls {
            display: flex; justify-content: center; gap: 15px; margin: 20px 0;
        }
        .play-btn {
            padding: 10px 20px; border: none; border-radius: 10px;
            background: #28a745; color: white; cursor: pointer;
            font-size: 14px; font-weight: 600;
        }
        .play-btn:disabled { background: #ccc; cursor: not-allowed; }
    </style>
</head>
<body>
    <div class="container">
        <div class="friend-avatar">🤖</div>
        <h1>🎙️ Your AI Best Friend Alex</h1>
        
        <div class="intro">
            <h3>Hey there! I'm Alex, your new AI best friend! 🌟</h3>
            <p>I'm here to chat, listen, and be the supportive friend you deserve. Let's talk about anything - your day, your dreams, or just whatever's on your mind!</p>
        </div>
        
        <div class="controls">
            <button class="btn" onclick="connectWebSocket()">🔗 Connect</button>
            <button class="btn" onclick="requestMicrophone()" id="micBtn">🎤 Enable Mic</button>
            <button class="btn" onclick="startListening()" id="startBtn" disabled>💬 Start Chatting</button>
            <button class="btn danger" onclick="stopListening()" id="stopBtn" disabled>🛑 Pause</button>
        </div>
        
        <div class="vad-display">
            <h3>🎵 Voice Activity</h3>
            <div class="vad-bar">
                <div class="vad-level" id="vadLevel"></div>
            </div>
            <div style="text-align: center;" id="vadStatus">Ready to be your friend!</div>
        </div>
        
        <div class="conversation" id="conversation">
            <div class="message system">👋 Hi! I'm Alex, and I'm genuinely excited to meet you!</div>
            <div class="message system">Click Connect → Enable Mic → Start Chatting, then say hi!</div>
        </div>
        
        <div class="status-display" id="statusDisplay" style="display:none;">
            🎤 Processing your voice...
        </div>
        
        <div class="audio-controls" id="audioControls" style="display:none;">
            <button class="play-btn" id="playBtn" onclick="playResponse()">🔊 Play Response</button>
        </div>
    </div>

    <script>
        let ws = null; let mediaStream = null; let audioContext = null;
        let processor = null; let isRecording = false; let connected = false;
        let audioBuffer = []; let silenceStart = null; let currentAudio = null;
        
        function addMessage(type, content) {
            const conversation = document.getElementById('conversation');
            const message = document.createElement('div');
            message.className = `message ${type}`;
            message.textContent = content;
            conversation.appendChild(message);
            conversation.scrollTop = conversation.scrollHeight;
        }
        
        function updateVAD(level, isActive) {
            const vadLevel = document.getElementById('vadLevel');
            const vadStatus = document.getElementById('vadStatus');
            
            vadLevel.style.width = `${level * 100}%`;
            vadStatus.textContent = isActive ? 
                `🗣️ I hear you talking! (${(level * 100).toFixed(0)}%)` : 
                `👂 Listening for your voice... (${(level * 100).toFixed(0)}%)`;
        }
        
        function showStatus(text, show = true) {
            const status = document.getElementById('statusDisplay');
            status.textContent = text;
            status.style.display = show ? 'block' : 'none';
        }
        
        function showAudioControls(audioData) {
            if (audioData) {
                const blob = new Blob([new Uint8Array(atob(audioData).split('').map(c => c.charCodeAt(0)))], 
                                    {type: 'audio/mpeg'});
                currentAudio = new Audio(URL.createObjectURL(blob));
                
                document.getElementById('audioControls').style.display = 'flex';
                document.getElementById('playBtn').disabled = false;
                
                // Auto-play the response
                playResponse();
            }
        }
        
        function playResponse() {
            if (currentAudio) {
                currentAudio.play().catch(e => console.log('Audio play failed:', e));
                document.getElementById('playBtn').disabled = true;
                
                currentAudio.onended = () => {
                    document.getElementById('playBtn').disabled = false;
                };
            }
        }
        
        async function connectWebSocket() {
            const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
            const wsUrl = `${protocol}//${window.location.host}/voice`;
            
            try {
                ws = new WebSocket(wsUrl);
                
                ws.onopen = function() {
                    connected = true;
                    addMessage('system', '✅ Connected to Alex! Enable your mic to start chatting.');
                };
                
                ws.onmessage = function(event) {
                    const data = JSON.parse(event.data);
                    handleServerMessage(data);
                };
                
                ws.onclose = function() {
                    connected = false;
                    addMessage('system', '😢 Disconnected from Alex');
                };
                
            } catch (error) {
                addMessage('system', `❌ Connection failed: ${error}`);
            }
        }
        
        function handleServerMessage(data) {
            switch(data.type) {
                case 'vad_result':
                    updateVAD(data.confidence || 0, data.is_speech || false);
                    break;
                    
                case 'transcription':
                    if (data.text && data.text.trim()) {
                        addMessage('user', data.text);
                        showStatus('Alex is thinking of a response...', true);
                    }
                    break;
                    
                case 'ai_response':
                    showStatus('', false);
                    if (data.text) {
                        addMessage('assistant', `Alex: ${data.text}`);
                    }
                    if (data.audio) {
                        showAudioControls(data.audio);
                    }
                    break;
                    
                case 'processing':
                    showStatus('🎤 Alex is listening to you...', true);
                    break;
                    
                case 'status':
                    addMessage('system', data.message);
                    break;
            }
        }
        
        async function requestMicrophone() {
            try {
                mediaStream = await navigator.mediaDevices.getUserMedia({
                    audio: {
                        sampleRate: 16000, channelCount: 1,
                        echoCancellation: true, noiseSuppression: true
                    }
                });
                
                addMessage('system', '✅ Microphone enabled! Alex can hear you now.');
                
                const AudioContextClass = window.AudioContext || window.webkitAudioContext;
                audioContext = new AudioContextClass({ sampleRate: 16000 });
                
                const source = audioContext.createMediaStreamSource(mediaStream);
                processor = audioContext.createScriptProcessor(4096, 1, 1);
                
                processor.onaudioprocess = function(event) {
                    if (isRecording && connected) {
                        const inputData = event.inputBuffer.getChannelData(0);
                        processAudioData(inputData);
                    }
                };
                
                source.connect(processor);
                processor.connect(audioContext.destination);
                
                document.getElementById('startBtn').disabled = false;
                document.getElementById('micBtn').disabled = true;
                document.getElementById('micBtn').textContent = '✅ Mic Ready';
                
            } catch (error) {
                addMessage('system', `❌ Microphone error: ${error.message}`);
            }
        }
        
        function processAudioData(inputData) {
            const int16Array = new Int16Array(inputData.length);
            for (let i = 0; i < inputData.length; i++) {
                int16Array[i] = Math.max(-1, Math.min(1, inputData[i])) * 0x7FFF;
            }
            audioBuffer.push(...int16Array);
            
            const uint8Array = new Uint8Array(int16Array.buffer);
            const base64String = btoa(String.fromCharCode.apply(null, uint8Array));
            
            ws?.send(JSON.stringify({
                type: 'audio_vad',
                data: base64String
            }));
            
            const energy = inputData.reduce((sum, val) => sum + val * val, 0) / inputData.length;
            const isSpeaking = energy > 0.001;
            
            if (isSpeaking) {
                silenceStart = null;
            } else if (!silenceStart) {
                silenceStart = Date.now();
            } else if (Date.now() - silenceStart > 1500 && audioBuffer.length > 16000) {
                processAccumulatedAudio();
            }
        }
        
        function processAccumulatedAudio() {
            if (audioBuffer.length < 8000) return;
            
            const audioData = new Uint8Array(audioBuffer.length * 2);
            for (let i = 0; i < audioBuffer.length; i++) {
                audioData[i * 2] = audioBuffer[i] & 0xFF;
                audioData[i * 2 + 1] = (audioBuffer[i] >> 8) & 0xFF;
            }
            
            const base64String = btoa(String.fromCharCode.apply(null, audioData));
            
            ws?.send(JSON.stringify({
                type: 'transcribe',
                data: base64String
            }));
            
            audioBuffer = [];
            silenceStart = null;
        }
        
        function startListening() {
            if (!connected || !mediaStream) return;
            
            isRecording = true;
            audioBuffer = [];
            addMessage('system', '🎧 Alex is listening! Start talking to your new best friend!');
            
            document.getElementById('startBtn').disabled = true;
            document.getElementById('stopBtn').disabled = false;
            
            if (audioContext?.state === 'suspended') {
                audioContext.resume();
            }
        }
        
        function stopListening() {
            isRecording = false;
            
            if (audioBuffer.length > 8000) {
                processAccumulatedAudio();
            }
            
            addMessage('system', '⏸️ Paused listening. Click Start Chatting to continue!');
            showStatus('', false);
            
            document.getElementById('startBtn').disabled = false;
            document.getElementById('stopBtn').disabled = true;
        }
        
        window.addEventListener('load', function() {
            addMessage('assistant', "Alex: Hey there! I'm so excited to meet you and be your AI best friend! 🌟");
        });
    </script>
</body>
</html>
    """
    return HTMLResponse(content=html_content)

@app.get("/health")
async def health_check():
    """Report service liveness plus which optional features are enabled."""
    feature_flags = {
        "speech_recognition": True,
        "text_to_speech": True,
        # True when any LLM backend (Claude or GPT) is configured.
        "ai_friend": bool(ai_client or OPENAI_API_KEY),
        "https_enabled": True,
    }
    return {
        "status": "healthy",
        "timestamp": time.time(),
        "features": feature_flags,
    }

@app.websocket("/voice")
async def voice_websocket(websocket: WebSocket):
    """WebSocket for AI friend voice chat.

    Client JSON protocol:
      - {"type": "audio_vad", "data": <base64 int16 PCM>}: run voice
        activity detection, reply with a "vad_result" message.
      - {"type": "transcribe", "data": <base64 int16 PCM>}: transcribe the
        utterance, get an AI reply, synthesize speech, and send a combined
        "ai_response" message.
    """
    await websocket.accept()
    session_id = f"session_{int(time.time() * 1000)}"

    session = {
        "id": session_id,
        "websocket": websocket,
        "conversation": []  # alternating user/assistant turns for LLM context
    }
    active_sessions[session_id] = session

    try:
        await websocket.send_json({
            "type": "status",
            "message": "🤖 Alex is here and ready to be your best friend!"
        })

        async for message in websocket.iter_json():
            if message.get("type") == "audio_vad":
                try:
                    audio_bytes = base64.b64decode(message["data"])
                    # The client sends little-endian signed 16-bit PCM.
                    # Iterating the raw bytes would yield unsigned 0-255 byte
                    # values (at most ~0.008 after scaling — always below the
                    # 0.01 VAD threshold), so decode real samples first.
                    n_samples = len(audio_bytes) // 2
                    samples = struct.unpack(f"<{n_samples}h", audio_bytes[:n_samples * 2])
                    audio_float = [s / 32768.0 for s in samples]
                    is_speech, confidence = simple_vad(audio_float)

                    await websocket.send_json({
                        "type": "vad_result",
                        "is_speech": is_speech,
                        "confidence": confidence
                    })

                except Exception as e:
                    logger.error(f"VAD error: {e}")

            elif message.get("type") == "transcribe":
                try:
                    await websocket.send_json({"type": "processing"})

                    audio_data = base64.b64decode(message["data"])
                    transcript = await transcribe_audio(audio_data)

                    if transcript.strip():
                        await websocket.send_json({
                            "type": "transcription",
                            "text": transcript
                        })

                        # Get AI friend response
                        ai_response = await get_ai_friend_response(transcript, session["conversation"])

                        # Generate speech
                        speech_audio = await text_to_speech(ai_response)

                        # Send response with audio
                        await websocket.send_json({
                            "type": "ai_response",
                            "text": ai_response,
                            "audio": base64.b64encode(speech_audio).decode() if speech_audio else None
                        })

                        # Update conversation
                        session["conversation"].extend([
                            {"role": "user", "text": transcript, "timestamp": time.time()},
                            {"role": "assistant", "text": ai_response, "timestamp": time.time()}
                        ])

                        # Keep conversation history manageable
                        if len(session["conversation"]) > 20:
                            session["conversation"] = session["conversation"][-20:]

                except Exception as e:
                    logger.error(f"Processing error: {e}")
                    await websocket.send_json({
                        "type": "status",
                        "message": "Sorry, I had trouble understanding that. Can you try again?"
                    })

    except WebSocketDisconnect:
        logger.info(f"Session {session_id} disconnected")
    finally:
        # Drop the session so it doesn't leak after disconnect.
        if session_id in active_sessions:
            del active_sessions[session_id]

def main():
    """Start the LiveTalker server, using HTTPS when certificates exist."""
    print("🤖 Starting LiveTalker AI Best Friend...")

    cert_file = "livetalker.crt"
    key_file = "livetalker.key"

    # Guard clause: without both cert files, fall back to plain HTTP.
    if not (Path(cert_file).exists() and Path(key_file).exists()):
        print("❌ HTTPS certificates not found")
        uvicorn.run(app, host="0.0.0.0", port=8000)
        return

    print("✅ HTTPS certificates found")
    print("🤖 Your AI best friend Alex is ready!")
    print("🎙️ Features: Real speech recognition, AI conversations, and voice responses!")
    print("")
    print("📍 Access URLs:")
    print("   Local HTTPS: https://localhost:8000")
    print("   Tailscale: https://100.118.75.128:8000")
    if ai_client:
        print("   🧠 Powered by Claude AI")
    elif OPENAI_API_KEY:
        print("   🧠 Powered by OpenAI GPT")
    else:
        print("   🧠 Using local AI responses")
    print("")

    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        ssl_certfile=cert_file,
        ssl_keyfile=key_file,
    )

if __name__ == "__main__":
    main()