<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Mocha AI Avatar</title>
<style>
@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;600&display=swap');
* {
margin: 0;
padding: 0;
box-sizing: border-box;
font-family: 'Poppins', sans-serif;
}
body {
background: linear-gradient(135deg, #8E44AD, #2980B9);
color: white;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
text-align: center;
padding: 20px;
}
.container {
background: rgba(255, 255, 255, 0.2);
padding: 20px;
border-radius: 12px;
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.2);
max-width: 600px;
width: 100%;
}
h1 {
margin-bottom: 15px;
}
#avatar-container {
width: 100%;
height: 400px;
background: #2C3E50;
border-radius: 10px;
}
button {
width: 100%;
padding: 10px;
margin: 10px 0;
border: none;
border-radius: 5px;
font-size: 16px;
cursor: pointer;
transition: 0.3s;
background: #E67E22;
color: white;
}
button:hover {
opacity: 0.8;
}
</style>
</head>
<body>
<div class="container">
<h1>Mocha AI Avatar</h1>
<div id="avatar-container"></div>
<button onclick="toggleTracking()">Toggle Face Tracking</button>
<button onclick="startVoiceAnimation()">Start Voice Animation</button>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script>
let scene, camera, renderer, avatar;
let isTrackingEnabled = false;
let trackingStream = null; // webcam stream handle, so tracking can be stopped
function init3DAvatar() {
scene = new THREE.Scene();
const container = document.getElementById('avatar-container');
camera = new THREE.PerspectiveCamera(75, container.clientWidth / container.clientHeight, 0.1, 1000);
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setSize(container.clientWidth, container.clientHeight); // fill the styled container instead of a hardcoded 400x400
container.appendChild(renderer.domElement);
// Placeholder avatar: a flat-shaded yellow sphere
let geometry = new THREE.SphereGeometry(1, 32, 32);
let material = new THREE.MeshBasicMaterial({ color: 0xffcc00 });
avatar = new THREE.Mesh(geometry, material);
scene.add(avatar);
camera.position.z = 3;
animateAvatar();
}
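// The sphere is only a stand-in. A rigged avatar model could replace it with
// Three.js's GLTFLoader; this is a sketch assuming the r128 global build loaded
// above plus its companion script examples/js/loaders/GLTFLoader.js, and
// 'avatar.glb' is a hypothetical file path, not an asset shipped with this page.
function loadAvatarModel() {
const loader = new THREE.GLTFLoader();
loader.load('avatar.glb', gltf => {
scene.remove(avatar); // drop the placeholder sphere
avatar = gltf.scene;
scene.add(avatar);
}, undefined, err => console.error("Model load error:", err));
}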
// Render loop: a slow idle spin keeps the placeholder avatar moving
function animateAvatar() {
requestAnimationFrame(animateAvatar);
avatar.rotation.y += 0.01;
renderer.render(scene, camera);
}
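// Keep the canvas and camera in sync with the container size; a small addition
// assuming the responsive sizing used in init3DAvatar() above.
window.addEventListener('resize', () => {
if (!renderer) return; // ignore resizes that fire before init3DAvatar() runs
const container = document.getElementById('avatar-container');
camera.aspect = container.clientWidth / container.clientHeight;
camera.updateProjectionMatrix();
renderer.setSize(container.clientWidth, container.clientHeight);
});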
function toggleTracking() {
isTrackingEnabled = !isTrackingEnabled;
if (isTrackingEnabled) {
startFaceTracking();
} else if (trackingStream) {
// Turning tracking off releases the webcam
trackingStream.getTracks().forEach(track => track.stop());
trackingStream = null;
}
}
function startFaceTracking() {
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia({ video: true })
.then(stream => {
trackingStream = stream; // keep a handle so toggleTracking() can release the webcam
console.log("Face tracking enabled!");
// Implement AI tracking with TensorFlow.js or MediaPipe here
// (see the trackFaceToAvatar() sketch below for one possible approach)
})
.catch(err => console.error("Face tracking error:", err));
} else {
console.warn("getUserMedia is not supported in this browser.");
}
}
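// A minimal sketch of how the stub above could be filled in with TensorFlow.js's
// face-landmarks-detection model, assuming its browser bundles are loaded via
// <script> tags (https://cdn.jsdelivr.net/npm/@tensorflow/tfjs and
// https://cdn.jsdelivr.net/npm/@tensorflow-models/face-landmarks-detection).
// Keypoint index 1 is the nose tip in the MediaPipe FaceMesh topology; the
// rotation mapping is an arbitrary tuning choice, and the idle spin in
// animateAvatar() would need to be disabled for the head turn to be visible.
async function trackFaceToAvatar(stream) {
const video = document.createElement('video');
video.srcObject = stream;
video.muted = true;
await video.play();
const detector = await faceLandmarksDetection.createDetector(
faceLandmarksDetection.SupportedModels.MediaPipeFaceMesh,
{ runtime: 'tfjs' }
);
async function detectLoop() {
if (!isTrackingEnabled) return; // stop once tracking is toggled off
const faces = await detector.estimateFaces(video);
if (faces.length > 0) {
// Map the nose tip's horizontal offset to a head turn on the avatar
const nose = faces[0].keypoints[1];
avatar.rotation.y = (nose.x / video.videoWidth - 0.5) * Math.PI;
}
requestAnimationFrame(detectLoop);
}
detectLoop();
}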
function startVoiceAnimation() {
const SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;
if (!SpeechRecognitionCtor) {
console.warn("Speech recognition is not supported in this browser.");
return;
}
let recognition = new SpeechRecognitionCtor();
recognition.onresult = (event) => {
let transcript = event.results[event.results.length - 1][0].transcript;
console.log("User said:", transcript);
avatar.scale.y = 1 + Math.random(); // Simulating lip movement; see the startLipSyncFromMic() sketch below for a volume-driven version
};
recognition.onend = () => { avatar.scale.y = 1; }; // reset the "mouth" when listening stops
recognition.start();
}
window.onload = init3DAvatar;
</script>
</body>
</html>