| import cv2
|
| from deepface import DeepFace
|
|
|
# Warm the model cache up front so the first analyze() call in the loop does
# not stall on building/downloading the emotion classifier.
# (DeepFace.analyze builds its own model internally; the return value of
# build_model is not needed, so it is deliberately not kept.)
DeepFace.build_model("Emotion")

# Initialize the webcam (device 0 = the default camera).
cap = cv2.VideoCapture(0)

if not cap.isOpened():
    print("Error: Could not open webcam.")
    exit()

try:
    while True:
        # Capture frame-by-frame.
        ret, frame = cap.read()
        if not ret:
            print("Error: Could not read frame.")
            break

        # Analyze the frame for emotions.
        # NOTE: DeepFace expects BGR numpy arrays (it wraps OpenCV
        # internally), so the frame is passed as-is. Converting to RGB first
        # would swap the red/blue channels and degrade detection accuracy.
        try:
            results = DeepFace.analyze(
                frame, actions=['emotion'], enforce_detection=False
            )
            # Older DeepFace releases return a single dict rather than a
            # list of dicts; normalize so the loop handles either version.
            if isinstance(results, dict):
                results = [results]

            for result in results:
                # Get the dominant emotion and the per-emotion scores.
                dominant_emotion = result['dominant_emotion']
                emotion_scores = result['emotion']

                # Overlay the dominant emotion on the frame.
                cv2.putText(frame, f"Emotion: {dominant_emotion}", (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                # Overlay each emotion's confidence score below it (optional).
                y_offset = 80
                for emotion, score in emotion_scores.items():
                    cv2.putText(frame, f"{emotion}: {score:.2f}",
                                (10, y_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                                (255, 255, 255), 1)
                    y_offset += 30
        except Exception as e:
            # Best-effort: a failed analysis on one frame should not kill
            # the video stream; report it and keep displaying frames.
            print(f"Error analyzing frame: {e}")

        # Display the annotated frame.
        cv2.imshow('Emotion Recognition', frame)

        # Break the loop if 'q' is pressed.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the webcam and close all OpenCV windows even if the loop
    # above exits via an exception (e.g. KeyboardInterrupt).
    cap.release()
    cv2.destroyAllWindows()
|