# Quickstart
Get up and running with UniFace in 5 minutes. This guide covers the most common use cases.
## Face Detection
Detect faces in an image:
```python
import cv2

from uniface.detection import RetinaFace

# Load image
image = cv2.imread("photo.jpg")

# Initialize detector (models auto-download on first use)
detector = RetinaFace()

# Detect faces
faces = detector.detect(image)

# Print results
for i, face in enumerate(faces):
    print(f"Face {i+1}:")
    print(f"  Confidence: {face.confidence:.2f}")
    print(f"  BBox: {face.bbox}")
    print(f"  Landmarks: {len(face.landmarks)} points")
```
## Visualize Detections
Draw bounding boxes and landmarks:
```python
import cv2

from uniface.detection import RetinaFace
from uniface.draw import draw_detections

# Detect faces
detector = RetinaFace()
image = cv2.imread("photo.jpg")
faces = detector.detect(image)

# Extract visualization data
bboxes = [f.bbox for f in faces]
scores = [f.confidence for f in faces]
landmarks = [f.landmarks for f in faces]

# Draw on image
draw_detections(
    image=image,
    bboxes=bboxes,
    scores=scores,
    landmarks=landmarks,
    vis_threshold=0.6,
)

# Save result
cv2.imwrite("output.jpg", image)
```
## Face Recognition
Compare two faces:
```python
import cv2
import numpy as np

from uniface.detection import RetinaFace
from uniface.recognition import ArcFace

# Initialize models
detector = RetinaFace()
recognizer = ArcFace()

# Load two images
image1 = cv2.imread("person1.jpg")
image2 = cv2.imread("person2.jpg")

# Detect faces
faces1 = detector.detect(image1)
faces2 = detector.detect(image2)

if faces1 and faces2:
    # Extract embeddings
    emb1 = recognizer.get_normalized_embedding(image1, faces1[0].landmarks)
    emb2 = recognizer.get_normalized_embedding(image2, faces2[0].landmarks)

    # Compute similarity (cosine similarity)
    similarity = np.dot(emb1, emb2.T)[0][0]

    # Interpret result
    if similarity > 0.6:
        print(f"Same person (similarity: {similarity:.3f})")
    else:
        print(f"Different people (similarity: {similarity:.3f})")
```
### Similarity Thresholds

- `> 0.6`: Same person (high confidence)
- `0.4 - 0.6`: Uncertain (manual review)
- `< 0.4`: Different people
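If you prefer the thresholds as code, here is a minimal helper (the function name is ours, not part of the UniFace API):

```python
def interpret_similarity(similarity: float) -> str:
    """Map a cosine similarity to a decision using the thresholds above."""
    if similarity > 0.6:
        return "same person (high confidence)"
    if similarity >= 0.4:
        return "uncertain (manual review)"
    return "different people"

print(interpret_similarity(0.73))  # same person (high confidence)
```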
## Age & Gender Detection
```python
import cv2

from uniface.attribute import AgeGender
from uniface.detection import RetinaFace

# Initialize models
detector = RetinaFace()
age_gender = AgeGender()

# Load image
image = cv2.imread("photo.jpg")
faces = detector.detect(image)

# Predict attributes
for i, face in enumerate(faces):
    result = age_gender.predict(image, face.bbox)
    print(f"Face {i+1}: {result.sex}, {result.age} years old")
```
## FairFace Attributes
Detect race, gender, and age group:
```python
import cv2

from uniface.attribute import FairFace
from uniface.detection import RetinaFace

detector = RetinaFace()
fairface = FairFace()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for i, face in enumerate(faces):
    result = fairface.predict(image, face.bbox)
    print(f"Face {i+1}: {result.sex}, {result.age_group}, {result.race}")
```
## Facial Landmarks (106 Points)
```python
import cv2

from uniface.detection import RetinaFace
from uniface.landmark import Landmark106

detector = RetinaFace()
landmarker = Landmark106()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

if faces:
    landmarks = landmarker.get_landmarks(image, faces[0].bbox)
    print(f"Detected {len(landmarks)} landmarks")

    # Draw landmarks
    for x, y in landmarks.astype(int):
        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

    cv2.imwrite("landmarks.jpg", image)
```
## Gaze Estimation
```python
import cv2
import numpy as np

from uniface.detection import RetinaFace
from uniface.draw import draw_gaze
from uniface.gaze import MobileGaze

detector = RetinaFace()
gaze_estimator = MobileGaze()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for i, face in enumerate(faces):
    # Crop the face region for the gaze model
    x1, y1, x2, y2 = map(int, face.bbox[:4])
    face_crop = image[y1:y2, x1:x2]
    if face_crop.size > 0:
        result = gaze_estimator.estimate(face_crop)
        print(f"Face {i+1}: pitch={np.degrees(result.pitch):.1f}°, yaw={np.degrees(result.yaw):.1f}°")

        # Draw gaze direction
        draw_gaze(image, face.bbox, result.pitch, result.yaw)

cv2.imwrite("gaze_output.jpg", image)
```
## Face Parsing
Segment face into semantic components:
```python
import cv2
import numpy as np

from uniface.draw import vis_parsing_maps
from uniface.parsing import BiSeNet

parser = BiSeNet()

# Load face image (already cropped)
face_image = cv2.imread("face.jpg")

# Parse face into 19 components
mask = parser.parse(face_image)

# Visualize with overlay
face_rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
vis_result = vis_parsing_maps(face_rgb, mask, save_image=False)

print(f"Detected {len(np.unique(mask))} facial components")
```
## Face Anonymization
Blur faces for privacy protection:
```python
import cv2

from uniface.privacy import anonymize_faces

# One-liner: automatic detection and blurring
image = cv2.imread("group_photo.jpg")
anonymized = anonymize_faces(image, method='pixelate')
cv2.imwrite("anonymized.jpg", anonymized)
```
Manual control:
```python
import cv2

from uniface.detection import RetinaFace
from uniface.privacy import BlurFace

detector = RetinaFace()
blurrer = BlurFace(method='gaussian', blur_strength=5.0)

image = cv2.imread("group_photo.jpg")
faces = detector.detect(image)
anonymized = blurrer.anonymize(image, faces)
```
Available methods:
| Method | Description |
|---|---|
| `pixelate` | Blocky effect (news media standard) |
| `gaussian` | Smooth, natural blur |
| `blackout` | Solid color boxes (maximum privacy) |
| `elliptical` | Soft oval blur (natural face shape) |
| `median` | Edge-preserving blur |
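To compare the methods on your own images, you can loop over them; a short sketch assuming `BlurFace` accepts each method name in the table (only `'gaussian'` appears in the example above):

```python
import cv2

from uniface.detection import RetinaFace
from uniface.privacy import BlurFace

image = cv2.imread("group_photo.jpg")
faces = RetinaFace().detect(image)

# Write one anonymized copy per method for side-by-side review
for method in ('pixelate', 'gaussian', 'blackout', 'elliptical', 'median'):
    result = BlurFace(method=method).anonymize(image, faces)
    cv2.imwrite(f"anonymized_{method}.jpg", result)
```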
## Face Anti-Spoofing
Detect real vs. fake faces:
```python
import cv2

from uniface.detection import RetinaFace
from uniface.spoofing import MiniFASNet

detector = RetinaFace()
spoofer = MiniFASNet()

image = cv2.imread("photo.jpg")
faces = detector.detect(image)

for i, face in enumerate(faces):
    result = spoofer.predict(image, face.bbox)
    label = 'Real' if result.is_real else 'Fake'
    print(f"Face {i+1}: {label} ({result.confidence:.1%})")
```
## Webcam Demo
Real-time face detection:
```python
import cv2

from uniface.detection import RetinaFace
from uniface.draw import draw_detections

detector = RetinaFace()
cap = cv2.VideoCapture(0)
print("Press 'q' to quit")

while True:
    ret, frame = cap.read()
    if not ret:
        break

    faces = detector.detect(frame)
    bboxes = [f.bbox for f in faces]
    scores = [f.confidence for f in faces]
    landmarks = [f.landmarks for f in faces]
    draw_detections(image=frame, bboxes=bboxes, scores=scores, landmarks=landmarks)

    cv2.imshow("UniFace - Press 'q' to quit", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
## Face Tracking
Track faces across video frames with persistent IDs:
```python
import cv2
import numpy as np

from uniface.common import xyxy_to_cxcywh
from uniface.detection import SCRFD
from uniface.draw import draw_tracks
from uniface.tracking import BYTETracker

detector = SCRFD()
tracker = BYTETracker(track_thresh=0.5, track_buffer=30)
cap = cv2.VideoCapture("video.mp4")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    faces = detector.detect(frame)
    dets = np.array([[*f.bbox, f.confidence] for f in faces])
    dets = dets if len(dets) > 0 else np.empty((0, 5))
    tracks = tracker.update(dets)

    # Assign track IDs to faces by matching nearest box centers
    if len(tracks) > 0 and len(faces) > 0:
        face_bboxes = np.array([f.bbox for f in faces], dtype=np.float32)
        track_ids = tracks[:, 4].astype(int)
        face_centers = xyxy_to_cxcywh(face_bboxes)[:, :2]
        track_centers = xyxy_to_cxcywh(tracks[:, :4])[:, :2]
        for ti in range(len(tracks)):
            dists = (track_centers[ti, 0] - face_centers[:, 0]) ** 2 + (track_centers[ti, 1] - face_centers[:, 1]) ** 2
            faces[int(np.argmin(dists))].track_id = track_ids[ti]

    tracked_faces = [f for f in faces if f.track_id is not None]
    draw_tracks(image=frame, faces=tracked_faces)

    cv2.imshow("Tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
For more details, see the Tracking module.
## Model Selection
For detailed model comparisons and benchmarks, see the Model Zoo.
Available models by task:
| Task | Available Models |
|---|---|
| Detection | RetinaFace, SCRFD, YOLOv5Face, YOLOv8Face |
| Recognition | ArcFace, AdaFace, MobileFace, SphereFace |
| Tracking | BYTETracker |
| Gaze | MobileGaze (ResNet18/34/50, MobileNetV2, MobileOneS0) |
| Parsing | BiSeNet (ResNet18/34) |
| Attributes | AgeGender, FairFace, Emotion |
| Anti-Spoofing | MiniFASNet (V1SE, V2) |
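`RetinaFace` and `SCRFD` are used interchangeably through `detect()` in this guide, so switching detection models is a one-line change. A quick sketch swapping between them:

```python
import cv2

from uniface.detection import SCRFD, RetinaFace

image = cv2.imread("photo.jpg")

# Same call works for either detector; pick by speed/accuracy trade-off
for detector in (RetinaFace(), SCRFD()):
    faces = detector.detect(image)
    print(f"{type(detector).__name__}: {len(faces)} face(s)")
```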
## Common Issues
### Models Not Downloading
```python
from uniface.constants import RetinaFaceWeights
from uniface.model_store import verify_model_weights

# Manually download a model
model_path = verify_model_weights(RetinaFaceWeights.MNET_V2)
print(f"Model downloaded to: {model_path}")
```
### Check Hardware Acceleration
```python
import onnxruntime as ort

print("Available providers:", ort.get_available_providers())
# macOS M-series should show: ['CoreMLExecutionProvider', ...]
# NVIDIA GPU should show: ['CUDAExecutionProvider', ...]
```
### Slow Performance on Mac
Verify you're using the ARM64 build of Python:
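```python
import platform

# Native Apple Silicon builds report 'arm64'; 'x86_64' means an Intel build
# running under Rosetta emulation, which is significantly slower.
print(platform.machine())
```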
### Import Errors

If an import fails, check it against these paths:
```python
from uniface.detection import RetinaFace, SCRFD
from uniface.recognition import ArcFace, AdaFace
from uniface.attribute import AgeGender, FairFace
from uniface.landmark import Landmark106
from uniface.gaze import MobileGaze
from uniface.parsing import BiSeNet, XSeg
from uniface.privacy import BlurFace
from uniface.spoofing import MiniFASNet
from uniface.tracking import BYTETracker
from uniface.analyzer import FaceAnalyzer
from uniface.draw import draw_detections, draw_tracks
```
## Next Steps
- Model Zoo - All models, benchmarks, and selection guide
- API Reference - Explore individual modules and their APIs
- Tutorials - Step-by-step examples for common workflows
- Guides - Learn about the architecture and design principles