Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 3 additions & 57 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,58 +1,4 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual environments
templates/
.venv/
venv/
ENV/
env/
.env

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Logs
*.log
logs/

# Temporary files
tmp/
temp/
tmp_templates/

# Model files (if large)
models/*.pkl
models/*.h5
models/*.model

# Configuration files with sensitive data
config/local.yaml
config/production.yaml
__pycache__/
*.pyc
Empty file added 26.0.1
Empty file.
Empty file added 50ms
Empty file.
2 changes: 1 addition & 1 deletion config/default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ liveness:
enable_depth_analysis: true
enable_texture_analysis: true
enable_motion_analysis: true
liveness_threshold: 0.5
liveness_threshold: 0.2

# Quality assessment thresholds
quality:
Expand Down
18 changes: 18 additions & 0 deletions config/development.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# LockLess development configuration (overrides for local testing)
system:
log_level: "DEBUG" # Use DEBUG for development
data_directory: "./data"

camera:
device_id: 0 # Change if you have multiple cameras

authentication:
similarity_threshold: 0.6 # Lower for testing (normally 0.7)
quality_threshold: 0.5 # Lower for testing (normally 0.6)

enrollment:
required_samples: 3 # Fewer samples for testing (normally 5)
quality_threshold: 0.5

performance:
enable_gpu_acceleration: false # Set to true if you have compatible GPU
118 changes: 118 additions & 0 deletions src/biometric/authentication.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,124 @@ def authenticate_user(self, user_id: str, password: str,
finally:
self._cleanup_camera()

def authenticate_user_from_frames(
    self, user_id: str, password: str, frames: List[np.ndarray]
) -> AuthenticationResponse:
    """Authenticate a user against pre-captured frames (no camera access).

    Mirrors ``authenticate_user`` but operates on frames supplied by the
    caller (e.g. a GUI) instead of opening the camera itself.

    Args:
        user_id: Identifier of the enrolled user to verify.
        password: Secret used to decrypt the stored biometric template.
        frames: BGR images to scan for the user's face.

    Returns:
        AuthenticationResponse with success flag, best similarity as
        confidence, and the quality/liveness scores of the matching face.
    """
    start_time = time.time()

    try:
        if not frames:
            return AuthenticationResponse(
                success=False,
                user_id=user_id,
                result=AuthenticationResult.ERROR,
                error_message="No frames provided",
            )

        if self._is_user_locked_out(user_id):
            return AuthenticationResponse(
                success=False,
                user_id=user_id,
                result=AuthenticationResult.REJECTED,
                error_message="User account temporarily locked",
            )

        template = self._load_user_template(user_id, password)
        if template is None:
            return AuthenticationResponse(
                success=False,
                user_id=user_id,
                result=AuthenticationResult.TEMPLATE_NOT_FOUND,
                error_message="User template not found",
            )

        threshold = self._get_user_threshold(user_id)
        # Use the configured liveness threshold (LivenessConfig.liveness_threshold)
        # instead of a hard-coded 0.5, so this path stays consistent with the
        # camera-based flow when the threshold is tuned in config.
        liveness_threshold = 0.5
        if self.liveness_detector is not None:
            liveness_threshold = getattr(
                getattr(self.liveness_detector, "config", None),
                "liveness_threshold",
                0.5,
            )

        best_similarity = 0.0
        best_quality = 0.0
        best_liveness: Optional[float] = None
        processed_faces = 0

        for frame in frames:
            faces = self.face_detector.detect_faces(frame)
            if not faces:
                continue

            # Largest face by bounding-box area; bbox is (x, y, w, h).
            face_bbox = max(faces, key=lambda f: f[2] * f[3])
            x, y, w, h = face_bbox
            face_image = frame[y:y+h, x:x+w]
            if face_image.size == 0:
                continue

            quality_score = self.quality_assessor.assess_quality(face_image)
            if quality_score < self.config.quality_threshold:
                continue

            liveness_score = None
            if self.liveness_detector:
                liveness_score = self.liveness_detector.detect_liveness(
                    frame, face_bbox
                )
                if liveness_score < liveness_threshold:
                    continue  # likely spoof; skip this frame

            features = self.feature_extractor.extract_features(face_image)
            if features is None:
                continue

            processed_faces += 1
            similarity = self.feature_extractor.compute_similarity(
                features, template
            )

            # Track the best candidate for the rejection response.
            if similarity > best_similarity:
                best_similarity = similarity
                best_quality = quality_score
                best_liveness = liveness_score

            if similarity >= threshold:
                response = AuthenticationResponse(
                    success=True,
                    user_id=user_id,
                    confidence=similarity,
                    result=AuthenticationResult.SUCCESS,
                    processing_time=time.time() - start_time,
                    quality_score=quality_score,
                    liveness_score=liveness_score,
                )
                self._update_user_statistics(user_id, True)
                return response

        processing_time = time.time() - start_time
        if processed_faces == 0:
            result = AuthenticationResult.NO_FACE_DETECTED
            error_message = "No valid face found in provided frames"
        else:
            result = AuthenticationResult.REJECTED
            error_message = "Authentication threshold not met"

        self._update_user_statistics(user_id, False)
        return AuthenticationResponse(
            success=False,
            user_id=user_id,
            confidence=best_similarity,
            result=result,
            processing_time=processing_time,
            quality_score=best_quality,
            liveness_score=best_liveness,
            error_message=error_message,
        )

    except Exception as e:
        logger.error(f"Frame-based authentication failed for user {user_id}: {e}")
        return AuthenticationResponse(
            success=False,
            user_id=user_id,
            result=AuthenticationResult.ERROR,
            processing_time=time.time() - start_time,
            error_message=str(e),
        )

def authenticate_any_user(self, enrolled_users: List[str],
passwords: Dict[str, str],
timeout: Optional[float] = None
Expand Down
76 changes: 76 additions & 0 deletions src/biometric/enrollment.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
to biometric template generation and storage.
"""

from pyexpat import features
from tempfile import template

import cv2
import numpy as np
import time
Expand Down Expand Up @@ -235,6 +238,79 @@ def _initialize_camera(self):
except Exception as e:
logger.error(f"Camera initialization failed: {e}")
raise CameraError(f"Failed to initialize camera: {e}")

def enroll_from_frames(self, user_id, password, frames):
    """Enroll a user from pre-captured frames (GUI mode, no camera access).

    Args:
        user_id: Identifier for the new user.
        password: Secret used to encrypt the stored biometric template.
        frames: List of BGR images captured by the caller.

    Returns:
        EnrollmentResult with success flag, number of samples collected,
        average sample quality and elapsed processing time.
    """
    logger.info("Enrolling user %s from %d pre-captured frames", user_id, len(frames))
    start_time = time.time()  # report real processing time instead of 0.0

    samples = []

    for frame in frames:
        faces = self.face_detector.detect_faces(frame)
        if not faces:
            continue

        # Pick the largest detected face, consistent with the
        # authentication path (a frame may contain bystanders).
        x, y, w, h = max(faces, key=lambda f: f[2] * f[3])

        face_img = frame[y:y+h, x:x+w]
        # Guard against empty crops from degenerate bounding boxes.
        if face_img is None or face_img.size == 0:
            continue

        quality_score = self.quality_assessor.assess_quality(face_img)
        if quality_score < self.config.quality_threshold:
            continue

        features = self.feature_extractor.extract_features(face_img)
        if features is None:
            continue

        samples.append(EnrollmentSample(
            image=face_img,
            face_bbox=(x, y, w, h),
            quality_score=quality_score,
            features=features,
            timestamp=time.time(),  # module-level `import time`; no per-loop re-import
        ))

    if not samples:
        return EnrollmentResult(
            success=False,
            user_id=user_id,
            samples_collected=0,
            error_message="No valid samples collected",
            processing_time=time.time() - start_time,
        )

    # Generate the template first, then persist it encrypted with the password.
    template = self._generate_template(samples)
    self._store_template(user_id, template, password)

    logger.info("Enrollment successful for user %s (GUI mode)", user_id)

    return EnrollmentResult(
        success=True,
        user_id=user_id,
        template_id=None,
        samples_collected=len(samples),
        average_quality=float(
            np.mean([s.quality_score for s in samples])
        ),
        error_message=None,
        processing_time=time.time() - start_time,
    )

def _collect_enrollment_samples(self, user_id: str
) -> List[EnrollmentSample]:
Expand Down
53 changes: 38 additions & 15 deletions src/biometric/face_detection.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,16 @@
except Exception: # pragma: no cover - runtime dependency guard
ort = None # type: ignore[assignment]

from core.logging import get_logger, PerformanceTimer
from core.exceptions import FaceDetectionError, ModelLoadError
try:
from core.logging import get_logger, PerformanceTimer
from core.exceptions import FaceDetectionError, ModelLoadError
except ModuleNotFoundError:
try:
from src.core.logging import get_logger, PerformanceTimer
from src.core.exceptions import FaceDetectionError, ModelLoadError
except ModuleNotFoundError:
from LockLess.src.core.logging import get_logger, PerformanceTimer
from LockLess.src.core.exceptions import FaceDetectionError, ModelLoadError

logger = get_logger(__name__)

Expand Down Expand Up @@ -416,22 +424,37 @@ def _resolve_haarcascade_path(self) -> str:
return "haarcascade_frontalface_default.xml"


# Example usage and testing
if __name__ == "__main__":
    # Live face-detection demo: draws boxes on webcam frames until 'q' is pressed.
    detector = FaceDetector(backend="opencv")

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Cannot access camera")
        # `exit()` is a site-module convenience not guaranteed in all runtimes;
        # raise SystemExit explicitly with a non-zero status.
        raise SystemExit(1)

    print("Press 'q' to exit")

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # camera stream ended or read failed

        faces = detector.detect_faces(frame)
        print("Faces detected:", len(faces))

        # Draw a green box around each detection; bbox is (x, y, w, h).
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow("Face Detection", frame)

        # waitKey also pumps the GUI event loop; mask to 8 bits for portability.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
8 changes: 4 additions & 4 deletions src/biometric/liveness.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,11 @@ class SpoofingType(Enum):
@dataclass
class LivenessConfig:
"""Configuration for liveness detection."""
enable_depth_analysis: bool = True
enable_texture_analysis: bool = True
enable_depth_analysis: bool = False
enable_texture_analysis: bool = False
enable_motion_analysis: bool = True
enable_challenge_response: bool = False
liveness_threshold: float = 0.5
liveness_threshold: float = 0.2
depth_threshold: float = 0.3
texture_threshold: float = 0.6
motion_threshold: float = 0.4
Expand Down Expand Up @@ -86,7 +86,7 @@ def __init__(self, config: Optional[LivenessConfig] = None):
config: Liveness detection configuration
"""
self.config = config or LivenessConfig()

# Frame buffer for temporal analysis
self.frame_buffer: List[np.ndarray] = []
self.depth_buffer: List[np.ndarray] = []
Expand Down
Loading