-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
134 lines (108 loc) · 5.18 KB
/
main.py
File metadata and controls
134 lines (108 loc) · 5.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import cv2
import time
from object_detector import ObjectDetector
from situation_analyzer import SituationAnalyzer
from audio_feedback import AudioFeedback
from voice_control import VoiceController
import pyttsx3
import threading
import queue
def main():
    """Run the wheelchair navigation assistant main loop.

    Captures frames from a camera, and while the voice-activated system is
    active, runs YOLO object detection at a fixed interval, analyzes the
    situation, and speaks guidance when the detected situation changes.
    Press 'q' in the video window (or Ctrl+C) to quit.
    """
    # Initialize pipeline components.
    detector = ObjectDetector(model_path="yolov8n.pt", frame_width=640, frame_height=320)
    analyzer = SituationAnalyzer()
    audio = AudioFeedback()
    voice_control = VoiceController()

    # Initialize video capture.
    # NOTE(review): device index 1 is opened, but 0 is usually the built-in
    # webcam — confirm which camera is intended on the target machine.
    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Error: Could not open video capture device")
        return

    # Request the capture resolution matching the detector's frame size.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 320)

    print("Waiting for activation phrase: 'Go'...")
    print("Press 'q' to quit")

    last_analysis_time = 0
    analysis_interval = 1.0  # Run detection/analysis at most once per second
    last_detection_results = None
    # BUGFIX: initialize `analysis` so the HUD drawing below cannot hit a
    # NameError on active frames before the first analysis interval elapses.
    # An empty dict keeps the same display via the .get(..., default) calls.
    analysis = {}
    running = True

    try:
        while running:
            # Capture a frame; skip this iteration on a read failure.
            ret, frame = cap.read()
            if not ret:
                print("Error: Could not read frame")
                continue  # BUGFIX: original had "continuepy" (syntax error)

            # Only run detection/feedback while voice activation is on.
            if voice_control.is_system_active():
                current_time = time.time()

                # Throttle the heavy YOLO + analysis work to the interval.
                if current_time - last_analysis_time >= analysis_interval:
                    detection_results = detector.process_frame(frame)

                    # Re-analyze (and possibly speak) only when detections
                    # actually differ from the previous analyzed frame.
                    if _has_detections_changed(last_detection_results, detection_results):
                        analysis = analyzer.analyze_situation(detection_results)
                        # Speak only when the analyzer reports a change.
                        if analysis.get("changed", True):
                            audio.provide_feedback(analysis)
                        last_detection_results = detection_results
                    last_analysis_time = current_time

                # Audio feedback stats for the on-screen HUD.
                stats = audio.get_stats()

                # Status line: green when safe to proceed, red otherwise.
                status_color = (0, 255, 0) if analysis.get("safe_to_proceed", True) else (0, 0, 255)
                cv2.putText(frame, f"ACTIVE - {analysis.get('guidance', '')}",
                            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)

                # Audio latency/queue indicators (orange when delay >= 0.5s).
                delay_color = (0, 255, 0) if stats['last_message_delay'] < 0.5 else (0, 165, 255)
                cv2.putText(frame, f"Audio Delay: {stats['last_message_delay']:.2f}s",
                            (10, frame.shape[0] - 60), cv2.FONT_HERSHEY_SIMPLEX, 0.6, delay_color, 2)
                cv2.putText(frame, f"Queue Size: {stats['queue_size']}",
                            (10, frame.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

                # Always draw the most recent detection boxes on the frame.
                if last_detection_results:
                    frame = detector.draw_detections(frame, last_detection_results)
            else:
                # System idle: show how to activate it.
                cv2.putText(frame, "Waiting for activation: 'Go'",
                            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            # Show the processed frame.
            cv2.imshow('Wheelchair Navigation Assistant', frame)

            # Check for quit command.
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                running = False
                print("\nQuitting application...")
    except KeyboardInterrupt:
        print("\nStopping application...")
    finally:
        # Release the camera, close windows, and shut down audio/voice.
        print("Cleaning up resources...")
        cap.release()
        cv2.destroyAllWindows()
        audio.cleanup()
        voice_control.cleanup()
def _has_detections_changed(last_results: dict, current_results: dict) -> bool:
"""Compare detection results to determine if there's a significant change."""
if last_results is None:
return True
last_detections = last_results.get("detections", [])
current_detections = current_results.get("detections", [])
# Compare number of detections
if len(last_detections) != len(current_detections):
return True
# Create simplified representations for comparison
def simplify_detection(det):
return (
det["object"],
det["distance"],
det["position"]
)
last_simplified = {simplify_detection(det) for det in last_detections}
current_simplified = {simplify_detection(det) for det in current_detections}
return last_simplified != current_simplified
if __name__ == "__main__":
main()