diff --git a/app.py b/app.py
index 665d848..19c3749 100644
--- a/app.py
+++ b/app.py
@@ -102,29 +102,30 @@ def worker(id,source,region=None,stream=False):
             print("Error opening video stream....")
             break
             continue
-        result = model.track(frame,device=int(id)%4,tracker='botsort.yaml')
+        # result = model.track(frame,device=int(id)%4,tracker='botsort.yaml')
+        result = model(frame)
         use += " track:"+str((time.time()*1000) - bgn)
         del(ret)
         del(frame)
         result = result[0]
         detections = sv.Detections.from_yolov8(result)
-        if result.boxes.id is not None:
-            detections.tracker_id = result.boxes.id.cpu().numpy().astype(int)
-        else:
-            detections.tracker_id = np.array([])
-            detections.conf = np.array([])
-            detections.xyxy=np.empty((0, 4), dtype=np.float32)
+        # if result.boxes.id is not None:
+        #     detections.tracker_id = result.boxes.id.cpu().numpy().astype(int)
+        # else:
+        #     detections.tracker_id = np.array([])
+        #     detections.conf = np.array([])
+        #     detections.xyxy=np.empty((0, 4), dtype=np.float32)
         # detections = detections[(detections.tracker_id != None)]
         count += 1
         names = {}
-        for xyxy,_, confidence, class_id, tracker_id in detections:
+        for xyxy,_, confidence, class_id,tracker_id in detections:
             name = model.model.names[class_id]
             if name in names:
                 print("Name duplicate",name)
                 continue
             ball = balls.get(name,{})
-            ball["tkid"] = int(tracker_id)
+            # ball["tkid"] = int(tracker_id)
             ball["conf"] = round(float(confidence), 2)
             ball["xyxy"] = [int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])]
             ball["count"] = ball.get("count",0) + 1
diff --git a/test123.py b/test123.py
new file mode 100644
index 0000000..51fe9f9
--- /dev/null
+++ b/test123.py
@@ -0,0 +1,38 @@
+import cv2
+from ultralytics import YOLO
+
+# Load the YOLOv8 model
+model = YOLO('best.pt')
+
+# Open the video file
+video_path = "./videos/123.mp4"
+cap = cv2.VideoCapture(video_path)
+
+cv2.namedWindow("Video", cv2.WINDOW_NORMAL) # Create a named window
+cv2.resizeWindow("Video", 640, 384) # Resize this window
+
+# Loop through the video frames
+while cap.isOpened():
+    # Read a frame from the video
+    success, frame = cap.read()
+
+    if success:
+        # Run YOLOv8 inference on the frame
+        results = model(frame)
+
+        # Visualize the results on the frame
+        annotated_frame = results[0].plot()
+
+        # Display the annotated frame
+        cv2.imshow("Video", annotated_frame)
+
+        # Break the loop if 'q' is pressed
+        if cv2.waitKey(1) & 0xFF == ord("q"):
+            break
+    else:
+        # Break the loop if the end of the video is reached
+        break
+
+# Release the video capture object and close the display window
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file