result
parent
85a3221e94
commit
07b0813f73
3  app.py
3  app.py
@@ -102,7 +102,8 @@ def worker(id,source,region=None,stream=False):
             print("Error opening video stream....")
-            break
+            continue
-        result = model.track(frame,device=int(id)%4,tracker='botsort.yaml')
+        # result = model.track(frame,device=int(id)%4,tracker='botsort.yaml')
+        result = model(frame)
         use += " track:"+str((time.time()*1000) - bgn)
         del(ret)
         del(frame)
 
@@ -0,0 +1,38 @@
+import cv2
+from ultralytics import YOLO
+
+# Load the YOLOv8 model
+model = YOLO('best.pt')
+
+# Open the video file
+video_path = "./videos/123.mp4"
+cap = cv2.VideoCapture(video_path)
+
+cv2.namedWindow("Video", cv2.WINDOW_NORMAL)  # Create a named window
+cv2.resizeWindow("Video", 640, 384)  # Resize this window
+
+# Loop through the video frames
+while cap.isOpened():
+    # Read a frame from the video
+    success, frame = cap.read()
+
+    if success:
+        # Run YOLOv8 inference on the frame
+        results = model(frame)
+
+        # Visualize the results on the frame
+        annotated_frame = results[0].plot()
+
+        # Display the annotated frame
+        cv2.imshow("Video", annotated_frame)
+
+        # Break the loop if 'q' is pressed
+        if cv2.waitKey(1) & 0xFF == ord("q"):
+            break
+    else:
+        # Break the loop if the end of the video is reached
+        break
+
+# Release the video capture object and close the display window
+cap.release()
+cv2.destroyAllWindows()