Question

Can you please help me fix the bounding box based on the picture below, using the Flask code? Please provide the code. Thanks.

1 Approved Answer

Here is the code.
import cv2
import math
from flask import Flask, Response, render_template
from ultralytics import YOLO
from deep_sort import build_tracker  # Assuming DeepSORT is installed and this is the correct import

app = Flask(__name__)

# model
model = YOLO("best.pt")

# start webcam
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # frame width
cap.set(4, 480)  # frame height

# object classes
classNames = ["person"]

# Initialize DeepSORT tracker
tracker = build_tracker()

def generate_frames():
    frame_count = 0
    fps = 0
    tick1 = cv2.getTickCount()
    while True:
        success, img = cap.read()
        if not success:
            break
        img = cv2.resize(img, (640, 480))

        # Detect objects using YOLO (with the built-in ByteTrack tracker config)
        results = model.track(img, stream=True, persist=True, tracker="bytetrack.yaml")

        # Process detections for DeepSORT
        boxes = []
        confidences = []
        class_ids = []
        for r in results:
            for box in r.boxes:
                x1, y1, x2, y2 = [int(x) for x in box.xyxy[0]]
                boxes.append([x1, y1, x2 - x1, y2 - y1])  # Format: [x, y, w, h]
                confidences.append(float(box.conf[0]))
                class_ids.append(int(box.cls[0]))

        # Update tracker
        tracks = tracker.update(boxes, confidences, img)
        for track in tracks:
            bbox = track.to_tlbr()  # Bounding box as (top-left x, top-left y, bottom-right x, bottom-right y)
            cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 5)
            # With a single "person" class, classNames[0] is the label;
            # indexing class_ids by track_id can go out of range.
            cv2.putText(img, f"{classNames[0]}: {track.track_id}",
                        (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # Remaining code for FPS calculation and frame generation
        # ...

@app.route('/video')
def video():
    return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/')
def index():
    return render_template('trial.html')

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
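The /video route only streams if each loop iteration of generate_frames() actually yields JPEG bytes wrapped in the multipart boundary named in the mimetype. As a minimal sketch of one common way to finish the elided part of the loop (the helper name mjpeg_part is an illustrative assumption, not the original author's code):

import cv2

def mjpeg_part(img):
    # Sketch: JPEG-encode one BGR frame and wrap it in the multipart/x-mixed-replace
    # format that the /video route advertises (boundary=frame).
    ok, buf = cv2.imencode('.jpg', img)
    if not ok:
        return b''
    return (b'--frame\r\n'
            b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')

With something like this, the last statement inside the while loop would be yield mjpeg_part(img), and the page rendered by trial.html can show the stream by pointing an <img> tag at /video.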
# before your while loop
tick1 = cv2.getTickCount()
frame_count = 0
fps = 0

while True:
    success, img = cap.read()
    img = cv2.resize(img, (640, 480))

    # yolo detection
    results = model(img, stream=True)

    # Deep sort process
    boxes = []
    confidences = []
    class_ids = []

    # coordinates
    for r in results:
        for box in r.boxes:
            # bounding box
            x1, y1, x2, y2 = [int(x) for x in box.xyxy[0]]  # convert to int values once
            boxes.append([x1, y1, x2 - x1, y2 - y1])

            # put box in cam
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 5)

            # confidence
            confidences.append(float(box.conf[0]))
            print("Confidence --->", confidences)

            # class id
            class_ids.append(int(box.cls[0]))

    # Update tracker
    tracks = tracker.update(boxes, confidences, img)
    for track in tracks:

Console output:

Speed: 1.0ms preprocess, 69.0ms inference, 1.0ms postprocess per image at shape (1, 3, 256, 320)
0: 256x320 1 person, 65.0ms
Speed: 1.0ms preprocess, 65.0ms inference, 1.0ms postprocess per image at shape (1, 3, 256, 320)
0: 256x320 1 person, 70.0ms
Speed: 1.0ms preprocess, 70.0ms inference, 1.0ms postprocess per image at shape (1, 3, 256, 320)
0: 256x320 1 person, 62.0ms
Speed: 1.0ms preprocess, 62.0ms inference, 1.0ms postprocess per image at shape (1, 3, 256, 320)
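The snippet initializes tick1, frame_count, and fps before the loop but never shows the FPS computation itself. A minimal sketch of one way to do it with OpenCV's tick counters (the helper name update_fps and the 10-frame window are illustrative assumptions, not the original code) could look like this:

import cv2

def update_fps(img, prev_tick, frame_count, fps, window=10):
    # Sketch: recompute FPS every `window` frames using OpenCV tick counters
    # and draw the value on the frame.
    frame_count += 1
    if frame_count % window == 0:
        now = cv2.getTickCount()
        elapsed = (now - prev_tick) / cv2.getTickFrequency()  # seconds spent on the last `window` frames
        if elapsed > 0:
            fps = window / elapsed
        prev_tick = now
    cv2.putText(img, f"FPS: {fps:.1f}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return prev_tick, frame_count, fps

Inside the while loop, after the tracking and drawing step, this would be called as tick1, frame_count, fps = update_fps(img, tick1, frame_count, fps).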