diff --git a/app.py b/app.py
index c38a584..a1a156d 100644
--- a/app.py
+++ b/app.py
@@ -14,6 +14,7 @@ from multiprocessing import Process, Event
 
 app = Flask(__name__)
 
+device_count=0
 workers = {}
 
 @app.route('/start/')
@@ -62,7 +63,6 @@ def worker(id,source,region=None,stream=False):
         region = 10
     else:
         region = int(region)
-    device_count = torch.cuda.device_count()
     print("GPU device count:", device_count)
     print("start loading model...",id,source,region)
     model = YOLO('./best.pt')
@@ -95,6 +95,7 @@ def worker(id,source,region=None,stream=False):
                 print("Error opening video stream....")
                 break
             continue
+        print("device_count",int(id)%device_count)
         result = model.track(frame,show=False,stream=False,persist=True,device=int(id)%device_count)
         use += " track:"+str((time.time()*1000) - bgn)
         del(ret)
@@ -190,6 +191,7 @@ def post_request(url, data):
     #         break
 
 if __name__ == '__main__':
+    device_count = torch.cuda.device_count()
     # cProfile.run('test_function()', 'test_function.profile')
     # p = pstats.Stats('test_function.profile')
     # p.sort_stats('cumulative').print_stats(100) # Top 10 by cumulative time
diff --git a/show.py b/show.py
index 320a539..38224bc 100644
--- a/show.py
+++ b/show.py
@@ -25,6 +25,16 @@ def show():
     print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),text)
     return 'ok'
 
+@app.route('/mj',methods=['POST'])
+def mj():
+    print(request.json)
+    return 'ok'
+
+@app.route('/cq',methods=['POST'])
+def cq():
+    print(request.json)
+    return 'ok'
+
 if __name__ == '__main__':
     app.logger.setLevel(logging.ERROR)
     app.run("0.0.0.0",port=8000,threaded=True)