From 744f7c172b725871a95eb05bdbaa797a33ea58f4 Mon Sep 17 00:00:00 2001 From: atif275 Date: Sun, 4 Feb 2024 22:45:22 +0500 Subject: [PATCH 1/6] CORS Updated --- app.py | 166 +++++++++++++++++++++++++++++++++++++++++++ detect.py | 8 +-- static/script.js | 85 ++++++++++++++++++++++ static/style.css | 144 +++++++++++++++++++++++++++++++++++++ templates/index.html | 41 +++++++++++ 5 files changed, 440 insertions(+), 4 deletions(-) create mode 100644 app.py create mode 100644 static/script.js create mode 100644 static/style.css create mode 100644 templates/index.html diff --git a/app.py b/app.py new file mode 100644 index 000000000000..5f7465beeab4 --- /dev/null +++ b/app.py @@ -0,0 +1,166 @@ +from flask import Flask, render_template, Response, jsonify +from flask import Flask, render_template, send_from_directory +import cv2 +import os +import numpy as np +from datetime import datetime +import time +from flask import jsonify +import subprocess + +# from flask_wtf import FlaskForm +# from wtforms import FileField, SubmitField +# from werkzeug.utils import secure_filename +# import os +# from wtforms.validators import InputRequired + + + +app = Flask(__name__) + +streaming_active = False +output_folder = 'videos' +video_writer = None +# class UploadFileForm(FlaskForm): +# file = FileField("File", validators=[InputRequired()]) +# submit = SubmitField("Upload File") + +@app.route('/') +def index(): + return render_template('index.html') + +# @app.route('/home', methods=['GET',"POST"]) +# def home(): +# form = UploadFileForm() +# if form.validate_on_submit(): +# file = form.file.data # First grab the file +# file.save(os.path.join(os.path.abspath(os.path.dirname(__file__)),app.config['UPLOAD_FOLDER'],secure_filename(file.filename))) # Then save the file +# return "File has been uploaded." 
+# return render_template('index.html', form=form) + +@app.route('/start_stream') + +def start_stream(): + global streaming_active + global out + if not streaming_active: + streaming_active = True + start_recording() + + return jsonify({'status': 'success', 'message': 'Streaming started and recording initiated'}) + else: + return jsonify({'status': 'error', 'message': 'Streaming is already active'}) + +@app.route('/stop_stream') +def stop_stream(): + global streaming_active + if streaming_active: + streaming_active = False + stop_recording() + + return jsonify({'status': 'success', 'message': 'Streaming stopped and recording saved'}) + else: + return jsonify({'status': 'error', 'message': 'Streaming is not active'}) + + +def start_recording(): + global video_writer + + filename = f"recording_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4" + fourcc = cv2.VideoWriter_fourcc(*'mp4v') # You can change the codec as needed + frame_size = (640, 480) # Adjust the frame size as needed + # video_writer = cv2.VideoWriter(filename, fourcc, 10.0, frame_size) + video_writer = cv2.VideoWriter(os.path.join(output_folder, filename), fourcc, 40.0, frame_size) + + + +def stop_recording(): + global video_writer + + if video_writer is not None: + video_writer.release() + video_writer = None + +@app.route('/video_feed') +def video_feed(): + return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame') + +@app.route('/static/') +def static_files(filename): + return send_from_directory('static', filename) + +@app.route('/videos') +def list_videos(): + videos = [video for video in os.listdir('videos') if video.endswith('.mp4')] + return jsonify(videos) + +@app.route('/video/') +def stream_video(filename): + return send_from_directory('videos', filename) + +@app.route('/detection/') +# def detection(filename): +# # Placeholder for detection logic +# print(f"Detection started for {filename}") +# return jsonify({'status': 'Detection started for ' + filename}) +def detection(filename): + try: + print(f"filename################={filename}") + # Construct the command string + command = f'python3 detect.py --source ./videos/{filename}' + + # Execute the command + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + + if process.returncode != 0: + # Handle errors if the command failed + return jsonify({'status': 'Error', 'message': stderr.decode()}), 500 + + # Return success response + return jsonify({'status': 'Detection Done for ' + filename}) + except Exception as e: + # Handle any exceptions + return jsonify({'status': 'Error', 'message': str(e)}), 500 + +def generate_frames(): + global video_writer + folder_path = '/tmp/camera_save_tutorial' + while streaming_active: + image_files = [f for f in os.listdir(folder_path) if f.endswith(('.jpg', '.png'))] + if image_files: + try: + latest_image = max(image_files, key=lambda x: os.path.getctime(os.path.join(folder_path, x))) + image_path = os.path.join(folder_path, latest_image) + + frame = cv2.imread(image_path) + if frame is None: + raise FileNotFoundError("Empty image file or format not supported") + + _, buffer = cv2.imencode('.jpg', frame) + frame = buffer.tobytes() + + # Write the frame to the video file if recording is active + if video_writer is not None: + video_writer.write(cv2.imdecode(np.frombuffer(frame, dtype=np.uint8), 1)) + + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + # time.sleep(0.05) + + except Exception 
as e: + print(f"Error processing image: {e}") + continue + + else: + yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + b'\r\n') + + + + + +if __name__ == '__main__': + + + app.run(debug=True) \ No newline at end of file diff --git a/detect.py b/detect.py index b7d77ef431d4..8cf467ef3b1d 100644 --- a/detect.py +++ b/detect.py @@ -69,7 +69,7 @@ @smart_inference_mode() def run( weights=ROOT / "yolov5s.pt", # model path or triton URL - source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam) + source=ROOT / "videos", # file/dir/URL/glob/screen/0(webcam) data=ROOT / "data/coco128.yaml", # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -107,9 +107,9 @@ def run( source = check_file(source) # download # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - + # save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + # (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + save_dir=Path(project) # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) diff --git a/static/script.js b/static/script.js new file mode 100644 index 000000000000..0228d74262f3 --- /dev/null +++ b/static/script.js @@ -0,0 +1,85 @@ +function startStream() { + + fetch('/start_stream') + .then(response => response.json()) + .then(data => { + console.log('Start Stream:', data); + document.getElementById('videoStream').src = '/video_feed'; + }); +} + +function stopStream() { + + fetch('/stop_stream') + .then(response => response.json()) + .then(data => { + console.log('Stop Stream:', data); + document.getElementById('videoStream').src = ''; + }); +} + + +document.addEventListener('DOMContentLoaded', function() { + const videoDropdown = document.getElementById('videoDropdown'); + //const playButton = document.getElementById('playButton'); + // const detectionButton = document.getElementById('detectionButton'); + const videoPlayer = document.getElementById('videoPlayer'); + detectionButton= document.querySelector(".detectionButton"); + playButton= document.querySelector(".playButton"); + + + + fetch('/videos').then(response => response.json()).then(videos => { + videos.forEach(video => { + let option = document.createElement('option'); + option.value = video; + option.textContent = video; + videoDropdown.appendChild(option); + }); + }); + + videoDropdown.onchange = function() { + if (this.value) { + playButton.style.display = 'block'; + detectionButton.style.display='block' + + } else { + playButton.style.display = 'none'; + detectionButton.style.display = 'none'; + } + }; + + playButton.onclick = function() { + + videoPlayer.innerHTML = ``; + // function playVideo(videoSource) { + // var videoPlayer = document.getElementById("videoPlayer"); + // videoPlayer.src = videoSource; + // videoPlayer.load(); + // videoPlayer.play(); + // toggleDropdown(); // Close the dropdown after selecting a video + // } + // playVideo(`/videos/${videoDropdown.value}`); + + }; + + detectionButton.onclick = function() { + + this.innerHTML="
" + // setTimeout(()=>{ + // this.innerHTML="Detection Done"; + // this.style="background : #f1f5f4; color: #333; pointer-events: none"; + // },2000) + + fetch(`/detection/${videoDropdown.value}`).then(response => response.json()).then(data => { + this.innerHTML="Detection Done"; + this.style="background : #f1f5f4; color: #333; pointer-events: none"; + alert(data.status); + }) + .catch(error => { + + loadingMessage.style.display = 'none'; + console.error('Error:', error); + }); + }; +}); diff --git a/static/style.css b/static/style.css new file mode 100644 index 000000000000..a93c4ba60166 --- /dev/null +++ b/static/style.css @@ -0,0 +1,144 @@ +body { + margin: 1; + display: flex; + justify-content: right; + align-items: center; + height: 60vh; + background-color: #ffffff; +} + +img { + width: 30%; + height: auto; +} + +.app-header { + background-color: rgb(255, 255, 255); /* Change the background color as needed */ + color: rgb(0, 0, 0); /* Change the text color as needed */ + text-align: center; + padding: 20px; + position: absolute; + top: 20px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ + } + + .app-header h1 { + margin: 0; + font-size: 2em; /* Adjust the font size as needed */ + } + + .button-container { + text-align: center; + margin-bottom: 20px; + position: absolute; + top: 150px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ +} + +#startButton { + background-color: #4CAF50; /* Green color */ + color: white; + padding: 10px 20px; /* Adjust padding as needed */ + border: none; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 4px 2px; + cursor: pointer; +} + +#stopButton { + background-color: #FF0000; /* Red color */ + color: white; + padding: 10px 20px; /* Adjust padding as needed */ + border: none; + + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 4px 2px; + cursor: pointer; +} +#videoDropdown{ + width: 10%; + padding: 10px; + font-size: 16px; + border: 1px solid #ccc; + border-radius: 4px; + cursor: pointer; + position: absolute; + top: 225px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ +} + +.button-active { + background-color: #4CAF50; /* Green */ + color: white; + + /* Add more styles as needed, such as positioning */ +} + +.button-inactive { + background-color: #ccc; /* Gray */ + color: #666; + /* Add more styles as needed */ +} + +.playButton{ + background-color: #29ca8c; /* Green color */ + color: white; + width: 100px; + height: 50px; + cursor: pointer; + border-radius: 3px; + display: grid; + place-content: center; + position: absolute; + top: 285px; /* Adjust the distance from the top */ + left: 46%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ +} + +.detectionButton{ + background-color: #0004ff; /* Green color */ + color: white; + width: 100px; + height: 50px; + cursor: pointer; + border-radius: 3px; + display: grid; + place-content: center; + position: absolute; + top: 285px; /* Adjust the distance from the top */ + left: 54%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ + +} +.loader { + pointer-events: none; + width: 30px; + height: 30px; + border-radius: 50%; + border: 3px solid transparent; /* Light grey */ + 
border-top-color: #ffffff; /* Blue */ + animation: an1 1s ease infinite; +} + +@keyframes an1 { + 0% { transform: rotate(0turn); } + 100% { transform: rotate(1turn); } +} + +#vi { + position: relative; + top: 150px; /* Adjust the distance from the top */ + left: -10%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ + width: 40%; + height: auto; +} \ No newline at end of file diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 000000000000..cb9d891b6f44 --- /dev/null +++ b/templates/index.html @@ -0,0 +1,41 @@ + + + + + + + + Echlon Object Detection + + +
+    <div class="app-header">
+        <h1>Echelon Object Detection and Streaming</h1>
+    </div>
+ +
+ + +
+ + + + + + +
+ + + Video Stream + + + + + + + \ No newline at end of file From a27d403461b8cfac79a547bfbc7ba257d26b174c Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sun, 4 Feb 2024 17:49:03 +0000 Subject: [PATCH 2/6] Auto-format by https://ultralytics.com/actions --- app.py | 86 +++++++++++++++++++++++++++---------------------------- detect.py | 2 +- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/app.py b/app.py index 5f7465beeab4..dd89d56a1714 100644 --- a/app.py +++ b/app.py @@ -15,19 +15,20 @@ # from wtforms.validators import InputRequired - app = Flask(__name__) streaming_active = False -output_folder = 'videos' +output_folder = "videos" video_writer = None # class UploadFileForm(FlaskForm): # file = FileField("File", validators=[InputRequired()]) # submit = SubmitField("Upload File") -@app.route('/') + +@app.route("/") def index(): - return render_template('index.html') + return render_template("index.html") + # @app.route('/home', methods=['GET',"POST"]) # def home(): @@ -38,8 +39,8 @@ def index(): # return "File has been uploaded." # return render_template('index.html', form=form) -@app.route('/start_stream') +@app.route("/start_stream") def start_stream(): global streaming_active global out @@ -47,58 +48,63 @@ def start_stream(): streaming_active = True start_recording() - return jsonify({'status': 'success', 'message': 'Streaming started and recording initiated'}) + return jsonify({"status": "success", "message": "Streaming started and recording initiated"}) else: - return jsonify({'status': 'error', 'message': 'Streaming is already active'}) + return jsonify({"status": "error", "message": "Streaming is already active"}) -@app.route('/stop_stream') + +@app.route("/stop_stream") def stop_stream(): global streaming_active if streaming_active: streaming_active = False stop_recording() - - return jsonify({'status': 'success', 'message': 'Streaming stopped and recording saved'}) + + return jsonify({"status": "success", "message": "Streaming stopped and recording saved"}) else: - return jsonify({'status': 'error', 'message': 'Streaming is not active'}) + return jsonify({"status": "error", "message": "Streaming is not active"}) def start_recording(): global video_writer - + filename = f"recording_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4" - fourcc = cv2.VideoWriter_fourcc(*'mp4v') # You can change the codec as needed + fourcc = cv2.VideoWriter_fourcc(*"mp4v") # You can change the codec as needed frame_size = (640, 480) # Adjust the frame size as needed # video_writer = cv2.VideoWriter(filename, fourcc, 10.0, frame_size) video_writer = cv2.VideoWriter(os.path.join(output_folder, filename), fourcc, 40.0, frame_size) - def stop_recording(): global video_writer - + if video_writer is not None: video_writer.release() video_writer = None - -@app.route('/video_feed') + + +@app.route("/video_feed") def video_feed(): - return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame') + return Response(generate_frames(), mimetype="multipart/x-mixed-replace; boundary=frame") -@app.route('/static/') + +@app.route("/static/") def static_files(filename): - return send_from_directory('static', filename) + return send_from_directory("static", filename) + -@app.route('/videos') +@app.route("/videos") def list_videos(): - videos = [video for video in os.listdir('videos') if video.endswith('.mp4')] + videos = [video for video in os.listdir("videos") if video.endswith(".mp4")] return jsonify(videos) -@app.route('/video/') + +@app.route("/video/") def 
stream_video(filename): - return send_from_directory('videos', filename) + return send_from_directory("videos", filename) + -@app.route('/detection/') +@app.route("/detection/") # def detection(filename): # # Placeholder for detection logic # print(f"Detection started for {filename}") @@ -107,7 +113,7 @@ def detection(filename): try: print(f"filename################={filename}") # Construct the command string - command = f'python3 detect.py --source ./videos/{filename}' + command = f"python3 detect.py --source ./videos/{filename}" # Execute the command process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -115,19 +121,20 @@ def detection(filename): if process.returncode != 0: # Handle errors if the command failed - return jsonify({'status': 'Error', 'message': stderr.decode()}), 500 + return jsonify({"status": "Error", "message": stderr.decode()}), 500 # Return success response - return jsonify({'status': 'Detection Done for ' + filename}) + return jsonify({"status": "Detection Done for " + filename}) except Exception as e: # Handle any exceptions - return jsonify({'status': 'Error', 'message': str(e)}), 500 + return jsonify({"status": "Error", "message": str(e)}), 500 + def generate_frames(): global video_writer - folder_path = '/tmp/camera_save_tutorial' + folder_path = "/tmp/camera_save_tutorial" while streaming_active: - image_files = [f for f in os.listdir(folder_path) if f.endswith(('.jpg', '.png'))] + image_files = [f for f in os.listdir(folder_path) if f.endswith((".jpg", ".png"))] if image_files: try: latest_image = max(image_files, key=lambda x: os.path.getctime(os.path.join(folder_path, x))) @@ -137,15 +144,14 @@ def generate_frames(): if frame is None: raise FileNotFoundError("Empty image file or format not supported") - _, buffer = cv2.imencode('.jpg', frame) + _, buffer = cv2.imencode(".jpg", frame) frame = buffer.tobytes() # Write the frame to the video file if recording is active if video_writer is not None: video_writer.write(cv2.imdecode(np.frombuffer(frame, dtype=np.uint8), 1)) - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + frame + b"\r\n") # time.sleep(0.05) except Exception as e: @@ -153,14 +159,8 @@ def generate_frames(): continue else: - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + b'\r\n') - - - + yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + b"\r\n") -if __name__ == '__main__': - - - app.run(debug=True) \ No newline at end of file +if __name__ == "__main__": + app.run(debug=True) diff --git a/detect.py b/detect.py index 8cf467ef3b1d..aacb875ccd84 100644 --- a/detect.py +++ b/detect.py @@ -109,7 +109,7 @@ def run( # Directories # save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run # (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - save_dir=Path(project) + save_dir = Path(project) # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) From 3217828de895cacb07f78e926d3a508daf92b8eb Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sun, 28 Apr 2024 15:19:42 +0000 Subject: [PATCH 3/6] Auto-format by https://ultralytics.com/actions --- app.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index dd89d56a1714..0c0c0a3a4ab4 100644 --- a/app.py +++ b/app.py @@ -1,12 +1,11 @@ -from flask import Flask, 
render_template, Response, jsonify -from flask import Flask, render_template, send_from_directory -import cv2 import os -import numpy as np -from datetime import datetime -import time -from flask import jsonify import subprocess +import time +from datetime import datetime + +import cv2 +import numpy as np +from flask import Flask, Response, jsonify, render_template, send_from_directory # from flask_wtf import FlaskForm # from wtforms import FileField, SubmitField From c58b0cea7ef2675ecb0b25d6ede73cfa260b3e32 Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sun, 16 Jun 2024 20:09:49 +0000 Subject: [PATCH 4/6] Auto-format by https://ultralytics.com/actions --- app.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app.py b/app.py index 0c0c0a3a4ab4..3d6bf54d406d 100644 --- a/app.py +++ b/app.py @@ -1,6 +1,5 @@ import os import subprocess -import time from datetime import datetime import cv2 From 4b01e9921858f634fa1d5700b58553936cab1b0d Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sun, 11 Aug 2024 08:32:56 +0000 Subject: [PATCH 5/6] Auto-format by https://ultralytics.com/actions --- static/style.css | 231 ++++++++++++++++++++++++----------------------- 1 file changed, 119 insertions(+), 112 deletions(-) diff --git a/static/style.css b/static/style.css index a93c4ba60166..b29acc3c8201 100644 --- a/static/style.css +++ b/static/style.css @@ -1,144 +1,151 @@ body { - margin: 1; - display: flex; - justify-content: right; - align-items: center; - height: 60vh; - background-color: #ffffff; + margin: 1; + display: flex; + justify-content: right; + align-items: center; + height: 60vh; + background-color: #ffffff; } img { - width: 30%; - height: auto; + width: 30%; + height: auto; } .app-header { - background-color: rgb(255, 255, 255); /* Change the background color as needed */ - color: rgb(0, 0, 0); /* Change the text color as needed */ - text-align: center; - padding: 20px; - position: absolute; - top: 20px; /* Adjust the distance from the top */ - left: 50%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ - } - - .app-header h1 { - margin: 0; - font-size: 2em; /* Adjust the font size as needed */ - } + background-color: rgb( + 255, + 255, + 255 + ); /* Change the background color as needed */ + color: rgb(0, 0, 0); /* Change the text color as needed */ + text-align: center; + padding: 20px; + position: absolute; + top: 20px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ +} - .button-container { - text-align: center; - margin-bottom: 20px; - position: absolute; - top: 150px; /* Adjust the distance from the top */ - left: 50%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ +.app-header h1 { + margin: 0; + font-size: 2em; /* Adjust the font size as needed */ +} + +.button-container { + text-align: center; + margin-bottom: 20px; + position: absolute; + top: 150px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ } #startButton { - background-color: #4CAF50; /* Green color */ - color: white; - padding: 10px 20px; /* Adjust padding as needed */ - border: none; - text-align: center; - text-decoration: none; - display: inline-block; - font-size: 16px; - margin: 4px 2px; - cursor: pointer; + background-color: #4caf50; /* Green color */ + color: white; + padding: 10px 20px; /* Adjust padding as needed */ + border: none; + text-align: center; + 
text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 4px 2px; + cursor: pointer; } #stopButton { - background-color: #FF0000; /* Red color */ - color: white; - padding: 10px 20px; /* Adjust padding as needed */ - border: none; - - text-align: center; - text-decoration: none; - display: inline-block; - font-size: 16px; - margin: 4px 2px; - cursor: pointer; + background-color: #ff0000; /* Red color */ + color: white; + padding: 10px 20px; /* Adjust padding as needed */ + border: none; + + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 4px 2px; + cursor: pointer; } -#videoDropdown{ - width: 10%; - padding: 10px; - font-size: 16px; - border: 1px solid #ccc; - border-radius: 4px; - cursor: pointer; - position: absolute; - top: 225px; /* Adjust the distance from the top */ - left: 50%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ +#videoDropdown { + width: 10%; + padding: 10px; + font-size: 16px; + border: 1px solid #ccc; + border-radius: 4px; + cursor: pointer; + position: absolute; + top: 225px; /* Adjust the distance from the top */ + left: 50%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ } .button-active { - background-color: #4CAF50; /* Green */ - color: white; - - /* Add more styles as needed, such as positioning */ + background-color: #4caf50; /* Green */ + color: white; + + /* Add more styles as needed, such as positioning */ } .button-inactive { - background-color: #ccc; /* Gray */ - color: #666; - /* Add more styles as needed */ + background-color: #ccc; /* Gray */ + color: #666; + /* Add more styles as needed */ } -.playButton{ - background-color: #29ca8c; /* Green color */ - color: white; - width: 100px; - height: 50px; - cursor: pointer; - border-radius: 3px; - display: grid; - place-content: center; - position: absolute; - top: 285px; /* Adjust the distance from the top */ - left: 46%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ +.playButton { + background-color: #29ca8c; /* Green color */ + color: white; + width: 100px; + height: 50px; + cursor: pointer; + border-radius: 3px; + display: grid; + place-content: center; + position: absolute; + top: 285px; /* Adjust the distance from the top */ + left: 46%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ } -.detectionButton{ - background-color: #0004ff; /* Green color */ - color: white; - width: 100px; - height: 50px; - cursor: pointer; - border-radius: 3px; - display: grid; - place-content: center; - position: absolute; - top: 285px; /* Adjust the distance from the top */ - left: 54%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ - +.detectionButton { + background-color: #0004ff; /* Green color */ + color: white; + width: 100px; + height: 50px; + cursor: pointer; + border-radius: 3px; + display: grid; + place-content: center; + position: absolute; + top: 285px; /* Adjust the distance from the top */ + left: 54%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ } .loader { - pointer-events: none; - width: 30px; - height: 30px; - border-radius: 50%; - border: 3px solid transparent; /* Light grey */ - border-top-color: #ffffff; /* Blue */ - animation: an1 1s ease infinite; + pointer-events: none; + width: 30px; + height: 30px; + border-radius: 50%; + border: 3px solid transparent; /* Light grey */ + border-top-color: #ffffff; /* Blue */ + 
animation: an1 1s ease infinite; } - + @keyframes an1 { - 0% { transform: rotate(0turn); } - 100% { transform: rotate(1turn); } + 0% { + transform: rotate(0turn); + } + 100% { + transform: rotate(1turn); + } } #vi { - position: relative; - top: 150px; /* Adjust the distance from the top */ - left: -10%; /* Center horizontally */ - transform: translateX(-50%); /* Center horizontally */ - width: 40%; - height: auto; -} \ No newline at end of file + position: relative; + top: 150px; /* Adjust the distance from the top */ + left: -10%; /* Center horizontally */ + transform: translateX(-50%); /* Center horizontally */ + width: 40%; + height: auto; +} From 893e9417881fd8cc68854b4e37bad23a559cbb56 Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sat, 24 Aug 2024 21:41:36 +0000 Subject: [PATCH 6/6] Auto-format by https://ultralytics.com/actions --- export.py | 1 + utils/augmentations.py | 1 - utils/callbacks.py | 1 - utils/dataloaders.py | 7 ++++--- utils/general.py | 2 -- utils/loggers/__init__.py | 3 ++- utils/loggers/clearml/clearml_utils.py | 14 +++++++------- utils/loggers/wandb/wandb_utils.py | 12 ++++++------ utils/metrics.py | 8 +++----- utils/segment/augmentations.py | 1 - utils/segment/general.py | 3 --- utils/triton.py | 3 +-- 12 files changed, 24 insertions(+), 32 deletions(-) diff --git a/export.py b/export.py index dfb1c06fb5e2..f3216a564290 100644 --- a/export.py +++ b/export.py @@ -449,6 +449,7 @@ def transform_fn(data_item): Quantization transform function. Extracts and preprocess input data from dataloader item for quantization. + Parameters: data_item: Tuple with data item produced by DataLoader during iteration Returns: diff --git a/utils/augmentations.py b/utils/augmentations.py index 4a6e441d7c45..bdbe07712716 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -156,7 +156,6 @@ def random_perspective( ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] - """Applies random perspective transformation to an image, modifying the image and corresponding labels.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/callbacks.py b/utils/callbacks.py index 0a0bcbdb2b96..21c587bd74c6 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -64,7 +64,6 @@ def run(self, hook, *args, thread=False, **kwargs): thread: (boolean) Run callbacks in daemon thread kwargs: Keyword Arguments to receive from YOLOv5 """ - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" for logger in self._callbacks[hook]: if thread: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 21308f0cedbd..bdeffec465e7 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1104,7 +1104,8 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False): """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() - Arguments + + Arguments: path: Path to images directory weights: Train, val, test weights (list, tuple) annotated_only: Only use images with an annotated txt file @@ -1183,7 +1184,7 @@ class HUBDatasetStats: """ Class for generating HUB dataset JSON and `-hub` dataset directory. 
- Arguments + Arguments: path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -1314,7 +1315,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder): """ YOLOv5 Classification Dataset. - Arguments + Arguments: root: Dataset path transform: torchvision transforms, used by default album_transform: Albumentations transforms, used if installed diff --git a/utils/general.py b/utils/general.py index e311504b3031..57db68a7ac76 100644 --- a/utils/general.py +++ b/utils/general.py @@ -518,7 +518,6 @@ def check_font(font=FONT, progress=False): def check_dataset(data, autodownload=True): """Validates and/or auto-downloads a dataset, returning its configuration as a dictionary.""" - # Download (optional) extract_dir = "" if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): @@ -1023,7 +1022,6 @@ def non_max_suppression( Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ - # Checks assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2bd8583d2ade..7051e8da0a29 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -350,7 +350,8 @@ class GenericLogger: """ YOLOv5 General purpose logger for non-task specific logging Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) - Arguments + + Arguments: opt: Run arguments console_logger: Console logger include: loggers to include diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 2b5351ef8533..de4129e08a16 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -80,7 +80,7 @@ def __init__(self, opt, hyp): - Initialize ClearML Task, this object will capture the experiment - Upload dataset version to ClearML Data if opt.upload_dataset is True - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run hyp (dict) -- Hyperparameters for this run @@ -133,7 +133,7 @@ def log_scalars(self, metrics, epoch): """ Log scalars/metrics to ClearML. - arguments: + Arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} epoch (int) iteration number for the current set of metrics """ @@ -145,7 +145,7 @@ def log_model(self, model_path, model_name, epoch=0): """ Log model weights to ClearML. - arguments: + Arguments: model_path (PosixPath or str) Path to the model weights model_name (str) Name of the model visible in ClearML epoch (int) Iteration / epoch of the model weights @@ -158,7 +158,7 @@ def log_summary(self, metrics): """ Log final metrics to a summary table. - arguments: + Arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} """ for k, v in metrics.items(): @@ -168,7 +168,7 @@ def log_plot(self, title, plot_path): """ Log image as plot in the plot section of ClearML. - arguments: + Arguments: title (str) Title of the plot plot_path (PosixPath or str) Path to the saved image file """ @@ -183,7 +183,7 @@ def log_debug_samples(self, files, title="Debug Samples"): """ Log files (images) as debug samples in the ClearML task. 
- arguments: + Arguments: files (List(PosixPath)) a list of file paths in PosixPath format title (str) A title that groups together images with the same values """ @@ -199,7 +199,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres """ Draw the bounding boxes on a single image and report the result as a ClearML debug sample. - arguments: + Arguments: image_path (PosixPath) the path the original image file boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] class_names (dict): dict containing mapping of class int to class name diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 930f2c7543af..6a32c8cc7b03 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -49,7 +49,7 @@ def __init__(self, opt, run_id=None, job_type="Training"): - Upload dataset if opt.upload_dataset is True - Setup training processes if job_type is 'Training' - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed job_type (str) -- To set the job_type for this run @@ -90,7 +90,7 @@ def setup_training(self, opt): - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - Setup log_dict, initialize bbox_interval - arguments: + Arguments: opt (namespace) -- commandline arguments for this run """ @@ -120,7 +120,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ Log the model checkpoint as W&B artifact. - arguments: + Arguments: path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number @@ -159,7 +159,7 @@ def log(self, log_dict): """ Save the metrics to the logging dictionary. - arguments: + Arguments: log_dict (Dict) -- metrics/media to be logged in current step """ if self.wandb_run: @@ -170,7 +170,7 @@ def end_epoch(self): """ Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. - arguments: + Arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ if self.wandb_run: @@ -197,7 +197,7 @@ def finish_run(self): @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): - """source - https://gist.github.com/simon-weber/7853144 + """Source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. diff --git a/utils/metrics.py b/utils/metrics.py index 385fdc471748..9acc38591f96 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -41,7 +41,6 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names # Returns The average precision as computed in py-faster-rcnn. """ - # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] @@ -103,7 +102,6 @@ def compute_ap(recall, precision): # Returns Average precision, precision curve, recall curve """ - # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([1.0], precision, [0.0])) @@ -137,6 +135,7 @@ def process_batch(self, detections, labels): Return intersection-over-union (Jaccard index) of boxes. 
Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 @@ -233,7 +232,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 Input shapes are box1(1,4) to box2(n,4). """ - # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) @@ -279,14 +277,15 @@ def box_iou(box1, box2, eps=1e-7): Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) + Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) @@ -304,7 +303,6 @@ def bbox_ioa(box1, box2, eps=1e-7): box2: np.array of shape(nx4) returns: np.array of shape(n) """ - # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index d7dd8aec6691..2e1dca1198b0 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -29,7 +29,6 @@ def random_perspective( ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] - """Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/segment/general.py b/utils/segment/general.py index 2f65d60238dd..0793470a95e4 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -14,7 +14,6 @@ def crop_mask(masks, boxes): - masks should be a size [n, h, w] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ - n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) @@ -33,7 +32,6 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): return: h, w, n """ - c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW @@ -51,7 +49,6 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return: h, w, n """ - c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW diff --git a/utils/triton.py b/utils/triton.py index 3d529ec88a07..2fee42815517 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -17,10 +17,9 @@ class TritonRemoteModel: def __init__(self, url: str): """ - Keyword arguments: + Keyword Arguments: url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 """ - parsed_url = urlparse(url) if parsed_url.scheme == "grpc": from tritonclient.grpc import InferenceServerClient, InferInput