# imports
from configs import config
from configs.detection import detect_people
from scipy.spatial import distance as dist
import numpy as np
import argparse
import imutils
import cv2
import os

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
    help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
    help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
    help="whether or not output frame should be displayed")
args = vars(ap.parse_args())

# load the COCO class labels the YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])

# load the YOLO object detector trained on the COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# check if the GPU is to be used or not
if config.USE_GPU:
    # set CUDA as the preferable backend and target
    print("[INFO] setting preferable backend and target to CUDA...")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

# determine only the "output" layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# initialize the video stream and pointer to the output video file
print("[INFO] accessing video stream...")
# open the input video if available, else the webcam stream
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
writer = None

# loop over the frames from the video stream
while True:
    # read the next frame from the input video
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then that is the end of the stream
    if not grabbed:
        break

    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))

    # initialize the set of indexes that violate the minimum social distance
    violate = set()

    # ensure there are at least two people detections (required in order to
    # compute the pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the Euclidean
        # distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")

        # loop over the upper triangular of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two centroid pairs
                # is less than the configured number of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update the violation set with the indexes of the centroid pairs
                    violate.add(i)
                    violate.add(j)

    # loop over the results
    for (i, (prob, bbox, centroid)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then initialize
        # the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        color = (0, 255, 0)

        # if the index pair exists within the violation set, then update the color
        if i in violate:
            color = (0, 0, 255)

        # draw (1) a bounding box around the person and (2) the centroid
        # coordinates of the person
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        cv2.circle(frame, (cX, cY), 5, color, 1)

    # draw the total number of social distancing violations on the output frame
    text = "Social Distancing Violations: {}".format(len(violate))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)

    # check to see if the output frame should be displayed to the screen
    if args["display"] > 0:
        # show the output frame
        cv2.imshow("Output", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, break from the loop
        if key == ord("q"):
            break

    # if an output video file path has been supplied and the video writer has
    # not been initialized, do so now
    if args["output"] != "" and writer is None:
        # initialize the video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 25,
            (frame.shape[1], frame.shape[0]), True)

    # if the video writer is not None, write the frame to the output video file
    if writer is not None:
        print("[INFO] writing stream to output")
        writer.write(frame)
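The loop above never releases the video stream, the writer, or the display window when it ends. A minimal cleanup sketch, assuming the same vs, writer, and args names used in the script above, that could be appended after the loop:

# minimal cleanup sketch (assumes the vs/writer/args objects from the script above)
vs.release()
if writer is not None:
    writer.release()
if args["display"] > 0:
    cv2.destroyAllWindows()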
Write, run, and share Python code online for free using OneCompiler's Python online compiler. It is a robust, feature-rich online compiler for the Python language, supporting both Python 3 and Python 2.7. Getting started with OneCompiler's Python editor is easy and fast: the editor shows sample boilerplate code when you choose Python or Python 2 as the language and start coding.
OneCompiler's Python online editor supports stdin, so users can pass input to programs through the STDIN textbox under the I/O tab. The following is a sample Python program which takes a name as input and prints a hello greeting with that name.
import sys
# read the name from STDIN and strip the trailing newline
name = sys.stdin.readline().strip()
print("Hello " + name)
Python is a very popular general-purpose programming language created by Guido van Rossum and released in 1991. It is very popular for web development, and you can build almost anything with it: mobile apps, web apps, tools, data analytics, machine learning, and more. It is designed to be simple and readable, almost like the English language, and its high productivity and efficiency make it a very popular language.
Whenever you want to perform a set of operations based on a condition, IF-ELSE is used.
if conditional-expression:
    #code
elif conditional-expression:
    #code
else:
    #code
Indentation is very important in Python; make sure the indentation is followed correctly.
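For example, a small illustrative check (the marks value is made up) that picks one of three branches:

marks = 75
if marks >= 80:
    print("Distinction")
elif marks >= 50:
    print("Pass")
else:
    print("Fail")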
A for loop is used to iterate over collections (list, tuple, set, dictionary) or strings.
mylist=("Iphone","Pixel","Samsung")
for i in mylist:
print(i)
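A for loop can also iterate over a range of numbers; a short illustrative snippet:

# print the numbers 0 to 4 using range()
for i in range(5):
    print(i)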
A while loop is also used to repeat a set of statements based on a condition. Usually, while is preferred when the number of iterations is not known in advance.
while condition:
    #code
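For example, an illustrative counter loop that keeps running until its condition becomes false:

i = 1
while i <= 5:
    print(i)
    i += 1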
There are four types of collections in Python.
A list is a collection which is ordered and can be changed. Lists are specified in square brackets.
mylist=["iPhone","Pixel","Samsung"]
print(mylist)
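For example, an illustrative snippet showing that a list can be changed in place:

mylist = ["iPhone", "Pixel", "Samsung"]
mylist[1] = "OnePlus"      # replace an existing item
mylist.append("Nokia")     # add a new item at the end
print(mylist)              # ['iPhone', 'OnePlus', 'Samsung', 'Nokia']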
A tuple is a collection which is ordered and cannot be changed. Tuples are specified in round brackets.
myTuple=("iPhone","Pixel","Samsung")
print(myTuple)
The snippet below throws an error, because a value cannot be assigned to a tuple element after creation.
myTuple=("iPhone","Pixel","Samsung")
print(myTuple)
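# the next line raises TypeError: 'tuple' object does not support item assignment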
myTuple[1]="onePlus"
print(myTuple)
A set is a collection which is unordered and unindexed. Sets are specified in curly brackets.
myset = {"iPhone","Pixel","Samsung"}
print(myset)
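For example, an illustrative snippet showing that duplicates are ignored and new items can be added:

myset = {"iPhone", "Pixel", "Samsung", "Pixel"}   # the duplicate "Pixel" is stored only once
myset.add("OnePlus")
print(myset)   # element order is not guaranteed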
A dictionary is a collection of key-value pairs which can be changed and is indexed by keys (in Python 3.7+ dictionaries also preserve insertion order). They are written in curly brackets with key-value pairs.
mydict = {
"brand" :"iPhone",
"model": "iPhone 11"
}
print(mydict)
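For example, an illustrative snippet showing how values are read and changed by key:

mydict = {
    "brand": "iPhone",
    "model": "iPhone 11"
}
print(mydict["model"])          # read a value by its key
mydict["model"] = "iPhone 12"   # change an existing value
mydict["year"] = 2020           # add a new key-value pair
print(mydict)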
The following libraries are supported by OneCompiler's Python compiler:
Name | Description |
---|---|
NumPy | NumPy is a Python library that helps users work with N-dimensional arrays with ease |
SciPy | SciPy is a scientific computation library which builds on NumPy for convenient and fast N-dimensional array manipulation |
SKLearn/Scikit-learn | SKLearn, or Scikit-learn, is one of the most useful libraries for machine learning in Python |
Pandas | Pandas is a widely used Python library for data manipulation and analysis |
DOcplex | DOcplex is IBM's Decision Optimization CPLEX Modeling for Python, a library composed of Mathematical Programming Modeling and Constraint Programming Modeling |
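As a quick check that these libraries are available, a minimal sketch using NumPy and SciPy (the same cdist call used in the social distancing script at the top):

import numpy as np
from scipy.spatial import distance as dist

points = np.array([[0, 0], [3, 4]])
# pairwise Euclidean distances between the two points
print(dist.cdist(points, points, metric="euclidean"))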