# YOLOv8 detection + DeepSORT tracking with a Keras-based gender classifier.
import hydra
import torch
import argparse
import time
from pathlib import Path
import cv2
import torch.backends.cudnn as cudnn
from numpy import random
from ultralytics.yolo.engine.predictor import BasePredictor
from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
from ultralytics.yolo.utils.checks import check_imgsz
from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
from collections import deque
import numpy as np

palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
data_deque = {}
deepsort = None


def count(founded_classes, im0):
    """Draw a per-class detection count overlay on the frame."""
    model_values = []
    aligns = im0.shape
    align_bottom = aligns[0] / 14   # starting y offset, derived from the image height
    align_right = aligns[1] / 1.2   # x position of the overlay, derived from the image width
    for i, (k, v) in enumerate(founded_classes.items()):
        a = f"{k} = {v}"
        model_values.append(v)
        align_bottom = align_bottom + 35
        cv2.line(im0, (int(align_right), int(align_bottom - 10)),
                 (int(align_right + 210), int(align_bottom - 10)), (0, 255, 0), 40)
        cv2.putText(im0, str(a), (int(align_right), int(align_bottom)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)


class GenderClassifier:
    def __init__(self, weights):
        self.weights = weights
        self.model = self.load_model()

    def preprocess_image(self, image):
        face_crop = cv2.resize(image, (100, 100))
        face_crop = face_crop.astype("float") / 255.0
        face_crop = img_to_array(face_crop)
        face_crop = np.expand_dims(face_crop, axis=0)
        return face_crop

    def load_model(self):
        return load_model(self.weights)

    def predict(self, image):
        classes = ['man', 'woman']
        face_crop = self.preprocess_image(image)
        # model.predict returns a 2D array, e.g. [[9.9993384e-01 7.4850512e-05]]
        conf = self.model.predict(face_crop)[0]
        # pick the label with the highest confidence
        idx = np.argmax(conf)
        return classes[idx]


def init_tracker():
    global deepsort
    cfg_deep = get_config()
    cfg_deep.merge_from_file("deep_sort_pytorch/configs/deep_sort.yaml")
    deepsort = DeepSort(cfg_deep.DEEPSORT.REID_CKPT,
                        max_dist=cfg_deep.DEEPSORT.MAX_DIST,
                        min_confidence=cfg_deep.DEEPSORT.MIN_CONFIDENCE,
                        nms_max_overlap=cfg_deep.DEEPSORT.NMS_MAX_OVERLAP,
                        max_iou_distance=cfg_deep.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg_deep.DEEPSORT.MAX_AGE,
                        n_init=cfg_deep.DEEPSORT.N_INIT,
                        nn_budget=cfg_deep.DEEPSORT.NN_BUDGET,
                        use_cuda=True)


def xyxy_to_xywh(*xyxy):
    """Convert a box from corner format (x1, y1, x2, y2) to center format (x_c, y_c, w, h), in absolute pixels."""
    bbox_left = min([xyxy[0].item(), xyxy[2].item()])
    bbox_top = min([xyxy[1].item(), xyxy[3].item()])
    bbox_w = abs(xyxy[0].item() - xyxy[2].item())
    bbox_h = abs(xyxy[1].item() - xyxy[3].item())
    x_c = bbox_left + bbox_w / 2
    y_c = bbox_top + bbox_h / 2
    return x_c, y_c, bbox_w, bbox_h


def xyxy_to_tlwh(bbox_xyxy):
    """Convert boxes from corner format to top-left/width/height format."""
    tlwh_bboxs = []
    for i, box in enumerate(bbox_xyxy):
        x1, y1, x2, y2 = [int(i) for i in box]
        top = x1
        left = y1
        w = int(x2 - x1)
        h = int(y2 - y1)
        tlwh_bboxs.append([top, left, w, h])
    return tlwh_bboxs


def compute_color_for_labels(label):
    """Return a fixed color for common classes, and a hashed color otherwise."""
    if label == 0:    # person
        color = (85, 45, 255)
    elif label == 2:  # car
        color = (222, 82, 175)
    elif label == 3:  # motorbike
        color = (0, 204, 255)
    elif label == 5:  # bus
        color = (0, 149, 255)
    else:
        color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
    return tuple(color)


def draw_border(img, pt1, pt2, color, thickness, r, d):
    """Draw a filled rectangle with rounded corners."""
    x1, y1 = pt1
    x2, y2 = pt2
    # Top left
    cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
    cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)
    # Top right
    cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
    cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)
    # Bottom left
    cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
    cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)
    # Bottom right
    cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
    cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
    cv2.rectangle(img, (x1 + r, y1), (x2 - r, y2), color, -1, cv2.LINE_AA)
    cv2.rectangle(img, (x1, y1 + r), (x2, y2 - r - d), color, -1, cv2.LINE_AA)
    cv2.circle(img, (x1 + r, y1 + r), 2, color, 12)
    cv2.circle(img, (x2 - r, y1 + r), 2, color, 12)
    cv2.circle(img, (x1 + r, y2 - r), 2, color, 12)
    cv2.circle(img, (x2 - r, y2 - r), 2, color, 12)
    return img


def UI_box(x, img, color=None, label=None, line_thickness=None):
    """Plot one bounding box (and optional label) on image img."""
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        cv2.rectangle(img, (c1[0], c1[1] - t_size[1] - 3), (c1[0] + t_size[0], c1[1] + 3), color, -1, cv2.LINE_AA)
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def draw_boxes(img, bbox, names, object_id, identities=None, offset=(0, 0)):
    """Draw tracked boxes, each labelled with its track ID and a predicted gender."""
    classifier = GenderClassifier("model.h5")
    height, width, _ = img.shape
    # remove tracked points from the buffer when an object is lost
    for key in list(data_deque):
        if key not in identities:
            data_deque.pop(key)
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(i) for i in box]
        x1 += offset[0]
        x2 += offset[0]
        y1 += offset[1]
        y2 += offset[1]
        roi = img[y1:y2, x1:x2]
        gender_label = ""
        # classify the crop unless both of its dimensions are tiny
        if roi.shape[0] > 10 or roi.shape[1] > 10:
            gender_label = classifier.predict(roi)
        Y = y1 - 10 if y1 - 10 > 10 else y1 + 10
        text = gender_label
        # get the track ID of the object
        id = int(identities[i]) if identities is not None else 0
        label = f"{id}:{text}"
        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
        text_w, text_h = text_size[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 103, 255), thickness=2, lineType=cv2.LINE_AA)
        cv2.line(img, (x1, Y), (text_w + x1, Y), (121, 233, 121), 30)
        cv2.putText(img, label, (x1, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return img


class DetectionPredictor(BasePredictor):

    def get_annotator(self, img):
        return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))

    def preprocess(self, img):
        img = torch.from_numpy(img).to(self.model.device)
        img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
        img /= 255  # 0 - 255 to 0.0 - 1.0
        return img

    def postprocess(self, preds, img, orig_img):
        preds = ops.non_max_suppression(preds,
                                        self.args.conf,
                                        self.args.iou,
                                        agnostic=self.args.agnostic_nms,
                                        max_det=self.args.max_det)
        for i, pred in enumerate(preds):
            shape = orig_img[i].shape if self.webcam else orig_img.shape
            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
        return preds

    def write_results(self, idx, preds, batch):
        p, im, im0 = batch
        all_outputs = []
        log_string = ""
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1
        im0 = im0.copy()
        if self.webcam:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count
        else:
            frame = getattr(self.dataset, 'frame', 0)
        self.data_path = p
        save_path = str(self.save_dir / p.name)  # im.jpg
        self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
        log_string += '%gx%g ' % im.shape[2:]  # print string
        self.annotator = self.get_annotator(im0)

        det = preds[idx]
        all_outputs.append(det)
        if len(det) == 0:
            return log_string

        # per-class counts for the on-screen overlay
        founded_classes = {}
        for c in det[:, 5].unique():
            n = (det[:, 5] == c).sum()  # detections per class
            class_index = int(c)
            count_of_object = int(n)
            founded_classes[self.model.names[class_index]] = int(n)
            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
            count(founded_classes=founded_classes, im0=im0)

        gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        # hand the detections to DeepSORT in center-xywh format
        xywh_bboxs = []
        confs = []
        oids = []
        for *xyxy, conf, cls in reversed(det):
            x_c, y_c, bbox_w, bbox_h = xyxy_to_xywh(*xyxy)
            xywh_bboxs.append([x_c, y_c, bbox_w, bbox_h])
            confs.append([conf.item()])
            oids.append(int(cls))
        xywhs = torch.Tensor(xywh_bboxs)
        confss = torch.Tensor(confs)

        outputs = deepsort.update(xywhs, confss, oids, im0)
        if len(outputs) > 0:
            bbox_xyxy = outputs[:, :4]
            identities = outputs[:, -2]
            object_id = outputs[:, -1]
            draw_boxes(im0, bbox_xyxy, self.model.names, object_id, identities)
        return log_string


@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def predict(cfg):
    init_tracker()
    cfg.model = cfg.model or "yolov8n.pt"
    cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
    predictor = DetectionPredictor(cfg)
    predictor()


if __name__ == "__main__":
    predict()
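Assuming the script above is saved as a file such as predict.py inside a project that provides the deep_sort_pytorch package and the model.h5 gender weights it references (the name predict.py is illustrative), it would typically be launched with Hydra-style key=value overrides matching the cfg fields it reads, for example: python predict.py model=yolov8n.pt source="video.mp4".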
Write, run, and share Python code online for free with OneCompiler's Python compiler. It is a robust, feature-rich online compiler for the Python language, supporting both Python 3 and Python 2.7. Getting started with OneCompiler's Python editor is easy and fast: the editor shows sample boilerplate code when you choose Python or Python2 as the language and start coding.
OneCompiler's Python online editor supports stdin, so users can give input to programs through the STDIN textbox under the I/O tab. The following sample program takes a name as input and prints a hello greeting:
import sys
name = sys.stdin.readline()
print("Hello "+ name)
Python is a very popular general-purpose programming language created by Guido van Rossum and first released in 1991. It is widely used for web development, and you can build almost anything with it: mobile apps, web apps, tools, data analytics, machine learning, and more. It is designed to be simple and readable, almost like English, and it is highly productive and efficient, which has made it a very popular language.
Whenever you want to perform a set of operations based on a condition, if-else is used:
if conditional-expression:
    #code
elif conditional-expression:
    #code
else:
    #code
Indentation is very important in Python, so make sure each block is indented consistently.
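For example, a minimal runnable sketch (the temperature variable and its thresholds are made up for illustration):
temperature = 25
if temperature > 30:
    print("hot")
elif temperature > 20:
    print("warm")   # prints "warm" for 25
else:
    print("cold")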
A for loop is used to iterate over collections (list, tuple, set, dictionary) or strings.
mylist=("Iphone","Pixel","Samsung")
for i in mylist:
print(i)
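Strings can be iterated the same way; looping over one yields its characters:
for ch in "Pixel":
    print(ch)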
A while loop is also used to execute a set of statements repeatedly based on a condition. While is usually preferred when the number of iterations is not known in advance:
while condition:
    #code
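Since the skeleton above is not runnable on its own, here is a minimal sketch that prints the numbers 1 through 5:
i = 1
while i <= 5:
    print(i)
    i += 1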
There are four types of collections in Python.
A list is a collection which is ordered and can be changed. Lists are written in square brackets.
mylist=["iPhone","Pixel","Samsung"]
print(mylist)
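To see that lists can be changed, elements can be reassigned or appended (the replacement values here are illustrative):
mylist = ["iPhone", "Pixel", "Samsung"]
mylist[1] = "OnePlus"   # replace an element in place
mylist.append("Nokia")  # add an element at the end
print(mylist)           # ['iPhone', 'OnePlus', 'Samsung', 'Nokia']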
A tuple is a collection which is ordered and cannot be changed. Tuples are written in round brackets.
myTuple=("iPhone","Pixel","Samsung")
print(myTuple)
The code below throws a TypeError if you try to assign a new value to a tuple element:
myTuple = ("iPhone", "Pixel", "Samsung")
print(myTuple)
myTuple[1] = "onePlus"  # TypeError: 'tuple' object does not support item assignment
print(myTuple)          # never reached
A set is a collection which is unordered and unindexed. Sets are written in curly brackets.
myset = {"iPhone","Pixel","Samsung"}
print(myset)
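Because sets are unordered, duplicates are silently discarded and the print order is not guaranteed; a short sketch:
myset = {"iPhone", "Pixel", "Samsung", "iPhone"}
print(myset)          # the duplicate "iPhone" appears only once
myset.add("OnePlus")  # elements are added with add(), not by index
print(myset)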
A dictionary is a collection of key-value pairs which can be changed and is indexed by keys (since Python 3.7, dictionaries also preserve insertion order). They are written in curly brackets as key-value pairs.
mydict = {
"brand" :"iPhone",
"model": "iPhone 11"
}
print(mydict)
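Values are looked up and updated by key rather than by position; for example (the new model string is illustrative):
print(mydict["model"])         # iPhone 11
mydict["model"] = "iPhone 12"  # update an existing key
print(mydict)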
The following libraries are supported by OneCompiler's Python compiler:
| Name | Description |
| --- | --- |
| NumPy | NumPy helps users work on N-dimensional arrays with ease |
| SciPy | SciPy is a scientific computation library which depends on NumPy for convenient and fast N-dimensional array manipulation |
| SKLearn/Scikit-learn | Scikit-learn (sklearn) is the most widely used library for machine learning in Python |
| Pandas | Pandas is the most popular Python library for data manipulation and analysis |
| DOcplex | DOcplex, IBM's Decision Optimization CPLEX Modeling for Python, is a library composed of Mathematical Programming Modeling and Constraint Programming Modeling |
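As a quick sketch of the first library in the table, NumPy arrays support elementwise arithmetic without explicit Python loops (the values are illustrative):
import numpy as np

a = np.array([1, 2, 3])
b = np.array([10, 20, 30])
print(a + b)          # [11 22 33]
print((a * b).sum())  # 140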