feat: detecting humans

Hazel 2025-04-23 12:17:26 +02:00
parent bb3a3256cb
commit 2951becc00
3 changed files with 110 additions and 1 deletion

View File

@@ -1,7 +1,10 @@
[project]
name = "secure_pixelation"
version = "0.0.0"
dependencies = []
dependencies = [
"opencv_python~=4.11.0.86",
"imutils~=0.5.4",
]
authors = []
description = "Hiding faces with Mosaic has proven incredibly unsafe especially with videos, because the algorythm isn't destructive. However, if you black out the selected area, repopulate it with generative ai, and then pixelate it, it should look authentic, but be 100% destructive, thus safe."
readme = "README.md"
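
The description above sketches the intended pipeline: black out the selected area, repaint it with generative AI, then pixelate. As a rough illustration of why the blackout step makes the result destructive, here is a minimal sketch; it is not part of this commit, the function name and block size are made up, and the generative repaint step is only marked as a comment:

import cv2
import numpy as np

def destructive_pixelate(image: np.ndarray, x: int, y: int, w: int, h: int, block: int = 16) -> np.ndarray:
    region = image[y:y + h, x:x + w]
    region[:] = 0  # black out first: the original pixel data is gone for good
    # (a generative model would repaint the region here before pixelation)
    small = cv2.resize(region, (max(1, w // block), max(1, h // block)), interpolation=cv2.INTER_LINEAR)
    image[y:y + h, x:x + w] = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
    return image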

View File

@@ -1,2 +1,7 @@
from .detect_humans import detect_humans


def cli():
    print("Running secure_pixelation")
    detect_humans("assets/humans.png")

View File

@@ -0,0 +1,101 @@
from pathlib import Path
import urllib.request
from typing import Dict, List

import cv2
import imutils  # declared as a project dependency, not used in this module yet
import numpy as np


MODEL_PATH = Path("assets", "models")

# Config / weight files that are downloaded on demand into MODEL_PATH.
MODEL_DEPENDENCIES: Dict[str, List[str]] = {
    "yolov3": [
        "https://github.com/pjreddie/darknet/raw/refs/heads/master/cfg/yolov3.cfg",
        "https://github.com/patrick013/Object-Detection---Yolov3/raw/refs/heads/master/model/yolov3.weights"
    ]
}
def require_net(name: str):
    """Ensure the config and weight files for the given model exist in MODEL_PATH,
    downloading any that are missing."""
    if name not in MODEL_DEPENDENCIES:
        print(f"model {name} not found")
        exit(1)

    print(f"preparing {name}")
    MODEL_PATH.mkdir(exist_ok=True, parents=True)

    for dep_url in MODEL_DEPENDENCIES[name]:
        dep_path = MODEL_PATH / dep_url.split("/")[-1]
        if dep_path.exists():
            continue

        print(f"downloading {dep_url}")
        urllib.request.urlretrieve(
            url=dep_url,
            filename=str(dep_path)
        )
def detect_humans(to_detect: str):
    _p = Path(to_detect)
    detected = str(_p.with_name(_p.stem + ".detected" + _p.suffix))
    print(f"detecting humans: {to_detect} => {detected}")

    require_net("yolov3")

    # Load YOLO and resolve the names of its output layers
    net = cv2.dnn.readNet(str(MODEL_PATH / 'yolov3.weights'), str(MODEL_PATH / 'yolov3.cfg'))
    layer_names = net.getLayerNames()
    indices = net.getUnconnectedOutLayers()
    output_layers = [layer_names[int(i) - 1] for i in indices]

    # Load image
    image = cv2.imread(to_detect)
    if image is None:
        print(f"could not read {to_detect}")
        exit(1)
    height, width, channels = image.shape

    # Create blob and do forward pass
    blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    boxes = []
    confidences = []

    # Collect a box for every sufficiently confident "person" detection
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5 and class_id == 0:  # class ID 0 is "person"
                # YOLO returns centre coordinates and sizes relative to the image
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)

                x = int(center_x - w / 2)
                y = int(center_y - h / 2)

                boxes.append([x, y, w, h])
                confidences.append(float(confidence))

    # Apply Non-Maximum Suppression to drop overlapping boxes
    indices = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.5, nms_threshold=0.4)

    for i in indices:
        i = i[0] if isinstance(i, (list, np.ndarray)) else i  # older OpenCV returns Nx1 arrays
        x, y, w, h = boxes[i]
        print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)

    # Save the result
    cv2.imwrite(detected, image)
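
For reference, a minimal usage sketch of the new module; it assumes the file above lives at secure_pixelation/detect_humans.py (as the relative import in the CLI suggests) and that the YOLOv3 files can be downloaded:

from secure_pixelation.detect_humans import detect_humans

# Prints one line per kept box and writes the annotated copy next to the input,
# e.g. assets/humans.png => assets/humans.detected.png.
detect_humans("assets/humans.png")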