generated from Hazel/python-project
Compare commits
2 Commits
2951becc00
...
bcfc90acdf
Author | SHA1 | Date | |
---|---|---|---|
bcfc90acdf | |||
f13878d8bc |
@ -3,7 +3,7 @@ name = "secure_pixelation"
|
||||
version = "0.0.0"
|
||||
dependencies = [
|
||||
"opencv_python~=4.11.0.86",
|
||||
"imutils~=0.5.4",
|
||||
"ultralytics~=8.3.114",
|
||||
]
|
||||
authors = []
|
||||
description = "Hiding faces with Mosaic has proven incredibly unsafe, especially with videos, because the algorithm isn't destructive. However, if you black out the selected area, repopulate it with generative AI, and then pixelate it, it should look authentic, but be 100% destructive, thus safe."
|
||||
|
@ -4,4 +4,4 @@ from .detect_humans import detect_humans
|
||||
def cli():
    """Command-line entry point: announce the tool and run human detection
    on the bundled sample image.

    Side effects: prints a banner and delegates to ``detect_humans``, which
    writes detection artifacts next to the input image.
    """
    # Plain string — the original used an f-string with no placeholders.
    print("Running secure_pixelation")

    # Diff residue kept both the old path ("assets/humans.png") and the new
    # one; the new asset location is the surviving call.
    detect_humans("assets/human_detection/humans.png")
|
||||
|
@ -1,13 +1,13 @@
|
||||
from pathlib import Path
|
||||
import urllib.request
|
||||
from typing import Dict, List
|
||||
import json
|
||||
|
||||
from ultralytics import YOLO
|
||||
import cv2
|
||||
import imutils
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
MODEL_PATH = Path("assets", "models")
|
||||
MODEL_DEPENDENCIES: Dict[str, List[str]] = {
|
||||
"yolov3": [
|
||||
@ -38,13 +38,42 @@ def require_net(name: str):
|
||||
)
|
||||
|
||||
|
||||
|
||||
# print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
|
||||
def detect_human_parts(human: dict):
    """Run YOLOv8 pose estimation on one cropped human image and mark keypoints.

    Parameters
    ----------
    human : dict
        A human record as produced by ``detect_humans``; must contain
        ``human["crop"]["file"]``, the path of the cropped image to analyse.

    Side effects
    ------------
    When at least one pose is detected, writes ``<stem>_detected<suffix>``
    next to the crop with the head (blue) and left-foot (red) keypoints
    drawn; writes nothing otherwise.
    """
    to_detect = human["crop"]["file"]
    _p = Path(to_detect)
    # "_detected" suffix matches the naming convention used by detect_humans;
    # a stale ".detected" variant from an earlier revision was dropped.
    detected = str(_p.with_name(_p.stem + "_detected" + _p.suffix))
    print(f"detecting human parts: {to_detect} => {detected}")

    # NOTE(review): downloads the weights on first use; 'yolov8s-pose.pt'
    # trades speed for accuracy.
    model = YOLO('yolov8n-pose.pt')

    results = model(to_detect)[0]

    image = cv2.imread(to_detect)

    did_detect = False
    for person in results.keypoints.data:
        keypoints = person.cpu().numpy()

        # Guard against incomplete poses: index 15 below requires the full
        # 17-point COCO layout.
        if len(keypoints) <= 15:
            continue

        # COCO keypoint indices: 0=nose, 5=left_shoulder, 11=left_hip, 15=left_foot
        head = tuple(map(int, keypoints[0][:2]))
        foot = tuple(map(int, keypoints[15][:2]))

        cv2.circle(image, head, 5, (255, 0, 0), -1)  # head in blue (BGR)
        cv2.circle(image, foot, 5, (0, 0, 255), -1)  # foot in red (BGR)
        did_detect = True

    # Only write an output image when something was actually found.
    if did_detect:
        cv2.imwrite(detected, image)
|
||||
|
||||
|
||||
def detect_humans(to_detect: str, crop_padding: int = 20):
|
||||
_p = Path(to_detect)
|
||||
detected = str(_p.with_name(_p.stem + "_detected" + _p.suffix))
|
||||
boxes_file = str(_p.with_name(_p.stem + "_boxes.json"))
|
||||
print(f"detecting humans: {to_detect} => {detected}")
|
||||
|
||||
require_net("yolov3")
|
||||
@ -58,6 +87,7 @@ def detect_humans(to_detect: str):
|
||||
|
||||
# Load image
|
||||
image = cv2.imread(to_detect)
|
||||
original_image = cv2.imread(to_detect)
|
||||
height, width, channels = image.shape
|
||||
|
||||
# Create blob and do forward pass
|
||||
# Apply Non-Maximum Suppression to collapse overlapping candidate boxes.
indices = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.5, nms_threshold=0.4)

boxes_structures = {}
human_boxes = boxes_structures["humans"] = []

# One folder per input image for the per-human crops.
human_part_folder = _p.with_name(_p.stem + "_parts")
human_part_folder.mkdir(exist_ok=True)

# Loop-invariant: image dimensions don't change per detection.
image_height, image_width = image.shape[:2]

for i in indices:
    # cv2 versions differ in whether NMSBoxes returns flat ints or nested arrays.
    i = i[0] if isinstance(i, (list, np.ndarray)) else i
    x, y, w, h = boxes[i]

    human_part_image_path = human_part_folder / (_p.stem + "_" + str(i) + _p.suffix)

    # Compute safe crop coordinates with padding, clamped to the image bounds.
    x1 = max(x - crop_padding, 0)
    y1 = max(y - crop_padding, 0)
    x2 = min(x + w + crop_padding, image_width)
    y2 = min(y + h + crop_padding, image_height)
    # Crop from the pristine copy so earlier rectangle overlays don't leak in.
    human_crop = original_image[y1:y2, x1:x2]

    cv2.imwrite(str(human_part_image_path), human_crop)

    print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
    human_boxes.append({
        "x": x,
        "y": y,
        "w": w,
        "h": h,
        "crop": {
            "file": str(human_part_image_path),
            "x": x1,
            # BUGFIX: was "y": y — the crop's origin is the padded/clamped y1,
            # matching "x": x1 and the w/h deltas below.
            "y": y1,
            "w": x2 - x1,
            "h": y2 - y1,
        },
        "parts": {},
    })

    # Annotate the working image with the detection rectangle.
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)

# Save the results: the box metadata as JSON and the annotated image.
with open(boxes_file, "w") as f:
    json.dump(boxes_structures, f)
cv2.imwrite(detected, image)

for human in human_boxes:
    # BUGFIX: detect_human_parts expects the whole record (it subscripts its
    # argument with "crop"/"parts"), not the crop file path string.
    detect_human_parts(human)
|
||||
|
Loading…
x
Reference in New Issue
Block a user