Compare commits


No commits in common. "bcfc90acdf215cc9790c537a34055aaaf3e2a3fd" and "2951becc00021fd1557db6d52706484e28e54085" have entirely different histories.

3 changed files with 8 additions and 77 deletions

pyproject.toml

@@ -3,7 +3,7 @@ name = "secure_pixelation"
 version = "0.0.0"
 dependencies = [
     "opencv_python~=4.11.0.86",
-    "ultralytics~=8.3.114",
+    "imutils~=0.5.4",
 ]
 authors = []
 description = "Hiding faces with Mosaic has proven incredibly unsafe especially with videos, because the algorythm isn't destructive. However, if you black out the selected area, repopulate it with generative ai, and then pixelate it, it should look authentic, but be 100% destructive, thus safe."
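The package description amounts to a three-step pipeline: black out the region (which irreversibly destroys the original pixels), repopulate it with synthetic content, and pixelate the result so it reads as an ordinary mosaic. A minimal sketch of that idea in OpenCV, using classical inpainting as a stand-in for the generative-AI step the description calls for (the function and its parameters are illustrative, not part of this repository):

```python
import cv2
import numpy as np

def destructive_pixelate(image: np.ndarray, x: int, y: int, w: int, h: int,
                         block: int = 12) -> np.ndarray:
    """Black out, repopulate, then pixelate the box (x, y, w, h)."""
    out = image.copy()
    mask = np.zeros(out.shape[:2], dtype=np.uint8)
    mask[y:y + h, x:x + w] = 255

    # 1. Black out: the original pixels are gone for good.
    out[y:y + h, x:x + w] = 0

    # 2. Repopulate: fill the hole with plausible synthetic content.
    out = cv2.inpaint(out, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)

    # 3. Pixelate: downscale, then upscale with nearest-neighbour blocks.
    region = out[y:y + h, x:x + w]
    small = cv2.resize(region, (max(1, w // block), max(1, h // block)),
                       interpolation=cv2.INTER_LINEAR)
    out[y:y + h, x:x + w] = cv2.resize(small, (w, h),
                                       interpolation=cv2.INTER_NEAREST)
    return out
```

Because step 1 zeroes the region before anything else touches it, nothing recoverable survives under the mosaic; steps 2 and 3 only control how natural the result looks.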


@@ -4,4 +4,4 @@ from .detect_humans import detect_humans
 
 def cli():
     print(f"Running secure_pixelation")
-    detect_humans("assets/human_detection/humans.png")
+    detect_humans("assets/humans.png")

detect_humans.py

@@ -1,13 +1,13 @@
 from pathlib import Path
 import urllib.request
 from typing import Dict, List
-import json
 
-from ultralytics import YOLO
 import cv2
+import imutils
 import numpy as np
 
 MODEL_PATH = Path("assets", "models")
 
 MODEL_DEPENDENCIES: Dict[str, List[str]] = {
     "yolov3": [
@@ -38,42 +38,13 @@ def require_net(name: str):
     )
 
-def detect_human_parts(human: dict):
-    parts = human["parts"]
-    to_detect = human["crop"]["file"]
-    _p = Path(to_detect)
-    detected = str(_p.with_name(_p.stem + "_detected" + _p.suffix))
-    boxes_file = str(_p.with_name(_p.stem + "_boxes.json"))
-    print(f"detecting human parts: {to_detect} => {detected}")
-    model = YOLO('yolov8n-pose.pt')  # You can also try 'yolov8s-pose.pt' for better accuracy
-    results = model(to_detect)[0]
-    image = cv2.imread(to_detect)
-    did_detect = False
-    for person in results.keypoints.data:
-        keypoints = person.cpu().numpy()
-        # Common keypoints: 0=nose, 5=left_shoulder, 11=left_hip, 15=left_foot
-        head = tuple(map(int, keypoints[0][:2]))
-        foot = tuple(map(int, keypoints[15][:2]))
-        cv2.circle(image, head, 5, (255, 0, 0), -1)  # Head in blue
-        cv2.circle(image, foot, 5, (0, 0, 255), -1)  # Foot in red
-        did_detect = True
-    if did_detect:
-        cv2.imwrite(detected, image)
-
-def detect_humans(to_detect: str, crop_padding: int = 20):
-    _p = Path(to_detect)
-    detected = str(_p.with_name(_p.stem + "_detected" + _p.suffix))
-    boxes_file = str(_p.with_name(_p.stem + "_boxes.json"))
+# print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
+
+def detect_humans(to_detect: str):
+    _p = Path(to_detect)
+    detected = str(_p.with_name(_p.stem + ".detected" + _p.suffix))
     print(f"detecting humans: {to_detect} => {detected}")
     require_net("yolov3")
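For reference, the removed detect_human_parts leaned on ultralytics pose estimation rather than the cv2.dnn pipeline used below. A self-contained sketch of that approach, assuming the standard ultralytics API (the input path is illustrative; in the COCO keypoint layout that YOLOv8-pose emits, index 15 is the left ankle, so "foot" in the deleted comment is an approximation):

```python
from ultralytics import YOLO
import cv2

model = YOLO("yolov8n-pose.pt")    # weights are downloaded on first use
results = model("humans.png")[0]   # illustrative input path

image = cv2.imread("humans.png")
for person in results.keypoints.data:  # one (17, 3) array per person
    kp = person.cpu().numpy()          # COCO order: 0=nose, ..., 15=left ankle
    head = tuple(map(int, kp[0][:2]))
    foot = tuple(map(int, kp[15][:2]))
    cv2.circle(image, head, 5, (255, 0, 0), -1)  # head in blue (BGR)
    cv2.circle(image, foot, 5, (0, 0, 255), -1)  # left ankle in red
cv2.imwrite("humans_detected.png", image)
```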
@@ -87,7 +58,6 @@ def detect_humans(to_detect: str, crop_padding: int = 20):
 
     # Load image
     image = cv2.imread(to_detect)
-    original_image = cv2.imread(to_detect)
     height, width, channels = image.shape
 
     # Create blob and do forward pass
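The "blob and forward pass" referenced here is the standard cv2.dnn invocation for YOLOv3. A sketch with the commonly used preprocessing parameters (the config/weights paths are assumptions based on MODEL_PATH above, not taken from the repository):

```python
import cv2

# Load the Darknet config and weights fetched by require_net("yolov3").
net = cv2.dnn.readNetFromDarknet("assets/models/yolov3.cfg",
                                 "assets/models/yolov3.weights")

image = cv2.imread("humans.png")  # illustrative input
# Scale to [0, 1], resize to the 416x416 network input, swap BGR -> RGB.
blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255.0, size=(416, 416),
                             swapRB=True, crop=False)
net.setInput(blob)
outputs = net.forward(net.getUnconnectedOutLayersNames())
```

Each row of the concatenated outputs holds box coordinates, an objectness score, and per-class confidences, which is what the boxes and confidences lists fed to NMS below are built from.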
@@ -118,53 +88,14 @@ def detect_humans(to_detect: str, crop_padding: int = 20):
     # Apply Non-Maximum Suppression
     indices = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.5, nms_threshold=0.4)
 
-    boxes_structures = {}
-    human_boxes = boxes_structures["humans"] = []
-
-    human_part_folder = _p.with_name(_p.stem + "_parts")
-    human_part_folder.mkdir(exist_ok=True)
-
     for i in indices:
         i = i[0] if isinstance(i, (list, np.ndarray)) else i  # Flatten index if needed
         x, y, w, h = boxes[i]
-
-        human_part_image_path = human_part_folder / (_p.stem + "_" + str(i) + _p.suffix)
-
-        image_height, image_width = image.shape[:2]
-
-        # Compute safe crop coordinates with padding
-        x1 = max(x - crop_padding, 0)
-        y1 = max(y - crop_padding, 0)
-        x2 = min(x + w + crop_padding, image_width)
-        y2 = min(y + h + crop_padding, image_height)
-
-        human_crop = original_image[y1:y2, x1:x2]
-        cv2.imwrite(str(human_part_image_path), human_crop)
-
         print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
-        human_boxes.append({
-            "x": x,
-            "y": y,
-            "w": w,
-            "h": h,
-            "crop": {
-                "file": str(human_part_image_path),
-                "x": x1,
-                "y": y,
-                "w": x2 - x1,
-                "h": y2 - y1,
-            },
-            "parts": {},
-        })
         cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)
 
     # Save the result
-    with open(boxes_file, "w") as f:
-        json.dump(boxes_structures, f)
     cv2.imwrite(detected, image)
-
-    for human in human_boxes:
-        detect_human_parts(human["crop"]["file"])
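The base branch's cropping logic (removed above) pads each detection by crop_padding and clamps the box to the image bounds before slicing. As a standalone helper (names illustrative, not from the repository), the arithmetic is:

```python
import numpy as np

def crop_with_padding(image: np.ndarray, x: int, y: int, w: int, h: int,
                      padding: int = 20) -> np.ndarray:
    """Crop box (x, y, w, h) plus padding, clamped to the image bounds."""
    img_h, img_w = image.shape[:2]
    x1 = max(x - padding, 0)
    y1 = max(y - padding, 0)
    x2 = min(x + w + padding, img_w)
    y2 = min(y + h + padding, img_h)
    return image[y1:y2, x1:x2]
```

Worth noting: the removed code recorded "y": y in the crop entry while slicing from y1, even though the "x" entry used the clamped x1; that asymmetry looks like a bug in the deleted branch rather than intent.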