generated from Hazel/python-project
Compare commits
2 Commits
923239abd3
...
2951becc00
Author | SHA1 | Date | |
---|---|---|---|
2951becc00 | |||
bb3a3256cb |
1
.gitignore
vendored
1
.gitignore
vendored
@ -160,3 +160,4 @@ cython_debug/
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
.venv
|
||||
assets/*
|
@ -12,7 +12,7 @@ I first realized that a normal mosaic algorithm isn't safe AT ALL seeing this pr
|
||||
|
||||
```bash
|
||||
# Step 1: Create and activate virtual environment
|
||||
python3 -m venv venv
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
|
||||
# Step 2: Install the local Python program (add the -e flag for development)
|
||||
|
@ -1,7 +1,10 @@
|
||||
[project]
|
||||
name = "secure_pixelation"
|
||||
version = "0.0.0"
|
||||
dependencies = []
|
||||
dependencies = [
|
||||
"opencv_python~=4.11.0.86",
|
||||
"imutils~=0.5.4",
|
||||
]
|
||||
authors = []
|
||||
description = "Hiding faces with Mosaic has proven incredibly unsafe especially with videos, because the algorithm isn't destructive. However, if you black out the selected area, repopulate it with generative ai, and then pixelate it, it should look authentic, but be 100% destructive, thus safe."
|
||||
readme = "README.md"
|
||||
|
@ -1,2 +1,7 @@
|
||||
from .detect_humans import detect_humans
|
||||
|
||||
|
||||
def cli():
    """Command-line entry point: run human detection on the bundled sample image."""
    # Plain string literal: the original used an f-string with no placeholders (F541).
    print("Running secure_pixelation")

    detect_humans("assets/humans.png")
|
||||
|
101
secure_pixelation/detect_humans.py
Normal file
101
secure_pixelation/detect_humans.py
Normal file
@ -0,0 +1,101 @@
|
||||
from pathlib import Path
|
||||
import urllib.request
|
||||
from typing import Dict, List
|
||||
|
||||
import cv2
|
||||
import imutils
|
||||
import numpy as np
|
||||
|
||||
|
||||
|
||||
# Directory where model config/weight files are cached.
MODEL_PATH = Path("assets", "models")
# Download URLs for each supported model's dependency files.
MODEL_DEPENDENCIES: Dict[str, List[str]] = {
    "yolov3": [
        "https://github.com/pjreddie/darknet/raw/refs/heads/master/cfg/yolov3.cfg",
        "https://github.com/patrick013/Object-Detection---Yolov3/raw/refs/heads/master/model/yolov3.weights"
    ]
}


def require_net(name: str):
    """Ensure the files for model *name* exist locally, downloading any missing ones.

    Args:
        name: Key into MODEL_DEPENDENCIES (e.g. "yolov3").

    Raises:
        SystemExit: If *name* is not a known model.
    """
    if name not in MODEL_DEPENDENCIES:
        print(f"model {name} not found")
        # raise SystemExit explicitly instead of the interactive-only exit()
        # builtin, which is provided by the site module and not guaranteed.
        raise SystemExit(1)

    print(f"preparing {name}")
    MODEL_PATH.mkdir(exist_ok=True, parents=True)

    for dep_url in MODEL_DEPENDENCIES[name]:
        # Cache under the URL's basename (e.g. "yolov3.weights").
        dep_path = MODEL_PATH / dep_url.split("/")[-1]

        # Skip files that were already downloaded.
        if dep_path.exists():
            continue

        print(f"downloading {dep_url}")
        urllib.request.urlretrieve(
            url=dep_url,
            filename=str(dep_path)
        )
|
||||
|
||||
|
||||
|
||||
# print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
|
||||
|
||||
|
||||
def detect_humans(to_detect: str):
    """Detect humans in an image with YOLOv3 and write an annotated copy.

    Args:
        to_detect: Path to the input image. The annotated result is written
            next to it with a ``.detected`` infix (e.g. ``humans.detected.png``).

    Raises:
        FileNotFoundError: If the input image cannot be read.
    """
    _p = Path(to_detect)
    detected = str(_p.with_name(_p.stem + ".detected" + _p.suffix))
    print(f"detecting humans: {to_detect} => {detected}")

    require_net("yolov3")

    # Load the YOLOv3 network from the downloaded weights/config.
    net = cv2.dnn.readNet(str(MODEL_PATH / 'yolov3.weights'), str(MODEL_PATH / 'yolov3.cfg'))
    layer_names = net.getLayerNames()
    indices = net.getUnconnectedOutLayers()
    # getUnconnectedOutLayers returns 1-based indices; int() normalizes the
    # scalar/array variants that differ across OpenCV versions.
    output_layers = [layer_names[int(i) - 1] for i in indices]

    # Load image. cv2.imread returns None (no exception) on failure, which
    # would otherwise surface as a confusing AttributeError on .shape below.
    image = cv2.imread(to_detect)
    if image is None:
        raise FileNotFoundError(f"could not read image: {to_detect}")
    height, width, channels = image.shape

    # Create blob and do a single forward pass through the network.
    blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    boxes = []
    confidences = []

    # Collect candidate bounding boxes for each detected object.
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5 and class_id == 0:  # Class ID 0 is human
                # Detection coords are normalized center/size; convert to
                # absolute pixel top-left corner plus width/height.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)

                boxes.append([x, y, w, h])
                confidences.append(float(confidence))

    # Apply Non-Maximum Suppression to drop overlapping duplicates.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.5, nms_threshold=0.4)

    for i in indices:
        i = i[0] if isinstance(i, (list, np.ndarray)) else i  # Flatten index if needed
        x, y, w, h = boxes[i]

        print(f"\tfound human at {x}/{y} with the size of {w} x {h}")
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)

    # Save the annotated result.
    cv2.imwrite(detected, image)
|
||||
|
Loading…
x
Reference in New Issue
Block a user