diff --git a/deblur/deblur.py b/deblur/deblur.py
new file mode 100644
index 0000000..8c11f37
--- /dev/null
+++ b/deblur/deblur.py
@@ -0,0 +1,57 @@
+import numpy as np
+import cv2
+
+
+image = np.array([1, 3, 1, 2, 1, 6, 1], dtype=np.float32)
+kernel = np.array([1, 2, 1], dtype=np.float32) / 4
+
+blurred = np.convolve(image, kernel, mode="same")
+
+print(image)
+print(blurred)
+
+print()
+print("building linalg")
+# https://numpy.org/doc/stable/reference/generated/numpy.linalg.solve.html
+a = []
+b = []
+
+for i in range(len(blurred)):
+    y = blurred[i]
+
+    shift = i - 1
+    equation = np.zeros(len(image))
+    # Calculate valid range in the output array
+    start_eq = max(0, shift)
+    end_eq = min(len(image), shift + len(kernel))
+
+    # Corresponding range in the kernel
+    start_k = start_eq - shift  # how much to cut from the beginning of the kernel
+    end_k = start_k + (end_eq - start_eq)
+
+
+    # Assign the clipped kernel segment
+    equation[start_eq:end_eq] = kernel[start_k:end_k]
+
+    a.append(equation)
+    b.append(y)
+    goal = image[i]
+    print(f"{i} ({goal}): {y} = {equation}")
+
+print()
+print("deblurring")
+deblurred = np.linalg.solve(a, b)
+print(deblurred)
+
+
+
+def show_matrix(m):
+    # Resize the image to make it visible (e.g., scale up to 200x200 pixels)
+    scaled_image = cv2.resize(m, (200, 200), interpolation=cv2.INTER_NEAREST)
+
+    # Display the image
+    cv2.imshow('Test Matrix', scaled_image)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
+
+
diff --git a/secure_pixelation/simple_lama_bindings.py b/secure_pixelation/simple_lama_bindings.py
index bb3ecd9..7402dcb 100644
--- a/secure_pixelation/simple_lama_bindings.py
+++ b/secure_pixelation/simple_lama_bindings.py
@@ -65,6 +65,7 @@ class SimpleLama:
         print(f"Using device: {self.device}")
 
         model_path = hf_hub_download("okaris/simple-lama", "big-lama.pt")
+        print(f"using model at {model_path}")
         self.model = torch.jit.load(model_path, map_location=self.device).eval()
 
     def __call__(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
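
Note on the deblur.py experiment (a sketch, not part of the patch): np.convolve(image, kernel, mode="same") is a linear map, so the blur can be written as a matrix-vector product A @ image and undone with np.linalg.solve. The loop above fills each row with the kernel running forward in j, which matches the true convolution matrix here only because [1, 2, 1] / 4 is symmetric; an asymmetric kernel would have to be reversed when filling the rows. The snippet below uses the same toy signal and plain NumPy to build the matrix explicitly and check both points (names like A, offset, n, k are introduced only for this sketch).

import numpy as np

image = np.array([1, 3, 1, 2, 1, 6, 1], dtype=np.float32)
kernel = np.array([1, 2, 1], dtype=np.float32) / 4
blurred = np.convolve(image, kernel, mode="same")

n, k = len(image), len(kernel)
offset = (k - 1) // 2  # "same" mode keeps the centre of the full convolution
A = np.zeros((n, n), dtype=np.float32)
for i in range(n):
    for j in range(n):
        m = i - j + offset  # kernel index; it runs backwards in j, so each row holds the reversed kernel
        if 0 <= m < k:
            A[i, j] = kernel[m]

assert np.allclose(A @ image, blurred)  # A reproduces np.convolve(..., mode="same")
print(np.linalg.solve(A, blurred))      # ~ [1. 3. 1. 2. 1. 6. 1.]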