Compare commits

...

2 Commits

Author       SHA1        Message                    Date
Hazel Noack  6101a8d5e4  feat: deconvolution        2025-05-07 12:38:25 +02:00
Hazel Noack  b4c7512a73  feat: detect color edges   2025-05-07 12:02:54 +02:00


@@ -2,9 +2,17 @@ import numpy as np
 from scipy.signal import convolve2d
 from scipy.sparse import lil_matrix
 from scipy.sparse.linalg import spsolve
+from scipy.optimize import curve_fit
 import cv2
+import matplotlib
 import matplotlib.pyplot as plt
 from pathlib import Path
+from scipy.ndimage import correlate
+from skimage.restoration import richardson_lucy
+import os
 
+matplotlib.use('qtagg')
+
 """
 https://setosa.io/ev/image-kernels/
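The added imports support the kernel-estimation step introduced in the next hunk: curve_fit fits a Gaussian to an intensity profile sampled across a blurred edge, and skimage's richardson_lucy then deblurs with the resulting kernel. As a minimal illustration of the fitting step on synthetic data (not part of either commit; the profile values and noise level here are invented for the example):

import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, amp, mu, sigma):
    return amp * np.exp(-(x - mu)**2 / (2 * sigma**2))

# Synthetic edge profile: a Gaussian with known sigma=2 plus mild noise
x_vals = np.arange(-10, 11, dtype=np.float64)
profile = gaussian(x_vals, 1.0, 0.0, 2.0) + np.random.normal(0.0, 0.01, x_vals.size)

# Same initial guess the diff uses in fit_gaussian(): amp=1, mu=0, sigma=2
popt, _ = curve_fit(gaussian, x_vals, profile, p0=[1.0, 0.0, 2.0])
print("estimated sigma:", popt[2])  # should come back close to 2.0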
@@ -130,70 +138,119 @@ def get_mask(image_file):
-def deconvolution(image_file):
-    image = cv2.imread(image_file, 0)
-    # image = cv2.resize(image, (200, 200), interpolation= cv2.INTER_LINEAR)
-    mask = get_mask(image_file)
-
-    # Define 2D image and kernel
-    kernel = np.array([
-        [1, 1, 1],
-        [1, 1, 1],
-        [1, 1, 1]
-    ], dtype=np.float32)
-    kernel /= kernel.sum()  # Normalize
+def color_edge_detection(image, threshold=30):
+    img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
+    L, A, B = cv2.split(img_lab)
+
+    def gradient_magnitude(channel):
+        gx = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=3)
+        gy = cv2.Sobel(channel, cv2.CV_64F, 0, 1, ksize=3)
+        return gx, gy
+
+    gxL, gyL = gradient_magnitude(L)
+    gxA, gyA = gradient_magnitude(A)
+    gxB, gyB = gradient_magnitude(B)
+
+    gx_total = gxL**2 + gxA**2 + gxB**2
+    gy_total = gyL**2 + gyA**2 + gyB**2
+    magnitude = np.sqrt(gx_total + gy_total)
+    magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
+
+    edges = (magnitude > threshold).astype(np.uint8) * 255
+    return edges, magnitude
+
+
+# === Step 2: Extract Vertical Profile ===
+def extract_vertical_profile(image, center_x, center_y, length=21):
+    half_len = length // 2
+    y_range = np.clip(np.arange(center_y - half_len, center_y + half_len + 1), 0, image.shape[0] - 1)
+    profile = image[y_range, center_x].astype(np.float64)
+    profile -= profile.min()
+    if profile.max() > 0:
+        profile /= profile.max()
+    return profile, y_range - center_y  # profile, x-axis
+
+
+# === Step 3: Fit Gaussian ===
+def gaussian(x, amp, mu, sigma):
+    return amp * np.exp(-(x - mu)**2 / (2 * sigma**2))
+
+
+def fit_gaussian(profile, x_vals):
+    p0 = [1.0, 0.0, 2.0]  # initial guess: amp, mu, sigma
+    popt, _ = curve_fit(gaussian, x_vals, profile, p0=p0)
+    return popt  # amp, mu, sigma
+
+
+# === Step 4: Create Gaussian Kernel ===
+def create_gaussian_kernel(sigma):
+    ksize = int(sigma * 6) | 1  # ensure odd size
+    kernel_1d = cv2.getGaussianKernel(ksize, sigma)
+    kernel_2d = kernel_1d @ kernel_1d.T
+    return kernel_2d
+
+
+def kernel_detection(blurred, mask, edge_threshold=30, profile_length=21):
+    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
+    edges = cv2.bitwise_and(edges, edges, mask=mask)
+    # show(edges)
+
+    # Find central edge pixel
+    y_idxs, x_idxs = np.where(edges > 0)
+    if len(x_idxs) == 0:
+        raise RuntimeError("No edges found.")
+    idx = len(x_idxs) // 2
+    cx, cy = x_idxs[idx], y_idxs[idx]
+
+    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
+    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
+    popt = fit_gaussian(profile, x_vals)
+    amp, mu, sigma = popt
+
+    print(f"Estimated Gaussian sigma: {sigma:.2f}")
+
+    kernel = create_gaussian_kernel(sigma)
     print(kernel)
-    return
-
-    # Perform 2D convolution (blurring)
-    h, w = image.shape
-    kh, kw = kernel.shape
-    pad_h, pad_w = kh // 2, kw // 2
-
-    show(image)
-    print("Original image:\n", image)
-    print("\nBlurred image:\n", image)
-
-    print("\nBuilding linear system for deconvolution...")
-
-    # Step 2: Build sparse matrix A
-    N = h * w
-    A = lil_matrix((N, N), dtype=np.float32)
-    b = image.flatten()
-
-    def index(y, x):
-        return y * w + x
-
-    for y in range(h):
-        for x in range(w):
-            row_idx = index(y, x)
-            for ky in range(kh):
-                for kx in range(kw):
-                    iy = y + ky - pad_h
-                    ix = x + kx - pad_w
-                    if 0 <= iy < h and 0 <= ix < w:
-                        col_idx = index(iy, ix)
-                        A[row_idx, col_idx] += kernel[ky, kx]
-
-    # Step 3: Solve the sparse system A * x = b
-    x = spsolve(A.tocsr(), b)
-    deblurred = x.reshape((h, w))
-
-    print("\nDeblurred image:\n", np.round(deblurred, 2))
-    show(deblurred)
+
+    return kernel / kernel.sum()
+
+
+def deconvolution(image_file, edge_threshold=30, profile_length=21):
+    blurred = cv2.imread(image_file)
+    mask = get_mask(image_file)
+
+    kernel = kernel_detection(blurred, mask, edge_threshold=edge_threshold, profile_length=profile_length)
+    test_blurred = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
+    deconvolved = richardson_lucy(test_blurred, kernel, num_iter=30)
+
+    # Display results
+    plt.figure(figsize=(12, 4))
+    plt.subplot(1, 3, 1)
+    plt.title("Blurred Image")
+    plt.imshow(test_blurred, cmap='gray')
+    plt.axis('off')
+
+    plt.subplot(1, 3, 2)
+    plt.title("Estimated Kernel")
+    plt.imshow(kernel, cmap='hot')
+    plt.axis('off')
+
+    plt.subplot(1, 3, 3)
+    plt.title("Deconvolved Image")
+    plt.imshow(deconvolved, cmap='gray')
+    plt.axis('off')
+
+    plt.tight_layout()
+    plt.show()
 
 
 if __name__ == "__main__":
     img_file = "assets/real_test.jpg"
     #demo("assets/omas.png")
-    deconvolution(img_file)
+    deconvolution(img_file, edge_threshold=10)
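A possible sanity check for the estimated kernel (a sketch, not part of either commit; the image path and the sigma value are assumptions reusing settings from the diff above): rebuild the Gaussian kernel for a given sigma the same way create_gaussian_kernel does, run Richardson-Lucy, then re-blur the result and compare it with the original blurred input. A small residual suggests the sigma estimate is plausible.

import cv2
import numpy as np
from scipy.signal import convolve2d
from skimage.restoration import richardson_lucy

# Assumed inputs: the test image from the diff and a hand-picked sigma
blurred = cv2.imread("assets/real_test.jpg", cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
sigma = 2.0

# Kernel built the same way as create_gaussian_kernel(): six sigmas wide, forced odd
ksize = int(sigma * 6) | 1
kernel_1d = cv2.getGaussianKernel(ksize, sigma)
kernel = kernel_1d @ kernel_1d.T
kernel /= kernel.sum()

deconvolved = richardson_lucy(blurred, kernel, num_iter=30)

# Re-blur the estimate and measure the residual against the input
reblurred = convolve2d(deconvolved, kernel, mode="same", boundary="symm")
print("mean absolute residual:", np.abs(reblurred - blurred).mean())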