import numpy as np
from scipy.signal import convolve2d
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from scipy.optimize import curve_fit
import cv2
import matplotlib
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import correlate
from skimage.restoration import richardson_lucy
import os

matplotlib.use('qtagg')

"""
https://setosa.io/ev/image-kernels/
https://openaccess.thecvf.com/content/CVPR2021/papers/Tran_Explore_Image_Deblurring_via_Encoded_Blur_Kernel_Space_CVPR_2021_paper.pdf
"""


def show(img):
    """Display an image and wait for a key press."""
    cv2.imshow('image', img.astype(np.uint8))
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def demo(image_file):
    """Blur a small grayscale image, then recover it by solving the blur operator as a sparse linear system."""
    # Define 2D image and kernel
    image = cv2.imread(image_file, 0)
    image = cv2.resize(image, (200, 200), interpolation=cv2.INTER_LINEAR)

    kernel = np.array([
        [1, 2, 1],
        [2, 4, 2],
        [1, 2, 1]
    ], dtype=np.float32)
    kernel /= kernel.sum()  # Normalize
    print(kernel)

    # Perform 2D convolution (blurring)
    blurred = convolve2d(image, kernel, mode="same", boundary="fill", fillvalue=0)

    h, w = image.shape
    kh, kw = kernel.shape
    pad_h, pad_w = kh // 2, kw // 2

    show(image)
    show(blurred)
    print("Original image:\n", image)
    print("\nBlurred image:\n", blurred)
    print("\nBuilding linear system for deconvolution...")

    # Step 2: Build sparse matrix A so that A @ image.flatten() == blurred.flatten().
    # Note: indexing image[y + ky - pad_h, x + kx - pad_w] builds the correlation
    # operator; it matches convolve2d here only because the kernel is symmetric.
    N = h * w
    A = lil_matrix((N, N), dtype=np.float32)
    b = blurred.flatten()

    def index(y, x):
        return y * w + x

    for y in range(h):
        for x in range(w):
            row_idx = index(y, x)
            for ky in range(kh):
                for kx in range(kw):
                    iy = y + ky - pad_h
                    ix = x + kx - pad_w
                    if 0 <= iy < h and 0 <= ix < w:
                        col_idx = index(iy, ix)
                        A[row_idx, col_idx] += kernel[ky, kx]

    # Step 3: Solve the sparse system A * x = b
    x = spsolve(A.tocsr(), b)
    deblurred = x.reshape((h, w))

    print("\nDeblurred image:\n", np.round(deblurred, 2))
    show(deblurred)


def get_mask(image_file):
    """Load a cached mask if one exists; otherwise let the user paint one and save it next to the image."""
    mask_file = Path(image_file)
    mask_file = mask_file.with_name("mask_" + mask_file.name)
    if mask_file.exists():
        return cv2.imread(str(mask_file), 0)

    drawing = False  # True when mouse is pressed
    brush_size = 5
    image = cv2.imread(image_file)
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    clone = image.copy()

    def draw_mask(event, x, y, flags, param):
        nonlocal drawing
        if event == cv2.EVENT_LBUTTONDOWN:
            drawing = True
        elif event == cv2.EVENT_MOUSEMOVE:
            if drawing:
                cv2.circle(mask, (x, y), brush_size, 255, -1)
                cv2.circle(image, (x, y), brush_size, (0, 0, 255), -1)
        elif event == cv2.EVENT_LBUTTONUP:
            drawing = False

    cv2.namedWindow("Draw Mask")
    cv2.setMouseCallback("Draw Mask", draw_mask)

    while True:
        display = image.copy()
        cv2.putText(display, f'Brush size: {brush_size}', (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Draw Mask", display)
        key = cv2.waitKey(1) & 0xFF
        if key == 13:  # Enter to finish
            break
        elif key == ord('+') or key == ord('='):  # `=` for some keyboard layouts
            brush_size = min(100, brush_size + 1)
        elif key == ord('-') or key == ord('_'):
            brush_size = max(1, brush_size - 1)

    cv2.destroyAllWindows()
    cv2.imwrite(str(mask_file), mask)

    # Apply mask for a quick visual check
    masked_image = cv2.bitwise_and(clone, clone, mask=mask)
    cv2.imshow("Masked Image", masked_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return mask  # fix: without this, callers received None for a freshly drawn mask


def color_edge_detection(image, threshold=30):
    """Detect edges from the combined Sobel gradient magnitude of the Lab channels."""
    img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    L, A, B = cv2.split(img_lab)

    def gradient_magnitude(channel):
        gx = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=3)
        gy = cv2.Sobel(channel, cv2.CV_64F, 0, 1, ksize=3)
        return gx, gy

    gxL, gyL = gradient_magnitude(L)
    gxA, gyA = gradient_magnitude(A)
    gxB, gyB = gradient_magnitude(B)

    gx_total = gxL**2 + gxA**2 + gxB**2
    gy_total = gyL**2 + gyA**2 + gyB**2
    magnitude = np.sqrt(gx_total + gy_total)
    magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)

    edges = (magnitude > threshold).astype(np.uint8) * 255
    return edges, magnitude


# === Step 2: Extract Vertical Profile ===
def extract_vertical_profile(image, center_x, center_y, length=21):
    half_len = length // 2
    y_range = np.clip(np.arange(center_y - half_len, center_y + half_len + 1),
                      0, image.shape[0] - 1)
    profile = image[y_range, center_x].astype(np.float64)
    profile -= profile.min()
    if profile.max() > 0:
        profile /= profile.max()
    return profile, y_range - center_y  # profile, x-axis


# === Step 3: Fit Gaussian ===
def gaussian(x, amp, mu, sigma):
    return amp * np.exp(-(x - mu)**2 / (2 * sigma**2))


def fit_gaussian(profile, x_vals):
    p0 = [1.0, 0.0, 2.0]  # initial guess: amp, mu, sigma
    popt, _ = curve_fit(gaussian, x_vals, profile, p0=p0)
    return popt  # amp, mu, sigma


# === Step 4: Create Gaussian Kernel ===
def create_gaussian_kernel(sigma):
    ksize = int(sigma * 6) | 1  # ensure odd size
    kernel_1d = cv2.getGaussianKernel(ksize, sigma)
    kernel_2d = kernel_1d @ kernel_1d.T
    return kernel_2d


def kernel_detection(blurred, mask, edge_threshold=30, profile_length=21):
    """Estimate a Gaussian blur kernel by fitting a Gaussian to an intensity profile across a masked edge."""
    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
    edges = cv2.bitwise_and(edges, edges, mask=mask)
    # show(edges)

    # Find central edge pixel
    y_idxs, x_idxs = np.where(edges > 0)
    if len(x_idxs) == 0:
        raise RuntimeError("No edges found.")
    idx = len(x_idxs) // 2
    cx, cy = x_idxs[idx], y_idxs[idx]

    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
    popt = fit_gaussian(profile, x_vals)
    amp, mu, sigma = popt
    sigma = abs(sigma)  # sigma enters the model only squared, so curve_fit may return it negative
    print(f"Estimated Gaussian sigma: {sigma:.2f}")

    kernel = create_gaussian_kernel(sigma)
    print(kernel)
    return kernel / kernel.sum()


def deconvolution(image_file, edge_threshold=30, profile_length=21):
    """Estimate the blur kernel from a user-masked edge, then deconvolve with Richardson-Lucy."""
    blurred = cv2.imread(image_file)
    mask = get_mask(image_file)
    kernel = kernel_detection(blurred, mask,
                              edge_threshold=edge_threshold,
                              profile_length=profile_length)

    test_blurred = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
    deconvolved = richardson_lucy(test_blurred, kernel, num_iter=30)

    # Display results
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 3, 1)
    plt.title("Blurred Image")
    plt.imshow(test_blurred, cmap='gray')
    plt.axis('off')

    plt.subplot(1, 3, 2)
    plt.title("Estimated Kernel")
    plt.imshow(kernel, cmap='hot')
    plt.axis('off')

    plt.subplot(1, 3, 3)
    plt.title("Deconvolved Image")
    plt.imshow(deconvolved, cmap='gray')
    plt.axis('off')

    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    img_file = "assets/real_test.jpg"
    # demo("assets/omas.png")
    deconvolution(img_file, edge_threshold=10)
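

# --- Optional consistency check (appended helper; not part of the original pipeline) ---
# A minimal sketch of one way to sanity-check an estimated kernel: re-blur the
# deconvolved estimate with the kernel and measure how well it reproduces the
# observed blurred image. A small residual only means the (kernel, deconvolved)
# pair is consistent with the observation, not that the kernel equals the true
# PSF. The name `reblur_residual` and the 'symm' boundary choice are assumptions
# made here for illustration; deconvolution() would need to expose its
# intermediates (e.g. return deconvolved, kernel, test_blurred) to call it.
def reblur_residual(deconvolved, kernel, observed_blurred):
    """Return the RMS difference between conv(deconvolved, kernel) and the observed blurred image."""
    reblurred = convolve2d(deconvolved, kernel, mode="same", boundary="symm")
    return float(np.sqrt(np.mean((reblurred - observed_blurred) ** 2)))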