Compare commits

...

9 Commits

Author        SHA1          Message                            Date
Hazel Noack   54a2138746    stuff                              2025-05-07 16:48:07 +02:00
Hazel Noack   37a5da37b0    changed deconvolution algorythm    2025-05-07 16:11:39 +02:00
Hazel Noack   edd8096030    feat:                              2025-05-07 15:31:58 +02:00
Hazel Noack   d576f9979c    feat: ui                           2025-05-07 15:20:02 +02:00
Hazel Noack   f6a774a01f    feat: generating kernel            2025-05-07 14:49:08 +02:00
Hazel Noack   6126e675f1    heatmaps                           2025-05-07 13:46:27 +02:00
Hazel Noack   df4b949dd2    feat: further tests                2025-05-07 13:01:10 +02:00
Hazel Noack   6101a8d5e4    feat: deconvolution                2025-05-07 12:38:25 +02:00
Hazel Noack   b4c7512a73    feat: detect color edges           2025-05-07 12:02:54 +02:00
2 changed files with 514 additions and 43 deletions

View File

@@ -2,9 +2,17 @@ import numpy as np
from scipy.signal import convolve2d
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from scipy.optimize import curve_fit
import cv2
import matplotlib
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import correlate
from skimage.restoration import richardson_lucy
import os
matplotlib.use('qtagg')
"""
https://setosa.io/ev/image-kernels/
@@ -130,70 +138,282 @@ def get_mask(image_file):
def color_edge_detection(image, threshold=30):
    img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    L, A, B = cv2.split(img_lab)

    def gradient_magnitude(channel):
        gx = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=3)
        gy = cv2.Sobel(channel, cv2.CV_64F, 0, 1, ksize=3)
        return gx, gy

    gxL, gyL = gradient_magnitude(L)
    gxA, gyA = gradient_magnitude(A)
    gxB, gyB = gradient_magnitude(B)

    gx_total = gxL**2 + gxA**2 + gxB**2
    gy_total = gyL**2 + gyA**2 + gyB**2
    magnitude = np.sqrt(gx_total + gy_total)
    magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)

    edges = (magnitude > threshold).astype(np.uint8) * 255
    return edges, magnitude
# === Step 2: Extract Vertical Profile ===
def extract_vertical_profile(image, center_x, center_y, length=21):
    half_len = length // 2
    y_range = np.clip(np.arange(center_y - half_len, center_y + half_len + 1), 0, image.shape[0] - 1)
    profile = image[y_range, center_x].astype(np.float64)
    profile -= profile.min()
    if profile.max() > 0:
        profile /= profile.max()
    return profile, y_range - center_y  # profile, x-axis

# === Step 3: Fit Gaussian ===
def gaussian(x, amp, mu, sigma):
    return amp * np.exp(-(x - mu)**2 / (2 * sigma**2))

def fit_gaussian(profile, x_vals):
    p0 = [1.0, 0.0, 2.0]  # initial guess: amp, mu, sigma
    popt, _ = curve_fit(gaussian, x_vals, profile, p0=p0)
    return popt  # amp, mu, sigma

# === Step 4: Create Gaussian Kernel ===
def create_gaussian_kernel(sigma):
    ksize = int(sigma * 6) | 1  # ensure odd size
    kernel_1d = cv2.getGaussianKernel(ksize, sigma)
    kernel_2d = kernel_1d @ kernel_1d.T
    return kernel_2d

def kernel_detection(blurred, mask, edge_threshold=30, profile_length=21):
    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
    edges = cv2.bitwise_and(edges, edges, mask=mask)
    # show(edges)

    # Find central edge pixel
    y_idxs, x_idxs = np.where(edges > 0)
    if len(x_idxs) == 0:
        raise RuntimeError("No edges found.")
    idx = len(x_idxs) // 2
    cx, cy = x_idxs[idx], y_idxs[idx]

    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
    popt = fit_gaussian(profile, x_vals)
    amp, mu, sigma = popt
    print(f"Estimated Gaussian sigma: {sigma:.2f}")

    kernel = create_gaussian_kernel(sigma)
    # print(kernel)
    return kernel / kernel.sum()
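
# A minimal, self-contained sanity check of the profile-fit idea: blur a synthetic
# step edge with a known Gaussian and confirm fit_gaussian recovers a similar sigma.
# Hypothetical example values; not wired into the pipeline above. Note that the
# gradient of a blurred step is approximately Gaussian, so the fit runs on the gradient.
def _check_sigma_recovery(true_sigma=3.0):
    step = np.zeros((101, 101), dtype=np.float64)
    step[:, 50:] = 255.0                                   # vertical step edge
    blurred = cv2.GaussianBlur(step, (0, 0), true_sigma)
    grad = np.abs(cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=3))
    profile = grad[50, 35:66]                              # 31-sample window across the edge
    profile = (profile - profile.min()) / (profile.max() - profile.min())
    x_vals = np.arange(-15, 16, dtype=np.float64)
    amp, mu, sigma = fit_gaussian(profile, x_vals)
    print(f"true sigma: {true_sigma}, estimated sigma: {abs(sigma):.2f}")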
def kernel_detection_box(blurred, mask, edge_threshold=30, profile_length=21):
    def box_function(x, amp, center, width):
        """Simple box profile: flat region with sharp transitions."""
        return amp * ((x >= (center - width / 2)) & (x <= (center + width / 2))).astype(float)

    def fit_box(profile, x_vals):
        # Initial guess: full amplitude, centered at 0, small width
        p0 = [1.0, 0.0, 5.0]
        bounds = ([0, -10, 1], [1.5, 10, len(x_vals)])  # reasonable bounds
        popt, _ = curve_fit(box_function, x_vals, profile, p0=p0, bounds=bounds)
        return popt  # amp, center, width

    def create_box_kernel(width):
        """Generate a normalized 2D box kernel."""
        ksize = int(round(width))
        if ksize < 1:
            ksize = 1
        if ksize % 2 == 0:
            ksize += 1  # ensure odd size
        kernel = np.ones((ksize, ksize), dtype=np.float32)
        return kernel / kernel.sum()

    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
    edges = cv2.bitwise_and(edges, edges, mask=mask)

    y_idxs, x_idxs = np.where(edges > 0)
    if len(x_idxs) == 0:
        raise RuntimeError("No edges found.")
    idx = len(x_idxs) // 2
    cx, cy = x_idxs[idx], y_idxs[idx]

    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
    popt = fit_box(profile, x_vals)
    amp, mu, width = popt
    print(f"Estimated box width: {width:.2f} pixels")

    kernel = create_box_kernel(width)
    return kernel
def deconvolution(image_file, edge_threshold=30, profile_length=21):
    image = cv2.imread(image_file)
    mask = get_mask(image_file)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0

    kernel = kernel_detection_box(image, mask, edge_threshold=edge_threshold, profile_length=profile_length)

    # Apply Richardson-Lucy to each channel
    num_iter = 30
    deblurred_channels = []
    for i in range(3):  # R, G, B
        channel = image_rgb[..., i]
        deconv = richardson_lucy(channel, kernel, num_iter=num_iter)
        deblurred_channels.append(deconv)

    # Stack back into an RGB image
    deblurred_rgb = np.stack(deblurred_channels, axis=2)
    deblurred_rgb = np.clip(deblurred_rgb, 0, 1)

    # Show result
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.imshow(image_rgb)
    plt.title("Blurred Image")
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(deblurred_rgb)
    plt.title("Deconvolved Image")
    plt.axis('off')
    plt.show()
def sharpness_heatmap(image, block_size=32, threshold=30):
    """
    Compute a sharpness heatmap using color-aware Laplacian variance over blocks
    and derive a binary mask highlighting blurred areas.

    Args:
        image: BGR or RGB image (NumPy array).
        block_size: Size of the square block used to compute sharpness.
        threshold: Sharpness threshold below which a block counts as blurred.

    Returns:
        mask: Binary mask highlighting blurred areas (0 = sharp, 255 = blurred).
    """
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("Input must be a color image (3 channels)")

    h, w, _ = image.shape
    heatmap = np.zeros((h // block_size, w // block_size))

    # Calculate sharpness for each block
    for y in range(0, h - block_size + 1, block_size):
        for x in range(0, w - block_size + 1, block_size):
            block = image[y:y + block_size, x:x + block_size, :]
            sharpness_vals = []
            for c in range(3):  # For R, G, B channels
                channel = block[..., c]
                lap_var = cv2.Laplacian(channel, cv2.CV_64F).var()
                sharpness_vals.append(lap_var)
            # Use average sharpness across color channels
            heatmap[y // block_size, x // block_size] = np.mean(sharpness_vals)

    print(heatmap)

    # Threshold the heatmap to create a binary mask (blurred regions)
    mask = heatmap < threshold
    mask = (mask * 255).astype(np.uint8)  # Convert to binary mask (0, 255)

    # Display heatmap
    plt.subplot(1, 2, 1)
    plt.imshow(heatmap, cmap='hot', interpolation='nearest')
    plt.title("Sharpness Heatmap")
    plt.colorbar(label='Sharpness')

    # Display mask
    plt.subplot(1, 2, 2)
    plt.imshow(mask, cmap='gray', interpolation='nearest')
    plt.title("Mask for Blurred Areas")
    plt.colorbar(label='Blurred Mask')

    plt.tight_layout()
    plt.show()

    return mask
def graininess_heatmap(image, block_size=32, threshold=100):
    """
    Compute a graininess heatmap using local variance (texture/noise) over blocks.
    No smoothing or blurring is applied.

    Args:
        image: BGR or RGB image (NumPy array).
        block_size: Size of the square block used to compute variance (graininess).
        threshold: Variance threshold below which a block counts as smooth.

    Returns:
        graininess_map: Heatmap highlighting the graininess (texture/noise) in the image.
    """
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("Input must be a color image (3 channels)")

    h, w, _ = image.shape
    graininess_map = np.zeros((h // block_size, w // block_size))

    # Calculate variance for each block
    for y in range(0, h - block_size + 1, block_size):
        for x in range(0, w - block_size + 1, block_size):
            block = image[y:y + block_size, x:x + block_size, :]
            variance_vals = []
            for c in range(3):  # For R, G, B channels
                channel = block[..., c]
                variance = np.var(channel)
                variance_vals.append(variance)
            # Use average variance across color channels for graininess
            graininess_map[y // block_size, x // block_size] = np.mean(variance_vals)

    mask = graininess_map < threshold
    mask = (mask * 255).astype(np.uint8)  # Convert to binary mask (0, 255)

    # Display graininess map
    plt.subplot(1, 2, 1)
    plt.imshow(graininess_map, cmap='hot', interpolation='nearest')
    plt.title("Graininess Heatmap")
    plt.colorbar(label='Graininess')

    # Display mask
    plt.subplot(1, 2, 2)
    plt.imshow(mask, cmap='gray', interpolation='nearest')
    plt.title("Mask for Blurred Areas")
    plt.colorbar(label='Blurred Mask')

    plt.tight_layout()
    plt.show()

    return graininess_map
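
# Illustrative comparison of the two block metrics on a hypothetical synthetic image:
# Laplacian variance (sharpness) collapses where the image is blurred, while plain
# variance (graininess) mostly tracks texture/noise strength. Example values only;
# the thresholds here are guesses and may need tuning for real images.
def _compare_block_metrics():
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, (256, 256, 3), dtype=np.uint8)   # noisy texture
    img[:, 128:] = cv2.GaussianBlur(np.ascontiguousarray(img[:, 128:]), (0, 0), 5)  # blur right half
    blur_mask = sharpness_heatmap(img, block_size=32, threshold=30)
    grain_map = graininess_heatmap(img, block_size=32, threshold=100)
    print("blocks flagged as blurred:", int((blur_mask > 0).sum()))
    print("mean graininess left/right:", grain_map[:, :4].mean(), grain_map[:, 4:].mean())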
if __name__ == "__main__":
    img_file = "assets/real_test.jpg"

    # demo("assets/omas.png")
    deconvolution(img_file)
    # deconvolution(img_file, edge_threshold=5)

    image = cv2.imread(img_file)
    test = graininess_heatmap(image)
    heatmap = sharpness_heatmap(image)

deblur/symetric_kernel.py (new file, 251 lines added)
View File

@@ -0,0 +1,251 @@
import sys
import numpy as np
import cv2
from PyQt5.QtWidgets import (
QApplication, QWidget, QLabel, QSlider, QVBoxLayout,
QHBoxLayout, QGridLayout, QPushButton, QFileDialog
)
from PyQt5.QtCore import Qt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import scipy.signal
from scipy.signal import convolve2d
import os
os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH", None)
def generate_box_kernel(size):
    return np.ones((size, size), dtype=np.float32) / (size * size)

def generate_disk_kernel(radius):
    size = 2 * radius + 1
    y, x = np.ogrid[-radius:radius+1, -radius:radius+1]
    mask = x**2 + y**2 <= radius**2
    kernel = np.zeros((size, size), dtype=np.float32)
    kernel[mask] = 1
    kernel /= kernel.sum()
    return kernel
def generate_kernel(radius, sigma=None):
    """
    Generate a 2D Gaussian kernel with a given radius.

    Parameters:
    - radius: int, the radius of the kernel (size will be 2*radius + 1)
    - sigma: float (optional), standard deviation of the Gaussian. If None, sigma = radius / 3

    Returns:
    - kernel: 2D numpy array of shape (2*radius+1, 2*radius+1)
    """
    size = 2 * radius + 1
    if sigma is None:
        sigma = radius / 3.0  # Common default choice
    print(f"radius: {radius}, sigma: {sigma}")

    # Create a grid of (x, y) coordinates
    ax = np.arange(-radius, radius + 1)
    xx, yy = np.meshgrid(ax, ax)

    # Apply the 2D Gaussian formula
    kernel = np.exp(-(xx**2 + yy**2) / (2 * sigma**2))
    kernel /= 2 * np.pi * sigma**2  # Normalize based on Gaussian PDF
    kernel /= kernel.sum()  # Normalize to sum to 1
    return kernel
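
# Quick illustrative sanity check (example values only): the kernel should sum to 1,
# peak at the centre, and closely match the separable kernel OpenCV builds for the same sigma.
def _check_gaussian_kernel(radius=5, sigma=1.5):
    k = generate_kernel(radius, sigma)
    assert abs(k.sum() - 1.0) < 1e-6        # normalized
    assert k[radius, radius] == k.max()     # peak at centre
    g1d = cv2.getGaussianKernel(2 * radius + 1, sigma)
    reference = g1d @ g1d.T
    print("max abs difference vs cv2:", np.abs(k - reference).max())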
def wiener_deconvolution(blurred, kernel, K=0.1):
    """
    Perform Wiener deconvolution on a 2D image.

    Parameters:
    - blurred: 2D numpy array (blurred image)
    - kernel: 2D numpy array (PSF / blur kernel)
    - K: float, estimated noise-to-signal ratio

    Returns:
    - deconvolved: 2D numpy array (deblurred image)
    """
    # Pad kernel to image size
    kernel /= np.sum(kernel)
    pad = [(0, blurred.shape[0] - kernel.shape[0]),
           (0, blurred.shape[1] - kernel.shape[1])]
    kernel_padded = np.pad(kernel, pad, 'constant')

    # FFT of image and kernel
    H = np.fft.fft2(kernel_padded)
    G = np.fft.fft2(blurred)

    # Avoid division by zero
    H_conj = np.conj(H)
    denominator = H_conj * H + K
    F_hat = H_conj / denominator * G

    # Inverse FFT to get result
    deconvolved = np.fft.ifft2(F_hat)
    deconvolved = np.abs(deconvolved)
    deconvolved = np.clip(deconvolved, 0, 255)
    return deconvolved.astype(np.uint8)
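
# Illustrative round trip (example values, not used by the GUI below): blur a synthetic
# pattern with a known PSF, then deconvolve it. With low noise and a small K the restored
# square should have much sharper edges than the blurred one; note that the top-left
# kernel padding above introduces a shift of roughly the kernel radius.
def _wiener_round_trip():
    img = np.zeros((128, 128), dtype=np.float32)
    img[32:96, 32:96] = 255.0                          # bright square on black
    psf = generate_kernel(radius=4, sigma=2.0)
    blurred = scipy.signal.fftconvolve(img, psf, mode='same')
    restored = wiener_deconvolution(blurred, psf.copy(), K=0.01)
    print("mean abs error, blurred :", np.mean(np.abs(blurred - img)))
    print("mean abs error, restored:", np.mean(np.abs(restored.astype(np.float32) - img)))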
def richardson_lucy(image, psf, iterations=30, clip=True):
    image = image.astype(np.float32) + 1e-6
    psf = psf / psf.sum()
    estimate = np.full(image.shape, 0.5, dtype=np.float32)
    psf_mirror = psf[::-1, ::-1]

    for _ in range(iterations):
        conv = convolve2d(estimate, psf, mode='same', boundary='wrap')
        relative_blur = image / (conv + 1e-6)
        estimate *= convolve2d(relative_blur, psf_mirror, mode='same', boundary='wrap')

    if clip:
        estimate = np.clip(estimate, 0, 255)
    return estimate
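
# Small illustrative check of the Richardson-Lucy loop (hypothetical example values):
# for a noiseless synthetic blur, the reconstruction error against the sharp original
# should shrink as the iteration count grows.
def _rl_iteration_check():
    img = np.zeros((64, 64), dtype=np.float32)
    img[16:48, 16:48] = 200.0
    psf = generate_kernel(radius=3, sigma=1.5)
    blurred = convolve2d(img, psf, mode='same', boundary='wrap')
    for n in (5, 15, 30):
        estimate = richardson_lucy(blurred, psf, iterations=n)
        print(f"iterations={n:2d}  mean abs error={np.mean(np.abs(estimate - img)):.2f}")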
class KernelVisualizer(QWidget):
    def __init__(self, image_path=None):
        super().__init__()
        self.setWindowTitle("Gaussian Kernel Visualizer")

        self.image = None
        self.deconvolved = None

        self.load_button = QPushButton("Load Image")
        self.load_button.clicked.connect(self.load_image)

        self.radius_slider = QSlider(Qt.Horizontal)
        self.radius_slider.setRange(1, 100)
        self.radius_slider.setValue(5)

        self.sigma_slider = QSlider(Qt.Horizontal)
        self.sigma_slider.setRange(1, 300)
        self.sigma_slider.setValue(15)

        self.radius_slider.valueChanged.connect(self.update_visualization)
        self.sigma_slider.valueChanged.connect(self.update_visualization)

        self.kernel_fig = Figure(figsize=(3, 3))
        self.kernel_canvas = FigureCanvas(self.kernel_fig)

        self.image_fig = Figure(figsize=(6, 3))
        self.image_canvas = FigureCanvas(self.image_fig)

        self.iter_slider = QSlider(Qt.Horizontal)
        self.iter_slider.setRange(1, 50)
        self.iter_slider.setValue(10)

        self.apply_button = QPushButton("Do Deconvolution.")
        self.apply_button.clicked.connect(self.apply_kernel)

        layout = QVBoxLayout()
        layout.addWidget(self.load_button)

        sliders_layout = QGridLayout()
        sliders_layout.addWidget(QLabel("Radius:"), 0, 0)
        sliders_layout.addWidget(self.radius_slider, 0, 1)
        sliders_layout.addWidget(QLabel("Sigma:"), 1, 0)
        sliders_layout.addWidget(self.sigma_slider, 1, 1)
        sliders_layout.addWidget(QLabel("Iterations:"), 2, 0)
        sliders_layout.addWidget(self.iter_slider, 2, 1)
        sliders_layout.addWidget(self.apply_button, 3, 1)
        layout.addLayout(sliders_layout)

        layout.addWidget(QLabel("Kernel Visualization:"))
        layout.addWidget(self.kernel_canvas)
        layout.addWidget(QLabel("Original and Deconvolved Image:"))
        layout.addWidget(self.image_canvas)
        self.setLayout(layout)

        if image_path:
            self.load_image(image_path)
        else:
            self.update_visualization()
    def load_image(self, image_path=None):
        if not image_path:
            fname, _ = QFileDialog.getOpenFileName(self, "Open Image", "", "Images (*.png *.jpg *.bmp *.jpeg)")
            image_path = fname
        if image_path:
            img = cv2.imread(image_path)
            if img is not None:
                self.image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                self.image = cv2.resize(self.image, (200, 200))
                self.update_visualization()
    def apply_kernel(self):
        radius = self.radius_slider.value()
        sigma = self.sigma_slider.value() / 10.0
        iterations = self.iter_slider.value()
        kernel = generate_kernel(radius, sigma)
        self.deconvolved = richardson_lucy(self.image, kernel, iterations=iterations)
        self.update_visualization()
    def update_visualization(self):
        radius = self.radius_slider.value()
        sigma = self.sigma_slider.value() / 10.0 * (radius / 3)
        kernel = generate_kernel(radius, sigma)
        iterations = self.iter_slider.value()

        # Kernel Visualization
        self.kernel_fig.clear()
        ax = self.kernel_fig.add_subplot(111)
        cax = ax.imshow(kernel, cmap='hot')
        self.kernel_fig.colorbar(cax, ax=ax)
        ax.set_title(f"Kernel (r={radius}, σ={sigma:.2f})")
        self.kernel_canvas.draw()

        if self.image is not None:
            self.image_fig.clear()
            ax1 = self.image_fig.add_subplot(131)
            ax1.imshow(self.image, cmap='gray')
            ax1.set_title("Original")
            ax1.axis('off')

            if self.deconvolved is not None:
                ax3 = self.image_fig.add_subplot(133)
                ax3.imshow(self.deconvolved, cmap='gray')
                ax3.set_title(f"Deconvolved (RL, {iterations} iter)")
                ax3.axis('off')
            self.image_canvas.draw()
        else:
            self.image_fig.clear()
            ax = self.image_fig.add_subplot(111)
            ax.text(0.5, 0.5, "No image loaded", fontsize=14, ha='center', va='center')
            ax.axis('off')
            self.image_canvas.draw()
if __name__ == "__main__":
    image_path = None
    if len(sys.argv) > 1:
        image_path = sys.argv[1]  # Get image path from command-line argument
        print(image_path)

    app = QApplication(sys.argv)
    viewer = KernelVisualizer(image_path=image_path)
    viewer.show()
    sys.exit(app.exec_())