# generated from Hazel/python-project
import numpy as np
from scipy.signal import convolve2d
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from scipy.optimize import curve_fit
import cv2
import matplotlib
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.ndimage import correlate
from skimage.restoration import richardson_lucy
import os


matplotlib.use('qtagg')


"""
References:
https://setosa.io/ev/image-kernels/
https://openaccess.thecvf.com/content/CVPR2021/papers/Tran_Explore_Image_Deblurring_via_Encoded_Blur_Kernel_Space_CVPR_2021_paper.pdf
"""


def show(img):
    cv2.imshow('image', img.astype(np.uint8))
    cv2.waitKey(0)
    cv2.destroyAllWindows()

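# demo(): blur a grayscale image with a fixed 3x3 kernel, then recover it by
# solving the sparse linear system defined by the zero-padded convolution.
# Exact inversion like this only works here because the kernel is known and
# no noise has been added to the blurred image.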
def demo(image_file):
    # Define 2D image and kernel
    image = cv2.imread(image_file, 0)
    image = cv2.resize(image, (200, 200), interpolation=cv2.INTER_LINEAR)

    kernel = np.array([
        [1, 2, 1],
        [2, 4, 2],
        [1, 2, 1]
    ], dtype=np.float32)
    kernel /= kernel.sum()  # Normalize

    print(kernel)

    # Perform 2D convolution (blurring)
    blurred = convolve2d(image, kernel, mode="same", boundary="fill", fillvalue=0)

    h, w = image.shape
    kh, kw = kernel.shape
    pad_h, pad_w = kh // 2, kw // 2

    show(image)
    show(blurred)

    print("Original image:\n", image)
    print("\nBlurred image:\n", blurred)

    print("\nBuilding linear system for deconvolution...")

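    # Each row of A stores the kernel weights of the input pixels that
    # contribute to one blurred output pixel; taps that fall outside the
    # image are dropped, which matches boundary="fill", fillvalue=0 above.
    # Note: the loop builds a correlation matrix, which equals the
    # convolution matrix here only because the kernel is symmetric.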
    # Step 2: Build sparse matrix A
    N = h * w
    A = lil_matrix((N, N), dtype=np.float32)
    b = blurred.flatten()

    def index(y, x):
        return y * w + x

    for y in range(h):
        for x in range(w):
            row_idx = index(y, x)
            for ky in range(kh):
                for kx in range(kw):
                    iy = y + ky - pad_h
                    ix = x + kx - pad_w
                    if 0 <= iy < h and 0 <= ix < w:
                        col_idx = index(iy, ix)
                        A[row_idx, col_idx] += kernel[ky, kx]

    # Step 3: Solve the sparse system A * x = b
    x = spsolve(A.tocsr(), b)
    deblurred = x.reshape((h, w))

    print("\nDeblurred image:\n", np.round(deblurred, 2))

    show(deblurred)

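# get_mask(): let the user paint a region of interest with the mouse and cache
# it next to the image as "mask_<name>"; later runs reuse the cached mask
# instead of reopening the drawing window.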
def get_mask(image_file):
    mask_file = Path(image_file)
    mask_file = mask_file.with_name("mask_" + mask_file.name)

    if mask_file.exists():
        return cv2.imread(str(mask_file), 0)

    drawing = False  # True when mouse is pressed
    brush_size = 5
    image = cv2.imread(image_file)
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    clone = image.copy()

    def draw_mask(event, x, y, flags, param):
        nonlocal drawing, mask, brush_size

        if event == cv2.EVENT_LBUTTONDOWN:
            drawing = True
        elif event == cv2.EVENT_MOUSEMOVE:
            if drawing:
                cv2.circle(mask, (x, y), brush_size, 255, -1)
                cv2.circle(image, (x, y), brush_size, (0, 0, 255), -1)
        elif event == cv2.EVENT_LBUTTONUP:
            drawing = False

    cv2.namedWindow("Draw Mask")
    cv2.setMouseCallback("Draw Mask", draw_mask)

    while True:
        display = image.copy()
        cv2.putText(display, f'Brush size: {brush_size}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Draw Mask", display)
        key = cv2.waitKey(1) & 0xFF
        if key == 13:  # Enter to finish
            break
        elif key == ord('+') or key == ord('='):  # `=` for some keyboard layouts
            brush_size = min(100, brush_size + 1)
        elif key == ord('-') or key == ord('_'):
            brush_size = max(1, brush_size - 1)

    cv2.destroyAllWindows()

    cv2.imwrite(str(mask_file), mask)

    # Apply mask
    masked_image = cv2.bitwise_and(clone, clone, mask=mask)

    cv2.imshow("Masked Image", masked_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Return the freshly drawn mask, matching the cached-file branch above.
    return mask

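# color_edge_detection(): Sobel gradients are computed per channel in Lab space
# and combined into a single magnitude, so edges that only appear as a color
# change (not a brightness change) are still detected.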
def color_edge_detection(image, threshold=30):
    img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    L, A, B = cv2.split(img_lab)

    def gradient_magnitude(channel):
        gx = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=3)
        gy = cv2.Sobel(channel, cv2.CV_64F, 0, 1, ksize=3)
        return gx, gy

    gxL, gyL = gradient_magnitude(L)
    gxA, gyA = gradient_magnitude(A)
    gxB, gyB = gradient_magnitude(B)

    gx_total = gxL**2 + gxA**2 + gxB**2
    gy_total = gyL**2 + gyA**2 + gyB**2
    magnitude = np.sqrt(gx_total + gy_total)

    magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
    edges = (magnitude > threshold).astype(np.uint8) * 255
    return edges, magnitude

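# Steps 2-4 below estimate the blur from a single 1D slice of the image:
# take a vertical intensity profile at an edge pixel, normalize it to [0, 1],
# fit a parametric shape to it, and expand the fitted width into a 2D kernel.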
# === Step 2: Extract Vertical Profile ===
def extract_vertical_profile(image, center_x, center_y, length=21):
    half_len = length // 2
    y_range = np.clip(np.arange(center_y - half_len, center_y + half_len + 1), 0, image.shape[0] - 1)
    profile = image[y_range, center_x].astype(np.float64)
    profile -= profile.min()
    if profile.max() > 0:
        profile /= profile.max()
    return profile, y_range - center_y  # profile, x-axis


# === Step 3: Fit Gaussian ===
def gaussian(x, amp, mu, sigma):
    return amp * np.exp(-(x - mu)**2 / (2 * sigma**2))


def fit_gaussian(profile, x_vals):
    p0 = [1.0, 0.0, 2.0]  # initial guess: amp, mu, sigma
    popt, _ = curve_fit(gaussian, x_vals, profile, p0=p0)
    return popt  # amp, mu, sigma


# === Step 4: Create Gaussian Kernel ===
def create_gaussian_kernel(sigma):
    ksize = int(sigma * 6) | 1  # ensure odd size
    kernel_1d = cv2.getGaussianKernel(ksize, sigma)
    kernel_2d = kernel_1d @ kernel_1d.T
    return kernel_2d

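# kernel_detection(): estimate a Gaussian PSF. An edge pixel inside the user
# mask is picked, a Gaussian is fitted to the vertical intensity profile at
# that pixel, and the fitted sigma is expanded into a normalized 2D Gaussian
# kernel via create_gaussian_kernel().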
def kernel_detection(blurred, mask, edge_threshold=30, profile_length=21):
    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
    edges = cv2.bitwise_and(edges, edges, mask=mask)
    # show(edges)

    # Find central edge pixel
    y_idxs, x_idxs = np.where(edges > 0)
    if len(x_idxs) == 0:
        raise RuntimeError("No edges found.")
    idx = len(x_idxs) // 2
    cx, cy = x_idxs[idx], y_idxs[idx]

    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
    popt = fit_gaussian(profile, x_vals)
    amp, mu, sigma = popt

    print(f"Estimated Gaussian sigma: {sigma:.2f}")

    kernel = create_gaussian_kernel(sigma)

    # print(kernel)

    return kernel / kernel.sum()

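# kernel_detection_box(): same edge-profile idea as kernel_detection(), but the
# profile is fitted with a flat box function instead of a Gaussian, and the
# fitted width becomes the side length of a normalized mean (box) kernel.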
def kernel_detection_box(blurred, mask, edge_threshold=30, profile_length=21):
    def box_function(x, amp, center, width):
        """Simple box profile: flat region with sharp transitions."""
        return amp * ((x >= (center - width / 2)) & (x <= (center + width / 2))).astype(float)

    def fit_box(profile, x_vals):
        # Initial guess: full amplitude, centered at 0, small width
        p0 = [1.0, 0.0, 5.0]
        bounds = ([0, -10, 1], [1.5, 10, len(x_vals)])  # reasonable bounds
        popt, _ = curve_fit(box_function, x_vals, profile, p0=p0, bounds=bounds)
        return popt  # amp, center, width

    def create_box_kernel(width):
        """Generate a normalized 2D box kernel."""
        ksize = int(round(width))
        if ksize < 1:
            ksize = 1
        if ksize % 2 == 0:
            ksize += 1  # ensure odd size
        kernel = np.ones((ksize, ksize), dtype=np.float32)
        return kernel / kernel.sum()

    edges, gradient_mag = color_edge_detection(blurred, threshold=edge_threshold)
    edges = cv2.bitwise_and(edges, edges, mask=mask)

    y_idxs, x_idxs = np.where(edges > 0)
    if len(x_idxs) == 0:
        raise RuntimeError("No edges found.")
    idx = len(x_idxs) // 2
    cx, cy = x_idxs[idx], y_idxs[idx]

    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    profile, x_vals = extract_vertical_profile(gray, cx, cy, length=profile_length)
    popt = fit_box(profile, x_vals)
    amp, mu, width = popt

    print(f"Estimated box width: {width:.2f} pixels")

    kernel = create_box_kernel(width)
    return kernel

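# deconvolution(): end-to-end pipeline: load the image, obtain a user mask,
# estimate a box blur kernel from edges inside the mask, run Richardson-Lucy
# deconvolution on each RGB channel, and show the blurred and deconvolved
# images side by side.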
def deconvolution(image_file, edge_threshold=30, profile_length=21):
    image = cv2.imread(image_file)
    mask = get_mask(image_file)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0

    kernel = kernel_detection_box(image, mask, edge_threshold=edge_threshold, profile_length=profile_length)

    # Apply Richardson-Lucy to each channel
    num_iter = 30
    deblurred_channels = []
    for i in range(3):  # R, G, B
        channel = image_rgb[..., i]
        deconv = richardson_lucy(channel, kernel, num_iter=num_iter)
        deblurred_channels.append(deconv)

    # Stack back into an RGB image
    deblurred_rgb = np.stack(deblurred_channels, axis=2)
    deblurred_rgb = np.clip(deblurred_rgb, 0, 1)

    # Show result
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.imshow(image_rgb)
    plt.title("Blurred Image")
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(deblurred_rgb)
    plt.title("Deconvolved Image")
    plt.axis('off')
    plt.show()

def sharpness_heatmap(image, block_size=32, threshold=30):
    """
    Compute a sharpness heatmap using color-aware Laplacian variance over blocks
    and derive a binary mask highlighting blurred areas.

    Args:
        image: BGR or RGB image (NumPy array).
        block_size: Size of the square block to compute sharpness.
        threshold: Variance-of-Laplacian value below which a block is treated as blurred.

    Returns:
        mask: Binary mask highlighting blurred areas (0 = sharp, 255 = blurred).
    """
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("Input must be a color image (3 channels)")

    h, w, _ = image.shape
    heatmap = np.zeros((h // block_size, w // block_size))

    # Calculate sharpness for each block
    for y in range(0, h - block_size + 1, block_size):
        for x in range(0, w - block_size + 1, block_size):
            block = image[y:y + block_size, x:x + block_size, :]
            sharpness_vals = []

            for c in range(3):  # For R, G, B channels
                channel = block[..., c]
                lap_var = cv2.Laplacian(channel, cv2.CV_64F).var()
                sharpness_vals.append(lap_var)

            # Use average sharpness across color channels
            heatmap[y // block_size, x // block_size] = np.mean(sharpness_vals)

    print(heatmap)

    # Threshold the heatmap to create a binary mask (blurred regions)
    mask = heatmap < threshold
    mask = (mask * 255).astype(np.uint8)  # Convert to binary mask (0, 255)

    # Display heatmap
    plt.subplot(1, 2, 1)
    plt.imshow(heatmap, cmap='hot', interpolation='nearest')
    plt.title("Sharpness Heatmap")
    plt.colorbar(label='Sharpness')

    # Display mask
    plt.subplot(1, 2, 2)
    plt.imshow(mask, cmap='gray', interpolation='nearest')
    plt.title("Mask for Blurred Areas")
    plt.colorbar(label='Blurred Mask')

    plt.tight_layout()
    plt.show()

    return mask

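# graininess_heatmap(): unlike sharpness_heatmap(), which uses the variance of
# the Laplacian, this uses the raw intensity variance of each block, so it
# responds to texture and noise rather than to edge strength.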
def graininess_heatmap(image, block_size=32, threshold=100):
    """
    Compute a graininess heatmap using local variance (texture/noise) over blocks.
    No smoothing or blurring is applied.

    Args:
        image: BGR or RGB image (NumPy array).
        block_size: Size of the square block to compute variance (graininess).
        threshold: Variance value below which a block is marked in the displayed mask.

    Returns:
        graininess_map: Heatmap highlighting the graininess (texture/noise) in the image.
    """
    if image.ndim != 3 or image.shape[2] != 3:
        raise ValueError("Input must be a color image (3 channels)")

    h, w, _ = image.shape
    graininess_map = np.zeros((h // block_size, w // block_size))

    # Calculate variance for each block
    for y in range(0, h - block_size + 1, block_size):
        for x in range(0, w - block_size + 1, block_size):
            block = image[y:y + block_size, x:x + block_size, :]
            variance_vals = []

            for c in range(3):  # For R, G, B channels
                channel = block[..., c]
                variance = np.var(channel)
                variance_vals.append(variance)

            # Use average variance across color channels for graininess
            graininess_map[y // block_size, x // block_size] = np.mean(variance_vals)

    mask = graininess_map < threshold
    mask = (mask * 255).astype(np.uint8)  # Convert to binary mask (0, 255)

    # Display graininess map
    plt.subplot(1, 2, 1)
    plt.imshow(graininess_map, cmap='hot', interpolation='nearest')
    plt.title("Graininess Heatmap")
    plt.colorbar(label='Graininess')

    # Display mask
    plt.subplot(1, 2, 2)
    plt.imshow(mask, cmap='gray', interpolation='nearest')
    plt.title("Mask for Blurred Areas")
    plt.colorbar(label='Blurred Mask')

    plt.tight_layout()
    plt.show()

    return graininess_map

if __name__ == "__main__":
    img_file = "assets/real_test.jpg"

    # demo("assets/omas.png")
    # deconvolution(img_file, edge_threshold=5)

    image = cv2.imread(img_file)
    test = graininess_heatmap(image)
    heatmap = sharpness_heatmap(image)