Deblurring with OpenCV: Wiener filter reloaded

The problem of deblurring has really caught my fancy, though I have only had partial success with it. Deblurring is an ill-posed problem with two unknowns, namely the original image and the blurring function. There are many solutions to this problem, all involving a fair amount of mathematics.

Every now and then I will sneak into some white paper on this topic, only to beat a hasty retreat gulping for air as I get drowned in the abstract math. Anyway, my search led me to the presentation “Deblurring in CT (Computed Tomography)” by Kriti Sen Sharma, which seemed to make a lot of sense. This presentation shows how a PSF (Point Spread Function, or blur kernel) blurs an image: the blurred image is the original image convolved with the PSF, plus noise.
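To make this concrete, here is a minimal sketch (my own, not from the presentation) of the blur model: the blurred output is produced by sliding the PSF over the image. The function name, array layout and sizes are illustrative placeholders; since a Gaussian PSF is symmetric, the kernel is not flipped.

/* Minimal sketch of the blur model: out = img convolved with the PSF.
   img, psf and their sizes are hypothetical placeholders. */
void blur_with_psf(const double *img, int h, int w,
                   const double *psf, int kh, int kw,
                   double *out)
{
    int x, y, i, j;
    int half_h = kh / 2, half_w = kw / 2;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            double acc = 0.0;
            for (i = 0; i < kh; i++) {
                for (j = 0; j < kw; j++) {
                    int yy = y + i - half_h;
                    int xx = x + j - half_w;
                    if (yy >= 0 && yy < h && xx >= 0 && xx < w)
                        acc += img[yy * w + xx] * psf[i * kw + j];
                }
            }
            out[y * w + x] = acc;   /* additive noise n(x,y) would be added here */
        }
    }
}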

Further, it can be shown that the Wiener filter can be represented as

W(u) = P*(u) / (|P(u)|^2 + K)

where P(u) is the Fourier transform of the PSF (the blur kernel), P*(u) is its complex conjugate, and K is a constant that stands in for the noise-to-signal ratio (roughly 1/SNR).
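In code the filter boils down to one complex multiply-and-divide per frequency bin. The sketch below (mine, not part of the original listing) shows the Wiener filter value for a single bin, given the complex value P(u) = re + j*im of the blur kernel's DFT and the constant K:

/* Wiener filter for one frequency bin: W(u) = P*(u) / (|P(u)|^2 + K) */
void wiener_bin(double re, double im, double K, double *w_re, double *w_im)
{
    double denom = re * re + im * im + K;   /* |P(u)|^2 + K           */
    *w_re =  re / denom;                    /* conj(P(u)) = re - j*im */
    *w_im = -im / denom;
}

The full listing below does the same thing with whole spectra: cvMulSpectrums() with CV_DXT_MUL_CONJ multiplies the image spectrum by the conjugate of the kernel spectrum, and cvDiv() divides the real and imaginary parts by |P(u)|^2 + kappa.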

For the PSF I took the Gaussian distribution given in the Wikipedia article on Gaussian blur,

G(x,y) = (1/(2*pi*SIGMA^2)) * e^(-(x^2 + y^2)/(2*SIGMA^2))

I tried various values of SIGMA. The best value was SIGMA = 0.014089642. With a SIGMA this small the Gaussian collapses to an impulse, and I get just one pixel in the kernel with a value > 0. This is shown below.
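The following throwaway check (mine, not part of the post) shows why such a small SIGMA leaves only the centre pixel non-zero: one pixel away from the centre the exponent is about -2500, which underflows to zero in double precision. (The listing below normalises with sqrt(2*pi*SIGMA^2) rather than 2*pi*SIGMA^2; for a SIGMA this small it makes no difference to which pixels survive.)

#include <stdio.h>
#include <math.h>

int main(void)
{
    double sigma = 0.014089642;
    double pi    = 3.14159265358979;
    double norm  = sqrt(2.0 * pi * sigma * sigma);   /* normalisation used in the listing */
    double centre    = exp(-0.0 / (2.0 * sigma * sigma)) / norm;
    double neighbour = exp(-1.0 / (2.0 * sigma * sigma)) / norm;
    printf("centre = %g, neighbour = %g\n", centre, neighbour);  /* roughly 28.3 and 0 */
    return 0;
}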

I also tried various values of K. The larger the K, the clearer the DFT INVERSE looked. This was the best I got.

I am still nowhere near removing the blur but I will get there sometime in the future.
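A possible reason, sketched below (my own back-of-the-envelope check, not from the post): with a single non-zero kernel pixel of about 28.3, the magnitude of the kernel's DFT is the same at every frequency, so the Wiener filter reduces to one constant gain and can only rescale the image, not sharpen it.

#include <stdio.h>

int main(void)
{
    double p = 28.3;      /* |P(u)| at every frequency for a one-pixel kernel */
    double K = 10000.0;   /* kappa in the listing below                       */
    double gain = p / (p * p + K);
    printf("Wiener gain at every frequency = %g\n", gain);   /* about 0.0026 */
    return 0;
}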
Any and all comments welcome.

Note: You can clone the code from GitHub: Wiener filter reloaded

The code below was built and executed with Visual Studio 2010 Express.

//======================================================================================================================
// Wiener filter implementation using Gaussian blur kernel
// Developed by: Tinniam V Ganesh
// Date: 11 May 2012
//======================================================================================================================
#include "stdafx.h"
#include "math.h"
#include <cxcore.h>
#include <cv.h>
#include <highgui.h>

#define kappa 10000
int main(int argc, char ** argv)
{
int height,width,step,channels,depth;
uchar* data1;
CvMat *dft_A;
CvMat *dft_B;
CvMat *dft_C;
IplImage* im;
IplImage* im1;
IplImage* image_ReB;
IplImage* image_ImB;

IplImage* image_ReC;
IplImage* image_ImC;
IplImage* complex_ImC;
CvScalar val;
IplImage* k_image_hdr;
int i,j,k;

FILE *fp;
fp = fopen("test.txt","w+");
int dft_M,dft_N;
int dft_M1,dft_N1;

CvMat* cvShowDFT1(IplImage*, int, int,char*);
void cvShowInvDFT1(IplImage*, CvMat*, int, int,char*);

im1 = cvLoadImage("kutty-1.jpg");
cvNamedWindow("Original-Color", 0);
cvShowImage("Original-Color", im1);
im = cvLoadImage("kutty-1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !im )
return -1;

cvNamedWindow("Original-Gray", 0);
cvShowImage("Original-Gray", im);
IplImage* k_image;
int rowLength= 11;
float kernels[11*11];   // float, to match the CV_32FC1 matrix created below
CvMat kernel;
int x,y;
long double PI_F=3.14159265358979;

//long double SIGMA = 0.84089642;
long double SIGMA = 0.014089642;
//long double SIGMA = 0.00184089642;
long double EPS = 2.718;   // approximation of e; exp(-x) would be more precise than pow(EPS,-x)
long double numerator,denominator;
long double value,value1;
long double a,b,c,d;

numerator = (pow((float)-3,2) + pow((float) 0,2))/(2*pow((float)SIGMA,2));
printf("Numerator=%f\n",numerator);
denominator = sqrt((float) (2 * PI_F * pow(SIGMA,2)));
printf("denominator=%1.8f\n",denominator);

value = (pow((float)EPS, (float)-numerator))/denominator;
printf("Value=%1.8f\n",value);
for(x = -5; x < 6; x++){
for (y = -5; y < 6; y++)
{
//numerator = (pow((float)x,2) + pow((float) y,2))/(2*pow((float)SIGMA,2));
numerator = (pow((float)x,2) + pow((float)y,2))/(2.0*pow(SIGMA,2));
denominator = sqrt((2.0 * 3.14159265358979 * pow(SIGMA,2)));
value = (pow(EPS,-numerator))/denominator;
printf(" %1.8f ",value);
kernels[x*rowLength +y+55] = (float)value;

}
printf("\n");
}
printf("---------------------------------\n");
for (i=-5; i < 6; i++){
for(j=-5;j < 6;j++){
printf(" %1.8f ",kernels[i*rowLength +j+55]);
}
printf("\n");
}
kernel= cvMat(rowLength, // number of rows
rowLength, // number of columns
CV_32FC1, // matrix data type
&kernels);
k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
k_image = cvGetImage(&kernel,k_image_hdr);

height = k_image->height;
width = k_image->width;
step = k_image->widthStep/sizeof(float);
depth = k_image->depth;
channels = k_image->nChannels;
//data1 = (float *)(k_image->imageData);
data1 = (uchar *)(k_image->imageData);
cvNamedWindow("blur kernel", 0);
cvShowImage("blur kernel", k_image);

dft_M = cvGetOptimalDFTSize( im->height - 1 );
dft_N = cvGetOptimalDFTSize( im->width - 1 );
//dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
//dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );
dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );
printf("dft_N1=%d,dft_M1=%d\n",dft_N1,dft_M1);

// Perform DFT of original image
dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
//Perform inverse (check)
//cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT
// Perform DFT of kernel
dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
//Perform inverse of kernel (check)
//cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel"); - Commented as it overwrites the DFT
// Multiply numerator with complex conjugate
dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );
printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

// Multiply DFT(blurred image) * complex conjugate of blur kernel
cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
//cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

// Split Fourier in real and imaginary parts
image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);
printf("%d %d %d %d\n", dft_M,dft_N,dft_M1,dft_N1);
//cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );
cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

// Compute A^2 + B^2 of denominator or blur kernel
image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

// Split Real and imaginary parts
cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
cvPow( image_ReB, image_ReB, 2.0);
cvPow( image_ImB, image_ImB, 2.0);
cvAdd(image_ReB, image_ImB, image_ReB,0);
val = cvScalarAll(kappa);
cvAddS(image_ReB,val,image_ReB,0);
//Divide Numerator/A^2 + B^2
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

// Merge Real and complex parts
cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);
// Perform Inverse
cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"Wiener o/p k=10000 SIGMA=0.014089642");
cvWaitKey(-1);
return 0;
}

CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;
CvMat* dft_A, tmp;
IplImage* image_Re;
IplImage* image_Im;
char str[80];
double m, M;
realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
cvScale(im, realInput, 1.0, 0.0);
cvZero(imaginaryInput);
cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

// copy A to dft_A and pad dft_A with zeros
cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
cvCopy( complexInput, &tmp, NULL );
if( dft_A->cols > im->width )
{
cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
cvZero( &tmp );
}
// no need to pad bottom part of dft_A with zeros because of
// use nonzero_rows parameter in cvDFT() call below

cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );
strcpy(str,"DFT -");
strcat(str,src);
cvNamedWindow(str, 0);

// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );
// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)
cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
cvShowImage(str, image_Re);
return(dft_A);
}

void cvShowInvDFT1(IplImage* im, CvMat* dft_A, int dft_M, int dft_N,char* src)
{
IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;
IplImage * image_Re;
IplImage * image_Im;
double m, M;
char str[80];
realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

//cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, complexInput->height );
cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, dft_M);
strcpy(str,"DFT INVERSE - ");
strcat(str,src);
cvNamedWindow(str, 0);
// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );
// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)
cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
//cvCvtColor(image_Re, image_Re, CV_GRAY2RGBA);
cvShowImage(str, image_Re);
}

See also
– Experiments with deblurring using OpenCV
– De-blurring revisited with Wiener filter using OpenCV
–  Dabbling with Wiener filter using OpenCV
– Re-working the Lucy-Richardson Algorithm in OpenCV


De-blurring revisited with Wiener filter using OpenCV

In this post I continue to experiment with the de-blurring of images using the Wiener filter. For details on the Wiener filter, please look at my earlier post “Dabbling with Wiener filter using OpenCV”.  Thanks to Egli Simon, Switzerland for pointing out a bug in the earlier post which I have now fixed.  The Wiener filter attempts to de-blur by assuming that the source signal is convolved with a blur kernel in the presence of noise.   I have also included the blur kernel as estimated by E. Simon in the code. I am including the de-blurring with 3 different blur kernel radii and different values for the Wiener constant K.  While the de-blurring is still a long way off there is some success.

I have assumed a known (non-blind) blur kernel and try to de-convolve with that. The Wiener filter tries to minimize the Mean Square Error (MSE), which can be expressed as
e(f) = E[X(f) - X1(f)]^2                                 - (1)

where e(f) is the Mean Square Error (MSE) in the frequency domain, X(f) is the original image, X1(f) is the estimated image obtained by applying the Wiener de-convolution filter to the observed blurred image, and E[] is the expectation.

X1(f) = G(f) * Y(f)                                      - (2)
where G(f) is the Wiener de-convolution filter and Y(f) is the observed blurred image.
Substituting (2) in (1) we get
e(f) = E[X(f) - G(f)*Y(f)]^2

Minimizing this error with respect to G(f) yields the Wiener filter, which effectively removes the blur.
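For completeness, here is a sketch of the standard minimization (not spelled out in the original post). Write the blur model in the frequency domain as Y(f) = H(f)*X(f) + N(f), assume the noise is uncorrelated with the signal, and let Sx(f) and Sn(f) denote the power spectra of the signal and the noise. Then

e(f) = E[X(f) - G(f)*Y(f)]^2
     = |1 - G(f)*H(f)|^2 * Sx(f) + |G(f)|^2 * Sn(f)

Setting the derivative with respect to G(f) to zero gives

G(f) = H*(f)*Sx(f) / (|H(f)|^2 * Sx(f) + Sn(f))
     = H*(f) / (|H(f)|^2 + Sn(f)/Sx(f))

which is the Wiener filter. Replacing the usually unknown ratio Sn(f)/Sx(f) by a constant gives the form used in the code below (the constant kappa).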
Feel free to post comments, opinions or ideas.

Watch this space …
I will be back! Hasta la Vista!


Note: You can clone the code below from GitHub – Another implementation of Wiener filter in OpenCV

The complete code is included below and should work as is

/*
============================================================================
Name : deblur_wiener.c
Author : Tinniam V Ganesh & Egli Simon
Version :
Copyright :
Description : Implementation of Wiener filter in OpenCV
============================================================================
*/

#include <stdio.h>
#include <stdlib.h>
#include "cxcore.h"
#include "cv.h"
#include "highgui.h"

#define kappa 10.0
#define rad 8

int main(int argc, char ** argv)
{
int height,width,step,channels,depth;
uchar* data1;

CvMat *dft_A;
CvMat *dft_B;

CvMat *dft_C;
IplImage* im;
IplImage* im1;

IplImage* image_ReB;
IplImage* image_ImB;

IplImage* image_ReC;
IplImage* image_ImC;
IplImage* complex_ImC;
CvScalar val;

IplImage* k_image_hdr;
int i,j,k;
char str[80];
FILE *fp;
fp = fopen("test.txt","w+");

int dft_M,dft_N;
int dft_M1,dft_N1;

CvMat* cvShowDFT1(IplImage*, int, int,char*);
void cvShowInvDFT1(IplImage*, CvMat*, int, int,char*);

im1 = cvLoadImage( "kutty-1.jpg",1 );
cvNamedWindow("Original-color", 0);
cvShowImage("Original-color", im1);
im = cvLoadImage( "kutty-1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !im )
return -1;

cvNamedWindow("Original-gray", 0);
cvShowImage("Original-gray", im);

// Create a random noise matrix (assumes the image is 383 x 357, i.e. kutty-1.jpg)
fp = fopen("test.txt","w+");
int val_noise[357*383];
for(i=0; i <im->height;i++){
for(j=0;j<im->width;j++){
fprintf(fp, "%d ",(383*i+j));
val_noise[383*i+j] = rand() % 128;
}
fprintf(fp, "\n");
}

CvMat noise = cvMat(im->height,im->width, CV_8UC1,val_noise);

// Add the random noise matrix to the image
cvAdd(im,&noise,im, 0);

cvNamedWindow("Original + Noise", 0);
cvShowImage("Original + Noise", im);

cvSmooth( im, im, CV_GAUSSIAN, 7, 7, 0.5, 0.5 );
cvNamedWindow("Gaussian Smooth", 0);
cvShowImage("Gaussian Smooth", im);

// Create a blur kernel
IplImage* k_image;
float r = rad;
float radius=((int)(r)*2+1)/2.0;

int rowLength=(int)(2*radius);
printf("rowlength %d\n",rowLength);
float kernels[rowLength*rowLength];
printf("rowl: %i",rowLength);
int norm=0; //Normalization factor
int x,y;
CvMat kernel;
for(x = 0; x < rowLength; x++)
for (y = 0; y < rowLength; y++)
if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))* (y - (int)(radius))) <= (int)(radius))
norm++;
// Populate matrix
for (y = 0; y < rowLength; y++) //populate array with values
{
for (x = 0; x < rowLength; x++) {
if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))
* (y - (int)(radius))) <= (int)(radius)) {
//kernels[y * rowLength + x] = 255;
kernels[y * rowLength + x] =1.0/norm;
printf("%f ",1.0/norm);
}
else{
kernels[y * rowLength + x] =0;
}
}
}

/*for (i=0; i < rowLength; i++){
for(j=0;j < rowLength;j++){
printf("%f ", kernels[i*rowLength +j]);
}
}*/

kernel= cvMat(rowLength, // number of rows
rowLength, // number of columns
CV_32FC1, // matrix data type
&kernels);
k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
k_image = cvGetImage(&kernel,k_image_hdr);

height = k_image->height;
width = k_image->width;
step = k_image->widthStep/sizeof(float);
depth = k_image->depth;

channels = k_image->nChannels;
//data1 = (float *)(k_image->imageData);
data1 = (uchar *)(k_image->imageData);
cvNamedWindow("blur kernel", 0);
cvShowImage("blur kernel", k_image);

dft_M = cvGetOptimalDFTSize( im->height - 1 );
dft_N = cvGetOptimalDFTSize( im->width - 1 );

//dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
//dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

printf("dft_N1=%d,dft_M1=%d\n",dft_N1,dft_M1);

// Perform DFT of original image
dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
//Perform inverse (check)
//cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT

// Perform DFT of kernel
dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
//Perform inverse of kernel (check)
//cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel"); - Commented as it overwrites the DFT

// Multiply numerator with complex conjugate
dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

// Multiply DFT(blurred image) * complex conjugate of blur kernel
cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
//cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

// Split Fourier in real and imaginary parts
image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);
printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

//cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );
cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

// Compute A^2 + B^2 of denominator or blur kernel
image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

// Split Real and imaginary parts
cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
cvPow( image_ReB, image_ReB, 2.0);
cvPow( image_ImB, image_ImB, 2.0);
cvAdd(image_ReB, image_ImB, image_ReB,0);
val = cvScalarAll(kappa);
cvAddS(image_ReB,val,image_ReB,0);

//Divide Numerator/A^2 + B^2
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

// Merge Real and complex parts
cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);
sprintf(str,"O/P Wiener - K=%6.4f rad=%d",kappa,rad);

// Perform Inverse
cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,str);

cvWaitKey(-1);
return 0;
}

CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;
CvMat* dft_A, tmp;
IplImage* image_Re;
IplImage* image_Im;
char str[80];
double m, M;
realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
cvScale(im, realInput, 1.0, 0.0);
cvZero(imaginaryInput);
cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

// copy A to dft_A and pad dft_A with zeros
cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
cvCopy( complexInput, &tmp, NULL );
if( dft_A->cols > im->width )
{
cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
cvZero( &tmp );
}

// no need to pad bottom part of dft_A with zeros because of
// use nonzero_rows parameter in cvDFT() call below
cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );
strcpy(str,"DFT -");
strcat(str,src);
cvNamedWindow(str, 0);

// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );

// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)

cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
cvShowImage(str, image_Re);
return(dft_A);
}

void cvShowInvDFT1(IplImage* im, CvMat* dft_A, int dft_M, int dft_N,char* src)
{
IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;
IplImage * image_Re;
IplImage * image_Im;
double m, M;
char str[80];
realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

//cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, complexInput->height );
cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, dft_M);
strcpy(str,"DFT INVERSE - ");
strcat(str,src);
cvNamedWindow(str, 0);

// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );

// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)

cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
//cvCvtColor(image_Re, image, CV_GRAY2RGBA);
cvShowImage(str, image_Re);
}

See also
– De-blurring revisited with Wiener filter using OpenCV
–  Dabbling with Wiener filter using OpenCV
– Deblurring with OpenCV: Wiener filter reloaded
– Re-working the Lucy-Richardson Algorithm in OpenCV


Dabbling with Wiener filter using OpenCV

The technique of reducing blur and restoring images is an extremely important field of study and finds numerous applications in medical imaging and astronomy. One such technique is the Wiener filter, named after its originator, Prof. Norbert Wiener (1894-1964). These techniques are usually applied in the frequency domain.

The blur model can be written as

b(x,y) = i(x,y) * k(x,y) + n(x,y)                                  - (1)
where b(x,y) is the blurred image, i(x,y) is the latent image, k(x,y) is the blur kernel and n(x,y) is random noise.
The above in the frequency domain can be written as
B(u,v) = I(u,v) K(u,v) + N(u,v)                                    - (2)
An inverse filter 1/K(u,v) can be obtained from equation (2) by disregarding the noise, giving the estimate I(u,v) = B(u,v)/K(u,v). However, a blur kernel is a low-pass filter, so K(u,v) is close to zero at high frequencies; the inverse filter therefore has very high gain at those frequencies and amplifies high-frequency noise.
The Wiener filter (like the Butterworth filter) tries to limit this noise amplification.
The Wiener filter minimizes the Mean Squared Error (MSE)
e^2 = E[(i(x,y) - i_est(x,y))^2]
where i_est(x,y) is the restored estimate of the image.

The frequency domain expression for this is

T(u,v) = K*(u,v) / (|K(u,v)|^2 + Sn(u,v)/Sf(u,v))                 - (3)

where T(u,v) is the Wiener filter,
K*(u,v) is the complex conjugate of the blur kernel,
|K(u,v)|^2 = k^2 + m^2, where k and m are the real and imaginary parts of the blur kernel,
Sn(u,v) is the power spectrum of the noise, and
Sf(u,v) is the power spectrum of the signal.

Since the power spectra are usually not known, equation (3) is approximated as

T(u,v) = K*(u,v) / (|K(u,v)|^2 + L)                               - (4)

where L is a small positive constant. Equation (4) is the form of the Wiener filter used here.

The lecture  “Image deblurring by Frequency Domain Operations” by Prof. Harvey Rhody is worth a read.
One thing that puzzled me is that the Wiener filter looks like just the inverse of the blur kernel with the additional factor ‘L’ in the denominator; since the contribution of ‘L’ is small, the Wiener filter appears to be the same as a regular inverse filter. The difference shows up exactly where it matters: at frequencies where |K(u,v)| is close to zero the inverse filter gain 1/K(u,v) blows up, whereas the Wiener gain K*(u,v)/(|K(u,v)|^2 + L) stays bounded (roughly |K|/L), which is what keeps the noise from being amplified. The small numeric sketch below illustrates this.
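Here is a small numeric check (mine, not from the post) of the gains of the two filters at a frequency where the kernel transform K(u,v) is nearly zero; the numbers are illustrative.

#include <stdio.h>
#include <math.h>

int main(void)
{
    double k_re = 0.001, k_im = 0.0;   /* K(u,v) nearly zero at this frequency */
    double L = 0.01;                   /* Wiener constant                      */
    double mag2 = k_re * k_re + k_im * k_im;

    double inverse_gain = 1.0 / sqrt(mag2);          /* |1 / K(u,v)|               */
    double wiener_gain  = sqrt(mag2) / (mag2 + L);   /* |K*(u,v) / (|K(u,v)|^2+L)| */

    printf("inverse filter gain = %g\n", inverse_gain);  /* 1000: noise amplified      */
    printf("wiener  filter gain = %g\n", wiener_gain);   /* ~0.1: noise kept in check  */
    return 0;
}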

You can clone the code below from GitHub – Wiener filter with OpenCV

The steps to create the Wiener filter were as follows

1. Create the gray image of the blurred original

2. Add additive noise to the blurred original

      // Add the random noise matrix to the image
	cvAdd(im,&noise,im, 0);
3.Gaussian smooth the noisy blurred image
   cvSmooth( im, im, CV_GAUSSIAN, 7, 7, 0.5, 0.5 );
4.Perform DFT and inverse DFT (to check) of the original image (noisy, blurred image)
  // Perform DFT of original image
  dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
  //Perform inverse (check)
  cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original");
5.Perform DFT and inverse DFT of blur kernel
  // Perform DFT of kernel
  dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
  //Perform inverse of kernel (check)
  cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel");

6.Multiply DFT of original with complex conjugate of blur kernel
   // Multiply numerator with complex conjugate
  dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );
  cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
7.Calculate modulus of blur kernel (denominator)
  // Split Real and imaginary parts
  cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
  cvPow( image_ReB, image_ReB, 2.0);
  cvPow( image_ImB, image_ImB, 2.0);
  cvAdd(image_ReB, image_ImB, image_ReB,0);
8.Add Wiener constant to modulus of blur kernel (denominator)
 val = cvScalarAll(0.00001);
  cvAddS(image_ReB,val,image_ReB,0);
9.Divide the numerator with the denominator
 //Divide Numerator/A^2 + B^2
  cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
  cvDiv(image_ImC, image_ReB, image_ImC, 1.0);
10.Perform inverse DFT of the filtered signal

// Perform Inverse

 cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"Output - Wiener filter");

There is still a residual amount of blur (do also read De-blurring revisited with Wiener filter using OpenCV). This can be tried for different values of the Wiener constant (the noise term added to the denominator).
Note: I would like to give special thanks to Egli Simon, who identified the bug in my previous version of the code. The line displaying the inverse DFT of the original image and the blur kernel overwrote the DFT contents, and as a result the Wiener filter was not working.
Note: You can clone the code below from GitHub – Wiener filter with OpenCV
The complete code is given below (re-worked and should work as is)
/*
============================================================================
Name : wiener1.c
Author : Tinniam V Ganesh & Egli Simon
Version :
Copyright :
Description : Wiener filter implementation in OpenCV (22 Nov 2011)
============================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include "cxcore.h"
#include "cv.h"
#include "highgui.h"

#define kappa 1
#define rad 2
int main(int argc, char ** argv)
{
int height,width,step,channels,depth;
uchar* data1;
CvMat *dft_A;
CvMat *dft_B;
CvMat *dft_C;
IplImage* im;
IplImage* im1;
IplImage* image_ReB;
IplImage* image_ImB;
IplImage* image_ReC;
IplImage* image_ImC;
IplImage* complex_ImC;
CvScalar val;
IplImage* k_image_hdr;
int i,j,k;

FILE *fp;
fp = fopen("test.txt","w+");

int dft_M,dft_N;
int dft_M1,dft_N1;
CvMat* cvShowDFT1(IplImage*, int, int,char*);
void cvShowInvDFT1(IplImage*, CvMat*, int, int,char*);
im1 = cvLoadImage( "kutty-1.jpg",1 );
cvNamedWindow("Original-color", 0);
cvShowImage("Original-color", im1);
im = cvLoadImage( "kutty-1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !im )
return -1;
cvNamedWindow("Original-gray", 0);
cvShowImage("Original-gray", im);

// Create a random noise matrix (assumes the image is 383 x 357, i.e. kutty-1.jpg)
fp = fopen("test.txt","w+");
int val_noise[357*383];
for(i=0; i <im->height;i++){
for(j=0;j<im->width;j++){
fprintf(fp, "%d ",(383*i+j));
val_noise[383*i+j] = rand() % 128;
}
fprintf(fp, "\n");
}

CvMat noise = cvMat(im->height,im->width, CV_8UC1,val_noise);

// Add the random noise matrix to the image
cvAdd(im,&noise,im, 0);
cvNamedWindow("Original + Noise", 0);
cvShowImage("Original + Noise", im);
cvSmooth( im, im, CV_GAUSSIAN, 7, 7, 0.5, 0.5 );
cvNamedWindow("Gaussian Smooth", 0);
cvShowImage("Gaussian Smooth", im);

// Create a blur kernel
IplImage* k_image;
float r = rad;
float radius=((int)(r)*2+1)/2.0;
int rowLength=(int)(2*radius);
printf("rowlength %d\n",rowLength);
float kernels[rowLength*rowLength];
printf("rowl: %i",rowLength);
int norm=0; //Normalization factor
int x,y;
CvMat kernel;
for(x = 0; x < rowLength; x++)
for (y = 0; y < rowLength; y++)
if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))* (y - (int)(radius))) <= (int)(radius))
norm++;
// Populate matrix
for (y = 0; y < rowLength; y++) //populate array with values
{
for (x = 0; x < rowLength; x++) {
if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))
* (y - (int)(radius))) <= (int)(radius)) {
//kernels[y * rowLength + x] = 255;
kernels[y * rowLength + x] =1.0/norm;
printf("%f ",1.0/norm);
}
else{
kernels[y * rowLength + x] =0;
}
}
}

/*for (i=0; i < rowLength; i++){
for(j=0;j < rowLength;j++){
printf("%f ", kernels[i*rowLength +j]);
}
}*/

kernel= cvMat(rowLength, // number of rows
rowLength, // number of columns
CV_32FC1, // matrix data type
&kernels);
k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
k_image = cvGetImage(&kernel,k_image_hdr);

height = k_image->height;
width = k_image->width;
step = k_image->widthStep/sizeof(float);
depth = k_image->depth;

channels = k_image->nChannels;
//data1 = (float *)(k_image->imageData);
data1 = (uchar *)(k_image->imageData);
cvNamedWindow("blur kernel", 0);
cvShowImage("blur kernel", k_image);

dft_M = cvGetOptimalDFTSize( im->height - 1 );
dft_N = cvGetOptimalDFTSize( im->width - 1 );

//dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
//dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

printf("dft_N1=%d,dft_M1=%d\n",dft_N1,dft_M1);

// Perform DFT of original image
dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
//Perform inverse (check)
//cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT

// Perform DFT of kernel
dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
//Perform inverse of kernel (check)
//cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel"); - Commented as it overwrites the DFT

// Multiply numerator with complex conjugate
dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

// Multiply DFT(blurred image) * complex conjugate of blur kernel
cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
//cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

// Split Fourier in real and imaginary parts
image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

//cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );
cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

// Compute A^2 + B^2 of denominator or blur kernel
image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

// Split Real and imaginary parts
cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
cvPow( image_ReB, image_ReB, 2.0);
cvPow( image_ImB, image_ImB, 2.0);
cvAdd(image_ReB, image_ImB, image_ReB,0);

val = cvScalarAll(kappa);
cvAddS(image_ReB,val,image_ReB,0);

//Divide Numerator/A^2 + B^2
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

// Merge Real and complex parts
cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

// Perform Inverse
cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"O/p Wiener k=1 rad=2");

cvWaitKey(-1);
return 0;
}

CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;

CvMat* dft_A, tmp;

IplImage* image_Re;
IplImage* image_Im;

char str[80];

double m, M;

realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);
cvScale(im, realInput, 1.0, 0.0);
cvZero(imaginaryInput);
cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);
dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
// copy A to dft_A and pad dft_A with zeros
cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
cvCopy( complexInput, &tmp, NULL );
if( dft_A->cols > im->width )
{
cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
cvZero( &tmp );
}

// no need to pad bottom part of dft_A with zeros because of
// use nonzero_rows parameter in cvDFT() call below

cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

strcpy(str,"DFT -");
strcat(str,src);
cvNamedWindow(str, 0);

// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );
// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)

cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
cvShowImage(str, image_Re);
return(dft_A);
}

void cvShowInvDFT1(IplImage* im, CvMat* dft_A, int dft_M, int dft_N,char* src)
{

IplImage* realInput;
IplImage* imaginaryInput;
IplImage* complexInput;
IplImage * image_Re;
IplImage * image_Im;
double m, M;
char str[80];

realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

//cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, complexInput->height );
cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, dft_M);
strcpy(str,"DFT INVERSE - ");
strcat(str,src);
cvNamedWindow(str, 0);

// Split Fourier in real and imaginary parts
cvSplit( dft_A, image_Re, image_Im, 0, 0 );

// Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
cvPow( image_Re, image_Re, 2.0);
cvPow( image_Im, image_Im, 2.0);
cvAdd( image_Re, image_Im, image_Re, NULL);
cvPow( image_Re, image_Re, 0.5 );

// Compute log(1 + Mag)
cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
cvLog( image_Re, image_Re ); // log(1 + Mag)

cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
//cvCvtColor(image_Re, image_Re, CV_GRAY2RGBA);

cvShowImage(str, image_Re);

}


See also
– Experiments with deblurring using OpenCV
–  Dabbling with Wiener filter using OpenCV
– Deblurring with OpenCV: Wiener filter reloaded
– Re-working the Lucy-Richardson Algorithm in OpenCV

