CASToR 1.0 — Tomographic Reconstruction (PET/SPECT)
00001 00002 /* 00003 Implementation of class vImageConvolver 00004 00005 - separators: done 00006 - doxygen: done 00007 - default initialization: done 00008 - CASTOR_DEBUG: 00009 - CASTOR_VERBOSE: 00010 */ 00011 00018 #include "vImageConvolver.hh" 00019 #include "oImageDimensionsAndQuantification.hh" 00020 #include "oImageSpace.hh" 00021 00022 // ===================================================================== 00023 // --------------------------------------------------------------------- 00024 // --------------------------------------------------------------------- 00025 // ===================================================================== 00026 00027 vImageConvolver::vImageConvolver() 00028 { 00029 // Standards 00030 mp_ImageDimensionsAndQuantification = NULL; 00031 m_verbose = -1; 00032 // Booleans 00033 m_checked = false; 00034 m_initialized = false; 00035 m_stationary = false; 00036 // Padded image 00037 mp_paddedImage = NULL; 00038 m_offsetX = -1; 00039 m_offsetY = -1; 00040 m_offsetZ = -1; 00041 m_dimPadX = -1; 00042 m_dimPadY = -1; 00043 m_dimPadZ = -1; 00044 m_dimPadXY = -1; 00045 m_dimPadXYZ = -1; 00046 // Convolution kernel 00047 m_nbKernels = -1; 00048 mp_dimKernelX = NULL; 00049 mp_dimKernelY = NULL; 00050 mp_dimKernelZ = NULL; 00051 m2p_kernel = NULL; 00052 } 00053 00054 // ===================================================================== 00055 // --------------------------------------------------------------------- 00056 // --------------------------------------------------------------------- 00057 // ===================================================================== 00058 00059 vImageConvolver::~vImageConvolver() 00060 { 00061 // Simply free what should be 00062 if (mp_paddedImage) free(mp_paddedImage); 00063 if (mp_dimKernelX) free(mp_dimKernelX); 00064 if (mp_dimKernelY) free(mp_dimKernelY); 00065 if (mp_dimKernelZ) free(mp_dimKernelZ); 00066 if (m2p_kernel) 00067 { 00068 for (int k=0; k<m_nbKernels; k++) if (m2p_kernel[k]) 
free(m2p_kernel[k]); 00069 free(m2p_kernel); 00070 } 00071 } 00072 00073 // ===================================================================== 00074 // --------------------------------------------------------------------- 00075 // --------------------------------------------------------------------- 00076 // ===================================================================== 00077 00078 // Check global parameters then check the specific parameters 00079 int vImageConvolver::CheckParameters() 00080 { 00081 // Check verbose 00082 if (m_verbose<0) 00083 { 00084 Cerr("***** vImageConvolver::CheckParameters() -> The verbose level must be set positive !" << endl); 00085 return 1; 00086 } 00087 // Check oImageDimensionsAndQuantification 00088 if (mp_ImageDimensionsAndQuantification==NULL) 00089 { 00090 Cerr("***** vImageConvolver::CheckParameters() -> The image dimensions must be set through a oImageDimensionsAndQuantification !" << endl); 00091 return 1; 00092 } 00093 // Finally check the specific parameters from the child convolver 00094 if (CheckSpecificParameters()) 00095 { 00096 Cerr("***** vImageConvolver::CheckParameters() -> A problem occured while checking specific parameters of the child convolver !" << endl); 00097 return 1; 00098 } 00099 // All set 00100 m_checked = true; 00101 // Normal end 00102 return 0; 00103 } 00104 00105 // ===================================================================== 00106 // --------------------------------------------------------------------- 00107 // --------------------------------------------------------------------- 00108 // ===================================================================== 00109 00110 // Check that the kernel has been successfully built, then allocate the padded image 00111 int vImageConvolver::Initialize() 00112 { 00113 // Check if parameters have been checked 00114 if (!m_checked) 00115 { 00116 Cerr("***** vImageConvolver::Initialize() -> Parameters have not been checked ! 
Please call CheckParameters() before." << endl); 00117 return 1; 00118 } 00119 00120 // Build the convolution kernel 00121 if (BuildConvolutionKernel()) 00122 { 00123 Cerr("***** vImageConvolver::Initialize() -> A problem occured while building the convolution kernel !" << endl); 00124 return 1; 00125 } 00126 00127 // --------------------------------------------------------------------------------------- 00128 // Check that all kernel dimensions are odd and find the maximum size along each dimension 00129 // --------------------------------------------------------------------------------------- 00130 00131 // Check that the number of kernels is not null or negative 00132 if (m_nbKernels<1) 00133 { 00134 Cerr("***** vImageConvolver::Initialize() -> The number of kernels is negative or null !" << endl); 00135 return 1; 00136 } 00137 // Check that the kernel dimensions have been allocated 00138 if (mp_dimKernelX==NULL || mp_dimKernelY==NULL || mp_dimKernelZ==NULL) 00139 { 00140 Cerr("***** vImageConvolver::Initialize() -> The kernel dimensions are not allocated !" << endl); 00141 return 1; 00142 } 00143 // Check that the kernel has been allocated 00144 if (m2p_kernel==NULL) 00145 { 00146 Cerr("***** vImageConvolver::Initialize() -> The kernel is not allocated !" << endl); 00147 return 1; 00148 } 00149 // In case the kernel is stationary and the user forgot to explicitely specify it, we force it here 00150 if (m_nbKernels==1) m_stationary = true; 00151 00152 // Maximum kernel sizes along each dimension 00153 INTNB max_kern_size_Z = 0; 00154 INTNB max_kern_size_Y = 0; 00155 INTNB max_kern_size_X = 0; 00156 00157 // Loop over all kernels 00158 for (int k=0; k<m_nbKernels; k++) 00159 { 00160 // Check oddity along Z 00161 if (mp_dimKernelZ[k]%2==0) 00162 { 00163 Cerr("***** vImageConvolver::Initialize() -> Dimension of " << k << "th convolution kernel along Z (" << mp_dimKernelZ[k] << " is not odd !" 
<< endl); 00164 return 1; 00165 } 00166 // Check oddity along Y 00167 if (mp_dimKernelY[k]%2==0) 00168 { 00169 Cerr("***** vImageConvolver::Initialize() -> Dimension of " << k << "th convolution kernel along Y (" << mp_dimKernelY[k] << " is not odd !" << endl); 00170 return 1; 00171 } 00172 // Check oddity along X 00173 if (mp_dimKernelX[k]%2==0) 00174 { 00175 Cerr("***** vImageConvolver::Initialize() -> Dimension of " << k << "th convolution kernel along X (" << mp_dimKernelX[k] << " is not odd !" << endl); 00176 return 1; 00177 } 00178 // Check maximum along Z 00179 if (mp_dimKernelZ[k]>max_kern_size_Z) max_kern_size_Z = mp_dimKernelZ[k]; 00180 // Check maximum along Y 00181 if (mp_dimKernelY[k]>max_kern_size_Y) max_kern_size_Y = mp_dimKernelY[k]; 00182 // Check maximum along X 00183 if (mp_dimKernelX[k]>max_kern_size_X) max_kern_size_X = mp_dimKernelX[k]; 00184 } 00185 00186 // Set the offsets 00187 m_offsetZ = max_kern_size_Z / 2; 00188 m_offsetY = max_kern_size_Y / 2; 00189 m_offsetX = max_kern_size_X / 2; 00190 00191 // In the convolution we assume an intensity conservation along the Z axis, so we check if the offset is higher or equal to the image dimension. 00192 // In this case, the convolution will still work (it is implemented so), but computation time will be lost for nothing, so we throw a warning. 00193 if (m_offsetZ>=mp_ImageDimensionsAndQuantification->GetNbVoxZ()) 00194 { 00195 FLTNB over_computation_factor = ((FLTNB)(max_kern_size_Z)) / ((FLTNB)(mp_ImageDimensionsAndQuantification->GetNbVoxZ()*2-1)); 00196 Cerr("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl); 00197 Cerr("!!!!! vImageConvolver::Initialize() -> The maximum half size of the convolution kernels is higher or equal to the number of slices of the image." << endl); 00198 Cerr("!!!!! This won't affect the results, but a convolution running time factor of " << over_computation_factor << " will be lost for nothing." 
<< endl); 00199 Cerr("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << endl); 00200 } 00201 00202 // --------------------------------------------------------------------------------------- 00203 // Build the padded image 00204 // --------------------------------------------------------------------------------------- 00205 00206 // Compute the padded image dimensions 00207 m_dimPadZ = mp_ImageDimensionsAndQuantification->GetNbVoxZ() + max_kern_size_Z - 1; 00208 m_dimPadY = mp_ImageDimensionsAndQuantification->GetNbVoxY() + max_kern_size_Y - 1; 00209 m_dimPadX = mp_ImageDimensionsAndQuantification->GetNbVoxX() + max_kern_size_X - 1; 00210 m_dimPadXY = m_dimPadX * m_dimPadY; 00211 m_dimPadXYZ = m_dimPadXY * m_dimPadZ; 00212 00213 // Allocate the padded image and fill it with 0. 00214 mp_paddedImage = (FLTNB*)calloc(m_dimPadXYZ,sizeof(FLTNB)); 00215 00216 // All set 00217 m_initialized = true; 00218 // Normal end 00219 return 0; 00220 } 00221 00222 // ===================================================================== 00223 // --------------------------------------------------------------------- 00224 // --------------------------------------------------------------------- 00225 // ===================================================================== 00226 00227 int vImageConvolver::ApplyConvolution(FLTNB* ap_image) 00228 { 00229 #ifdef CASTOR_DEBUG 00230 if (!m_initialized) 00231 { 00232 Cerr("***** vImageConvolver::ApplyConvolution() -> Called while not initialized !" << endl); 00233 Exit(EXIT_DEBUG); 00234 } 00235 #endif 00236 00237 // Copy image into padded buffer image 00238 CopyToPaddedImage(ap_image); 00239 // Then convolve 00240 if (Convolve(ap_image)) 00241 { 00242 Cerr("***** vImageConvolver::ApplyConvolution() -> A problem occured while actually convolving !" 
<< endl); 00243 return 1; 00244 } 00245 // Normal end 00246 return 0; 00247 } 00248 00249 // ===================================================================== 00250 // --------------------------------------------------------------------- 00251 // --------------------------------------------------------------------- 00252 // ===================================================================== 00253 00254 int vImageConvolver::ApplyConvolutionTranspose(FLTNB* ap_image) 00255 { 00256 #ifdef CASTOR_DEBUG 00257 if (!m_initialized) 00258 { 00259 Cerr("***** vImageConvolver::ApplyConvolutionTranspose() -> Called while not initialized !" << endl); 00260 Exit(EXIT_DEBUG); 00261 } 00262 #endif 00263 00264 // Copy image into padded buffer image 00265 CopyToPaddedImage(ap_image); 00266 // Then convolve 00267 if (ConvolveTranspose(ap_image)) 00268 { 00269 Cerr("***** vImageConvolver::ApplyConvolutionTranspose() -> A problem occured while actually convolving !" << endl); 00270 return 1; 00271 } 00272 // Normal end 00273 return 0; 00274 } 00275 00276 // ===================================================================== 00277 // --------------------------------------------------------------------- 00278 // --------------------------------------------------------------------- 00279 // ===================================================================== 00280 00281 void vImageConvolver::CopyToPaddedImage(FLTNB* ap_inputImage) 00282 { 00283 #ifdef CASTOR_DEBUG 00284 if (!m_initialized) 00285 { 00286 Cerr("***** vImageConvolver::CopyToPaddedImage() -> Called while not initialized !" 
<< endl); 00287 Exit(EXIT_DEBUG); 00288 } 00289 #endif 00290 00291 // Copy the image into the padded buffer, centered with respect to the pad 00292 INTNB z; 00293 #pragma omp parallel for private(z) schedule(guided) 00294 for (z=0; z<mp_ImageDimensionsAndQuantification->GetNbVoxZ(); z++) 00295 { 00296 // Pad coordinate Z 00297 INTNB padz = z + m_offsetZ; 00298 for (INTNB y=0; y<mp_ImageDimensionsAndQuantification->GetNbVoxY(); y++) 00299 { 00300 // Pad coordinate Y 00301 INTNB pady = y + m_offsetY; 00302 for (INTNB x=0; x<mp_ImageDimensionsAndQuantification->GetNbVoxX(); x++) 00303 { 00304 // Pad coordinate X 00305 INTNB padx = x + m_offsetX; 00306 // Global original coordinate 00307 INTNB coord_orig = z*mp_ImageDimensionsAndQuantification->GetNbVoxXY() + y*mp_ImageDimensionsAndQuantification->GetNbVoxX() + x; 00308 // Global pad coordinate 00309 INTNB coord_pad = padz*m_dimPadXY + pady*m_dimPadX + padx; 00310 // Affect buffer 00311 mp_paddedImage[coord_pad] = ap_inputImage[coord_orig]; 00312 // Zero image 00313 ap_inputImage[coord_orig] = 0.; 00314 } 00315 } 00316 } 00317 } 00318 00319 // ===================================================================== 00320 // --------------------------------------------------------------------- 00321 // --------------------------------------------------------------------- 00322 // ===================================================================== 00323 00324 int vImageConvolver::Convolve(FLTNB* ap_outputImage) 00325 { 00326 #ifdef CASTOR_DEBUG 00327 if (!m_initialized) 00328 { 00329 Cerr("***** vImageConvolver::Convolve() -> Called while not initialized !" << endl); 00330 Exit(EXIT_DEBUG); 00331 } 00332 #endif 00333 00334 // This function is designed to work universally and exclusively on a stationary kernel. 00335 // Note that we suppose that a stationary kernel is also symmetric (but not necessarily isotropic). 00336 // For asymetric and non-stationary kernels, have to overload this function in the child convolver. 
00337 // Note also that we choose the out-to-in implementation (outer loop on the output image) so that the 00338 // parallelism is operated on the output and is thus thread-safe. 00339 // Finally note that transaxially, we do not implement an image intensity preservation policy as we suppose that 00340 // the distance between the object and the image border is taller than the kernel half-width (it should be so). 00341 if (m_stationary) 00342 { 00343 INTNB stationary_kernel = 0; 00344 // Convolve (OpenMP on Z dimension) 00345 INTNB z; 00346 #pragma omp parallel for private(z) 00347 for (z=0; z<mp_ImageDimensionsAndQuantification->GetNbVoxZ(); z++) 00348 { 00349 // As a first step, we virtually go through the kernel along Z to compute the actual kernel integral that will 00350 // be really used in the convolution. For each virtual slice contributing to the convolved slice, we test if it 00351 // is inside or outside the image; in the former case we count it into the integral and in the latter we don't. 
00352 FLTNB kernel_integral = 0.; 00353 for (INTNB zk=0; zk<mp_dimKernelZ[stationary_kernel]; zk++) 00354 { 00355 // Index of the virtual slice which will (or not) contribute to the convolved slice 00356 INTNB virtualZ = z + zk - m_offsetZ; 00357 // Test if it is not in the image, then we move onto the next virtual slice 00358 if (virtualZ<0 || virtualZ>=mp_ImageDimensionsAndQuantification->GetNbVoxZ()) continue; 00359 // Otherwise, we update the kernel integral 00360 INTNB kernZ_base = zk * mp_dimKernelX[stationary_kernel] * mp_dimKernelY[stationary_kernel]; 00361 for (INTNB xyk=0; xyk<mp_dimKernelX[stationary_kernel]*mp_dimKernelY[stationary_kernel]; xyk++) 00362 kernel_integral += m2p_kernel[stationary_kernel][kernZ_base+xyk]; 00363 } 00364 // Some precomputation for maximum efficiency 00365 INTNB indexZ_base = z*mp_ImageDimensionsAndQuantification->GetNbVoxXY(); 00366 for (INTNB y=0; y<mp_ImageDimensionsAndQuantification->GetNbVoxY(); y++) 00367 { 00368 // Some precomputation for maximum efficiency 00369 INTNB indexZY_base = indexZ_base + y*mp_ImageDimensionsAndQuantification->GetNbVoxX(); 00370 for (INTNB x=0; x<mp_ImageDimensionsAndQuantification->GetNbVoxX(); x++) 00371 { 00372 // Output image index 00373 INTNB index_img = indexZY_base + x; 00374 // Inner loops on convolution kernel dimensions 00375 for (INTNB zk=0, index_kern=0; zk<mp_dimKernelZ[stationary_kernel]; zk++) 00376 { 00377 // Some precomputation for maximum efficiency 00378 INTNB indexZ_pad = (z + zk) * m_dimPadXY; 00379 for (INTNB yk=0; yk<mp_dimKernelY[stationary_kernel]; yk++) 00380 { 00381 // Some precomputation for maximum efficiency 00382 INTNB indexZY_pad = indexZ_pad + (y + yk) * m_dimPadX; 00383 for (INTNB xk=0; xk<mp_dimKernelX[stationary_kernel]; xk++, index_kern++) 00384 { 00385 // Padded image index 00386 INTNB index_pad = indexZY_pad + x + xk; 00387 // Apply contribution 00388 ap_outputImage[index_img] += mp_paddedImage[index_pad] * m2p_kernel[stationary_kernel][index_kern] / 
kernel_integral; 00389 } 00390 } 00391 } 00392 } 00393 } 00394 } 00395 // Normal end 00396 return 0; 00397 } 00398 // Otherwise the convolve function has to be implemented by the child non-stationary convolver 00399 else 00400 { 00401 Cerr("***** vImageConvolver::Convolve() -> To use a non-stationary kernel, you should overload this function to implement your own in your child convolver !" << endl); 00402 return 1; 00403 } 00404 } 00405 00406 // ===================================================================== 00407 // --------------------------------------------------------------------- 00408 // --------------------------------------------------------------------- 00409 // ===================================================================== 00410 00411 int vImageConvolver::ConvolveTranspose(FLTNB* ap_outputImage) 00412 { 00413 #ifdef CASTOR_DEBUG 00414 if (!m_initialized) 00415 { 00416 Cerr("***** vImageConvolver::ConvolveTranspose() -> Called while not initialized !" << endl); 00417 Exit(EXIT_DEBUG); 00418 } 00419 #endif 00420 00421 // This function is designed to work universally on a stationary kernel. 00422 // Note that we suppose a stationary kernel is also symmetric (otherwise it is absurd) 00423 if (m_stationary) 00424 { 00425 return Convolve(ap_outputImage); 00426 } 00427 // Otherwise the convolve function has to be implemented by the child non-stationary convolver 00428 else 00429 { 00430 Cerr("***** vImageConvolver::ConvolveTranspose() -> To use a non-stationary kernel, you should overload this function to implement your own in your child convolver !" << endl); 00431 return 1; 00432 } 00433 } 00434 00435 // ===================================================================== 00436 // --------------------------------------------------------------------- 00437 // --------------------------------------------------------------------- 00438 // =====================================================================