/*  Copyright (C) 2010 University of Oxford
    Stamatios Sotiropoulos - FMRIB Image Analysis Group  */

/*  Part of FSL - FMRIB's Software Library
    http://www.fmrib.ox.ac.uk/fsl
    fsl@fmrib.ox.ac.uk

    Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
    Imaging of the Brain), Department of Clinical Neurology, Oxford
    University, Oxford, UK

    LICENCE

    FMRIB Software Library, Release 5.0 (c) 2012, The University of
    Oxford (the "Software")

    The Software remains the property of the University of Oxford ("the
    University").

    The Software is distributed "AS IS" under this Licence solely for
    non-commercial use in the hope that it will be useful, but in order
    that the University as a charitable foundation protects its assets for
    the benefit of its educational and research purposes, the University
    makes clear that no condition is made or to be implied, nor is any
    warranty given or to be implied, as to the accuracy of the Software,
    or that it will be suitable for any particular purpose or for use
    under any specific conditions. Furthermore, the University disclaims
    all responsibility for the use which is made of the Software. It
    further disclaims any liability for the outcomes arising from using
    the Software.

    The Licensee agrees to indemnify the University and hold the
    University harmless from and against any and all claims, damages and
    liabilities asserted by third parties (including claims for
    negligence) which arise directly or indirectly from the use of the
    Software or the sale of any products based on the Software.

    No part of the Software may be reproduced, modified, transmitted or
    transferred in any form or by any means, electronic or mechanical,
    without the express permission of the University. The permission of
    the University is not required if the said reproduction, modification,
    transmission or transference is done without financial return, the
    conditions of this Licence are imposed upon the receiver of the
    product, and all original and amended source code is included in any
    transmitted product. You may be held legally responsible for any
    copyright infringement that is caused or encouraged by your failure to
    abide by these terms and conditions.

    You are not permitted under this Licence to use this Software
    commercially. Use for which any financial return is received shall be
    defined as commercial use, and includes (1) integration of all or part
    of the source code or the Software into a product for sale or license
    by or on behalf of Licensee to third parties or (2) use of the
    Software or any derivative of it for research with the final aim of
    developing software products for sale or license to a third party or
    (3) use of the Software or any derivative of it for research with the
    final aim of developing non-software products for sale or license to a
    third party, or (4) use of the Software to provide any service to an
    external organisation for which payment is received. If you are
    interested in using the Software commercially, please contact Isis
    Innovation Limited ("Isis"), the technology transfer company of the
    University, to negotiate a licence. Contact details are:
    innovation@isis.ox.ac.uk quoting reference DE/9564.
*/

#if !defined(qboot_h)
#define qboot_h

#include <iostream>
#include "stdlib.h"
#include "libprob.h"
#include <cmath>
#include "miscmaths/miscprob.h"
#include "miscmaths/miscmaths.h"
#include "newimage/newimageall.h"
#include "qbootOptions.h"
#include "peakfinder.h"
#include "sphere_tessellation.h"

using namespace std;
using namespace NEWMAT;
using namespace NEWIMAGE;
using namespace MISCMATHS;

namespace ODFs{

#define b0max 50       //b values up to b0max will be considered b0s
#define shellrange 50  //how much +/- b value variability in each q shell is allowed. E.g. a shell at b=1000 can range from b=950 to b=1050.

  /////////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////////
  //Class to estimate the ODF SH coefficients in a single voxel using single-shell data
  class ODF_voxel{
    Matrix m_coeff_samples;              //Stores the ODF SH coefficients (N_coeff x N_samples)
    ColumnVector m_coeff;                //Vector that holds the current ODF SH coefficients
    ColumnVector m_coeff_signal;         //Vector that holds the signal SH coefficients
    ColumnVector m_current_data;         //Vectors used during bootstrapping to keep the current data state
    ColumnVector m_current_residuals;    //and the current residuals
    ColumnVector m_residuals;            //Vector that holds the original residuals that will be bootstrapped

    const ColumnVector& m_data;          //Vector with signal attenuations (for CSA-ODFs, transform to the log of the -log of the signal attenuations)
    const int& m_modelnum;               //1 for Descoteaux's ODFs, 2 for CSA-ODFs
    const Matrix& m_SHT;                 //Pseudoinverse of the spherical harmonic transform matrix, used to get the signal SH coefficients
    const Matrix& m_HAT;                 //Hat matrix, used during bootstrap
    const DiagonalMatrix& m_scaling_factors;  //Holds the scaling factors to obtain the ODF SH coefficients from the signal SH ones
    const int& m_SH_order;               //maximum SH order (must be even)
    const int& Num_of_coeff;             //Number of coefficients that will be estimated
    const int& n_samples;                //Number of samples per coefficient
    const float& m_delta;                //Regularization parameter for signal intensities

  public:
    //constructor
    ODF_voxel(const ColumnVector& data, const Matrix& SHT, const Matrix& HAT, const DiagonalMatrix& scaling_factors,
              const int& modelnum, const int& SH_order, const int& Num_coeff, const int& nsamples, const float& delta):
      m_data(data), m_modelnum(modelnum), m_SHT(SHT), m_HAT(HAT), m_scaling_factors(scaling_factors),
      m_SH_order(SH_order), Num_of_coeff(Num_coeff), n_samples(nsamples), m_delta(delta) {
      m_current_data=m_data;
      m_coeff_samples.ReSize(Num_coeff,nsamples);
      m_coeff_samples=0;
    }

    //destructor
    ~ODF_voxel(){}

    Matrix& get_SHcoeff_ref() { return m_coeff_samples; }
    Matrix get_SHcoeff() { return m_coeff_samples; }

    void compute_signal_SH_coeff(){
      m_coeff_signal=m_SHT*m_current_data;
    }

    //Regularizes signal attenuations (provided by Iman Aganj)
    void regularize_intensities (ColumnVector& attenuation, const float& delta) const{
      if (delta==0){   //Clip attenuation values. If att<0 => att=0, if att>1 => att=1
        for (int i=1; i<=attenuation.Nrows(); i++)
          attenuation(i)=(attenuation(i)>=0 && attenuation(i)<=1)*attenuation(i)+(attenuation(i)>1);
      }
      else{            //Use function from Aganj et al, MRM, 2010
        for (int i=1; i<=attenuation.Nrows(); i++)
          attenuation(i)=(attenuation(i)<0)*(0.5*delta)
                        + (attenuation(i)>=0 && attenuation(i)<delta)*(0.5*delta+0.5*attenuation(i)*attenuation(i)/delta)
                        + (attenuation(i)>=delta && attenuation(i)<1-delta)*attenuation(i)
                        + (attenuation(i)>=1-delta && attenuation(i)<1)*(1-0.5*delta-0.5*((1-attenuation(i))*(1-attenuation(i)))/delta)
                        + (attenuation(i)>=1)*(1-0.5*delta);
      }
    }

    //Take the log(-log()) of the signal attenuations
    void log_log(ColumnVector& signal) const{
      for (int i=1; i<=signal.Nrows(); i++)
        signal(i)=log(-log(signal(i)));
    }

    //Transform log intensities back to signal attenuations by taking the exp(-exp())
    void exp_exp(ColumnVector& signal) const{
      for (int i=1; i<=signal.Nrows(); i++)
        signal(i)=exp(-exp(signal(i)));
    }

    //Performs Linear Regression and estimates SH coefficients
    void compute_ODF_SH_coeff(){
      m_coeff_signal = m_SHT*m_current_data;
      m_coeff=m_scaling_factors*m_coeff_signal;
      if (m_modelnum==1){
        double norm_const=m_coeff(1)*2*sqrt(M_PI);   //Normalize coefficients
        for (int m=1; m<=Num_of_coeff; m++)
          m_coeff(m)=m_coeff(m)/norm_const;
      }
      else if (m_modelnum==2)
        m_coeff(1)=1/(2*sqrt(M_PI));
    }

    void bootstrapping() {
      if (m_modelnum==1)
        bootstrapping_modelnum1();
      else if (m_modelnum==2)
        bootstrapping_modelnum2();
    }

    //Perform wild residual bootstrap, according to (Whitcher et al, 2008), for Tuch's ODFs
    void bootstrapping_modelnum1(){
      ColumnVector predicted;
      regularize_intensities(m_current_data,0);       //Regularize attenuations
      compute_ODF_SH_coeff();                         //Fit the model to the original data
      m_coeff_samples.Column(1)=m_coeff;              //Save the estimates for the SH coefficients (deterministic estimates)
      predicted=m_HAT*m_current_data;                 //Get the predicted data
      m_residuals=m_current_data-predicted;           //And then the residuals
      modify_residuals();                             //Modify the residuals
      m_current_residuals=m_residuals;
      for (int n=2; n<=n_samples; n++){
        bootstrap_residuals();                        //Get bootstrapped residuals
        m_current_data=predicted+m_current_residuals; //Get the new data state
        regularize_intensities(m_current_data,0);     //Regularize attenuations
        compute_ODF_SH_coeff();                       //Fit the model to the current data state
        m_coeff_samples.Column(n)=m_coeff;            //Save the estimates for the SH coefficients
      }
    }

    //Perform wild residual bootstrap for CSA-ODFs
    void bootstrapping_modelnum2(){
      ColumnVector predicted;
      regularize_intensities(m_current_data,m_delta); //Regularize attenuations
      log_log(m_current_data);
      compute_ODF_SH_coeff();                         //Fit the model to the original data
      m_coeff_samples.Column(1)=m_coeff;              //Save the estimates for the SH coefficients (deterministic estimates)
      predicted=m_HAT*m_current_data;                 //Get the predicted data
      m_residuals=m_current_data-predicted;           //And then the residuals
      modify_residuals();                             //Modify the residuals
      m_current_residuals=m_residuals;
      for (int n=2; n<=n_samples; n++){
        bootstrap_residuals();                        //Get bootstrapped residuals
        m_current_data=predicted+m_current_residuals; //Get the new data state
        exp_exp(m_current_data);
        regularize_intensities(m_current_data,m_delta); //Regularize attenuations
        log_log(m_current_data);
        compute_ODF_SH_coeff();                       //Fit the model to the current data state
        m_coeff_samples.Column(n)=m_coeff;            //Save the estimates for the SH coefficients
      }
    }

    void modify_residuals() {
      for (int m=1; m<=m_residuals.Nrows(); m++){
        m_residuals(m)/=sqrt(1-m_HAT(m,m));   //Correct for heteroscedasticity
      }
    }

    void bootstrap_residuals() {
      for (int m=1; m<=m_residuals.Nrows(); m++){
        if (unifrnd().AsScalar()<0.5)   //Change the sign with probability 0.5
          m_current_residuals(m)=-m_residuals(m);
        else
          m_current_residuals(m)=m_residuals(m);
      }
    }
  };
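  //Illustrative sketch (not part of the original qboot code): the wild residual bootstrap step used
  //by ODF_voxel above, written as a free helper so the resampling scheme is easy to see in isolation.
  //Given the model prediction and the (heteroscedasticity-corrected) residuals, each residual keeps
  //or flips its sign with probability 0.5 before being added back to the prediction. The function
  //name is hypothetical; it relies only on NEWMAT and MISCMATHS::unifrnd(), already included above.
  inline ColumnVector wild_bootstrap_resample_sketch(const ColumnVector& predicted, const ColumnVector& residuals){
    ColumnVector resampled=predicted;
    for (int m=1; m<=residuals.Nrows(); m++){
      float sign = (unifrnd().AsScalar()<0.5) ? -1.0f : 1.0f;  //flip the sign of each residual with probability 0.5
      resampled(m) += sign*residuals(m);
    }
    return resampled;
  }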
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  //Class to estimate the ODF SH coefficients in a single voxel using the biexponential model and multi-shell data
  class ODF_voxel_biexp{
    Matrix m_coeff_samples;              //Stores the ODF SH coefficients (N_coeff x N_samples)
    ColumnVector m_coeff;                //Vector that holds the current ODF SH coefficients
    ColumnVector m_coeff_signal;         //Vector that holds the signal SH coefficients
    ColumnVector m_current_data;         //Vectors used during bootstrapping to keep the current data state
    ColumnVector m_current_residuals;    //and the current residuals
    ColumnVector m_residuals;            //Vector that holds the original residuals that will be bootstrapped

    const Matrix& m_data;                //NxM Matrix with signal attenuations. Each of the N rows corresponds to measurements along the same direction at M(=3) different b-values
    const Matrix& m_SHT;                 //Pseudoinverse of the spherical harmonic transform matrix, used to get the signal SH coefficients
    const Matrix& m_HAT;                 //Hat matrix, used during bootstrap
    const DiagonalMatrix& m_scaling_factors;  //Holds the scaling factors to obtain the ODF SH coefficients from the signal SH ones
    const int& m_SH_order;               //maximum SH order (must be even)
    const int& Num_of_coeff;             //Number of coefficients that will be estimated
    const int& n_samples;                //Number of samples per coefficient
    const float& m_delta;                //Regularization parameter for signal intensities

  public:
    //constructor
    ODF_voxel_biexp(const Matrix& data, const Matrix& SHT, const Matrix& HAT, const DiagonalMatrix& scaling_factors,
                    const int& SH_order, const int& Num_coeff, const int& nsamples, const float& delta):
      m_data(data), m_SHT(SHT), m_HAT(HAT), m_scaling_factors(scaling_factors),
      m_SH_order(SH_order), Num_of_coeff(Num_coeff), n_samples(nsamples), m_delta(delta) {
      m_current_data.ReSize(data.Nrows());
      m_current_data=0;
      m_coeff_samples.ReSize(Num_coeff,nsamples);
      m_coeff_samples=0;
    }

    //destructor
    ~ODF_voxel_biexp(){}

    Matrix& get_SHcoeff_ref() { return m_coeff_samples; }
    Matrix get_SHcoeff() { return m_coeff_samples; }

    void compute_signal_SH_coeff(){
      m_coeff_signal=m_SHT*m_current_data;
    }

    //Clip attenuation values. If att<0 => att=0, if att>1 => att=1
    void clip_intensities (ColumnVector& attenuation) const{
      for (int i=1; i<=attenuation.Nrows(); i++)
        attenuation(i)=(attenuation(i)>=0 && attenuation(i)<=1)*attenuation(i)+(attenuation(i)>1);
    }

    //Regularizes signal attenuations (provided by Iman Aganj)
    void proj1(Matrix& attenuation, const float& delta) const{
      if (delta==0){   //Clip attenuation values. If att<0 => att=0, if att>1 => att=1
        for (int i=1; i<=attenuation.Nrows(); i++)
          for (int j=1; j<=attenuation.Ncols(); j++)
            attenuation(i,j)=(attenuation(i,j)>=0 && attenuation(i,j)<=1)*attenuation(i,j)+(attenuation(i,j)>1);
      }
      else{            //Use function from Aganj et al, MRM, 2010
        for (int i=1; i<=attenuation.Nrows(); i++)
          for (int j=1; j<=attenuation.Ncols(); j++)
            attenuation(i,j)=(attenuation(i,j)<0)*(0.5*delta)
                            + (attenuation(i,j)>=0 && attenuation(i,j)<delta)*(0.5*delta+0.5*attenuation(i,j)*attenuation(i,j)/delta)
                            + (attenuation(i,j)>=delta && attenuation(i,j)<1-delta)*attenuation(i,j)
                            + (attenuation(i,j)>=1-delta && attenuation(i,j)<1)*(1-0.5*delta-0.5*((1-attenuation(i,j))*(1-attenuation(i,j)))/delta)
                            + (attenuation(i,j)>=1)*(1-0.5*delta);
      }
    }

    //Implements the first part of the projection onto the subspace defined by
    //inequalities [31] (Aganj, 2010). Taking the logarithm of the first three inequalities
    //converts the quadratic inequalities to linear ones.
    void proj2(Matrix& attenuation, const float& delta) const{
      float sF=sqrt(5);
      ColumnVector s0, a0, b0, ta, tb, e, m, a, b;
      Matrix T0, E, C(attenuation.Nrows(),7), A(attenuation.Nrows(),7), B(attenuation.Nrows(),7);
      T0=-(MISCMATHS::log(attenuation));
      s0=MISCMATHS::sum(T0,2);
      a0=MISCMATHS::SD(T0.Column(1),s0);
      b0=MISCMATHS::SD(T0.Column(2),s0);
      ta=3*a0; tb=3*b0;
      e=tb-2*ta; m=2*tb+ta;
      for (int j=1; j<=attenuation.Nrows(); j++){
        C(j,1)=(tb(j)<1+3*delta) && (0.5+1.5*(sF+1)*delta=1-3*(sF+2)*delta);
        C(j,3)=(m(j)>3-3*sF*delta) && (-1+3*(2*sF+5)*delta=3-3*sF*delta) && (e(j)>=-3*sF*delta);
        C(j,5)=(2.5+1.5*(5+sF)*delta-3*sF*delta);
        C(j,6)=(ta(j)<=0.5+1.5*(sF+1)*delta) && (m(j)<=2.5+1.5*(5+sF)*delta);
        C(j,7)=!(C(j,1) || C(j,2) || C(j,3) || C(j,4) || C(j,5) || C(j,6));
        A(j,1)=C(j,1)*a0(j);
        A(j,2)=C(j,2)*(1.0/3.0-(sF+2)*delta);
        A(j,3)=C(j,3)*(0.2+0.8*a0(j)-0.4*b0(j)-delta/sF);
        A(j,4)=C(j,4)*(0.2+delta/sF);
        A(j,5)=C(j,5)*(0.2*a0(j)+0.4*b0(j)+2*delta/sF);
        A(j,6)=C(j,6)*(1.0/6.0+0.5*(sF+1)*delta);
        A(j,7)=C(j,7)*a0(j);
        B(j,1)=C(j,1)*(1.0/3.0+delta);
        B(j,2)=C(j,2)*(1.0/3.0+delta);
        B(j,3)=C(j,3)*(0.4-0.4*a0(j)+0.2*b0(j)-2*delta/sF);
        B(j,4)=C(j,4)*(0.4-3*delta/sF);
        B(j,5)=C(j,5)*(0.4*a0(j)+0.8*b0(j)-delta/sF);
        B(j,6)=C(j,6)*(1.0/3.0+delta);
        B(j,7)=C(j,7)*b0(j);
      }
      a=MISCMATHS::sum(A,2);
      b=MISCMATHS::sum(B,2);
      E=(SP(a,s0) | SP(b,s0) | SP(1-a-b,s0));
      attenuation=exp(-E);
    }

    //Implements the second part of the projection onto the subspace defined by
    //inequalities [31] (Aganj, 2010).
    void proj3(ColumnVector& A, ColumnVector& a, ColumnVector& b, const float& delta){
      float del, s6=sqrt(6), s15=s6/2.0;
      Matrix AM, aM, bM;
      ColumnVector d(A.Nrows()), I(A.Nrows());
      d=delta;
      AM = (A | A | A | d | ((A+a-b-s6*d)/3.0) | d | d | d | A | (0.2*(2*a+A-2*(s6+1)*d)) | (0.2*(-2*b+A+2-2*(s6+1)*d)) | d | d | d | (0.5-(1+s15)*d) );
      aM = (a | a | (1-d) | a | ((2*A+5*a+b+s6*d)/6.0) | a | (1-d) | (0.5*(a+b)+(1+s15)*d) | (1-d) | (0.2*(4*a+2*A+(s6+1)*d)) | (1-d) | ((s6+3)*d) | (1-d) | (1-d) | (1-d) );
      bM = (b | d | b | b | ((-2*A+a+5*b-s6*d)/6.0) | d | b | (0.5*(a+b)-(1+s15)*d) | d | d | (0.2*(4*b-2*A+1-(s6+1)*d)) | d | d | (1-(s6+3)*d) | d);
      Matrix R2(AM.Nrows(),AM.Ncols());
      del = delta*.99;
      for (int i=1; i<=AM.Nrows(); i++){
        for (int j=1; j<=AM.Ncols(); j++){
          if (deldel && aM(i,j)<1-del)
            R2(i,j) = (AM(i,j)-A(i))*(AM(i,j)-A(i)) + (aM(i,j)-a(i))*(aM(i,j)-a(i)) + (bM(i,j)-b(i))*(bM(i,j)-b(i));
          else
            R2(i,j) = 1e20;
        }
        int ind;
        R2.Row(i).Minimum1(ind);
        I(i)=ind;
      }
      for (int i=1; i<=A.Nrows(); i++){
        A(i) = AM(i,(int)I(i));
        a(i) = aM(i,(int)I(i));
        b(i) = bM(i,(int)I(i));
      }
    }

    void filter_and_param_estimation(){
      float f;
      Matrix filt_data;
      ColumnVector P2(m_data.Nrows()), A(m_data.Nrows()), B2(m_data.Nrows()), P(m_data.Nrows()), B(m_data.Nrows()), alpha, beta;
      filt_data=m_data;   //m_data assumed to be a Nx3 matrix, with N directions measured at three b-values
      proj1(filt_data,m_delta);
      proj2(filt_data,m_delta);
      for (int i=1; i<=m_data.Nrows(); i++){   //for each direction
        //Estimate alpha, beta, f
        P2(i)=filt_data(i,2)-filt_data(i,1)*filt_data(i,1);
        A(i)=(filt_data(i,3)-filt_data(i,1)*filt_data(i,2))/(2*P2(i));
        B(i)=A(i)*A(i)-(filt_data(i,1)*filt_data(i,3)-filt_data(i,2)*filt_data(i,2))/P2(i);
        if (B(i)<0) B(i)=0;
        if (P2(i)<0) P2(i)=0;
      }
      P2=sqrt(P2);
      B=sqrt(B);
      alpha=A+B;
      beta=A-B;
      proj3(P2, alpha, beta, m_delta);   //Change P2, alpha, beta
      for (int i=1; i<=m_data.Nrows(); i++){   //for each direction
        //The fraction is computed in a slightly different way from Eq. [30]. Since the next
        //line is correct with both plus and minus signs, we need to test both
        //answers to find out which one to use.
        float ER1, ER2, temp=(2*P2(i)/(alpha(i)-beta(i)));
        f=0.5+0.5*sqrt(1-temp*temp);
        ER1= fabs(f*(alpha(i)-beta(i))+beta(i)-filt_data(i,1))
            +fabs(f*(alpha(i)*alpha(i)-beta(i)*beta(i))+beta(i)*beta(i)-filt_data(i,2))
            +fabs(f*(alpha(i)*alpha(i)*alpha(i)-beta(i)*beta(i)*beta(i))+beta(i)*beta(i)*beta(i)-filt_data(i,3));
        ER2= fabs((1-f)*(alpha(i)-beta(i))+beta(i)-filt_data(i,1))
            +fabs((1-f)*(alpha(i)*alpha(i)-beta(i)*beta(i))+beta(i)*beta(i)-filt_data(i,2))
            +fabs((1-f)*(alpha(i)*alpha(i)*alpha(i)-beta(i)*beta(i)*beta(i))+beta(i)*beta(i)*beta(i)-filt_data(i,3));
        f = f*(ER1<ER2)+(1-f)*(ER1>=ER2);   //Keep whichever of f and 1-f gives the smaller reconstruction error
        //Then obtain the expression for the bi-exponential model of the signal attenuation
        m_current_data(i)=f*log(-log(alpha(i)))+(1-f)*log(-log(beta(i)));
      }
    }

    //Performs Linear Regression and estimates SH coefficients
    void compute_ODF_SH_coeff(){
      m_coeff_signal = m_SHT*m_current_data;
      m_coeff=m_scaling_factors*m_coeff_signal;
      m_coeff(1)=1/(2*sqrt(M_PI));
    }

    //Perform wild residual bootstrap, according to (Whitcher et al, 2008), for multi-shell CSA ODFs.
    //The uncertainty is not estimated given the data directly, but given a biexponential model of the data.
    //Bootstrapping is performed on Eq. [24] of (Aganj, MRM, 2010), i.e. residuals are added to the biexponential
    //model approximation, not the data!
    void bootstrapping(){
      ColumnVector predicted;
      filter_and_param_estimation();        //Filter attenuations by doing inequality projection and return
                                            //the biexponential model approximation to m_current_data
      compute_ODF_SH_coeff();               //Fit the model to the original approximation
      m_coeff_samples.Column(1)=m_coeff;    //Save the estimates for the SH coefficients (deterministic estimates)
      predicted=m_HAT*m_current_data;       //Get the predicted "data"
      m_residuals=m_current_data-predicted; //And then the residuals
      modify_residuals();                   //Modify the residuals
      m_current_residuals=m_residuals;
      for (int n=2; n<=n_samples; n++){
        bootstrap_residuals();                        //Get bootstrapped residuals
        m_current_data=predicted+m_current_residuals; //Get the new "data" state
        compute_ODF_SH_coeff();                       //Fit the model to the current "data" state
        m_coeff_samples.Column(n)=m_coeff;            //Save the estimates for the SH coefficients
      }
    }

    void modify_residuals() {
      for (int m=1; m<=m_residuals.Nrows(); m++){
        m_residuals(m)/=sqrt(1-m_HAT(m,m));   //Correct for heteroscedasticity
      }
    }

    void bootstrap_residuals() {
      for (int m=1; m<=m_residuals.Nrows(); m++){
        if (unifrnd().AsScalar()<0.5)   //Change the sign with probability 0.5
          m_current_residuals(m)=-m_residuals(m);
        else
          m_current_residuals(m)=m_residuals(m);
      }
    }
  };
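  //Illustrative sketch (not part of the original code): the quantity that ODF_voxel_biexp fits with
  //spherical harmonics. For one gradient direction, once the volume fraction f and the two
  //mono-exponential attenuations alpha and beta (both assumed to lie strictly between 0 and 1) have
  //been estimated from the three shells, the bi-exponential model is mapped to a single log(-log)
  //value, as in the assignment to m_current_data(i) above (cf. Aganj et al, MRM 2010, Eq. [24]).
  //The function name is hypothetical.
  inline double biexp_loglog_sketch(double f, double alpha, double beta){
    return f*log(-log(alpha)) + (1.0-f)*log(-log(beta));
  }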
  //Class that computes the generalised FA value for the mean ODF obtained across N_samples of ODF coefficients.
  //A sphere tessellation is provided to indicate points on the sphere and ODF values are computed at these points.
  //GFA is then computed numerically as in (Tuch, 2004).
  class ODF_GFA{
    const Matrix& SHcoeff_samples;  //SH coefficients that describe the ODF (Num_coeff x N_samples)
    const int& lmax;                //maximum SH order employed
    const int Num_of_coeff;         //number of SH coefficients
    const int& nsamples;            //number of bootstrap samples
    const Matrix& Eval_SH;          //(i,j) element: for each tessellation point i, contains the jth spherical harmonic evaluated at i (Matrix common to all voxels)
    float gfa;                      //holds the generalised FA value, corresponding to the mean ODF shape

  public:
    //Constructor
    ODF_GFA(const Matrix& coeff, const int& SH_order, const int& nsamp, const Matrix& EvalSH):
      SHcoeff_samples(coeff), lmax(SH_order), Num_of_coeff((SH_order+1)*(SH_order+2)/2), nsamples(nsamp), Eval_SH(EvalSH) {
      gfa=0;
    }

    //Destructor
    ~ODF_GFA(){}

    float get_gfa(){ return gfa; }

    //Compute numerically the generalised FA (as in Tuch, 2004), using the coefficients of the mean ODF across samples
    void compute_gfa(){
      int num_points=Eval_SH.Nrows();
      ColumnVector func_vals(num_points);
      ColumnVector m_avg_coeff;
      m_avg_coeff.ReSize(Num_of_coeff);
      //compute the mean SH coefficients across samples
      for (int i=1; i<=Num_of_coeff; i++)
        m_avg_coeff(i)=SHcoeff_samples.Row(i).Sum()/SHcoeff_samples.Ncols();
      func_vals=0;
      for (int i=1; i<=num_points; i++)      //compute the mean ODF value at each tessellation point
        for (int j=1; j<=Num_of_coeff; j++)
          func_vals(i)+=m_avg_coeff(j)*Eval_SH(i,j);
      float meanf=func_vals.Sum()/num_points;
      float sum1=0, sum2=0;
      for (int i=1; i<=num_points; i++){     //compute the gfa
        sum1+=(func_vals(i)-meanf)*(func_vals(i)-meanf);
        sum2+=func_vals(i)*func_vals(i);
      }
      gfa=sqrt(num_points*sum1/((num_points-1)*sum2));
    }
  };
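  //Illustrative sketch (not part of the original code): the generalised FA computed by
  //ODF_GFA::compute_gfa() above, i.e. the ratio of the standard deviation to the root-mean-square of
  //the ODF values sampled on the tessellation, GFA = sqrt( N*sum_i(psi_i-mean)^2 / ((N-1)*sum_i psi_i^2) )
  //(Tuch, 2004). The function name is hypothetical; it takes the ODF evaluated at N tessellation points.
  inline float gfa_sketch(const ColumnVector& odf_vals){
    int N=odf_vals.Nrows();
    float meanf=odf_vals.Sum()/N, sum1=0, sum2=0;
    for (int i=1; i<=N; i++){
      sum1+=(odf_vals(i)-meanf)*(odf_vals(i)-meanf);
      sum2+=odf_vals(i)*odf_vals(i);
    }
    return sqrt(N*sum1/((N-1)*sum2));
  }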
  /////////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////////
  //Class to record and save samples
  class Samples{
    //for storing samples
    vector<Matrix> coeff_samples;      //The vector will be Num_of_coeff x nsamples x nvoxels
    vector<Matrix> th_samples;         //The vectors will be max_num_of_peaks x nsamples x nvoxels
    vector<Matrix> ph_samples;
    vector<Matrix> f_samples;
    //for storing means
    vector<RowVector> mean_fsamples;   //max_num_of_peaks x nvoxels
    vector<Matrix> dyadic_vectors;     //max_num_of_peaks x 3 x nvoxels
    Matrix mean_SHcoeff;               //Num_of_coeff x nvoxels, saves for each voxel the mean ODF coefficient across all ODF samples
    RowVector gfa;                     //If requested, a 1 x nvoxels vector that contains the GFA value for each voxel

    const volume<float>& m_mask;       //The brain mask that will be used to create Output images
    const int& n_samples;              //Number of samples
    const int& n_coeff;                //Number of coefficients to store
    const int& n_voxels;               //Number of valid non-zero voxels
    const int& max_num_peaks;          //Maximum number of peaks allowed in each voxel
    const bool& m_meancoeff;           //Flag to indicate whether the mean ODF coefficients will be saved
    const bool& m_gfa;                 //Flag to indicate whether the GFA for the mean ODF will be saved

  public:
    //Constructor (for saving coefficient samples)
    Samples(const int& ncoeff, const volume<float>& mask, const int& nvoxels, const int& nsamples, const bool& meancoeff=false, const bool& gfa_flag=false):
      m_mask(mask), n_samples(nsamples), n_coeff(ncoeff), n_voxels(nvoxels), max_num_peaks(0), m_meancoeff(meancoeff), m_gfa(gfa_flag){
      if (m_meancoeff){
        mean_SHcoeff.ReSize(ncoeff,nvoxels);
        mean_SHcoeff=0;
      }
      else{   //If m_meancoeff is set, do not save all coefficient samples
        Matrix temp;
        temp.ReSize(nsamples,nvoxels);   //Initialize structure that will hold the samples (n_coeff x n_samples x n_voxels)
        temp=0;
        for (int f=0; f& mask, const int& nvoxels, const int& nsamples, const int& maxNumPeaks, const int& ncoeff, const bool& meancoeff=false, const bool& gfa_flag=false):
      m_mask(mask), n_samples(nsamples), n_coeff(ncoeff), n_voxels(nvoxels), max_num_peaks(maxNumPeaks), m_meancoeff(meancoeff), m_gfa(gfa_flag){
      Matrix temp;
      RowVector temp2;
      temp.ReSize(nsamples,nvoxels);   //Initialize structure that will hold the samples (n_samples x n_voxels)
      temp=0;
      temp2.ReSize(nvoxels);
      temp2=0;
      for (int f=0; fdyad_L(2)){
        if(dyad_L(1)>dyad_L(3)) maxeig=1;
        else maxeig=3;
      }
      else{
        if(dyad_L(2)>dyad_L(3)) maxeig=2;
        else maxeig=3;
      }
      e1(1)=dyad_V(1,maxeig); e1(2)=dyad_V(2,maxeig); e1(3)=dyad_V(3,maxeig);
    }

    //Record the coefficient samples obtained for voxel "vox"
    //The input Matrix is Num_coeff x n_samples
    void record(const Matrix& Coeff, const int vox){
      if (m_meancoeff){
        for (int p=1; p<=n_coeff; p++)   //Get the average SH coefficients across samples
          mean_SHcoeff(p,vox)=Coeff.Row(p).Sum()/Coeff.Ncols();
      }
      else{
        for (int p=0; pmax_ref_count){   //Keep the sample index with the maximum non-zero peaks
          max_ref_count=count;
          ref_samp=n;
        }
      }
      //Swap the reference sample with the first
      if (ref_samp!=1){
        my_temp=theta.Column(1); theta.Column(1)=theta.Column(ref_samp); theta.Column(ref_samp)=my_temp;
        my_temp=phi.Column(1);   phi.Column(1)=phi.Column(ref_samp);     phi.Column(ref_samp)=my_temp;
        my_temp=f.Column(1);     f.Column(1)=f.Column(ref_samp);         f.Column(ref_samp)=my_temp;
      }
      vecx=0; vecy=0; vecz=0;
      //Convert (theta,phi) to vectors with cartesian coordinates
      for (int n=1; n<=nsamp; n++)
        for (int p=1; p<=max_num_peaks; p++)
          if (f(p,n)!=0){
            vecx(p,n)=sin(theta(p,n))*cos(phi(p,n));
            vecy(p,n)=sin(theta(p,n))*sin(phi(p,n));
            vecz(p,n)=cos(theta(p,n));
          }
      //Sort the remaining samples using the angular agreement with the reference peaks
      Matrix dots(max_num_peaks,max_num_peaks);
      Matrix valid(max_num_peaks,max_num_peaks);
      theta_new=0; phi_new=0; f_new=0;
      theta_new.Column(1)=theta.Column(1); phi_new.Column(1)=phi.Column(1); f_new.Column(1)=f.Column(1);   //save the reference peaks
      for (int n=2; n<=nsamp; n++){   //For each sample
        dots=0; valid=1;
        for (int q=1; q<=max_num_peaks; q++){   //For each non-zero sample peak
          if (f(q,n)!=0)
            for (int p=1; p<=max_num_peaks; p++){   //Get the dot product with each reference peak and store it to a matrix
              if (f(p,1)!=0)
                dots(p,q)=fabs(vecx(p,1)*vecx(q,n)+vecy(p,1)*vecy(q,n)+vecz(p,1)*vecz(q,n));
              else
                valid.Row(p)=0;
            }
          else
            valid.Column(q)=0;   //Indicate which entries of the matrix dots are not valid, because peaks have zero value
        }
        for (int q=1; q<=max_num_peaks; q++){
          int sp,sq;
          dots.Maximum2(sp,sq);   //Return the location of the maximum dot product
          if (valid(sp,sq)==1){
            theta_new(sp,n)=theta(sq,n);   //Swap peak location
            phi_new(sp,n)=phi(sq,n);
            f_new(sp,n)=f(sq,n);
            dots.Column(sq)=0;   //Neglect the other dot products of this sample peak with the reference ones
            dots.Row(sp)=0;      //and this reference peak, as it has been paired
            valid.Column(sq)=0;
            valid.Row(sp)=0;
          }
        }
      }
      /*
      for (int n=2; n<=nsamp; n++)               //For each sample
        for (int p=1; p<=max_num_peaks; p++){    //For each reference peak
          maxdot=-1; pos=1;
          for (int q=1; q<=max_num_peaks; q++)   //Get the dot product with all non-zero sample peaks
            if (f(q,n)!=0){
              dot=fabs(vecx(p,1)*vecx(q,n)+vecy(p,1)*vecy(q,n)+vecz(p,1)*vecz(q,n));
              if (dot>=maxdot){                  //and pick the closest one
                maxdot=dot;
                pos=q;
              }
            }
          theta_new(p,n)=theta(pos,n);
          phi_new(p,n)=phi(pos,n);
          f_new(p,n)=f(pos,n);
          f(pos,n)=0;                            //Indicate that this sample peak has already been assigned
        }
      */
      theta=theta_new; phi=phi_new; f=f_new;
    }

    //Save all recorded coefficient samples in images
    void save_coeff(){
      volume4D<float> tmp;
      Log& logger=LogSingleton::getInstance();
      if (m_meancoeff){   //Save mean coefficients in a single 4D image
        tmp.setmatrix(mean_SHcoeff,m_mask);
        string oname="meanSHcoeff";
        save_volume4D(tmp,logger.appendDir(oname));
      }
      else{
        for(int m=0; m tmp;
      Matrix thsamples_tmp(max_num_peaks,n_samples);   //temporary structures used when rearranging peaks
      Matrix phsamples_tmp(max_num_peaks,n_samples);
      Matrix fsamples_tmp(max_num_peaks,n_samples);
      Matrix dyadic_vectors_tmp(max_num_peaks,3);
      RowVector mean_fsamples_tmp(max_num_peaks);
      Log& logger=LogSingleton::getInstance();
      //Sort the output according to the value of the mean ODF across the samples (mean_fsamples) (i.e. determine which population is 1,2,3...)
      for(int vox=1;vox<=n_voxels; vox++){   //for each voxel
        vector<pair<float,int> > sfs;
        pair<float,int> ftmp;
        for(int f=0;f m_mask;   //The brain mask that will be used to create Output images
    const int npeaks;            //Maximum number of ODF peaks detected
    const float peak_threshold;  //Minimum ODF value for a point to be considered a peak
    const int m_modelnum;        //1 for Descoteaux's ODFs, 2 for CSA-ODFs, 3 for multi-shell CSA ODFs
    const int lmax;              //maximum SH order (must be even)
    const int Num_of_coeff;      //Number of SH coefficients to be estimated
    const int nsamples;          //Number of bootstrap samples
    const float m_lambda;        //Laplace-Beltrami regularization parameter
    const float m_delta;         //Regularization parameter for signal attenuations utilized in CSA-ODF estimation
    const float m_alpha;         //Laplacian sharpening parameter for model=1
    const bool m_savecoeff;      //Flag to indicate whether the ODF SH coeff or the ODF peaks (default) will be saved
    const bool m_savemeancoeff;  //Flag to indicate whether the mean ODF SH coeff (along with the peaks) will be saved
    const bool m_gfa_flag;       //Flag to indicate whether the GFA will be computed and saved for each voxel

  public:
    //constructor
    ODF_Volume_Manager():
      opts(qbootOptions::getInstance()), npeaks(opts.npeaks.value()), peak_threshold(opts.peak_threshold.value()),
      m_modelnum(opts.modelnum.value()), lmax(opts.lmax.value()), Num_of_coeff((lmax+1)*(lmax+2)/2),
      nsamples(opts.nsamples.value()), m_lambda(opts.lambda.value()), m_delta(opts.delta.value()),
      m_alpha(opts.alpha.value()), m_savecoeff(opts.savecoeff.value()), m_savemeancoeff(opts.savemeancoeff.value()),
      m_gfa_flag(opts.gfa.value()) { }

    //destructor
    ~ODF_Volume_Manager(){}

    //Read bvals, bvecs, data and nodif_brain_mask
    void Read_data(){
      if (opts.verbose.value())
        cout<<"reading data, bvals and bvecs"<3) m_bvecs=m_bvecs.t();
      if(m_bvals.Nrows()>1) m_bvals=m_bvals.t();
      for(int i=1;i<=m_bvecs.Ncols();i++){   //Normalise each b-vector to unit length
        float tmpsum=sqrt(m_bvecs(1,i)*m_bvecs(1,i)+m_bvecs(2,i)*m_bvecs(2,i)+m_bvecs(3,i)*m_bvecs(3,i));
        if(tmpsum!=0){
          m_bvecs(1,i)=m_bvecs(1,i)/tmpsum;
          m_bvecs(2,i)=m_bvecs(2,i)/tmpsum;
          m_bvecs(3,i)=m_bvecs(3,i)/tmpsum;
        }
      }
      volume4D<float> data_vol;   //Read data and brain mask
      read_volume4D(data_vol,opts.datafile.value());
      read_volume(m_mask,opts.maskfile.value());
      m_data=data_vol.matrix(m_mask);
    }
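    //Illustrative sketch (not part of the original code): the column-wise normalisation applied to the
    //3xN bvecs matrix inside Read_data() above, pulled out as a helper for clarity. Each non-zero
    //gradient direction is rescaled to unit length; zero columns (b=0 entries) are left untouched.
    //The function name is hypothetical.
    static void normalise_bvecs_sketch(Matrix& bvecs){
      for (int i=1; i<=bvecs.Ncols(); i++){
        float norm=sqrt(bvecs(1,i)*bvecs(1,i)+bvecs(2,i)*bvecs(2,i)+bvecs(3,i)*bvecs(3,i));
        if (norm!=0){
          bvecs(1,i)/=norm; bvecs(2,i)/=norm; bvecs(3,i)/=norm;
        }
      }
    }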
    //Convert signals to signal attenuations. Also reshape the data when multiple shells are available.
    void Prepare_data_for_ODFs(){
      int count;
      float bval3=0, bval1=0;
      bool multishell=false;
      for (int n=1; n<=m_bvals.Ncols(); n++)   //Check whether more than one non-zero b-value exists
        if (m_bvals(1,n)>b0max)
          bval3=m_bvals(1,n);   //The last b-value will be stored
      for (int n=1; n<=m_bvals.Ncols(); n++)
        if (m_bvals(1,n)>b0max && fabs(m_bvals(1,n)-bval3)>2*shellrange){   //Then we have a b value from a different shell
          bval1=m_bvals(1,n);
          multishell=true;
          break;
        }
      if (!multishell){   //Single-shell analysis
        count=Sig2SigAttenuation(m_data, m_bvals, m_bvecs);   //Convert signal to signal attenuation
        if (m_modelnum!=1 && m_modelnum!=2){
          cerr<<"Wrong modelnum chosen! Choose 1 or 2 for single-shell data!"<b0max && fabs(m_bvals(1,n)-bval1)>2*shellrange && fabs(m_bvals(1,n)-bval3)>2*shellrange){
            bval2=m_bvals(1,n);
            break;
          }
      //Find the mean b-value per shell
      float btemp=0; int bcnt=0;
      for (int n=1; n<=m_bvals.Ncols(); n++)
        if (m_bvals(1,n)>b0max && fabs(m_bvals(1,n)-bval1)<=2*shellrange){ btemp+=m_bvals(1,n); bcnt++; }
      bval1=btemp/bcnt;
      btemp=0; bcnt=0;
      for (int n=1; n<=m_bvals.Ncols(); n++)
        if (m_bvals(1,n)>b0max && fabs(m_bvals(1,n)-bval2)<=2*shellrange){ btemp+=m_bvals(1,n); bcnt++; }
      bval2=btemp/bcnt;
      btemp=0; bcnt=0;
      for (int n=1; n<=m_bvals.Ncols(); n++)
        if (m_bvals(1,n)>b0max && fabs(m_bvals(1,n)-bval3)<=2*shellrange){ btemp+=m_bvals(1,n); bcnt++; }
      bval3=btemp/bcnt;
      cout<<"Mean b-values for detected shells are "< shell_num(3);   //Keep the Number of samples in each shell (including the b=0)
      if (opts.qshellsfile.value().empty()){   //If no qshells file provided, assume the same number of directions in each shell
        shell_num[0]=(int) (m_bvals.Ncols()/3.0);
        shell_num[1]=(int) (m_bvals.Ncols()/3.0);
        shell_num[2]=(int) (m_bvals.Ncols()/3.0);
        if (opts.verbose.value())
          cout<<"No qshells file provided. Assuming same number of directions in each shell!"< data_shell, bvecs_shell, bvals_shell;
      data_shell.push_back(m_data.SubMatrix(1,shell_num[0],1,m_data.Ncols()));
      bvecs_shell.push_back(m_bvecs.SubMatrix(1,3,1,shell_num[0]));
      bvals_shell.push_back(m_bvals.SubMatrix(1,1,1,shell_num[0]));
      data_shell.push_back(m_data.SubMatrix(shell_num[0]+1,shell_num[0]+shell_num[1],1,m_data.Ncols()));
      bvecs_shell.push_back(m_bvecs.SubMatrix(1,3,shell_num[0]+1,shell_num[0]+shell_num[1]));
      bvals_shell.push_back(m_bvals.SubMatrix(1,1,shell_num[0]+1,shell_num[0]+shell_num[1]));
      data_shell.push_back(m_data.SubMatrix(shell_num[1]+1,shell_num[1]+shell_num[2],1,m_data.Ncols()));
      bvecs_shell.push_back(m_bvecs.SubMatrix(1,3,shell_num[1]+1,shell_num[1]+shell_num[2]));
      bvals_shell.push_back(m_bvals.SubMatrix(1,1,shell_num[1]+1,shell_num[1]+shell_num[2]));
      for (int n=0; n<3; n++)
        Sig2SigAttenuation(data_shell[n],bvals_shell[n],bvecs_shell[n]);
      //Check whether interpolation is needed (i.e. whether the number of directions or the directions themselves differ between shells; then we need to interpolate)
      bool interp_flag=false;
      int max, max_shell=0, min, min_shell=0;
      if (bvecs_shell[0].Ncols()!=bvecs_shell[1].Ncols() || bvecs_shell[0].Ncols()!=bvecs_shell[2].Ncols() || bvecs_shell[1].Ncols()!=bvecs_shell[2].Ncols())
        interp_flag=true;
      else{   //if same number of directions, check whether the directions between shells are different (consider them different only if they differ by more than 1 degree)
        for (int i=1; i<=bvecs_shell[0].Ncols(); i++)
          if (fabs(dot(bvecs_shell[0].Column(i),bvecs_shell[1].Column(i)))<=0.9998) { interp_flag=true; break; }
        for (int i=1; i<=bvecs_shell[0].Ncols(); i++)
          if (fabs(dot(bvecs_shell[0].Column(i),bvecs_shell[2].Column(i)))<=0.9998) { interp_flag=true; break; }
        for (int i=1; i<=bvecs_shell[1].Ncols(); i++)
          if (fabs(dot(bvecs_shell[1].Column(i),bvecs_shell[2].Column(i)))<=0.9998) { interp_flag=true; break; }
      }
      if (interp_flag){
        //Find which shell has the fewest directions
        if (bvecs_shell[0].Ncols()<=bvecs_shell[1].Ncols()){ min=bvecs_shell[0].Ncols(); min_shell=0; }
        else{ min=bvecs_shell[1].Ncols(); min_shell=1; }
        if (min>bvecs_shell[2].Ncols()) min_shell=2;
        //Find which shell has the most directions
        if (bvecs_shell[0].Ncols()>=bvecs_shell[1].Ncols()){ max=bvecs_shell[0].Ncols(); max_shell=0; }
        else{ max=bvecs_shell[1].Ncols(); max_shell=1; }
        if (maxbvecs_shell[min_shell].Ncols()/3.0 && lmax_interp>lmax) lmax_interp-=2;
        if (opts.verbose.value())
          cout<<"Interpolating data in each shell using spherical harmonics of order "<b0max) count++;   //Count how many DWs have been acquired (excluding b=0's)
      if (count==Nd){
        cerr<<"At least one b=0 image is required! Exiting now!"<b0max){
          bvals_new(1,m)=bvals(1,n);   //Remove all b=0 entries from bvals and bvecs
          bvecs_new.Column(m)=bvecs.Column(n);
          for(int vox=1;vox<=Mv;vox++)
            data_new(m,vox)=data(n,vox);
          m++;
        }
        else{   //If it is a b=0
          for(int vox=1;vox<=Mv;vox++)
            S0(vox)+=data(n,vox);
        }
      }
      S0=S0/(Nd-count);   //Get the average S0 intensity in each voxel
      bvals<