shogun: fix compile errors with Eigen 3.4

Fixes #142811.

The fix adds a local patch that rewrites the single-column `block(bl*n, 0, n, 1)` accesses in `src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp` as the equivalent one-dimensional `segment(bl*n, n)` accesses, so the package compiles against Eigen 3.4.
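For reviewers: the rewrite is purely notational, since for a column vector `segment(i, n)` addresses exactly the same coefficients as the single-column `block(i, 0, n, 1)`. Below is a minimal standalone sketch, not Shogun code; the sizes `C` and `n` are illustrative placeholders that only mirror the class count and per-class block length used in the patch.

```cpp
// Hedged illustration: on a vector, segment(i, n) is equivalent to block(i, 0, n, 1).
#include <cassert>
#include <Eigen/Dense>

int main()
{
    const int C = 3, n = 4;                              // illustrative sizes only
    Eigen::VectorXd v = Eigen::VectorXd::Random(C * n);  // stacked per-class vector

    for (int bl = 0; bl < C; ++bl)
    {
        Eigen::VectorXd a = v.segment(bl * n, n);        // post-patch form
        Eigen::VectorXd b = v.block(bl * n, 0, n, 1);    // pre-patch form
        assert(a.isApprox(b));                           // same coefficients either way
    }
    return 0;
}
```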

pkgs/applications/science/machine-learning/shogun/default.nix (+3)
@@ -95,6 +95,9 @@
       sha256 = "sha256-AgJJKQA8vc5oKaTQDqMdwBR4hT4sn9+uW0jLe7GteJw=";
     })
 
+    # Fix compile errors with Eigen 3.4
+    ./eigen-3.4.patch
+
   ] ++ lib.optional (!withSvmLight) ./svmlight-scrubber.patch;
 
   nativeBuildInputs = [ cmake swig ctags ]
pkgs/applications/science/machine-learning/shogun/eigen-3.4.patch (new file, +74)
From: Sebastián Mancilla <smancill@smancill.dev>
Subject: [PATCH] Fix compile errors when using Eigen 3.4

---
 .../machine/gp/MultiLaplaceInferenceMethod.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp b/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp
index 2e27678d2..60050afea 100644
--- a/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp
+++ b/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp
@@ -84,9 +84,9 @@ class CMultiPsiLine : public func_base
 		float64_t result=0;
 		for(index_t bl=0; bl<C; bl++)
 		{
-			eigen_f.block(bl*n,0,n,1)=K*alpha->block(bl*n,0,n,1)*CMath::exp(log_scale*2.0);
-			result+=alpha->block(bl*n,0,n,1).dot(eigen_f.block(bl*n,0,n,1))/2.0;
-			eigen_f.block(bl*n,0,n,1)+=eigen_m;
+			eigen_f.segment(bl*n,n)=K*alpha->segment(bl*n,n)*CMath::exp(log_scale*2.0);
+			result+=alpha->segment(bl*n,n).dot(eigen_f.segment(bl*n,n))/2.0;
+			eigen_f.segment(bl*n,n)+=eigen_m;
 		}
 
 		// get first and second derivatives of log likelihood
@@ -272,7 +272,7 @@ void CMultiLaplaceInferenceMethod::update_alpha()
 	{
 		Map<VectorXd> alpha(m_alpha.vector, m_alpha.vlen);
 		for(index_t bl=0; bl<C; bl++)
-			eigen_mu.block(bl*n,0,n,1)=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*alpha.block(bl*n,0,n,1);
+			eigen_mu.segment(bl*n,n)=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*alpha.segment(bl*n,n);
 
 		//alpha'*(f-m)/2.0
 		Psi_New=alpha.dot(eigen_mu)/2.0;
@@ -316,7 +316,7 @@ void CMultiLaplaceInferenceMethod::update_alpha()
 
 	for(index_t bl=0; bl<C; bl++)
 	{
-		VectorXd eigen_sD=eigen_dpi.block(bl*n,0,n,1).cwiseSqrt();
+		VectorXd eigen_sD=eigen_dpi.segment(bl*n,n).cwiseSqrt();
 		LLT<MatrixXd> chol_tmp((eigen_sD*eigen_sD.transpose()).cwiseProduct(eigen_ktrtr*CMath::exp(m_log_scale*2.0))+
 			MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));
 		MatrixXd eigen_L_tmp=chol_tmp.matrixU();
@@ -341,11 +341,11 @@ void CMultiLaplaceInferenceMethod::update_alpha()
 	VectorXd tmp2=m_tmp.array().rowwise().sum();
 
 	for(index_t bl=0; bl<C; bl++)
-		eigen_b.block(bl*n,0,n,1)+=eigen_dpi.block(bl*n,0,n,1).cwiseProduct(eigen_mu.block(bl*n,0,n,1)-eigen_mean_bl-tmp2);
+		eigen_b.segment(bl*n,n)+=eigen_dpi.segment(bl*n,n).cwiseProduct(eigen_mu.segment(bl*n,n)-eigen_mean_bl-tmp2);
 
 	Map<VectorXd> &eigen_c=eigen_W;
 	for(index_t bl=0; bl<C; bl++)
-		eigen_c.block(bl*n,0,n,1)=eigen_E.block(0,bl*n,n,n)*(eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_b.block(bl*n,0,n,1));
+		eigen_c.segment(bl*n,n)=eigen_E.block(0,bl*n,n,n)*(eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_b.segment(bl*n,n));
 
 	Map<MatrixXd> c_tmp(eigen_c.data(),n,C);
 
@@ -409,7 +409,7 @@ float64_t CMultiLaplaceInferenceMethod::get_derivative_helper(SGMatrix<float64_t
 	{
 		result+=((eigen_E.block(0,bl*n,n,n)-eigen_U.block(0,bl*n,n,n).transpose()*eigen_U.block(0,bl*n,n,n)).array()
 			*eigen_dK.array()).sum();
-		result-=(eigen_dK*eigen_alpha.block(bl*n,0,n,1)).dot(eigen_alpha.block(bl*n,0,n,1));
+		result-=(eigen_dK*eigen_alpha.segment(bl*n,n)).dot(eigen_alpha.segment(bl*n,n));
 	}
 
 	return result/2.0;
@@ -489,7 +489,7 @@ SGVector<float64_t> CMultiLaplaceInferenceMethod::get_derivative_wrt_mean(
 		result[i]=0;
 		//currently only compute the explicit term
 		for(index_t bl=0; bl<C; bl++)
-			result[i]-=eigen_alpha.block(bl*n,0,n,1).dot(eigen_dmu);
+			result[i]-=eigen_alpha.segment(bl*n,n).dot(eigen_dmu);
 	}
 
 	return result;
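If it helps review, the access pattern the patch touches can be exercised outside Shogun. The sketch below is an illustrative stand-in, not Shogun code: the names `K`, `alpha`, `eigen_f`, `C`, and `n` only mirror the patched loops, and the data is random. It shows that reading and writing through `segment()` works the same on an `Eigen::Map<VectorXd>` (how Shogun wraps its raw buffers) as on an owning vector, so only the single-column `block()` calls on vectors needed rewriting; the genuinely two-dimensional accesses such as `eigen_E.block(0, bl*n, n, n)` are left untouched.

```cpp
// Standalone sketch of the per-class update pattern after the patch:
// each length-n segment of a stacked vector is read and written independently.
#include <iostream>
#include <Eigen/Dense>

int main()
{
    const int C = 2, n = 3;                                   // illustrative sizes only
    Eigen::MatrixXd K = Eigen::MatrixXd::Random(n, n);        // stand-in for a kernel block
    Eigen::VectorXd storage = Eigen::VectorXd::Random(C * n); // stand-in for a raw buffer

    // Shogun wraps raw buffers like this, e.g. Map<VectorXd> alpha(m_alpha.vector, m_alpha.vlen).
    Eigen::Map<Eigen::VectorXd> alpha(storage.data(), storage.size());

    Eigen::VectorXd eigen_f(C * n);
    for (int bl = 0; bl < C; ++bl)
        // Post-patch form: segment(bl*n, n) instead of block(bl*n, 0, n, 1).
        eigen_f.segment(bl * n, n) = K * alpha.segment(bl * n, n);

    std::cout << eigen_f.transpose() << std::endl;
    return 0;
}
```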