using namespace Eigen;
return m_cumulant_matrix;
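// Whitening stage: wrap the input matrix in an Eigen Map, centre it by
// removing the per-row mean, and form the sample covariance of the centered
// data SPX.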
Map<MatrixXd> EX(X.matrix, n, T);
VectorXd mean = (EX.rowwise().sum() / (float64_t)T);
MatrixXd SPX = EX.colwise() - mean;

MatrixXd cov = (SPX * SPX.transpose()) / (float64_t)T;
std::cout << "cov" << std::endl;
std::cout << cov << std::endl;
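// Eigendecomposition of the covariance; its eigenvectors and eigenvalues
// define the whitening transform built below.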
SelfAdjointEigenSolver<MatrixXd> eig;
eig.compute(cov);
std::cout << "eigenvectors" << std::endl;
std::cout << eig.eigenvectors() << std::endl;

std::cout << "eigenvalues" << std::endl;
std::cout << eig.eigenvalues().asDiagonal() << std::endl;
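// Whitener B: rotate into the eigenbasis and rescale by the inverse square
// roots of the eigenvalues, so that B * SPX has identity sample covariance.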
VectorXd scales = eig.eigenvalues().cwiseSqrt();
MatrixXd B = scales.cwiseInverse().asDiagonal() * eig.eigenvectors().transpose();
std::cout << "whitener" << std::endl;
std::cout << B << std::endl;
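// Cumulant estimation: dimsymm = m*(m+1)/2 is the number of m x m
// fourth-order cumulant matrices, stored side by side in the m x (m*nbcm)
// matrix CM.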
int dimsymm = (m * (m + 1)) / 2;
Map<MatrixXd> CM(m_cumulant_matrix.matrix, m, m*nbcm);
MatrixXd R(m,m); R.setIdentity();
MatrixXd Qij = MatrixXd::Zero(m,m);
VectorXd Xim = VectorXd::Zero(m);
VectorXd Xjm = VectorXd::Zero(m);
VectorXd Xijm = VectorXd::Zero(m);
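// For every index pair (im, jm), estimate a fourth-order cumulant matrix Qij
// from SPX and store it as the next m x m block of CM; Range is the running
// column offset into CM.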
int Range = 0;
for (int im = 0; im < m; im++)
{
	Xim = SPX.row(im);
	Xijm = Xim.cwiseProduct(Xim);
	Qij = SPX.cwiseProduct(Xijm.replicate(1,m).transpose()) * SPX.transpose() / (float64_t)T - R - 2*R.col(im)*R.col(im).transpose();
	CM.block(0,Range,m,m) = Qij;
	Range += m;

	for (int jm = 0; jm < im; jm++)
	{
		Xjm = SPX.row(jm);
		Xijm = Xim.cwiseProduct(Xjm);
		Qij = SPX.cwiseProduct(Xijm.replicate(1,m).transpose()) * SPX.transpose() / (float64_t)T - R.col(im)*R.col(jm).transpose() - R.col(jm)*R.col(im).transpose();
		CM.block(0,Range,m,m) = sqrt(2)*Qij;
		Range += m;
	}
}
std::cout << "cumulant matrices" << std::endl;
std::cout << CM << std::endl;
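// Copy each m x m block of CM into the collection of cumulant matrices that
// is passed to the joint diagonalization step.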
for (int i = 0; i < nbcm; i++)
	EM = CM.block(0,i*m,m,m);
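// Q presumably holds the result of the joint diagonalization of the cumulant
// set; EQ maps it into Eigen and is replaced by minus its inverse.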
Map<MatrixXd> EQ(Q.matrix, m, m);
EQ = -1 * EQ.inverse();
std::cout << "diagonalizer" << std::endl;
std::cout << EQ << std::endl;
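// The separation (un-mixing) matrix is the diagonalizer applied on top of the
// whitener: C = EQ^T * B.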
Map<MatrixXd> C(sep_matrix.matrix, m, m);
C = EQ.transpose() * B;
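// A(i): energy of the i-th component, i.e. the sum of squared entries of the
// i-th column of the estimated mixing matrix B^-1 * EQ; used below to order
// the recovered sources.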
VectorXd A = (B.inverse()*EQ).cwiseAbs2().colwise().sum();
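// Reorder the columns of C (and the entries of A) according to the component
// energies in A.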
for (int j = 1; j < n; j++)
	std::swap(A(j),A(j-1));
	C.col(j).swap(C.col(j-1));
for (int j = 0; j < m/2; j++)
	C.row(j).swap(C.row(m-1-j));
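// Fix the sign of each row of C; forcing the first column of the un-mixing
// matrix to be non-negative is the conventional choice in Cardoso's reference
// JADE code and is assumed here.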
VectorXd signs = VectorXd::Zero(m);
for (int i = 0; i < m; i++)
	signs(i) = (C(i,0) >= 0) ? 1 : -1; // assumed: conventional JADE sign fix on the first column
C = signs.asDiagonal() * C;
std::cout << "un mixing matrix" << std::endl;
std::cout << C << std::endl;
#endif // HAVE_EIGEN3
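// A minimal usage sketch, kept as a comment and assuming Shogun's C++ API
// (CJade, CDenseFeatures, CConverter::apply). The header paths, class and
// method names below are assumptions, not taken from this listing.
//
//   #include <shogun/converter/ica/Jade.h>
//   #include <shogun/features/DenseFeatures.h>
//
//   SGMatrix<float64_t> X(n, T);                    // n mixed signals, T samples
//   CDenseFeatures<float64_t>* mixed = new CDenseFeatures<float64_t>(X);
//   CJade* jade = new CJade();
//   CFeatures* unmixed = jade->apply(mixed);        // runs the steps listed above
//   SGMatrix<float64_t> cm = jade->get_cumulant_matrix();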