diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..e69de29 diff --git a/examples/AxialMovingBeam/AxialMovingBeam.m b/examples/AxialMovingBeam/AxialMovingBeam.m new file mode 100644 index 0000000..e291e46 --- /dev/null +++ b/examples/AxialMovingBeam/AxialMovingBeam.m @@ -0,0 +1,271 @@ +%% +% We consider an axially moving beam under 1:3 internal resonance between the +% first two bending modes. The equation of motion is given by +% +% $$\ddot{\mathbf{u}}+(\mathbf{C}+\mathbf{G})\dot{\mathbf{u}}+\mathbf{f}(\mathbf{u},\dot{\mathbf{u}})=\epsilon\Omega^2\mathbf{g}\cos\Omega +% t$$ +% +% where $\mathbf{G}^\top=-\mathbf{G}$ is a gyroscopic matrix. With viscoelastic +% material model, the system has nonlinear damping. In addition, the beam is subject +% to base excitation such that the forcing amplitude is a function of forcing +% frequency. +% +% In this notebook, we will calculate the forced response curve of the system +% with/without the nonlinear damping taken into consideration to explore the effects +% of nonlinear damping +%% Nonlinear Damping +% Setup Dynamical System + +clear all; +n = 10; +[mass,damp,gyro,stiff,fnl,fext] = build_model(n,'nonlinear_damp'); + +%% +% *Create model* +%% +% * Given the assembled damping matrix $\mathbf{C}+\mathbf{G}$ is not Rayleigh +% damping anymore. One should set the corresponding Options in DS to be false. +% * The BaseExcitation option in DS should be set true to account for the $\Omega$-dependent +% forcing amplitude. + +DS = DynamicalSystem(); +set(DS,'M',mass,'C',damp+gyro,'K',stiff,'fnl',fnl); +set(DS.Options,'Emax',6,'Nmax',10,'notation','multiindex'); +set(DS.Options,'RayleighDamping',false,'BaseExcitation',true); + +% Forcing +h = 1.5e-4; % h characterizes the vibration amplitude of base excitation. It plays the role of epsilon here +kappas = [-1; 1]; +coeffs = [fext fext]/2; +DS.add_forcing(coeffs, kappas, h); +% Linear Modal analysis + +[V,D,W] = DS.linear_spectral_analysis(); +%% +% *Choose Master subspace* +% +% Due to the 1:3 internal resonance, we take the first two complex conjugate +% pairs of modes as the spectral subspace to SSM. So we hvae resonant_modes = +% [1 2 3 4]. + +S = SSM(DS); +set(S.Options, 'reltol', 1,'notation','multiindex'); +resonant_modes = [1 2 3 4]; +order = 3; +outdof = [1 2]; +% Primary resonance of the first mode +% We consider the case that $\Omega\approx\omega_1$. Although the second mode +% is not excited externally ($f_2=\langle \phi_2,f^\mathrm{ext}\rangle=0$), the +% response of the second mode is nontrivial due to the modal interactions. + +freqrange = [0.98 1.04]*imag(D(1)); +set(S.FRCOptions, 'nCycle',500, 'initialSolver', 'fsolve'); +set(S.contOptions, 'PtMX', 300, 'h0', 0.1, 'h_max', 0.2, 'h_min', 1e-3); +set(S.FRCOptions, 'coordinates', 'polar'); +%% +% We first compute the FRC with O(3,5,7) expansion of SSM and then check the +% convergence of FRC with increasing orders. + +% O(3) +start = tic; +FRC_ND_O3 = S.SSM_isol2ep('isol-nd-3',resonant_modes, order, [1 3], 'freq', freqrange,outdof); +timings.FRC_ND_O3 = toc(start); +% O(5) +sol = ep_read_solution('isol-nd-3.ep',1); +start = tic; +FRC_ND_O5 = S.SSM_isol2ep('isol-nd-5',resonant_modes, order+2, [1 3],... + 'freq', freqrange,outdof,{sol.p,sol.x}); +timings.FRC_ND_O5 = toc(start); +fig_ssm = gcf; +% O(7) +start = tic; +FRC_ND_O7 = S.SSM_isol2ep('isol-nd-7',resonant_modes, order+4, [1 3],... 
+ 'freq', freqrange,outdof,{sol.p,sol.x}); +timings.FRC_ND_O7 = toc(start); +%% +% Plot FRC at different orders in the same figure to observe the convergence + +FRCs = {FRC_ND_O3,FRC_ND_O5,FRC_ND_O7}; +thm = struct(); +thm.SN = {'LineStyle', 'none', 'LineWidth', 2, ... + 'Color', 'cyan', 'Marker', 'o', 'MarkerSize', 8, 'MarkerEdgeColor', ... + 'cyan', 'MarkerFaceColor', 'white'}; +thm.HB = {'LineStyle', 'none', 'LineWidth', 2, ... + 'Color', 'black', 'Marker', 's', 'MarkerSize', 8, 'MarkerEdgeColor', ... + 'black', 'MarkerFaceColor', 'white'}; +color = {'r','k','b','m'}; +figure(20); +ax1 = gca; +for k=1:3 + FRC = FRCs{k}; + SNidx = FRC.SNidx; + HBidx = FRC.HBidx; + FRC.st = double(FRC.st); + FRC.st(HBidx) = nan; + FRC.st(SNidx) = nan; + % color + ST = cell(2,1); + ST{1} = {[color{k},'--'],'LineWidth',1.5}; % unstable + ST{2} = {[color{k},'-'],'LineWidth',1.5}; % stable + legs = ['SSM-$\mathcal{O}(',num2str(2*k+1),')$-unstable']; + legu = ['SSM-$\mathcal{O}(',num2str(2*k+1),')$-stable']; + hold(ax1,'on'); + plot_stab_lines(FRC.om,FRC.Aout_frc(:,1),FRC.st,ST,legs,legu); + SNfig = plot(FRC.om(SNidx),FRC.Aout_frc(SNidx,1),thm.SN{:}); + set(get(get(SNfig,'Annotation'),'LegendInformation'),... + 'IconDisplayStyle','off'); + HBfig = plot(FRC.om(HBidx),FRC.Aout_frc(HBidx,1),thm.HB{:}); + set(get(get(HBfig,'Annotation'),'LegendInformation'),... + 'IconDisplayStyle','off'); + xlabel('$\Omega$','Interpreter','latex'); + ylabel('$||u_1||_{\infty}$','Interpreter','latex'); + set(gca,'FontSize',14); + grid on; axis tight; +end +% Validation using collocation method from COCO + +figure(fig_ssm); hold on +nCycles = 500; +coco = cocoWrapper(DS, nCycles, outdof); +set(coco.Options, 'PtMX', 1000, 'NTST',20, 'dir_name', 'bd_nd'); +set(coco.Options, 'NAdapt', 0, 'h_max', 200, 'MaxRes', 1); +coco.initialGuess = 'linear'; +start = tic; +bd_nd = coco.extract_FRC(freqrange); +timings.cocoFRCbd_nd = toc(start) +%% Linear Damping +% Setup Dynamical System + +n = 10; +[mass,damp,gyro,stiff,fnl,fext] = build_model(n,'linear_damp'); + +% Create model +DS = DynamicalSystem(); +set(DS,'M',mass,'C',damp+gyro,'K',stiff,'fnl',fnl); +set(DS.Options,'Emax',6,'Nmax',10,'notation','multiindex'); +set(DS.Options,'RayleighDamping',false,'BaseExcitation',true); +DS.add_forcing(coeffs, kappas, h); +% Linear Modal analysis + +[V,D,W] = DS.linear_spectral_analysis(); +%% +% *Choose Master subspace* + +S = SSM(DS); +set(S.Options, 'reltol', 1,'notation','multiindex'); +resonant_modes = [1 2 3 4]; +order = 5; +outdof = [1 2]; +% Primary resonance of the first mode + +freqrange = [0.98 1.04]*imag(D(1)); +set(S.FRCOptions, 'nCycle',500, 'initialSolver', 'fsolve'); +set(S.contOptions, 'PtMX', 300, 'h0', 0.1, 'h_max', 0.2, 'h_min', 1e-3); +set(S.FRCOptions, 'coordinates', 'polar'); + +sol = ep_read_solution('isol-nd-3.ep',1); +start = tic; +FRC_LD_O5 = S.SSM_isol2ep('isol-ld-5',resonant_modes, order, [1 3],... 
+ 'freq', freqrange,outdof,{sol.p,sol.x}); +timings.FRC_LD_O5 = toc(start); +% validation using collocation method from COCO + +nCycles = 500; +coco = cocoWrapper(DS, nCycles, outdof); +set(coco.Options, 'PtMX', 1000, 'NTST',20, 'dir_name', 'bd_ld'); +set(coco.Options, 'NAdapt', 0, 'h_max', 200, 'MaxRes', 1); +coco.initialGuess = 'linear'; +start = tic; +bd_ld = coco.extract_FRC(freqrange); +timings.cocoFRCbd_ld = toc(start) +%% Comparison of FRC with/without nonlinear damping + +% plot SSM results +FRCs = {FRC_LD_O5,FRC_ND_O5}; +legs = {'LD-SSM-$\mathcal{O}(5)$-unstable','ND-SSM-$\mathcal{O}(5)$-unstable'}; +legu = {'LD-SSM-$\mathcal{O}(5)$-stable','ND-SSM-$\mathcal{O}(5)$-stable'}; +color = {'r','b'}; +fig25 = figure; +fig26 = figure; +for k=1:2 + FRC = FRCs{k}; + SNidx = FRC.SNidx; + HBidx = FRC.HBidx; + FRC.st = double(FRC.st); + FRC.st(HBidx) = nan; + FRC.st(SNidx) = nan; + % color + ST = cell(2,1); + ST{1} = {[color{k},'--'],'LineWidth',1.5}; % unstable + ST{2} = {[color{k},'-'],'LineWidth',1.5}; % stable + figure(fig25); hold on + plot_stab_lines(FRC.om,FRC.Aout_frc(:,1),FRC.st,ST,legs{k},legu{k}); + SNfig = plot(FRC.om(SNidx),FRC.Aout_frc(SNidx,1),thm.SN{:}); + set(get(get(SNfig,'Annotation'),'LegendInformation'),... + 'IconDisplayStyle','off'); + xlabel('$\Omega$','Interpreter','latex'); + ylabel('$||u_1||_{\infty}$','Interpreter','latex'); + set(gca,'FontSize',14); + grid on, axis tight; + legend boxoff; + figure(fig26); hold on + plot_stab_lines(FRC.om,FRC.Aout_frc(:,2),FRC.st,ST,legs{k},legu{k}); + SNfig = plot(FRC.om(SNidx),FRC.Aout_frc(SNidx,2),thm.SN{:}); + set(get(get(SNfig,'Annotation'),'LegendInformation'),... + 'IconDisplayStyle','off'); + xlabel('$\Omega$','Interpreter','latex'); + ylabel('$||u_2||_{\infty}$','Interpreter','latex'); + set(gca,'FontSize',14); + grid on; axis tight; + legend boxoff +end + +% load coco solution +legs = {'LD-Collocation-unstable','ND-Collocation-unstable'}; +legu = {'LD-Collocation-stable','ND-Collocation-stable'}; + +bd = bd_nd{:}; +ndom = coco_bd_col(bd,'omega'); +ndamp1 = coco_bd_col(bd, 'amp1'); +ndamp2 = coco_bd_col(bd, 'amp2'); +ndst = coco_bd_col(bd, 'eigs'); +ndst = all(abs(ndst)<1,1); +logs = [1 2 3 4:3:numel(ndst)-4 numel(ndst)-1 numel(ndst)]; +ndamp1 = ndamp1(logs); +ndamp2 = ndamp2(logs); +ndom = ndom(logs); +ndst = ndst(logs); +figure(fig25); hold on +plot(ndom(ndst), ndamp1(ndst), 'ro', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'ND-Collocation-stable'); +plot(ndom(~ndst), ndamp1(~ndst), 'ms', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'ND-Collocation-unstable'); +figure(fig26); hold on +plot(ndom(ndst), ndamp2(ndst), 'ro', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'ND-Collocation-stable'); +plot(ndom(~ndst), ndamp2(~ndst), 'ms', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'ND-Collocation-unstable'); + +bd = bd_ld{:}; +ldom = coco_bd_col(bd,'omega'); +ldamp1 = coco_bd_col(bd, 'amp1'); +ldamp2 = coco_bd_col(bd, 'amp2'); +ldst = coco_bd_col(bd, 'eigs'); +ldst = all(abs(ldst)<1,1); +logs = [1 2 3 4:3:numel(ldst)-4 numel(ldst)-1 numel(ldst)]; +ldamp1 = ldamp1(logs); +ldamp2 = ldamp2(logs); +ldom = ldom(logs); +ldst = ldst(logs); +figure(fig25); hold on +plot(ldom(ldst), ldamp1(ldst), 'kd', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'LD-Collocation-stable'); +plot(ldom(~ldst), ldamp1(~ldst), 'gv', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'LD-Collocation-unstable'); +figure(fig26); hold on +plot(ldom(ldst), ldamp2(ldst), 'kd', 'MarkerSize', 6, 'LineWidth', 1,... 
+ 'DisplayName', 'LD-Collocation-stable'); +plot(ldom(~ldst), ldamp2(~ldst), 'gv', 'MarkerSize', 6, 'LineWidth', 1,... + 'DisplayName', 'LD-Collocation-unstable'); +%% +timings \ No newline at end of file diff --git a/examples/AxialMovingBeam/AxialMovingBeamBook.mlx b/examples/AxialMovingBeam/AxialMovingBeamBook.mlx new file mode 100644 index 0000000..cc9099c Binary files /dev/null and b/examples/AxialMovingBeam/AxialMovingBeamBook.mlx differ diff --git a/examples/AxialMovingBeam/build_model.m b/examples/AxialMovingBeam/build_model.m new file mode 100644 index 0000000..f50de96 --- /dev/null +++ b/examples/AxialMovingBeam/build_model.m @@ -0,0 +1,52 @@ +function [mass,damp,gyro,stiff,fnl,fext] = build_model(n,varargin) + +A = 0.04*0.03; % m^2 +I = 0.04*0.03^3/12; +rho = 7680; +E = 30e9; +% eta = 1e-5*E; +eta = 1e-4*E; +L = 1; +P = 67.5e3; + +kf = sqrt(E*I/(P*L^2)); +k1 = sqrt(E*A/P); +alpha = I*eta/(L^3*sqrt(rho*A*P)); +gamma = 0.5128; +mass = eye(n); +damp = zeros(n); +gyro = zeros(n); +stiff = zeros(n); +fext = zeros(n,1); +cubic_coeff = zeros(n,n,n,n); +for i=1:n + damp(i,i) = alpha*(i*pi)^4; + stiff(i,i) = kf^2*(i*pi)^4-(gamma^2-1)*(i*pi)^2; + fext(i) = (1-(-1)^i)/(i*pi); + for j=1:n + if j~=i + gyro(i,j) = 4*gamma*i*j/(i^2-j^2)*(1-(-1)^(i+j)); + end + cubic_coeff(i,j,j,i) = i^2*j^2; + end +end + +disp('the first four eigenvalues for undamped system'); +lamd = eigs([zeros(n) eye(n);-mass\stiff -mass\(gyro)],4,'smallestabs') + +if numel(varargin)>0 && strcmp(varargin{1}, 'nonlinear_damp') + % visoelastic damping + tmp = sptensor(cubic_coeff); + subs1 = tmp.subs; + subs2 = subs1; + subs2(:,3) = subs1(:,3)+n; + subs = [subs1; subs2]; + vals = [0.25*k1^2*pi^4*tmp.vals; 0.5*alpha*k1^2/kf^2*pi^4*tmp.vals]; + f3 = sptensor(subs, vals, [n,2*n,2*n,2*n]); +else + % viscoelastic damping but ignore nonlinear part + f3 = 0.25*k1^2*pi^4*sptensor(cubic_coeff); +end +fnl = {[],f3}; + +end \ No newline at end of file diff --git a/examples/AxialMovingBeam/cal_parameters.m b/examples/AxialMovingBeam/cal_parameters.m new file mode 100644 index 0000000..d2ac7bc --- /dev/null +++ b/examples/AxialMovingBeam/cal_parameters.m @@ -0,0 +1,64 @@ +function [alpha, omega, nu] = cal_parameters(N,l) + +syms x +% N = 2; % number of modes +% l = 2; + +% construct modal functions +phis = []; +dphis = []; +ddphis = []; +alphal = zeros(N,1); +alphal(1:4) = [3.927 7.069 10.210 13.352]'; +alphal(5:N) = (4*(5:N)'+1)*pi/4; +alpha = alphal/l; +R = sin(alphal)./sinh(alphal); +E = (0.5*l*(1-R.^2)+(R.^2.*sinh(2*alphal)-sin(2*alphal))/(4*alpha)).^(-0.5); +for n=1:N + phin = E(n)*(sin(alpha(n)*x)-R(n)*sinh(alpha(n)*x)); + dphin = diff(phin,x); + ddphin = diff(dphin,x); + phis = [phis; phin]; + dphis = [dphis; dphin]; + ddphis = [ddphis; ddphin]; + normphi = int(phin^2, x, 0, l); + fprintf('L2 norm of phi%d is %d\n', n, normphi); +end + +% compute nonlinear coeffficients +alpha1 = zeros(N); +alpha2 = zeros(N); +alpha = zeros(N,N,N,N); + +for i=1:N + for j=1:N + alpha1(i,j) = int(phis(i)*ddphis(j),x,0,l); + alpha2(i,j) = int(dphis(i)*dphis(j),x,0,l); + end +end + + +for n=1:N + for m=1:N + for p=1:N + for q=1:N + alpha(n,m,p,q) = alpha1(n,q)*alpha2(m,p); + end + end + end +end + +% compute natural frequencies +omega = zeros(N,1); +for i=1:N + int1 = int(phis(i)^2,x,0,l); + int2 = int(diff(phis(i),x,4)*phis(i),x,0,l); + omega(i) = sqrt(int2/int1); +end + +nu = 1/(2*l); + +end + + + \ No newline at end of file diff --git a/examples/BenchamrkSSM1stOrder/build_model.m b/examples/BenchamrkSSM1stOrder/build_model.m new file mode 100644 
index 0000000..adb11df --- /dev/null +++ b/examples/BenchamrkSSM1stOrder/build_model.m @@ -0,0 +1,17 @@ +function [A,B,F] = build_model() + +A = [-1 0;0 -sqrt(24)]; +B = eye(2); + +F2 = sptensor([2 2 2]); +F3 = sptensor([2,2,2,2]); +F4 = sptensor([2,2,2,2,2]); +F5 = sptensor([2,2,2,2,2,2]); + +F2(2,1,1) = 1; +F3(2,1,1,1) = 1; +F4(2,1,1,1,1) = 1; +F5(2,1,1,1,1,1) = 1; + +F = {F2,F3,F4,F5}; +end \ No newline at end of file diff --git a/examples/BenchamrkSSM1stOrder/demo.mlx b/examples/BenchamrkSSM1stOrder/demo.mlx new file mode 100644 index 0000000..aebdb8a Binary files /dev/null and b/examples/BenchamrkSSM1stOrder/demo.mlx differ diff --git a/examples/BernoulliBeam/BernoulliBeamBaseExcitation.mlx b/examples/BernoulliBeam/BernoulliBeamBaseExcitation.mlx new file mode 100644 index 0000000..c203544 Binary files /dev/null and b/examples/BernoulliBeam/BernoulliBeamBaseExcitation.mlx differ diff --git a/examples/BernoulliBeamIRs/demo.mlx b/examples/BernoulliBeamIRs/demo.mlx index 15dd861..874d054 100644 Binary files a/examples/BernoulliBeamIRs/demo.mlx and b/examples/BernoulliBeamIRs/demo.mlx differ diff --git a/examples/CharneyDeVore1stOrder/CharneyDeVore.m b/examples/CharneyDeVore1stOrder/CharneyDeVore.m new file mode 100644 index 0000000..6924b3a --- /dev/null +++ b/examples/CharneyDeVore1stOrder/CharneyDeVore.m @@ -0,0 +1,32 @@ +function y = CharneyDeVore(x,x1star) + +% parameter set +gamma = 1; +beta = 1.25; +b = 0.5; +C = 0.1; +m = 1:2; +alpham = 8*sqrt(2)/pi*m.^2./(4*m.^2-1).*(b^2+m.^2-1)./(b^2+m.^2); +betam = beta*b^2./(b^2+m.^2); +deltam = 64*sqrt(2)/(15*pi)*(b^2-m.^2+1)./(b^2+m.^2); +gammaastm = gamma*4*m*sqrt(2)*b./(4*m.^2-1)/pi; +gammam = gamma*4*m.^3./(4*m.^2-1)*sqrt(2)*b./pi./(b^2+m.^2); +epsilon = 16*sqrt(2)/(5*pi); + +x1 = x(1,:); +x2 = x(2,:); +x3 = x(3,:); +x4 = x(4,:); +x5 = x(5,:); +x6 = x(6,:); +x4star = x1star*(-0.4); + +y = zeros(6,numel(x1)); +y(1,:) = gammaastm(1)*x3-C*(x1-x1star); +y(2,:) = -(alpham(1)*x1-betam(1)).*x3-C*x2-deltam(1)*x4.*x6; +y(3,:) = (alpham(1)*x1-betam(1)).*x2-gammam(1)*x1-C*x3+deltam(1)*x4.*x5; +y(4,:) = gammaastm(2)*x6-C*(x4-x4star)+epsilon*(x2.*x6-x3.*x5); +y(5,:) = -(alpham(2)*x1-betam(2)).*x6-C*x5-deltam(2)*x4.*x3; +y(6,:) = (alpham(2)*x1-betam(2)).*x5-gammam(2)*x4-C*x6+deltam(2)*x4.*x2; + +end \ No newline at end of file diff --git a/examples/CharneyDeVore1stOrder/build_model.m b/examples/CharneyDeVore1stOrder/build_model.m new file mode 100644 index 0000000..958dd07 --- /dev/null +++ b/examples/CharneyDeVore1stOrder/build_model.m @@ -0,0 +1,48 @@ +function [A,B,F] = build_model(varargin) + +% parameter set +gamma = 1; +beta = 1.25; +b = 0.5; +C = 0.1; + +% build Charney-DeVore model +m = 1:2; +alpham = 8*sqrt(2)/pi*m.^2./(4*m.^2-1).*(b^2+m.^2-1)./(b^2+m.^2); +betam = beta*b^2./(b^2+m.^2); +deltam = 64*sqrt(2)/(15*pi)*(b^2-m.^2+1)./(b^2+m.^2); +gammaastm = gamma*4*m*sqrt(2)*b./(4*m.^2-1)/pi; +gammam = gamma*4*m.^3./(4*m.^2-1)*sqrt(2)*b./pi./(b^2+m.^2); +epsilon = 16*sqrt(2)/(5*pi); + +% linear part +A = [-C 0 gammaastm(1) 0 0 0 + 0 -C betam(1) 0 0 0 + -gammam(1) -betam(1) -C 0 0 0 + 0 0 0 -C 0 gammaastm(2) + 0 0 0 0 -C betam(2) + 0 0 0 -gammam(2) -betam(2) -C]; +B = eye(6); + +% quadratic part +F2 = sptensor([6 6 6]); +F2(2,1,3) = -alpham(1); +F2(2,4,6) = -deltam(1); +F2(3,1,2) = alpham(1); +F2(3,4,5) = deltam(1); +F2(4,2,6) = epsilon; +F2(4,3,5) = -epsilon; +F2(5,1,6) = -alpham(2); +F2(5,3,4) = -deltam(2); +F2(6,1,5) = alpham(2); +F2(6,2,4) = deltam(2); +F = {F2}; + +% shift of origin +if numel(varargin)>0 + sol = varargin{1}; + x = sol.x; + A = 
A+spmatrix(ttv(F2,x,2)+ttv(F2,x,3)); +end + +end \ No newline at end of file diff --git a/examples/CharneyDeVore1stOrder/demo.mlx b/examples/CharneyDeVore1stOrder/demo.mlx new file mode 100644 index 0000000..2c7ff80 Binary files /dev/null and b/examples/CharneyDeVore1stOrder/demo.mlx differ diff --git a/examples/CharneyDeVore1stOrder/plot_backbone_curves.m b/examples/CharneyDeVore1stOrder/plot_backbone_curves.m new file mode 100644 index 0000000..378c7fb --- /dev/null +++ b/examples/CharneyDeVore1stOrder/plot_backbone_curves.m @@ -0,0 +1,26 @@ +function plot_backbone_curves(coeffs,exp_idx,rhosamp,orders) +% This function plot the backbone curves at reduced coordinates with +% various orders. It only supports 2D SSM + +if exp_idx(1)>exp_idx(2) + exp_idx = flip(exp_idx); + coeffs = flip(coeffs); +end +figure; hold on +y = 0; +for k=1:numel(exp_idx) + y = y+coeffs(k)*rhosamp.^(exp_idx(k)); + orderk = exp_idx(k)+1; + if any(orderk==orders) + plot(y,rhosamp,'LineWidth',1.5,'DisplayName',['SSM-$\mathcal{O}(',num2str(orderk),')$']); + end +end +hl = legend('show','Location', 'Best'); +set(hl, 'Interpreter','latex') +legend boxoff +set(gca,'FontSize',14); +set(gca,'LineWidth',1.2); +grid on +xlabel('$\omega$','Interpreter',"latex",'FontSize',16); +ylabel('$\rho$','Interpreter',"latex",'FontSize',16); +end \ No newline at end of file diff --git a/examples/Lorenz1stOrder/build_model.m b/examples/Lorenz1stOrder/build_model.m new file mode 100644 index 0000000..5d12860 --- /dev/null +++ b/examples/Lorenz1stOrder/build_model.m @@ -0,0 +1,10 @@ +function [A,B,F] = build_model(sigma,rho,beta) +% linear part +A = [-sigma sigma 0;rho -1 0;0 0 -beta]; +B = eye(3); +% quadratic part +F2 = sptensor([3 3 3]); +F2(2,1,3) = -1; % -xz term +F2(3,1,2) = 1; % xy term +F = {F2}; +end \ No newline at end of file diff --git a/examples/Lorenz1stOrder/demo.mlx b/examples/Lorenz1stOrder/demo.mlx new file mode 100644 index 0000000..07e737a Binary files /dev/null and b/examples/Lorenz1stOrder/demo.mlx differ diff --git a/examples/Lorenz1stOrder/lorenz.m b/examples/Lorenz1stOrder/lorenz.m new file mode 100644 index 0000000..1c68aad --- /dev/null +++ b/examples/Lorenz1stOrder/lorenz.m @@ -0,0 +1,7 @@ +function dz = lorenz(z,sigma,rho,beta) +% vector field of Lorenz system +dz = [sigma*(z(2)-z(1)); + rho*z(1)-z(2)-z(1)*z(3); + -beta*z(3)+z(1)*z(2)]; + +end \ No newline at end of file diff --git a/examples/ThreeOscillators/ThreeOscillators.m b/examples/ThreeOscillators/ThreeOscillators.m index 0ed43fb..2d6bb14 100644 --- a/examples/ThreeOscillators/ThreeOscillators.m +++ b/examples/ThreeOscillators/ThreeOscillators.m @@ -1,19 +1,18 @@ %% % We extract FRC of a three DOFs model with 1:1:1 internal resonance % -% $$\ddot{x}_1+x_1+\epsilon c_1\dot{x}_1+ K(x-y)^3=\epsilon f_1\cos\Omega t,\\\ddot{x}_2+x_2+\epsilon -% c_2\dot{x}_2+ K[(y-x)^3+(y-z)^3]=\epsilon f_2\cos\Omega t,\\\ddot{x}_3+x_3+\epsilon -% c_3\dot{x}_3+ K(z-y)^3=\epsilon f_3\cos\Omega t.$$ +% $$\ddot{x}_1+x_1+c_1\dot{x}_1+ K(x_1-x_2)^3=\epsilon f_1\cos\Omega t,\\\ddot{x}_2+x_2+c_2\dot{x}_2+ +% K[(x_2-x_1)^3+(x_2-x_3)^3]=0,\\\ddot{x}_3+x_3+c_3\dot{x}_3+ K(x_3-x_2)^3=0.$$ clear all, close all, clc %% Example Setup -c1 = 1e-1; -c2 = 2e-1; -c3 = 3e-1; -K = 0.2; epsilon = 5e-3; -[mass,damp,stiff,fnl,fext]=build_model(c1,c2,c3,K,epsilon); +c1 = 5e-4; +c2 = 1e-3; +c3 = 1.5e-3; +K = 1e-3; +[mass,damp,stiff,fnl,fext]=build_model(c1,c2,c3,K); %% Dynamical System Setup DS = DynamicalSystem(); diff --git a/examples/ThreeOscillators/ThreeOscillatorsBook.mlx 
b/examples/ThreeOscillators/ThreeOscillatorsBook.mlx index ad975d7..cfd1ad7 100644 Binary files a/examples/ThreeOscillators/ThreeOscillatorsBook.mlx and b/examples/ThreeOscillators/ThreeOscillatorsBook.mlx differ diff --git a/examples/ThreeOscillators/build_model.m b/examples/ThreeOscillators/build_model.m index 1415f73..3b033a5 100644 --- a/examples/ThreeOscillators/build_model.m +++ b/examples/ThreeOscillators/build_model.m @@ -1,11 +1,10 @@ -function [mass,damp,stiff,fnl,fext] = build_model(c1,c2,c3,K,epsilon) +function [mass,damp,stiff,fnl,fext] = build_model(c1,c2,c3,K) n = 3; mass = eye(n,n); damp = [c1 0 0; 0 c2 0; 0 0 c3]; -damp = damp*epsilon; stiff = eye(n,n); subs1 = [1 1 1 1 1 2 @@ -19,7 +18,6 @@ 3*ones(4,1), subs2]; vals3 = [vals3; -vals3; vals3; -vals3]*K; f3 = sptensor(subs3, vals3, [n,n,n,n]); -f3 = f3*epsilon; fnl = {[],f3}; fext = [1;0;0]; diff --git a/examples/TimoshenkoBeamIRs/FEM_Timoshenko.m b/examples/TimoshenkoBeamIRs/FEM_Timoshenko.m new file mode 100644 index 0000000..646f6e2 --- /dev/null +++ b/examples/TimoshenkoBeamIRs/FEM_Timoshenko.m @@ -0,0 +1,319 @@ +function [A,K,f,x,x_dot,C,M,A_bc]=FEM_Timoshenko(n,alpha) + +%This code is essentially a function which gives as output the stiffness, +%damping and mass matrices, K, C and M, respectively, of the beam model, as +%well as the nonlinear (material response) forcing vector f, and the global +%DOF of the beam x_dot, as well as their time derivatives x_dot, and takes +%as input the geometric and material parameters of the beam, which here are +%defined as height h, width b, length L, Young's modulus E, shear modulus +%G, damping ratio "damping_ratio" and density rho, as well as the number of +%elements we want to divide the beam into, defined as "n". Taking out the +%lines where these respective parameters are defined in the code below and +%deleting the "%" in the top and bottom line "transforms" this code into a +%real function. Everything below is annotated to clarify the various steps. + +%Define the number of elements you want to split the beam into. 1 Element +%has 8 degrees of freedom, so entering any number "n" yields a model with +%"8*n" degrees of freedom. However, 4*(n-1) of these degrees of freedom are +%"overlapping", i.e. defined on the same nodes, so the total model will in +%the end have "8+4*(n-1)" or "4*(1+n)" degrees of freedom. + + +%Specify boundary conditions: 1 for "First node clamped", 2 for "Both ends +%pinned" + +bc=1; + +%Allocate space for stiffness, damping and mass matrices and nonlinear +%forcing vector as well as the vectors containing the global DOF's, +%x_tilde_tot and x_tilde_dot_tot + +K_tot=zeros(8+4*(n-1)+n,8+4*(n-1)+n); +C_tot=zeros(8+4*(n-1)+n,8+4*(n-1)+n); +M_tot=zeros(8+4*(n-1)+n,8+4*(n-1)+n); +f_tot=zeros(8+4*(n-1)+n,1); +x_tilde_tot=f_tot; +x_tilde_dot_tot=f_tot; + +%Define variable vectors "x_tilde" and its time derivative "x_tilde_dot". +%The entries of these vectors are the degrees of freedom of the beam +%element and their time derivatives. + +R=[1 0 0 0 0 0 0 0 0; 0 1 0 0 0 0 0 0 0; 0 0 0 0 1 0 0 0 0; 0 0 0 0 0 0 1 0 0; 0 0 0 0 0 0 0 1 0; 0 0 1 0 0 0 0 0 0; 0 0 0 1 0 0 0 0 0; 0 0 0 0 0 1 0 0 0; 0 0 0 0 0 0 0 0 1]; +J=inv(R); + +%Define the beam's parameters and from those the dimensionless parameters. +%The viscoelastic constants are estimated from a damping ratio (obtained +%ideally from small amplitude vibration experiments), since this is the +%only quantity readily available for various materials. 
In the current +%example (11.4.2017), the material paramaters for steel have been used. + +% SSMTOOL 1.0 +% h=100; %Height of beam [mm] +% b=100; %Width of beam [mm] +% L=1000; %Length of beam [mm] + +h=40; %Height of beam [mm] +b=40; %Width of beam [mm] +L=1200; %Length of beam [mm] +Lel=L/n; %Auxiliary variable needed for integration +E=90000; %Young's modulus [kPa] +G=E/(2*1.3); %Shear modulus [kPa] +rho=7850*10^(-9); %Density [kg/mm^3] +damping_ratio=0.2; %Damping Ratio [-] 0.0015; 0.2 +mu=damping_ratio*2*sqrt(E*rho*h*b); %Viscoelastic constant for axial deformation [kPa*s] +gamma=damping_ratio*2*sqrt(G*rho*h*b); %Viscoelastic constant for shear deformation [kPa*s] + +m0=b*h*rho; +m1=b*h^3/12*rho; + +%Define shape functions for discretization + +syms x +phi_1(x)=[1-3*x^2+2*x^3;x-2*x^2+x^3;3*x^2-2*x^3;-x^2+x^3]; +phi_1=[phi_1(x/Lel)]; +phi_1(x)=[1 0 0 0;0 Lel 0 0;0 0 1 0; 0 0 0 Lel]*phi_1; +phi_2(x)=[1-3*x+2*x^2;4*x-4*x^2;-x+2*x^2]; +%phi_2(x)=[1-x^2;x-x^2;x^2]; +phi_2(x)=[phi_2(x/Lel)]; +%phi_2=[1 0 0;0 L 0;0 0 1]*phi_2; +phi_3(x)=[1-x;x]; +phi_3(x)=[phi_3(x/Lel)]; +phi(x)=[phi_1;phi_3;phi_2]; + +% In above, the local DOFs are in the form (Note that phi=[phi1;phi3;phi2] +% [u_a, u'_a, u_b, u_b', phi_a, phi_b, w_a, w_ab, w_b], where a/b/ab +% represents left/middle/right point of the element. The global DOFs is +% obtained by R*local_dofs, yielding +% [u_a, u'_a, phi_a, w_a, w_ab, u_b, u'_b, phi_b, w_b] + + +for i=1:n +x_tilde_full = sym('x_%d',[5*n+4,1]); +x_tilde_dot_full = sym('x_dot_',[5*n+4,1]); +x_tilde=J*x_tilde_full(5*(i-1)+1:5*i+4,:); +x_tilde_dot=J*x_tilde_dot_full(5*(i-1)+1:5*i+4,:); + + +%Define auxiliary vectors and matrices to enter equations more easily. A_u, +%A_w and A_phi transform the "full" DOF vectors x_tilde and x_tilde_dot +%into the smaller DOF vectors corresponding to axial, transverse and shear +%DOF. + +A_w=blkdiag(zeros(4,4),zeros(2,2),eye(3,3)); +A_w(1:6,:)=[]; +A_phi=blkdiag(zeros(4,4),eye(2,2),zeros(3,3)); +A_phi(7:9,:)=[]; +A_phi(1:4,:)=[]; +A_u=blkdiag(eye(4,4),zeros(5,5)); +A_u(5:9,:)=[]; +u_tilde=A_u*x_tilde; +u_tilde_dot=A_u*x_tilde_dot; +w_tilde=A_w*x_tilde; +w_tilde_dot=A_w*x_tilde_dot; +phi_tilde=A_phi*x_tilde; +phi_tilde_dot=A_phi*x_tilde_dot; +u=transpose(phi_1)*u_tilde; +w=transpose(phi_2)*w_tilde; +ph1=transpose(phi_3)*phi_tilde; +u_dot=transpose(phi_1)*u_tilde_dot; +w_dot=transpose(phi_2)*w_tilde_dot; +ph1_dot=transpose(phi_3)*phi_tilde_dot; + +%Define entries of nonlinear material response vector +%f(x_tilde,x_tilde_dot) and stiffness matrix etc. 
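+% Annotation on the expressions below (a reading aid, not part of the
+% derivation): Mxx0 is the axial force resultant, Mxz0 the shear force
+% resultant, Mxx1 the bending moment and Mxz1 the corresponding
+% thickness-weighted shear resultant (purely nonlinear here, Mxz1lin = 0).
+% Each combines an elastic part (moduli E, G) with a Kelvin-Voigt
+% viscoelastic part (coefficients mu, gamma) acting on the strain rates;
+% the axial strain contains the von-Karman-type term 1/2*(diff(w))^2.
+% The '...lin'/'...nl' suffixes appear to separate the linear
+% contributions, which reappear in the element matrices K_u, K_w, K_phi
+% and their damping counterparts, from the nonlinear terms that are
+% collected into the element force vectors fu, fw, fphi further below.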
+Mxx0lin=b*h*(E*(diff(u))+mu*diff(u_dot)); +Mxx0nl=b*h*(E*(1/2*(diff(w))^2)+mu*(diff(w)*diff(w_dot))); +Mxx0=b*h*(E*(diff(u)+1/2*(diff(w))^2)+mu*(diff(u_dot)+diff(w)*diff(w_dot))); +Mxz0lin=b*h*(G*(ph1+diff(w))+gamma*(ph1_dot+diff(w_dot))); +Mxz0nl=b*h*(G*(diff(u)*ph1)+gamma*(diff(u_dot)*ph1+diff(u)*ph1_dot)); +Mxz0=b*h*(G*(ph1+diff(w)+diff(u)*ph1)+gamma*(ph1_dot+diff(w_dot)+diff(u_dot)*ph1+diff(u)*ph1_dot)); +Mxx1lin=b*h^3/12*(E*diff(ph1)+mu*diff(ph1_dot)); +Mxx1nl=0; +Mxx1=b*h^3/12*(E*diff(ph1)+mu*diff(ph1_dot)); +Mxz1lin=0; +Mxz1nl=b*h^3/12*(G*ph1*diff(ph1)+gamma*(ph1_dot*diff(ph1)+ph1*diff(ph1_dot))); +Mxz1=b*h^3/12*(G*ph1*diff(ph1)+gamma*(ph1_dot*diff(ph1)+ph1*diff(ph1_dot))); +fu=int(diff(phi_1)*(Mxx0nl+Mxz0*ph1),x,0,Lel); +fw=int(diff(phi_2)*(Mxz0nl+Mxx0*diff(w)),x,0,Lel); +fphi=int(diff(phi_3)*(Mxx1nl+Mxz1*ph1)+(phi_3)*((Mxz0nl+Mxz0*diff(u))+Mxz1*diff(ph1)),x,0,Lel); +f=[fu;fphi;fw]; +M_u=m0*int(phi_1*transpose(phi_1),x,0,Lel); +M_w=m0*int(phi_2*transpose(phi_2),x,0,Lel); +M_phi=m1*int(phi_3*transpose(phi_3),x,0,Lel); +%M_3_var=m1*phi_3*transpose(phi_3); +%M_3=M_3_var(r1(1))*d1(1); +K_u=int(diff(phi_1)*(b*h*E*(diff(transpose(phi_1)))),x,0,Lel); +%K_1_var=diff(B_u*phi)*(b*h*E*(diff(transpose(B_u*phi)))); +%K_1=K_1_var(r1(1))*d1(1); +C_u=mu/E*K_u; +% [r2,d2]=lgwt(2,0,Lel); +% [r1,d1]=lgwt(1,0,Lel); +%%K_2=int(diff(B_w*phi)*b*h*G*(transpose(B_phi*phi)+diff(transpose(B_w*phi))),x,0,L); +K_w=int(diff(phi_2)*b*h*G*diff(transpose(phi_2)),x,0,Lel); +K_w_phi=int(diff(phi_2)*b*h*G*(transpose(phi_3)),x,0,Lel); +%K_w_phi_var=diff(phi_2)*b*h*G*(transpose(phi_3)); +%K_w_phi=K_w_phi_var(r1(1))*d1(1); + +%K_2_var=diff(B_w*phi)*b*h*G*diff(transpose(B_w*phi)); +%K_2_phi_var=diff(B_w*phi)*b*h*G*(transpose(B_phi*phi)); +%=K_2_var(r1(1))*d1(1); +%K_2_phi=K_2_phi_var(r1(1))*d1(1); +C_w=gamma/G*K_w; +C_w_phi=gamma/G*K_w_phi; +%%K_3=int(diff(B_phi*phi)*b*h^3/12*(E*diff(transpose(B_phi*phi)))+(B_phi*phi)*b*h*G*(transpose(B_phi*phi)+diff(transpose(B_w*phi))),x,0,L); +K_phi=int(diff(phi_3)*b*h^3/12*(E*diff(transpose(phi_3)))+(phi_3)*b*h*G*(transpose(phi_3)),x,0,Lel); +K_phi_w=int((phi_3)*b*h*G*(diff(transpose(phi_2))),x,0,Lel); +%K_phi_w_var=(phi_3)*b*h*G*(diff(transpose(phi_2))); +%K_3_var=diff(phi_3)*b*h^3/12*(E*diff(transpose(phi_3)))+(phi_3)*b*h*G*(transpose(phi_3)); +%K_phi=K_3_var(r1(1))*d1(1); +%K_phi_w=K_phi_w_var(r1(1))*d1(1); +%K_3_var_E=diff(B_phi*phi)*b*h^3/12*(E*diff(transpose(B_phi*phi))); +%K_3_var_G=(B_phi*phi)*b*h*G*(transpose(B_phi*phi)); +%K_3_w_var=(B_phi*phi)*b*h*G*(diff(transpose(B_w*phi))); +%K_3_w=K_3_w_var(r1(1))*d1(1)+K_3_w_var(r2(2))*d2(2); +C_phi=int(diff(phi_3)*b*h^3/12*(mu*diff(transpose(phi_3)))+(phi_3)*b*h*gamma*(transpose(phi_3)),x,0,Lel); +%C_phi_w=int((phi_3)*b*h*gamma*(diff(transpose(phi_2))),x,0,L); +C_phi_w=gamma/G*K_phi_w; +%C_3_var=diff(phi_3)*b*h^3/12*(mu*diff(transpose(phi_3)))+(phi_3)*b*h*gamma*(transpose(phi_3)); +%C_phi=C_3_var(r1(1))*d1(1); +%C_3_var=diff(B_phi*phi)*b*h^3/12*(mu*diff(transpose(B_phi*phi)))+(B_phi*phi)*b*h*gamma*(transpose(B_phi*phi)); +%C_3_w_var=(B_phi*phi)*b*h*gamma*(diff(transpose(B_w*phi))); +%C_3=C_3_var(r1(1))*d1(1); +%C_3_w=C_3_w_var(r1(1))*d1(1); +%K_var_E(1:2,1:2)=K_1_var; +%K_var_E(3:6,3:6)=zeros(4,4); +%K_var_E(7:8,7:8)=K_3_var_E; +%K_var_G(1:2,1:2)=zeros(2,2); +%K_var_G(3:6,3:6)=K_2_var(r1(1))*d1(1); +%K_var_G(7:8,7:8)=K_3_var_G(r1(1))*d1(1); +%K_var_G(3:6,7:8)=K_2_phi_var(r1(1))*d1(1); +%K_var_G(7:8,3:6)=K_3_w_var(r1(1))*d1(1); +%K_E=K_var_E; +%K_G=K_var_G; +%K=K_E+K_G; +%C=mu/E*K_E+gamma/G*K_G; + +M=blkdiag(M_u,M_phi,M_w); 
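+% The element matrices M, K, C assembled here follow the local DOF
+% ordering stated above, [u_a u'_a u_b u'_b | phi_a phi_b | w_a w_ab w_b]:
+% entries 1:4 are axial, 5:6 rotational and 7:9 transverse. The
+% off-diagonal blocks K(7:9,5:6) and K(5:6,7:9) (and their counterparts in
+% C) insert the shear coupling between rotation and transverse deflection,
+% and the term alpha/m0*M adds a mass-proportional contribution to the
+% element damping.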
+K=blkdiag(K_u,K_phi,K_w); +C=blkdiag(C_u,C_phi,C_w); +K(7:9,5:6)=K_w_phi; +K(5:6,7:9)=K_phi_w; +C(7:9,5:6)=C_w_phi; +C(5:6,7:9)=C_phi_w; +%C(1:4,1:4)=C(1:4,1:4)+alpha/m0*M(1:4,1:4); +%C(7:9,7:9)=C(7:9,7:9)+alpha*M(7:9,7:9); +C=C+alpha/m0*M; + + +%Assemble the total model with total stiffness (K_tot), damping (C_tot) and +%mass (M_tot) matrices and global nonlinear forcing vector f_tot. The +%remaining global degrees of freedom and their time derivatives are +%contained in x_tilde_tot and x_tilde_dot_tot. R is a permutation matrix +%which permutes the entries of K, C, M, f, x_tilde, x_tilde_dot to +%facilitate the assembly of the corresponding global system vectors and +%matrices. Permuted quantities are denoted by (name)_rotated. The assembly +%occurs (as always in FEM) after the condition that the expression for the +%virtual work of the assembled system is equal to the virtual work of all +%the parts of the system. + +x_tilde_rotated = R*x_tilde; +x_tilde_dot_rotated = R*x_tilde_dot; +f_rotated= R*f; +K_rotated= R*K*transpose(R); +C_rotated= R*C*transpose(R); +M_rotated= R*M*transpose(R); +K_tot=K_tot+blkdiag(zeros(5*(i-1),5*(i-1)),K_rotated,zeros(size(K_tot)-size(K_rotated)-size(zeros(5*(i-1),5*(i-1))))); +C_tot=C_tot+blkdiag(zeros(5*(i-1),5*(i-1)),C_rotated,zeros(size(C_tot)-size(C_rotated)-size(zeros(5*(i-1),5*(i-1))))); +M_tot=M_tot+blkdiag(zeros(5*(i-1),5*(i-1)),M_rotated,zeros(size(M_tot)-size(M_rotated)-size(zeros(5*(i-1),5*(i-1))))); +sizef=size(f_tot)-size(f_rotated); +sizef1=size(f_tot)-size(zeros(5*(i-1),1))-size(f_rotated); +sizef2=size(f_tot)-size(zeros(4+5*(i-1),1))-size(x_tilde_rotated(5:9,1)); +if i==1 +f_tot=f_tot+[f_rotated;zeros(sizef(1),1)]; +x_tilde_tot=x_tilde_tot+[x_tilde_rotated;zeros(sizef(1),1)]; +x_tilde_dot_tot=x_tilde_dot_tot+[x_tilde_dot_rotated;zeros(sizef(1),1)]; +else +f_tot=f_tot+[zeros(5*(i-1),1);f_rotated;zeros(sizef1(1),1)]; +x_tilde_tot=x_tilde_tot+[zeros(4+5*(i-1),1);x_tilde_rotated(5:9,1);zeros(sizef2(1),1)]; +x_tilde_dot_tot=x_tilde_dot_tot+[zeros(4+5*(i-1),1);x_tilde_dot_rotated(5:9,1);zeros(sizef2(1),1)]; +end +end + + +%Implementation of essential boundary conditions (to get rid of the +%singularities in the K and C matrices, which correspond to rigid body +%motions): The full system Mx''+Cx'+Kx+f(x,x')=0 is transformed into a new, +%smaller reduced equivalent (up to the DOF's where boundary conditions are +%specified) system with M_tot -> M_tot_red, C_tot -> C_tot_red, M_tot -> +%M_tot_red, f_tot -> f_tot_red, x_tilde_tot -> x_tilde_tot_red, +%x_tilde_dot_tot -> x_tilde_dot_tot_red, where the subscript "_red" refers +%to the reduced system. A_bc is an auxiliary matrix to transform the +%system. Depending on the conditions specified, A_bc will change shape. In +%the current example (11.4.2017), the boundary conditions + +%correspond to a clamping of the end of the beam at x=0, so that all DOF's +%on the first node are set equal to zero. 
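+% For the clamped case (bc==1) constructed below, A_bc has size
+% (5*n+4)-by-(5*n+1): its columns select the retained global DOFs, namely
+% DOF 2 (the axial slope u'_a at the clamped node) together with DOFs
+% 5,...,5*n+4, while u_a, phi_a and w_a at the first node (DOFs 1, 3, 4)
+% are eliminated. The reduction applied further below is the usual
+% congruence transformation, e.g. M_tot_red = transpose(A_bc)*M_tot*A_bc
+% and x_tilde_tot = A_bc*x_tilde_tot_red. A small illustration for n = 1,
+% with the values taken from the construction below (not hard-coded
+% anywhere else):
+%   A_bc = [zeros(4,5); eye(5)];          % keep global DOFs 5..9
+%   A_bc = [[0;1;0;0;zeros(5,1)] A_bc];   % additionally keep DOF 2 (u'_a)
+%   % -> the columns of the 9-by-6 matrix A_bc pick out DOFs [2 5 6 7 8 9],
+%   %    so the reduced model has 5*n+1 = 6 degrees of freedom.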
+ +if bc==1 +%Clamped on first node +A_bc=[zeros(4,5*n);eye(5*n,5*n)]; +A_bc=[ [0;1;0;0;zeros(5*n,1)] A_bc]; +elseif bc==2 +%Pinned at both ends +A_bc=zeros(4+5*n,5*n); + if n==1 + A_bc=transpose([[0 1 0 0] zeros(1,5*n);[0 0 1 0] zeros(1,5*n);zeros(1,5*n) [0 1 0 0];zeros(1,5*n) [0 0 1 0]]); + else + A_bc=A_bc+transpose([[0 1 0 0] zeros(1,5*n);[[0 0 1 0] zeros(1,5*n)];zeros(5*n-4,5*n+4);zeros(1,5*n) [0 1 0 0];zeros(1,5*n) [0 0 1 0]]); + sizeA=size(A_bc); + A_bc(5:sizeA(1)-4,3:sizeA(2)-2)=eye(5*n-4,5*n-4); + % for k=1:(n-1) + % A_bc(5+5*(k-1):4+5*k,3+5*(k-1):2+5*k)=eye(5,5); + % end + end +end + cons = [setdiff(x_tilde_tot,A_bc*transpose(A_bc)*x_tilde_tot);setdiff(x_tilde_dot_tot,A_bc*transpose(A_bc)*x_tilde_dot_tot)]; + +f_tot_red=transpose(A_bc)*subs(f_tot,cons,zeros(numel(cons),1)); +x_tilde_tot_red=transpose(A_bc)*x_tilde_tot; +x_tilde_dot_tot_red=transpose(A_bc)*x_tilde_dot_tot; +M_tot_red=transpose(A_bc)*M_tot*A_bc; +K_tot_red=transpose(A_bc)*K_tot*A_bc; +C_tot_red=transpose(A_bc)*C_tot*A_bc; + + +%Rename everything for simplicity: K global stiffness matrix, C global +%damping matrix, M global mass matrix, f global nonlinear forcing (material +%response) vector, x global DOF, x_dot time derivatives of global DOF. +M_tot_red=vpa(M_tot_red,10); +f_tot_red=vpa(f_tot_red,10); +K_tot_red=vpa(K_tot_red,10); +C_tot_red=vpa(C_tot_red,10); + +%Rename everything for simplicity: K global stiffness matrix, C global +%damping matrix, M global mass matrix, f global nonlinear forcing (material +%response) vector, x global DOF, x_dot time derivatives of global DOF. + +M=double(M_tot_red); +K=K_tot_red; +C=C_tot_red; +f=f_tot_red; +x=x_tilde_tot_red; +x_dot=x_tilde_dot_tot_red; +A=double([zeros(5*n+1,5*n+1) eye(5*n+1,5*n+1);-M\K -M\C]); + +% str='hh=matlabFunction('; +% stre=',''File'',''hh'');'; +% for i=1:5*n+1 +% if i==5*n+1 +% str1=sprintf('f(%d)',i); +% else +% str1=sprintf('f(%d),',i); +% end +% str=strcat(str,str1); +% end +% str=strcat(str,stre); +% +% eval(str); +end \ No newline at end of file diff --git a/examples/TimoshenkoBeamIRs/build_model.m b/examples/TimoshenkoBeamIRs/build_model.m new file mode 100644 index 0000000..ba9ddb5 --- /dev/null +++ b/examples/TimoshenkoBeamIRs/build_model.m @@ -0,0 +1,107 @@ +function [M,C,K,fnl,fext,outdof] = build_model(nElements,isViscoelastic) + +if isViscoelastic + name = append('Vis_Damp_Timo_model_nE=',num2str(nElements),'.mat'); +else + name = append('Lin_Damp_Timo_model_nE=',num2str(nElements),'.mat'); +end +try + load(name,'M','C','K','fnl','fext','outdof'); + fnl = {sptensor(double(fnl{1})),sptensor(double(fnl{2}))}; +catch + % Geometrically Nonlinear Timoshenko beam + n = 5*nElements + 1; + forcing_dof = n - 1; + outdof = forcing_dof; + %% + % construct forcing amplitude vector + P = 120 * 1e7; % moment amplitude + f_0 = sparse(n,1); + f_0(forcing_dof) = P; + fext = f_0; + + [~,K,f,x,xd,C,M,~]=FEM_Timoshenko(nElements,0); + + M = sparse(double(M)); %#ok<*NODEF> + C = sparse(double(C)); + K = sparse(double(K)); + ndof = length(x); + %% Additional mass at 0.25L + mass = 0; + moment_of_inertia = 0; + % determine DOF + node_idx = round(nElements/4)+1; + mDOFs = 5*(node_idx-1)-3+[1, 4]; % translational DOFs + rDOF = 5*(node_idx-1)-3+3; % rotational DOF + M(mDOFs,mDOFs) = M(mDOFs,mDOFs) + mass*eye(2,2); % adding mass to translational DOF + M(rDOF,rDOF) = M(rDOF,rDOF)+moment_of_inertia; + + %% get polynomial stiffness coefficients + if isViscoelastic + [powers,coefficients] = get_coefficients(f,[x;xd]); + else + f = 
simplify(subs(f,xd,sym(zeros(ndof,1)))); + [powers,coefficients] = get_coefficients(f,x); + end + + % Obtaining tensor from multi-index coefficients + degree = sum(powers,2); % get degree associated to each multi-index + %% + % extract multi-indices and tensors corresponding to degree 2 + + idx_2 = find(degree==2); + If2 = powers(idx_2,:); + Cf2 = coefficients(idx_2,:); + f2 = multi_index_to_tensor(Cf2.',If2); + + %% + % extract multi-indices and tensors corresponding to degree 3 + idx_3 = find(degree==3); + If3 = powers(idx_3,:); + Cf3 = coefficients(idx_3,:); + f3 = multi_index_to_tensor(Cf3.',If3); + + fnl = {f2,f3}; + + save(name,'M','C','K','fnl','fext','outdof','f'); +end +end + +function [E,P] = get_coefficients(f,x) +% this function returns the nonlinear coefficients required in the format +% for Harmonic Balance using NLvib. +n = length(f); +E = []; +P = []; + +for i=1:n + [c,t] = coeffs(f(i),x.'); + c = double(c); + for j = 1:length(c) + exponent = get_exponent(t(j),x); + coefficient = c(j); + + if isempty(E) % taking care of the trivial case + is = false; + else + [is, loc] = ismember(exponent,E,'rows'); + end + + if is + P(loc,i) = coefficient; + else + p = zeros(1,n); + p(i) = coefficient; + P = [P; p]; + E = [E;exponent]; + end + end +end + + + function e = get_exponent(monomial,x) + factors = factor(monomial); + e = sum(double(jacobian(factors.',x))); + end +end + diff --git a/examples/TimoshenkoBeamIRs/demo.mlx b/examples/TimoshenkoBeamIRs/demo.mlx new file mode 100644 index 0000000..2412fe8 Binary files /dev/null and b/examples/TimoshenkoBeamIRs/demo.mlx differ diff --git a/examples/vonKarmanShellIRs/build_model.m b/examples/vonKarmanShellIRs/build_model.m new file mode 100644 index 0000000..ec2d370 --- /dev/null +++ b/examples/vonKarmanShellIRs/build_model.m @@ -0,0 +1,138 @@ +function [M,C,K,fnl,fext, outdof] = build_model(nDiscretization) +% we tune w to trigger 1:2 resonance + +%% Finite Element Setup +% Geometry +startLIN = tic; +l = 1; % width of domain [m] +b = 2; % length of domain [m] +t = 1e-2; % thickness of plate [m] +w = 0.041; % curvature parameter (height of the midpoint relative to ends) [m] +% material properties +E = 70e9; % 70e9 % 200e9 % Young's modulus [Pa] +rho = 2700; % 2700 % 7850 % density [kg/m^3] +nu = 0.33; % Poisson's ratio +kappa = 1e5; % material damping modulus 1e8 + +%% FE model +disp('Building FE model') +% Material +myMaterial = KirchoffMaterial(); +set(myMaterial,'YOUNGS_MODULUS',E,'DENSITY',rho,'POISSONS_RATIO',nu,'DAMPING_MODULUS',kappa); +% Element +myElementConstructor = @()TriShellElement(t, myMaterial); % same element all across the domain + +% Meshing the geometry +nl = nDiscretization; +nb = 2*nDiscretization; +[nodes,elements,bnodes] = RectangularMesh(l,b,nl,nb,w); % Rectangular Mesh definition + +% creating Mesh object +MyMesh = Mesh(nodes); +MyMesh.create_elements_table(elements,myElementConstructor); + +% Plot mesh +figure('Name','Mesh'); PlotMesh(nodes,elements,0); + +%% Assemble linear stiffness, mass and damping +disp('Assembling M,C,K matrices') +% % parallelized assembly +% cluster = parcluster('local'); +% cluster.NumWorkers = 4; +% parpool(cluster, 4) +% MyAssembly = Assembly(myMesh,true); + +MyAssembly = Assembly(MyMesh); +K = MyAssembly.stiffness_matrix(); +M = MyAssembly.mass_matrix(); +% C = MyAssembly.damping_matrix(); + +%% apply boundary conditions +disp('Applying boundary conditions') +MyMesh.set_essential_boundary_condition([bnodes{3}, bnodes{4}],1:3,0) % simply supported on opposite ends +M = 
MyAssembly.constrain_matrix(M); +K = MyAssembly.constrain_matrix(K); +% C = MyAssembly.constrain_matrix(C); + + + +%% Eigenvalue problem +disp('Solving undamped eigenvalue problem') +n_VMs = 5; % first n_VMs modes with lowest frequency calculated +[V0,omega2] = eigs(K,M,n_VMs,'SM'); +omega = sqrt(diag(omega2)); + +V = MyAssembly.unconstrain_vector(V0); +mod = 1; +v1 = reshape(V(:,mod),6,[]); +figure; +PlotFieldonDeformedMesh(nodes,elements,v1(1:3,:).','factor',5) +title(['Frequency = ' num2str(omega(mod)/(2*pi)) ' Hz'] ) +set(colorbar,'visible','off') + +mod = 2; +v1 = reshape(V(:,mod),6,[]); +figure; +PlotFieldonDeformedMesh(nodes,elements,v1(1:3,:).','factor',5) +title(['Frequency = ' num2str(omega(mod)/(2*pi)) ' Hz'] ) +set(colorbar,'visible','off') + +%% Damping matrix +disp('Using Rayleigh damping') +W = omega(1:2); +a = [W(1) 1/W(1);W(2) 1/W(2)]\[0.004;0.004]; +C = a(2) * M + a(1) * K; + +%% external force assembly +disp('Assembling external force vector') +outcoord = [l/2,b/4]; % output coordinate +outdir = 3; % transverse displacement +dist = vecnorm(MyMesh.nodes(:,1:2) - repmat(outcoord,[MyMesh.nNodes,1]),2,2); +[~,outnode] = min(dist); +outdof = (outnode-1)*MyAssembly.Mesh.nDOFPerNode+outdir; + +outdofvec = sparse(outdof,ones(size(outdof)),1,MyMesh.nDOFs,1); +outdofvec = MyAssembly.constrain_vector(outdofvec); +outdof = find(outdofvec); + +fext = 100*outdofvec; + +centcoord = [l/2,b/2]; +dist = vecnorm(MyMesh.nodes(:,1:2) - repmat(centcoord,[MyMesh.nNodes,1]),2,2); +[~,outnode] = min(dist); +centdof = (outnode-1)*MyAssembly.Mesh.nDOFPerNode+outdir; +centdofvec = sparse(centdof,ones(size(centdof)),1,MyMesh.nDOFs,1); +centdofvec = MyAssembly.constrain_vector(centdofvec); +centdof = find(centdofvec); +outdof = [outdof;centdof]; + + +% fext = MyAssembly.constrain_vector(MyAssembly.uniform_body_force()); + +computationTimeLIN = toc(startLIN); +%% Tensor Assembly +disp('Getting nonlinearity coefficients') +filename = ['tensors_' num2str(MyMesh.nElements) '.mat']; +try + load(filename,'fnl') + disp('Loaded tensors from storage') + load(filename, 'computationTimeTensors') + disp(['Total time spent on model assembly = ' datestr(datenum(0,0,0,0,0,computationTimeTensors + computationTimeLIN),'HH:MM:SS')]) +catch + fnl = cell(1,2); + disp('Assembling Tensors') + startTensors = tic; + fnl{1} = MyAssembly.tensor('T2',[MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs], [2,3]); + fnl{2} = MyAssembly.tensor('T3',[MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs], [2,3,4]); + computationTimeTensors = toc(startTensors); + disp('Saving Tensors') + save(filename,'fnl','computationTimeTensors','-v7.3') + disp(['Total time spent on model assembly = ' datestr(datenum(0,0,0,0,0,computationTimeTensors + computationTimeLIN),'HH:MM:SS')]) +end + +% apply boundary conditions +for j = 1:length(fnl) + fnl{j} = MyAssembly.constrain_tensor(fnl{j}); +end + + diff --git a/examples/vonKarmanShellIRs/vonKarmanShell.m b/examples/vonKarmanShellIRs/vonKarmanShell.m new file mode 100644 index 0000000..52fd458 --- /dev/null +++ b/examples/vonKarmanShellIRs/vonKarmanShell.m @@ -0,0 +1,93 @@ +%% Shallow-curved shell structure with geometric nonlinearities +% Finite element model used in the following reference: +% +% Jain, S., & Tiso, P. (2018). Simulation-free hyper-reduction for geometrically +% nonlinear structural dynamics: a quadratic manifold lifting approach. _Journal +% of Computational and Nonlinear Dynamics_, _13_(7), 071003. 
+% +% Finite element code taken from the following package: +% +% Jain, S., Marconi, J., Tiso P. (2020). YetAnotherFEcode (Version v1.1). Zenodo. +% +% +% +%% +% *System parameters* + +clear all +nDiscretization = 10; % Discretization parameter (#DOFs is proportional to the square of this number) +epsilon = 0.1; % converge at order 5 +%% generate model + +[M,C,K,fnl,f_0,outdof] = build_model(nDiscretization); +n = length(M); % number of degrees of freedom +disp(['Number of degrees of freedom = ' num2str(n)]) +disp(['Phase space dimensionality = ' num2str(2*n)]) +%% Dynamical system setup +% We consider the forced system +% +% $$\mathbf{M}\ddot{\mathbf{x}}+\mathbf{C}\dot{\mathbf{x}}+\mathbf{K}\mathbf{x}+\mathbf{f}(\mathbf{x},\dot{\mathbf{x}})=\epsilon\mathbf{f}^{ext}(\mathbf{\Omega}t),$$ +% +% which can be written in the first-order form as +% +% $$\mathbf{B}\dot{\mathbf{z}} =\mathbf{Az}+\mathbf{F}(\mathbf{z})+\epsilon\mathbf{F}^{ext}(\mathbf{\phi}),\\\dot{\mathbf{\phi}} +% =\mathbf{\Omega}$$ +% +% where +% +% $\mathbf{z}=\left[\begin{array}{c}\mathbf{x}\\\dot{\mathbf{x}}\end{array}\right],\quad\mathbf{A}=\left[\begin{array}{cc}-\mathbf{K} +% & \mathbf{0}\\\mathbf{0} & \mathbf{M}\end{array}\right],\mathbf{B}=\left[\begin{array}{cc}\mathbf{C} +% & \mathbf{M}\\\mathbf{M} & \mathbf{0}\end{array}\right],\quad\quad\mathbf{F}(\mathbf{z})=\left[\begin{array}{c}\mathbf{-\mathbf{f}(\mathbf{x},\dot{\mathbf{x}})}\\\mathbf{0}\end{array}\right],\quad\mathbf{F}^{ext}(\mathbf{z},\mathbf{\phi})=\left[\begin{array}{c}\mathbf{f}^{ext}(\mathbf{\phi})\\\mathbf{0}\end{array}\right]$. + +DS = DynamicalSystem(); +set(DS,'M',M,'C',C,'K',K,'fnl',fnl); +set(DS.Options,'Emax',5,'Nmax',10,'notation','multiindex') +% set(DS.Options,'Emax',5,'Nmax',10,'notation','tensor') +%% +% We assume periodic forcing of the form +% +% $$\mathbf{f}^{ext}(\phi) = \mathbf{f}_0\cos(\phi)=\frac{\mathbf{f}_0}{2}e^{i\phi} +% + \frac{\mathbf{f}_0}{2}e^{-i\phi} $$ +% +% Fourier coefficients of Forcing + +kappas = [-1; 1]; +coeffs = [f_0 f_0]/2; +DS.add_forcing(coeffs, kappas,epsilon); +%% Linear Modal analysis and SSM setup + +[V,D,W] = DS.linear_spectral_analysis(); +%% +% *Choose Master subspace (perform resonance analysis)* + +S = SSM(DS); +set(S.Options, 'reltol', 0.1,'notation','multiindex') +% set(S.Options, 'reltol', 0.1,'notation','tensor') +masterModes = [1,2,3,4]; +S.choose_E(masterModes); +%% Forced response curves using SSMs +% Obtaining *forced response curve* in reduced-polar coordinate + +order = 3; +set(S.Options, 'reltol', 0.5,'IRtol',0.05,'notation', 'multiindex','contribNonAuto',true) +%% +% choose frequency range around the first natural frequency + +set(S.FRCOptions,'coordinates','polar','initialSolver','forward'); +set(S.contOptions, 'h_min', 1e-2,'h_max',2,'PtMX',300); +omega0 = imag(S.E.spectrum(1)); +omegaRange = omega0*[0.92 1.07]; +mFreq = [1 2]; +%% +% extract forced response curve + +p0 = [omegaRange(1) epsilon]'; +z0 = 1e-3*[1 1 1 1]'; +S.SSM_isol2ep('isol-3',masterModes,order,mFreq,'freq',omegaRange,outdof,{p0,z0}); +%% +% increase order +order = 5; +sol = ep_read_solution('','isol-3.ep',1); +set(S.FRCOptions,'initialSolver','fsolve'); +S.SSM_isol2ep('isol-5',masterModes,order,mFreq,'freq',omegaRange,outdof,{sol.p,sol.x}); \ No newline at end of file diff --git a/examples/vonKarmanShellIRs/vonKarmanShellWorkbook.mlx b/examples/vonKarmanShellIRs/vonKarmanShellWorkbook.mlx new file mode 100644 index 0000000..e78039d Binary files /dev/null and b/examples/vonKarmanShellIRs/vonKarmanShellWorkbook.mlx differ diff 
--git a/ext/Wrappers/@cocoWrapper/extract_FRC.m b/ext/Wrappers/@cocoWrapper/extract_FRC.m index 010c758..d586cc7 100644 --- a/ext/Wrappers/@cocoWrapper/extract_FRC.m +++ b/ext/Wrappers/@cocoWrapper/extract_FRC.m @@ -18,20 +18,15 @@ assert(numel(obj.system.Omega)==1, 'coco run assumes single freq component'); assert(obj.system.order == 2, 'fnl avaliable only for second-order systems') -for i=1:numel(obj.system.fnl) - fnli = obj.system.fnl{i}; - if ~isempty(fnli) - assert(size(fnli,2)==n, 'current implementation assumes f(x) instead of f(x,xd)'); - end -end +obj.fnlTensor2Multi(); +odedata.fnl = obj.multiFnl; +odedata.isbaseForce = obj.system.Options.BaseExcitation; switch obj.initialGuess case 'forward' %% initial solution by forward simulation % ode45 is used here. Integration option may be added in future x0_init = zeros(N,1); - obj.fnlTensor2Multi(); - odedata.fnl = obj.multiFnl; odefun = @(t,x) obj.ode_het(t,x,[omega0;epsilon],odedata); [~, x0_po] = ode15s(odefun , [0 tf], x0_init); % transient options = odeset('RelTol', 1e-9, 'AbsTol',1e-9); @@ -60,8 +55,6 @@ prob = cocoSet(obj, prob); prob = coco_set(prob, 'ode', 'autonomous', false); prob = coco_set(prob, 'ode', 'vectorized', true); -obj.fnlTensor2Multi(); -odedata.fnl = obj.multiFnl; odefun = @(t,x,p) obj.ode_het(t,x,p,odedata); funcs = {odefun}; diff --git a/ext/Wrappers/@cocoWrapper/ode_het.m b/ext/Wrappers/@cocoWrapper/ode_het.m index d7cd637..1b79db3 100644 --- a/ext/Wrappers/@cocoWrapper/ode_het.m +++ b/ext/Wrappers/@cocoWrapper/ode_het.m @@ -28,7 +28,7 @@ expind = find(ind); s = 1; for j=1:numel(expind) - s = s.*x(expind(j),:).^ind(expind(j)); + s = s.*z(expind(j),:).^ind(expind(j)); end s = repmat(s, [n, 1]); y2 = y2+coeff.*s; @@ -37,7 +37,11 @@ % external forcing assert(~isempty(obj.system.fext), 'no external forcing'); fext_coeffs = repmat(obj.system.fext.coeffs(:,1), [1, nt]); - fext_harm = repmat(ep.*cos(obj.system.fext.kappas(1)*om.*t), [n, 1]); + if data.isbaseForce + fext_harm = repmat(ep.*om.^2.*cos(obj.system.fext.kappas(1)*om.*t), [n, 1]); + else + fext_harm = repmat(ep.*cos(obj.system.fext.kappas(1)*om.*t), [n, 1]); + end y3 = 2*fext_coeffs.*fext_harm; y = y1 + [zeros(n,nt); obj.system.M\(-y2+y3)]; diff --git a/ext/Wrappers/cocoOptions.m b/ext/Wrappers/cocoOptions.m index 2f9bba7..c067e8c 100644 --- a/ext/Wrappers/cocoOptions.m +++ b/ext/Wrappers/cocoOptions.m @@ -7,7 +7,7 @@ % settings for continuation NPR = 10 % frequency of screen outputs NSV = 10 % frequency of storing solutions to disk - NAdapt = 10 % adaptation period, 0 == off + NAdapt = 0 % adaptation period, 0 == off h0 = 0.1 % initial step size h_max = 0.5 % max step size h_min = 0.01 % min step size diff --git a/ext/YetAnotherFEcode/README.md b/ext/YetAnotherFEcode/README.md index 6eb8e21..c17b42f 100644 --- a/ext/YetAnotherFEcode/README.md +++ b/ext/YetAnotherFEcode/README.md @@ -1,5 +1,5 @@ # YetAnotherFEcode -[![DOI](https://zenodo.org/badge/292112576.svg)](https://zenodo.org/badge/latestdoi/292112576) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4011281.svg)](https://doi.org/10.5281/zenodo.4011281) A simple MATLAB-based code for implementing the Finite Element method in an object oriented fashion. @@ -26,6 +26,7 @@ To use the code, simply add the main folder and its contents to the MATLAB path. Further usage and development instructions to follow. To showcase the relevance, please cite the following reference if you use this package in your work -Shobhit Jain, Jacopo Marconi, & Paolo Tiso. (2020). YetAnotherFEcode (Version v1.1). Zenodo. 
http://doi.org/10.5281/zenodo.4011282 + +Shobhit Jain, Jacopo Marconi & Paolo Tiso (2020). YetAnotherFEcode. Zenodo. http://doi.org/10.5281/zenodo.4011281 Please report any issues/bugs to Shobhit Jain diff --git a/ext/YetAnotherFEcode/examples/Mechanical/3D/Beam3D.m b/ext/YetAnotherFEcode/examples/Mechanical/3D/Beam3D.m index 803a166..177b90a 100644 --- a/ext/YetAnotherFEcode/examples/Mechanical/3D/Beam3D.m +++ b/ext/YetAnotherFEcode/examples/Mechanical/3D/Beam3D.m @@ -3,8 +3,8 @@ close all; clc -elementType = 'HEX20'; -% elementType = 'TET10'; +% elementType = 'HEX20'; +elementType = 'TET10'; %% PREPARE MODEL @@ -37,18 +37,17 @@ % filename = 'Job-BeamHex'; % [nodes, elements, nset, elset] = mesh_ABAQUSread(filename); % HEX20 mesh -myMesh = Mesh(nodes); -myMesh.create_elements_table(elements,myElementConstructor); +MyMesh = Mesh(nodes); +MyMesh.create_elements_table(elements,myElementConstructor); % MESH > BOUNDARY CONDITONS -myMesh.set_essential_boundary_condition([nset{1} nset{4}],1:3,0) -% myMesh.BC.set_dirichlet_dofs([nset{2} nset{3}],1:3,0) % abaqus +MyMesh.set_essential_boundary_condition([nset{1} nset{4}],1:3,0) +% MyMesh.BC.set_dirichlet_dofs([nset{2} nset{3}],1:3,0) % abaqus % ASSEMBLY ________________________________________________________________ -BeamAssembly = Assembly(myMesh); +BeamAssembly = Assembly(MyMesh); M = BeamAssembly.mass_matrix(); -nNodes = size(nodes,1); -u0 = zeros(nNodes*nDOFPerNode,1); +u0 = zeros(MyMesh.nDOFs,1); [K,~] = BeamAssembly.tangent_stiffness_and_force(u0); @@ -74,6 +73,9 @@ PlotFieldonDeformedMesh(nodes,elements,v1,'factor',1) title(['$$\Phi_' num2str(mod) '$$ - Frequency = ' num2str(f0(mod),3) ' Hz']) +%% nonlinear tensors +% T2 = BeamAssembly.tensor('T2',[MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs], [2,3]); +% T3 = BeamAssembly.tensor('T3',[MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs, MyMesh.nDOFs], [2,3,4]); %% EXAMPLE 2 @@ -83,10 +85,10 @@ % F = Pressure*BeamAssembly.uniform_body_force(); % Nodal force -F = zeros(myMesh.nDOFs,1); +F = zeros(MyMesh.nDOFs,1); nf = find_node(l/2,w/2,t/2,nodes); % node where to put the force -node_force_dofs = get_index(nf,nDOFPerNode); +node_force_dofs = get_index(nf,MyMesh.nDOFPerNode); F(node_force_dofs(3)) = 10e3; u_lin = BeamAssembly.solve_system(K,F); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/.gitignore b/ext/YetAnotherFEcode/external/tensor_toolbox/.gitignore new file mode 100644 index 0000000..3b1cc0f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/.gitignore @@ -0,0 +1,3 @@ +*~ +*.asv +/maintenance/create_doc.m diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/.gitlab-ci.yml b/ext/YetAnotherFEcode/external/tensor_toolbox/.gitlab-ci.yml new file mode 100644 index 0000000..af7f5c7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/.gitlab-ci.yml @@ -0,0 +1,11 @@ +pages: + stage: deploy + script: + - mkdir .public + - cp -r doc/html/* .public/ + - mv .public public + artifacts: + paths: + - public + only: + - master diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/arrange.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/arrange.m new file mode 100644 index 0000000..4ce3237 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/arrange.m @@ -0,0 +1,56 @@ +function X = arrange(X,foo) +%ARRANGE Arranges the rank-1 components of a ktensor. +% +% ARRANGE(X) normalizes the columns of the factor matrices and then sorts +% the ktensor components by magnitude, greatest to least. 
+% +% ARRANGE(X,N) absorbs the weights into the Nth factor matrix instead of +% lambda. +% +% ARRANGE(X,P) rearranges the components of X according to the +% permutation P. P should be a permutation of 1 to NCOMPONENTS(X). +% +% Examples +% K = ktensor([3; 2], rand(4,2), rand(5,2), rand(3,2)) +% arrange(K) %<--Normalize and sort according to weight vector +% arrange(K,[2, 1]) %<--Order components according to permutation +% +% See also KTENSOR, NCOMPONENTS, NORMALIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Just rearrange and return if second argument is a permutation +if exist('foo','var') && (length(foo) > 1) + X.lambda = X.lambda(foo); + for i = 1 : ndims(X) + X.u{i} = X.u{i}(:,foo); + end + return; +end + +%% Ensure that matrices are normalized +X = normalize(X); + +%% Sort +[X.lambda, idx] = sort(X.lambda, 1, 'descend'); +for i = 1 : ndims(X) + X.u{i} = X.u{i}(:,idx); +end + +%% Absorb the weight into one factor, if requested +if exist('foo','var') + r = length(X.lambda); + X.u{end} = full(X.u{end} * spdiags(X.lambda,0,r,r)); + X.lambda = ones(size(X.lambda)); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/datadisp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/datadisp.m new file mode 100644 index 0000000..fb7eb73 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/datadisp.m @@ -0,0 +1,119 @@ +function datadisp(T, dimlabels, opts) +%DATADISP Special display of a ktensor. +% +% DATADISP(T,LABELS) displays the largest positive entries of each rank-1 +% factor of T using the corresponding labels. LABELS is a cell array of +% size ndims(T) such that LABELS{n} is a string cell array of length +% size(T,n). +% +% DATADISP(T,LABELS,OPTS) specify options: +% OPTS.dimorder: Order to display the dimensions of T {1:ndims(T)} +% OPTS.maxentries: Number of entries to show for each factor {10} +% OPTS.printneg: Boolean to print the most negative entries {false} +% OPTS.threshold: Threshold of smallest magnitude score to show {1e-4} +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% Fill in optional variable +if ~exist('opts','var') + opts = struct; +end + +%% Set options from input or use defaults +dimorder = setparam(opts,'dimorder',1:ndims(T)); +maxentries = setparam(opts,'maxentries',10); +printneg = setparam(opts,'printneg',false); +threshold = setparam(opts,'threshold',1e-6); + +%% Main loop +R = size(T.lambda,1); % Rank +r = 1; +while (r <= R) + + fprintf(1, '\n======== Group %d ========\n', r); + fprintf('\nWeight = %f\n', T.lambda(r)); + + for i = dimorder(1:end) + + print_sublist(T.u{i}(:,r), dimlabels{i}, 'positive', maxentries, threshold); + + if printneg + print_sublist(T.u{i}(:,r), dimlabels{i}, 'negative', maxentries, threshold); + end + + end + + if r == R, + break, + end; + + foo = input('\nReturn to continue, jump to rank, or ''0'' (zero) to quit: '); + if foo == 0 + return; + elseif isempty(foo) + r = r+1; + else + r = foo; + end +end + +return; + + +%% +function print_sublist(score, labels, type, maxentries, threshold) + +if isequal(type,'positive') + [sortedScore, sortedIdx] = sort(score, 'descend'); +elseif isequal(type, 'negative') + [sortedScore, sortedIdx] = sort(score, 'ascend'); +else + error('Invalid type'); +end + +sortedRefs = labels(sortedIdx); +entries = min([maxentries, length(score)]); + +if isequal(type,'positive') + range = 1:entries; +else + range = entries:-1:1; +end + +fprintf('%-10s %-4s %s\n','Score','Id','Name'); + +for k = range + if abs(sortedScore(k)) < threshold + continue; + end + if isequal(type,'negative') && sortedScore(k) >= 0 + continue; + end + if (abs(sortedScore(k)) < 1e-4) + fprintf(1, '%10.3e %4d %s\n', sortedScore(k), sortedIdx(k), ... + sortedRefs{k}); + else + fprintf(1, '%10.7f %4d %s\n', sortedScore(k), sortedIdx(k), ... + sortedRefs{k}); + end +end + +%% +function x = setparam(opts,name,default) +if isfield(opts,name); + x = opts.(name); +else + x = default; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/disp.m new file mode 100644 index 0000000..f49521b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/disp.m @@ -0,0 +1,37 @@ +function disp(t, name) +%DISP Command window display for a ktensor. +% +% DISP(T) displays a Kruskal tensor with no name. +% +% DISP(T,NAME) display a Kruskal tensor with the given name. +% +% See also DISP, KTENSOR/DISPLAY, KTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if ~exist('name','var') + name = 'ans'; +end + +fprintf('%s is a ktensor of size %s\n', name, tt_size2str(size(t))); +output = tt_matrix2cellstr(t.lambda'); +fprintf('\t%s.lambda = \n',name); +fprintf('\t\t%s\n',output{:}); + +if (ndims(t) > 0) + for j = 1 : ndims(t) + fprintf('\t%s.U{%d} = \n', name, j); + output = tt_matrix2cellstr(t.u{j}); + fprintf('\t\t%s\n',output{:}); + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/display.m new file mode 100644 index 0000000..b9549b1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/display.m @@ -0,0 +1,20 @@ +function display(t) +%DISPLAY Command window display for a ktensor. +% +% DISPLAY(T) displays a Kruskal tensor with its name. +% +% See also DISPLAY, KTENSOR/DISP, KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/double.m new file mode 100644 index 0000000..318ab7f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/double.m @@ -0,0 +1,27 @@ +function A = double(X) +%DOUBLE Convert a ktensor to a double array. +% +% A = double(X) converts X to a standard multidimensional array. +% +% See also KTENSOR, KTENSOR/FULL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(X.lambda) % check for empty tensor + A = []; + return; +end + +sz = [size(X) 1]; +A = X.lambda' * khatrirao(X.u,'r')'; +A = reshape(A,sz); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/end.m new file mode 100644 index 0000000..d99682c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/end.m @@ -0,0 +1,32 @@ +function e = end(X,k,n) +%END Last index of indexing expression for ktensor. +% +% The expression X(end,:,:) will call END(X,1,3) to determine +% the value of the first index. +% +% See also KTENSOR, KTENSOR/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%TODO (after 2.0 release): Resolve ambiguity w.r.t X{end}and X(end,1,1) +%for 1st-order tensors. + +if n > ndims(X) + error('Subscript out of range.'); +end + +if (n ~= 1) + e = size(X,k); +else + e = ndims(X); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/extract.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/extract.m new file mode 100644 index 0000000..974a54c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/extract.m @@ -0,0 +1,30 @@ +function new_X = extract(X,idx) +%EXTRACT Creates a new ktensor with only the specified components. +% +% Y = EXTRACT(X,S) selected the subset of components in X as defined by +% S. It should be the case that S is a subset of [1,...,NCOMPONENTS(X)]. +% +% See also KTENSOR, NCOMPONENTS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Set-up +N = ndims(X); +%% Extract +new_lambda = X.lambda(idx); +new_U = cell(N,1); +for i = 1 : N + new_U{i} = X.u{i}(:,idx); +end +new_X = ktensor(new_lambda, new_U); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/fixsigns.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/fixsigns.m new file mode 100644 index 0000000..df4a599 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/fixsigns.m @@ -0,0 +1,117 @@ +function K = fixsigns(K,K0) +%FIXSIGNS Fix sign ambiguity of a ktensor. +% +% K = FIXSIGNS(K) makes it so that the largest magnitude entries for +% each vector in each factor of K are positive, provided that the +% sign on *pairs* of vectors in a rank-1 component can be flipped. +% +% K = FIXSIGNS(K,K0) returns a version of K where some of the signs of +% the columns of the factor matrices have been flipped to better align +% with K0. +% +% See also KTENSOR and KTENSOR/ARRANGE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if nargin == 1 + K = fixsigns_oneargin(K); +else + K = fixsigns_twoargin(K, K0); +end + + +%% +function K = fixsigns_oneargin(K) +R = length(K.lambda); +for r = 1 : R + + for n = 1:ndims(K) + [val(n),idx(n)] = max(abs(K.u{n}(:,r))); + sgn(n) = sign(K.u{n}(idx(n),r)); + end + + negidx = find(sgn == -1); + nflip = 2 * floor(numel(negidx)/2); + + for i = 1:nflip + n = negidx(i); + K.u{n}(:,r) = -K.u{n}(:,r); + end + +end + +%% +function [A, best_sign] = fixsigns_twoargin(A,B) +% T. Kolda, November 2010. 
+ +if ~isa(A,'ktensor') + A = ktensor(A); +end +if ~isa(B,'ktensor') + B = ktensor(B); +end +A = normalize(A); +B = normalize(B); + +N = ndims(A); +RA = ncomponents(A); +RB = ncomponents(B); + +%% Try to fix the signs for each component +best_sign = ones(N,RA); +for r = 1:RB + + % Compute the inner products. They should mostly be O(1) if there is a + % good match because the factors have prevsiouly been normalized. If + % the signs are correct, then the score should be +1. Otherwise we need + % to flip the sign and the score should be -1. + sgn_score = zeros(N,1); + for n = 1:N + sgn_score(n) = A{n}(:,r)' * B{n}(:,r); + end + + % Sort the sign scores. + [sort_sgn_score, sort_idx] = sort(sgn_score,'ascend'); + + % Determine the number of scores that should be flipped. + breakpt = find(sort_sgn_score < 0, 1, 'last'); + + % If nothing needs to be flipped, then move on the the next component. + if isempty(breakpt) + continue; + end + + % Need to flip signs in pairs. If we don't have an even number of + % negative sign scores, then we need to decide to do one fewer or one + % more. + if (mod(breakpt,2) == 0) + endpt = breakpt; + else + fprintf('Trouble fixing signs for mode %d\n', r); + if (breakpt < RB) && (-sort_sgn_score(breakpt) > sort_sgn_score(breakpt+1)) + endpt = breakpt + 1; + else + endpt = breakpt - 1; + end + end + + % Flip the signs + for i = 1:endpt + A{sort_idx(i)}(:,r) = -1 * A{sort_idx(i)}(:,r); + best_sign(sort_idx(i),r) = -1; + end + +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/full.m new file mode 100644 index 0000000..22305c5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/full.m @@ -0,0 +1,71 @@ +function t = full(t,vers) +%FULL Convert a ktensor to a (dense) tensor. +% +% T = FULL(C) converts a ktensor to a (dense) tensor. +% +% Examples +% X = ktensor([3; 2], rand(4,2), rand(5,2), rand(3,2)); +% Y = full(A) %<-- equivalent dense tensor +% +% See also KTENSOR, TENSOR, KTENSOR/DOUBLE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if ~exist('vers','var') + vers=2; +end + +sz = size(t); +if (vers==1) + data = t.lambda' * khatrirao(t.u,'r')'; +else + % Given d=length(sz) find a partition of modes 1:s and s+1:d that + % minimizes the memory for the following matrix-multiply. + minS=minSplit(sz); + if (minS==length(sz)) + data = khatrirao(t.u,'r') * t.lambda; + else + % This unrolls modes 1:minS into rows and minS+1:end into columns + % of the column-major matrix data which is then converted into a + % tensor without permutation. + data = khatrirao(t.u(1:minS),'r') * diag(t.lambda) * khatrirao(t.u(minS+1:end),'r')'; + end +end + +t = tensor(data,sz); +end + +function [minS]=minSplit(sz) + % Scan for optimal splitting with minimal memory footprint. + mLeft=sz(1); + mRight=prod(sz(2:end)); + minS=1; + minSum=mLeft+mRight; + for s=2:length(sz)-1 + mLeft=mLeft*sz(s); + mRight=mRight/sz(s); + if (mLeft+mRight<minSum) + minSum=mLeft+mRight; + minS=s; + else + % Suppose mL >= mR and n >= 1.
+ % Then: mL*n + mR/n = mL + mL*(n-1) + mR/n + % >= mL + mR*(n-1+1/n) >= mL + mR. + % + % Initially the right term dominates the sum and every factor + % reduction on the right gives a much smaller increase on the + % left. Once the left term begins to dominate it will always + % grow faster than the corresponding reduction on the right. + break; + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/innerprod.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/innerprod.m new file mode 100644 index 0000000..8237b13 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/innerprod.m @@ -0,0 +1,53 @@ +function res = innerprod(X,Y) +%INNERPROD Efficient inner product with a ktensor. +% +% R = INNERPROD(X,Y) efficiently computes the inner product between +% two tensors X and Y. If Y is a ktensor, the inner product is +% computed using inner products of the factor matrices, X{i}'*Y{i}. +% Otherwise, the inner product is computed using ttv with all of +% the columns of X's factor matrices, X{i}. +% +% See also KTENSOR, KTENSOR/TTV +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~isequal(size(X),size(Y)) + error('X and Y must be the same size.'); +end + +% X is a ktensor +switch class(Y) + + case {'ktensor'} + M = X.lambda * Y.lambda'; + for n = 1:ndims(X) + M = M .* (X.u{n}' * Y.u{n}); + end + res = sum(M(:)); + + case {'tensor','sptensor','ttensor'} + R = length(X.lambda); + vecs = cell(1,ndims(X)); + res = 0; + for r = 1:R + for n = 1:ndims(X) + vecs{n} = X.u{n}(:,r); + end + res = res + X.lambda(r) * ttv(Y,vecs); + end + + otherwise + disp(['Inner product not available for class ' class(Y)]); +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isequal.m new file mode 100644 index 0000000..04cdaf0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isequal.m @@ -0,0 +1,38 @@ +function [tf, tf_lambda, tf_U] = isequal(A,B) +%ISEQUAL True if each datum of two ktensor's are numerically equal. +% +% TF = ISEQUAL(A,B) returns true if each factor matrix and the lambda +% values are equal for A and B. Does not do any scaling or normalization +% first. +% +% [TF, TF_LAMBDA, TF_FACTORS] = ISEQUAL(A,B) returns also the result of +% comparing the lambda vectors (TF_LAMBDA) and an array with the results +% of comparing the factor matrices (TF_FACTORS). +% +% See also KTENSOR, KTENSOR/NORMALIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +tf = false; +tf_lambda = false; +tf_U = false; + +if ~isa(B,'ktensor') + return; +end + +tf_lambda = isequal(A.lambda, B.lambda); +if ncomponents(A) == ncomponents(B) + tf_U = cellfun(@isequal, A.u, B.u); +end +tf = tf_lambda & all(tf_U); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isscalar.m new file mode 100644 index 0000000..e313a1c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for ktensors. +% ISSCALAR(S) returns logical 0 (false) if S is a ktensor. +% +% See also KTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/issymmetric.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/issymmetric.m new file mode 100644 index 0000000..ebf9c28 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/issymmetric.m @@ -0,0 +1,41 @@ +function [tf,diffs] = issymmetric(X) +%ISSYMMETRIC Verify that a ktensor X is symmetric in all modes. +% +% TF = ISSYMMETRIC(X) returns true if X is exactly symmetric for every +% permutation. +% +% [TF,DIFFS] = ISSYMMETRIC(X) also returns the matrix of the norm of the +% differences between the normalized factor matrices. +% +% See also SYMMETRIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%T. Kolda, June 2014. + +n = ndims(X); +sz = size(X); +diffs = zeros(n,n); + +for i = 1:n + for j = i+1:n + if ~isequal(size(X.u{i}), size(X.u{j})) + diffs(i,j) = Inf; + elseif isequal(X.u{i},X.u{j}) + diffs(i,j) = 0; + else + diffs(i,j) = norm(X.u{i} - X.u{j}); + end + end +end + +tf = all(diffs(:) == 0); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ktensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ktensor.m new file mode 100644 index 0000000..28f289a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ktensor.m @@ -0,0 +1,226 @@ +%KTENSOR Class for Kruskal tensors (decomposed). +% +%KTENSOR Methods: +% arrange - Arranges the rank-1 components of a ktensor. +% datadisp - Special display of a ktensor. +% disp - Command window display for a ktensor. +% display - Command window display for a ktensor. +% double - Convert a ktensor to a double array. +% end - Last index of indexing expression for ktensor. +% extract - Creates a new ktensor with only the specified components. 
+% fixsigns - Fix sign ambiguity of a ktensor. +% full - Convert a ktensor to a (dense) tensor. +% innerprod - Efficient inner product with a ktensor. +% isequal - True if each datum of two ktensor's are numerically equal. +% isscalar - False for ktensors. +% issymmetric - Verify that a ktensor X is symmetric in all modes. +% ktensor - Tensor stored as a Kruskal operator (decomposed). +% mask - Extract values as specified by a mask tensor. +% minus - Binary subtraction for ktensor. +% mtimes - Implement A*B (scalar multiply) for ktensor. +% mttkrp - Matricized tensor times Khatri-Rao product for ktensor. +% ncomponents - Number of components for a ktensor. +% ndims - Number of dimensions for a ktensor. +% norm - Frobenius norm of a ktensor. +% normalize - Normalizes the columns of the factor matrices. +% nvecs - Compute the leading mode-n vectors for a ktensor. +% permute - Permute dimensions of a ktensor. +% plus - Binary addition for ktensor. +% redistribute - Distribute lambda values to a specified mode. +% score - Checks if two ktensors match except for permutation. +% size - Size of ktensor. +% subsasgn - Subscripted assignment for ktensor. +% subsref - Subscripted reference for a ktensor. +% symmetrize - Symmetrize a ktensor X in all modes. +% times - Element-wise multiplication for ktensor. +% tocell - Convert X to a cell array. +% tovec - Convert Ktensor to vector. +% ttm - Tensor times matrix for ktensor. +% ttv - Tensor times vector for ktensor. +% uminus - Unary minus for ktensor. +% update - Update one or more modes of the ktensor with new data. +% uplus - Unary plus for a ktensor. +% viz - Visualize a ktensor. +% +% Documentation page for Kruskal tensor class +% +% See also TENSOR_TOOLBOX +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function t = ktensor(varargin) +%KTENSOR Tensor stored as a Kruskal operator (decomposed). +% +% K = KTENSOR(lambda,U1,U2,...,UM) creates a Kruskal tensor from its +% constituent parts. Here lambda is a k-vector and each Um is a +% matrix with k columns. +% +% K = KTENSOR(lambda, U) is the same as above except that U is a +% cell array containing matrix Um in cell m. +% +% K = KTENSOR(U) assumes U is a cell array containing matrix Um in +% cell m and assigns the weight of each factor to be one. +% +% K = KTENSOR(T) creates a ktensor by copying an existing ktensor. +% +% K = KTENSOR(S) creates a ktensor from a symktensor. +% +% K = KTENSOR(FH, SZ, NC) creates a ktensor using function FH to create +% the factor matrices. Here SZ is the size of the final ktensor and NC is +% the number of components. The function specified by FH should take two +% size arguments and create a matrix of that size. +% +% Examples +% K = ktensor([3; 2], ones(4,2), ones(5,2), ones(3,2)) %<- Constructor +% K = ktensor(@rand, [4 5 3], 2) %<- Create a random tensor +% +% See also KTENSOR, CP_ALS, CP_OPT, CP_WOPT, CP_APR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% EMPTY CONSTRUCTOR +if nargin == 0 + t.lambda = []; + t.u = {}; + t = class(t,'ktensor'); + return; +end + +% Copy CONSTRUCTOR +if (nargin == 1) && isa(varargin{1}, 'ktensor') + t.lambda = varargin{1}.lambda; + t.u = varargin{1}.u; + t = class(t, 'ktensor'); + return; +end + + +% CONSTRUCTOR from SYMKTENSOR +% TODO: Need to check that this works! +if (nargin == 1) && isa(varargin{1}, 'symktensor') + t.lambda = varargin{1}.lambda; + [t.u{1:varargin{1}.m,1}] = deal(varargin{1}.u); + t = class(t, 'ktensor'); + return; +end + +% CONSTRUCTOR FROM FUNCTION HANDLE +if (nargin == 3) && isa(varargin{1},'function_handle') + fh = varargin{1}; + sz = varargin{2}; + nc = varargin{3}; + nd = length(sz); + lambda = ones(nc,1); + U = cell(nd,1); + for i = 1:length(sz) + U{i} = feval(fh,sz(i),nc); + end + t.lambda = lambda; + t.u = U; + t = class(t,'ktensor'); + return; +end + +% CONSTRUCTOR from VECTOR, SIZE, NDIMS, LAMBDAFLAG +if (nargin == 4) && isvector(varargin{1}) && islogical(varargin{4}) + x = varargin{1}; + sz = varargin{2}; + nd = varargin{3}; + lambdaflag = varargin{4}; + + if isrow(x) + x = x'; + end + + if lambdaflag + nc = length(x) / (sum(sz) + 1); + else + nc = length(x) / sum(sz); + end + if round(nc) ~= nc + error('Vector is not the right length'); + end + + if lambdaflag + t.lambda = x(1:nc); + shift = nc; + else + t.lambda = ones(nc,1); + shift = 0; + end + + t.u = cell(nd,1); + for n = 1:nd + mstart = nc*sum(sz(1:n-1))+shift+1; + mend = nc*sum(sz(1:n))+shift; + t.u{n} = reshape(x(mstart:mend),[],nc); + end + t = class(t, 'ktensor'); + return; +end + +% CONSTRUCTOR FROM CELL ARRAY +if (nargin == 1) && isa(varargin{1},'cell') + + u = varargin{1}; + nc = size(u{1},2); + if ~all(cellfun(@(x) ismatrix(x) && size(x,2) == nc, u)) + error('Invalid factor matrix') + end + t.lambda = ones(nc,1); + t.u = u; + if ~isvector(t.u) + error('U must be a vector'); + end + if isrow(t.u) + t.u = t.u'; + end + t = class(t, 'ktensor'); + return; + +end + +% CONSTRUCTOR FOR LAMBDA and LIST OF MATRICES +if (nargin >= 2) + + t.lambda = varargin{1}; + if ~iscolumn(t.lambda) + error('LAMBDA must be a column vector.'); + end + + if isa(varargin{2},'cell') + t.u = varargin{2}; + else + t.u = varargin(2:end); + end + + if ~isvector(t.u) + error('U must be a vector'); + end + if isrow(t.u) + t.u = t.u'; + end + + nc = length(t.lambda); + if ~all(cellfun(@(x) ismatrix(x) && size(x,2) == nc, t.u)) + error('Invalid factor matrix') + end + + t = class(t, 'ktensor'); + + return; +end + +error('Invalid ktensor constructor'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mask.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mask.m new file mode 100644 index 0000000..2f91a75 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mask.m @@ -0,0 +1,35 @@ +function vals = mask(X,W) +%MASK Extract values as specified by a mask tensor. +% +% V = MASK(X,W) extracts the values in X that correspond to nonzero +% values in the mask tensor W. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. 
+ +% Error check +if any(size(W) > size(X)) + error('Mask cannot be bigger than the input tensor') +end + +% Collect info +r = ncomponents(X); +d = ndims(X); +A = X.u; % factor matrices +lambda = X.lambda; + +% Extract locations of nonzeros in W +wsubs = find(W); +vsz = [size(wsubs,1) 1]; + +% Initialize vals array +vals = zeros(vsz); +for j = 1:r + tmpvals = lambda(j) * ones(vsz); + for k = 1:d + akvals = A{k}(wsubs(:,k),j); + tmpvals = tmpvals .* akvals; + end + vals = vals + tmpvals; +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/minus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/minus.m new file mode 100644 index 0000000..8419d6c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/minus.m @@ -0,0 +1,45 @@ +function C = minus(A,B) +%MINUS Binary subtraction for ktensor. +% +% C = MINUS(A,B) computes C = A - B. A and B must both be ktensors +% and have the same size, and the result is another ktensor of the +% same size. +% +% C = MINUS(A,B) is called for the syntax 'A - B' when A or B is a +% ktensor. +% +% See also KTENSOR, SIZE, ISEQUAL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if (isa(A,'ktensor') && isa(B,'ktensor')) + + if ~isequal(size(A),size(B)) + error('Tensor size mismatch.') + end + + lambda = [A.lambda; -B.lambda]; + M = ndims(A); + u = cell(M,1); + for m = 1 : M + u{m} = [A.u{m} B.u{m}]; + end + C = ktensor(lambda,u); + return; + +end + +error('Use minus(full(A),full(B)).'); + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mtimes.m new file mode 100644 index 0000000..62a0275 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mtimes.m @@ -0,0 +1,30 @@ +function C = mtimes(A,B) +%MTIMES Implement A*B (scalar multiply) for ktensor. +% +% C = mtimes(A,B) computes A * B where A is a Kruskal tensor and B is +% a scalar (or vice versa). The result C is the same size as A. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Note: We can do scalar times a tensor, but anything more complex is +% an error. 
+ +if isa(B,'numeric') && isequal(size(B),[1 1]) + C = ktensor(B * A.lambda, A.u); +elseif isa(A,'numeric') && isequal(size(A),[1 1]) + C = ktensor(A * B.lambda, B.u); +else + error('Use mtimes(full(A),full(B)).'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mttkrp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mttkrp.m new file mode 100644 index 0000000..fde19d9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/mttkrp.m @@ -0,0 +1,43 @@ +function V = mttkrp(X,U,n) +%MTTKRP Matricized tensor times Khatri-Rao product for ktensor. +% +% V = MTTKRP(X,U,n) efficiently calculates the matrix product of the +% n-mode matricization of X with the Khatri-Rao product of all +% entries in U, a cell array of matrices, except the nth. How to +% most efficiently do this computation depends on the type of tensor +% involved. +% +% See also TENSOR/MTTKRP, KTENSOR, KTENSOR/TTV +% +% Examples +% K = ktensor([2; 4], rand(3,2), rand(4,2), rand(5,2)); +% mttkrp(K, {rand(3,6), rand(4,6), rand(5,6)}, 3) +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +N = ndims(X); + +if (n==1) + R = size(U{2},2); +else + R = size(U{1},2); +end + +% Compute matrix of weights +W = repmat(X.lambda,1,R); +for i = [1:n-1,n+1:N] + W = W .* (X.u{i}' * U{i}); +end + +% Find each column of answer by multiplying columns of X.u{n} with weights +V = X.u{n} * W; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ncomponents.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ncomponents.m new file mode 100644 index 0000000..30f0517 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ncomponents.m @@ -0,0 +1,23 @@ +function n = ncomponents(t) +%NCOMPONENTS Number of components for a ktensor. +% +% NCOMPONENTS(T) returns the number of components in the ktensor T. +% +% X = ktensor(ones(4,1), rand(2,4), randn(3,4), randi(5,4,4)); +% ncomponents(X) %<--Returns 4 +% +% See also KTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = length(t.lambda); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ndims.m new file mode 100644 index 0000000..d20e1c4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ndims.m @@ -0,0 +1,20 @@ +function n = ndims(t) +%NDIMS Number of dimensions for a ktensor. +% +% NDIMS(T) returns the number of dimensions of tensor T. +% +% See also KTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. 
Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = numel(t.u); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/norm.m new file mode 100644 index 0000000..8fd7052 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/norm.m @@ -0,0 +1,31 @@ +function nrm = norm(A) +%NORM Frobenius norm of a ktensor. +% +% NORM(T) returns the Frobenius norm of a ktensor. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Retrieve the factors of A +U = A.u; + +% Compute the matrix of correlation coefficients +coefMatrix = A.lambda * A.lambda'; +for i = 1:ndims(A) + coefMatrix = coefMatrix .* (U{i}'*U{i}); +end + +nrm = sqrt(abs(sum(coefMatrix(:)))); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/normalize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/normalize.m new file mode 100644 index 0000000..e698e27 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/normalize.m @@ -0,0 +1,100 @@ +function X = normalize(X,N,normtype,mode) +%NORMALIZE Normalizes the columns of the factor matrices. +% +% NORMALIZE(X) normalizes the columns of each factor matrix using the +% vector 2-norm, absorbing the excess weight into lambda. Also ensures +% that lambda is positive. +% +% NORMALIZE(X,N) absorbs the weights into the Nth factor matrix instead +% of lambda. (All the lambda values are 1.) +% +% NORMALIZE(X,0) equally divides the weights across the factor matrices. +% (All the lambda values are 1.) +% +% NORMALIZE(X,[]) is equivalent to NORMALIZE(X). +% +% NORMALIZE(X,'sort') is the same as the above except it sorts the +% components by lambda value, from greatest to least. +% +% NORMALIZE(X,V,1) normalizes using the vector one norm (sum(abs(x)) +% rather than the two norm (sqrt(sum(x.^2))), where V can be any of the +% second arguments decribed above. +% +% NORMALIZE(X,[],1,I) just normalizes the I-th factor using whatever norm +% is specified by the 3rd argument (1 or 2). +% +% See also KTENSOR, ARRANGE, REDISTRIBUTE, TOCELL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% +if ~exist('N','var') + N = -1; +end + +if isempty(N) + N = -1; +end + +if isequal(N,'sort') + N = -2; +end + +if ~exist('normtype','var') + normtype = 2; +end + +if exist('mode', 'var') + for r = 1:length(X.lambda) + tmp = norm(X.u{mode}(:,r),normtype); + if (tmp > 0) + X.u{mode}(:,r) = X.u{mode}(:,r) / tmp; + end + X.lambda(r) = X.lambda(r) * tmp; + end + return; +end + +%% Ensure that matrices are normalized +for r = 1:length(X.lambda) + for n = 1:ndims(X) + tmp = norm(X.u{n}(:,r),normtype); + if (tmp > 0) + X.u{n}(:,r) = X.u{n}(:,r) / tmp; + end + X.lambda(r) = X.lambda(r) * tmp; + end +end + +%% Check that all the lambda values are positive +idx = find(X.lambda < 0); +X.u{1}(:,idx) = -1 * X.u{1}(:,idx); +X.lambda(idx) = -1 * X.lambda(idx); + +%% Absorb the weight into one factor, if requested +if (N == 0) + D = diag(nthroot(X.lambda,ndims(X))); + X.u = cellfun(@(x) x*D, X.u, 'UniformOutput', false); + X.lambda = ones(size(X.lambda)); +elseif (N > 0) + X.u{N} = X.u{N} * diag(X.lambda); + X.lambda = ones(size(X.lambda)); +elseif (N == -2) + if ncomponents(X) > 1 + [~,p] = sort(X.lambda,'descend'); + X = arrange(X,p); + end +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/nvecs.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/nvecs.m new file mode 100644 index 0000000..e9d4e68 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/nvecs.m @@ -0,0 +1,76 @@ +function u = nvecs(X,n,r,opts) +%NVECS Compute the leading mode-n vectors for a ktensor. +% +% U = NVECS(X,N,R) computes the R leading eigenvalues of Xn*Xn' +% (where Xn is the mode-N matricization of X), which provides +% information about the mode-N fibers. In two-dimensions, the R +% leading mode-1 vectors are the same as the R left singular vectors +% and the r leading mode-2 vectors are the same as the R right +% singular vectors. By default, this method computes the top R +% eigenvectors of the matrix Xn*Xn' using EIGS. +% +% Examples +% K = ktensor(@rand, [3 4 5], 2); +% nvecs(K, 3, 1) %<--The largest eigenvector of the 3-mode matricization +% +% Documentation page for n-vecs +% +% See also KTENSOR, TENMAT, EIGS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% HIDDEN OPTIONS... 
+% +% U = NVECS(X,n,r,OPTS) specifies additional options: +% OPTS.eigsopts: options passed to the EIGS routine [struct('disp',0)] +% OPTS.flipsign: make each column's largest element positive [true] +% + +if ~exist('opts','var') + opts = struct; +end + +if isfield(opts,'eigsopts') + eigsopts = opts.eigsopts; +else + eigsopts.disp = 0; +end + +% Compute Xn * Xn' excluding the nth factor +M = X.lambda * X.lambda'; +for i = 1:ndims(X) + if i == n, continue, end; + M = M .* (X.u{i}' * X.u{i}); +end + +% Compute Xn * Xn' +Y = X.u{n} * M * X.u{n}'; + +[u,d] = eigs(Y, r, 'LM', eigsopts); + +if isfield(opts,'flipsign') + flipsign = opts.flipsign; +else + flipsign = true; +end + +if flipsign + % Make the largest magnitude element be positive + [val,loc] = max(abs(u)); + for i = 1:r + if u(loc(i),i) < 0 + u(:,i) = u(:,i) * -1; + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/permute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/permute.m new file mode 100644 index 0000000..5a1bcf5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/permute.m @@ -0,0 +1,35 @@ +function b = permute(a,order) +%PERMUTE Permute dimensions of a ktensor. +% +% B = PERMUTE(A,ORDER) rearranges the dimensions of A so that they +% are in the order specified by the vector ORDER. The output is a ktensor +% with components rearranged as specified by ORDER. The corresponding +% tensor has the same components as A but the order of the subscripts +% needed to access any particular element is rearranged as specified by +% ORDER. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +N = ndims(a); + +if ~isequal(1:N,sort(order)) + error('Invalid permuation'); +end + +b = ktensor(a.lambda, a.u(order)); + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/plus.m new file mode 100644 index 0000000..63bb233 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/plus.m @@ -0,0 +1,41 @@ +function C = plus(A,B) +%PLUS Binary addition for ktensor. +% +% C = PLUS(A,B) adds two ktensors of the same size, and the +% result is a ktensor of the same size. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +if isa(B,'sumtensor') %If the 2nd argument is a sumtensor + C = plus(B,A); %Call plus for sumtensor + return +end + +if (isa(A,'ktensor') && isa(B,'ktensor')) + + if ~isequal(size(A),size(B)) + error('Tensor size mismatch.') + end + + lambda = [A.lambda; B.lambda]; + M = ndims(A); + u = cell(M,1); + for m = 1 : M + u{m} = [A.u{m} B.u{m}]; + end + C = ktensor(lambda, u); + return; +end + +error('Use plus(full(A),full(B)).'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/redistribute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/redistribute.m new file mode 100644 index 0000000..7a22a32 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/redistribute.m @@ -0,0 +1,27 @@ +function X = redistribute(X,mode) +%REDISTRIBUTE Distribute lambda values to a specified mode. +% +% K = REDISTRIBUTE(K,N) absorbs the weights from the lambda vector +% into mode N. The lambda vector is then set to all ones. +% +% Examples +% K = ktensor([2; 4], ones(3,2), ones(4,2), ones(2,2)); +% redistribute(K, 3) %<--Weight vector is absorbed into factor matrix +% +% See also KTENSOR, NORMALIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +for r = 1:length(X.lambda) + X.u{mode}(:,r) = X.u{mode}(:,r) * X.lambda(r); + X.lambda(r) = 1; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/score.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/score.m new file mode 100644 index 0000000..e242635 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/score.m @@ -0,0 +1,185 @@ +function [best_score, A, flag, best_perm] = score(A,B,varargin) +%SCORE Checks if two ktensors match except for permutation. +% +% SCORE(A,B) returns the score of the match between A and B where +% A is trying to be matched against B. +% +% We define matching as follows. If A and B are single component ktensors +% that have been normalized so that their weights are lambda_a and +% lambda_b, then the score is defined as +% +% score = penalty * (a1'*b1) * (a2'*b2) * ... * (aR'*bR), +% +% where the penalty is defined by the lambda values such that +% +% penalty = 1 - abs(lambda_a - lambda_b) / max(lamdba_a, lambda_b). +% +% The score of multi-components ktensors is a normalized sum of the +% scores across the best permutation of the components of A. A can have +% more components than B --- any extra components are ignored in terms of +% the matching score. +% +% [SCORE,A] = SCORE(...) also returns A which has been normalized +% and permuted to best match B. +% +% [SCORE,A,FLAG] = SCORE(...) also returns a boolean to indicate +% a match according to a user-specified threshold. +% +% [SCORE,A,FLAG,PERM] = SCORE(...) also returns the permutation +% of the components of A that was used to best match B. +% +% SCORE(A,B,'param',value,...) takes the following parameters... +% +% 'lambda_penalty' - Boolean indicating whether or not to consider the +% lambda values in the calculations. 
Default: true +% +% 'threshold' - Threshold specified in the formula above for +% determining a match. Default: 0.99^N where N = ndims(A) +% +% 'greedy' - Boolean indicating whether or not to consider all +% possible matchings (exponentially expensive) or just do a greedy +% matching. Default: true +% +% Examples +% A = ktensor([2; 1; 2], rand(3,3), rand(4,3), rand(5,3)); +% B = ktensor([2; 4], ones(3,2), ones(4,2), ones(5,2)); +% score(A, B) %<--score(B,A) does not work: B has more components than A +% score(A, B, 'greedy', false) %<--Check all permutations +% score(A, B, 'lambda_penalty', false) %<--Without lambda penalty +% +% This method is described in G. Tomasi and R. Bro, A Comparison of +% Algorithms for Fitting the PARAFAC Model, Computational Statistics & +% Data Analysis, Vol. 50, No. 7, pp. 1700-1734, April 2006, +% doi:10.1016/j.csda.2004.11.013. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% E. Acar & T. Kolda, 2010. + +%% Make sure A and B are ktensors +if ~isa(A,'ktensor') + A = ktensor(A); +end +if ~isa(B,'ktensor') + B = ktensor(B); +end + +%% Error checking +if ~isequal(size(A),size(B)) + error('Size mismatch'); +end + +%% Set-up +N = ndims(A); +RA = ncomponents(A); +RB = ncomponents(B); + +%% We're matching components in A to B +if (RA < RB) + error('Tensor A must have at least as many components as tensor B.'); +end + +%% Parse parameters +params = inputParser; +params.addParamValue('lambda_penalty', true, @islogical); +params.addParamValue('greedy', true, @islogical); +params.addParamValue('threshold', 0.99^N, @(x)(x<1)); +params.parse(varargin{:}); + +%% Make sure columns of factor matrices in A and B are normalized +A = normalize(A); +B = normalize(B); + +%% Compute all possible vector-vector congruences. + +% Compute every pair for each mode +Cbig = tenzeros([RA,RB,N]); +for n = 1:N + Cbig(:,:,n) = abs(A.u{n}' * B.u{n}); +end + +% Collapse across all modes using the product +C = double(collapse(Cbig,3,@prod)); + +%% Calculate penalty based on differences in the Lambda's +% Note that we are assuming the the lambda value are positive because the +% ktensor's were previously normalized. +if (params.Results.lambda_penalty) + P = zeros(RA,RB); + for ra = 1:RA + la = A.lambda(ra); + for rb = 1:RB + lb = B.lambda(rb); + P(ra,rb) = 1 - (abs(la-lb) / max(abs(la),abs(lb))); + end + end + C = P.*C; +end + +%% Option to do greedy matching +if (params.Results.greedy) + + best_perm = zeros(1,RA); + best_score = 0; + for r = 1:RB + [~,idx] = max(C(:)); + [i,j] = ind2sub([RA RB], idx); + best_score = best_score + C(i,j); + C(i,:) = -10; + C(:,j) = -10; + best_perm(j) = i; + end + best_score = best_score / RB; + flag = 1; + + % Rearrange the components of A according to the best matching + foo = 1:RA; + tf = ismember(foo,best_perm); + best_perm(RB+1:RA) = foo(~tf); + A = arrange(A, best_perm); + return; +end + +%% Compute all possible matchings +% Creates a matrix P where each row is a possible matching of components in +% A to components of B. 
We assume A has at least as many components as B. +idx = nchoosek(1:RA,RB); +M = []; +for i = 1:size(idx,1) + M = [M; perms(idx(i,:))]; %#ok +end + +%% Calculate the congruences for each matching +scores = zeros(size(M)); +for i = 1:size(M,1) + for r = 1:RB + scores(i,r) = C(M(i,r),r); + end +end + +%% Figure out the best matching based on sum's across the components +score = sum(scores,2)/RB; +[best_score, max_score_id] = max(score); +if min(scores(max_score_id,:)) >= params.Results.threshold + flag = 1; +else + flag = 0; +end +best_match = M(max_score_id,:); +best_perm = [best_match setdiff(1:RA, best_match)]; + +%% Rearrange the components of A according to the best matching +A = arrange(A, best_perm); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/size.m new file mode 100644 index 0000000..ca4d120 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/size.m @@ -0,0 +1,33 @@ +function m = size(t,idx) +%SIZE Size of ktensor. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = SIZE(T,DIM) returns the size of the dimension specified by +% the scalar DIM. +% +% See also KTENSOR, KTENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(t.lambda) + m = []; +end + +if exist('idx','var') + m = size(t.u{idx}, 1); +else + for i = 1 : ndims(t) + m(i) = size(t.u{i}, 1); + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsasgn.m new file mode 100644 index 0000000..b0440a6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsasgn.m @@ -0,0 +1,59 @@ +function t = subsasgn(t,s,b) +%SUBSASGN Subscripted assignment for ktensor. +% +% Subscripted assignment can be used to alter the lambda vector or the +% factor matrices of a ktensor. The entire factor matrix or weight vector +% must be provided. +% +% Examples +% X = ktensor(ones(4,1), rand(2,4), rand(3,4), rand(4,4)); +% X.lambda = 2*ones(4,1) %<--Redefine weight vector +% X.U{1} = zeros(2,4) %<--Redefine first factor matrix +% X.U = {zeros(2,4), zeros(3,4), zeros(4,4)} %<--Redefine factor matrices +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' 
+ switch s(1).subs + case 'lambda' + if length(s) == 1 + t = ktensor(b, t.u); + else + newlambda = subsasgn(t.lambda, s(2:end), b); + t = ktensor(newlambda, t.u); + end + case {'u','U'} + if length(s) == 1 + t = ktensor(t.lambda, b); + else + tmpu = subsasgn(t.u, s(2:end), b); + t = ktensor(t.lambda, tmpu); + end + otherwise + error(['No such field: ', s(1).subs]); + end + case '()' + error('Cannot change individual entries in a ktensor.') + case '{}' + new_s(1).type = '.'; + new_s(1).subs = 'u'; + new_s(2:length(s)+1) = s; + t = subsasgn(t, new_s, b); + otherwise + error('Invalid subsasgn.'); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsref.m new file mode 100644 index 0000000..6887d43 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/subsref.m @@ -0,0 +1,63 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for a ktensor. +% +% Subscripted reference is used to query the components of a ktensor. +% +% Examples +% X = ktensor([3; 2], rand(4,2), rand(5,2), rand(3,2)); +% X.lambda %<--returns the lambda array ([3;2]). +% X.U %<--returns a cell array of 3 matrices. +% X.U{1} %<--returns the matrix corresponding to the first mode. +% X(2,3,1) %<--calculates and returns that single element of X. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' + switch s(1).subs + case 'lambda' + a = tt_subsubsref(t.lambda,s); + case {'u','U','a','A'} + a = tt_subsubsref(t.u,s); + otherwise + error(['No such field: ', s(1).subs]); + end + case '()' + if length(s.subs) == 1 + subs = s.subs{1}; + r = length(t.lambda); + ns = size(subs,1); + d = size(subs,2); + b = repmat(reshape(t.lambda,1,r),ns,1); + for k = 1:d + tmp = b; + b = tmp .* t.u{k}(subs(:,k),:); + end + a = sum(b,2); + else + a = 0; + for k = 1 : length(t.lambda) + b = t.lambda(k); + for i = 1 : length(s.subs) + b = b * t.u{i}(s.subs{i},k); + end + a = a + b; + end + end + case '{}' + a = subsref(t.u,s); + otherwise + error('Invalid subsref'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/symmetrize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/symmetrize.m new file mode 100644 index 0000000..d205320 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/symmetrize.m @@ -0,0 +1,79 @@ +function Y = symmetrize(X) +%SYMMETRIZE Symmetrize a ktensor X in all modes. +% +% Y = symmetrize(X) will symmetrize a ktensor X with respect to all +% modes so that Y is symmetric with respect to any permutation of +% indices. +% +% See also ISSYMMETRIC, SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%T. Kolda, June 2014 + +n = ndims(X); +sz = size(X); + +% Check tensor dimensions for compatibility with symmetrization +if any(sz(2:end) ~= sz(1)) + error('Tensor is not cubic -- cannot be symmetrized'); +end + +% Distribute lambda evenly into factors +X = normalize(X,0); + +lambda = X.lambda; +U = X.u; +U1 = U{1}; + +V = U1; +for i = 2:n + + Ui = U{i}; + + for j = 1:size(U1,2); + if dot( U1(:,j), Ui(:,j) ) < 0 + Ui(:,j) = -Ui(:,j); + lambda(j) = -lambda(j); + end + end + + V = V + Ui; +end + +V = V./ n; + +% Odd-ordered tensors should not have any negative lambda values +if mod(ndims(X),2) == 1 + for j = 1:length(lambda) + if lambda(j) < 0 + lambda(j) = -lambda(j); + V(:,j) = -V(:,j); + end + end +end + +Y = cell(n,1); +for i = 1:n + Y{i} = V; +end +Y = ktensor(lambda,Y); + +%Y = arrange(Y); + + + + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/times.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/times.m new file mode 100644 index 0000000..362e9e9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/times.m @@ -0,0 +1,35 @@ +function C = times(A,B) +%TIMES Element-wise multiplication for ktensor. +% +% TIMES(A,B) denotes element-by-element multiplication (only supports the +% second argument being a tensor or sptensor). +% +% C = TIMES(A,B) is called for the syntax 'A .* B'. Either A or B must be +% a tensor or sptensor. +% +% See also KTENSOR, SPTENSOR/TIMES, TENSOR/TIMES. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~isequal(size(A),size(B)) + error('Must be two tensors of the same size'); +end + +switch class(B) + case {'sptensor','tensor'} + % Call back to sptensor version. + C = times(B,A); + return; + otherwise + error('Invalid second argument for ktensor/times'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tocell.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tocell.m new file mode 100644 index 0000000..7cafde7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tocell.m @@ -0,0 +1,47 @@ +function U = tocell(X,N) +%TOCELL Convert X to a cell array. +% +% TOCELL(X) converts X to a cell array, evenly distributing the +% weight in lambda. +% +% TOCELL(X,N) absorbs the weights into the Nth factor matrix. +% +% Examples +% K = ktensor([2; 4], ones(3,2), ones(4,2), ones(2,2)); +% tocell(K) %<--Output in a cell array with 3 matrices +% tocell(K,3) %<--Same as above, but weight absorbed into 3rd matrix +% +% See also KTENSOR, NORMALIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if exist('N','var') + X = normalize(X,N); + U = X.u; + return; +end + +if isequal(X.lambda,ones(size(X.lambda))) + U = X.u; + return; +end + +lsgn = sign(X.lambda); +lsplit = nthroot(abs(X.lambda),ndims(X)); +U = X.u; +U{1} = U{1} * diag(lsgn); +D = diag(lsplit); +for n = 1:ndims(X) + U{n} = U{n} * D; +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tovec.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tovec.m new file mode 100644 index 0000000..4d41081 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/tovec.m @@ -0,0 +1,36 @@ +function x = tovec(K,lambdaflag) +%TOVEC Convert Ktensor to vector. +% +% V = TOVEC(K) converts the Ktensor to a column vector of length +% sum(size(K)+1)*ncomponents(K). The format is +% [ K.lambda; K.U{1}(:); K.U{2}(:); ... ] +% +% V = TOVEC(K,false) ignores lambda in the conversion, so the vector V is +% of length P where = sum(size(K))*ncomponents(K). +% +% Examples +% K = ktensor([3; 2], rand(4,2), rand(5,2), rand(3,2)); +% V = tovec(K); +% Kcopy = ktensor(V, size(K), ndims(K), true); +% norm(K-Kcopy) %<- Zero (or close to it) +% +% K = ktensor({rand(4,2), rand(5,2), rand(3,2)}); +% V = tovec(K); +% Kcopy = ktensor(V, size(K), ndims(K), false); +% norm(K-Kcopy) %<- Zero (or close to it) +% +% See also KTENSOR, KTENSOR/SIZE, KTENSOR/NCOMPONENTS. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. + +if ~exist('lambdaflag','var') + lambdaflag = true; +end + +xcell = cellfun(@(x) x(:), K.u, 'UniformOutput', false); +x = cell2mat(xcell); + +if lambdaflag + x = [K.lambda; x]; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttm.m new file mode 100644 index 0000000..b5dbb23 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttm.m @@ -0,0 +1,110 @@ +function X = ttm(X,V,varargin) +%TTM Tensor times matrix for ktensor. +% +% Y = TTM(X,A,N) computes the n-mode product of the ktensor X with a +% matrix A; i.e., X x_N A. The integer N specifies the dimension +% (or mode) of X along which A should be multiplied. If size(A) = +% [J,I], then X must have size(X,N) = I. The result will be a +% ktensor of the same order and size as X except that size(Y,N) = J. +% +% Y = TTM(X,{A,B,C,...}) computes the n-mode product of the ktensor +% X with a sequence of matrices in the cell array. The n-mode +% products are computed sequentially along all dimensions (or modes) +% of X. The cell array contains ndims(X) matrices. +% +% Y = TTM(X,{A,B,C,...},DIMS) computes the sequence tensor-matrix +% products along the dimensions specified by DIMS. +% +% Y = TTM(...,'t') performs the same computations as above except +% the matrices are transposed. 
+% +% Examples +% X = ktensor({rand(5,2),rand(3,2),rand(4,2),rand(2,2)}); +% A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2); +% Y = ttm(X, A, 1) %<-- computes X times A in mode-1 +% Y = ttm(X, {A,B,C,D}, 1) %<-- same as above +% Y = ttm(X, A', 1, 't') %<-- same as above +% Y = ttm(X, {A,B,C,D}, [1 2 3 4]) %<-- 4-way multiply +% Y = ttm(X, {D,C,B,A}, [4 3 2 1]) %<-- same as above +% Y = ttm(X, {A,B,C,D}) %<-- same as above +% Y = ttm(X, {A',B',C',D'}, 't') %<-- same as above +% Y = ttm(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4 +% Y = ttm(X, {A,B,C,D}, [3 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, [1 2 4]) %<-- 3-way multiply +% Y = ttm(X, {A,B,C,D}, [1 2 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, -3) %<-- same as above +% Y = ttm(X, {A,B,C,D}, -3) %<-- same as above +% +% See also KTENSOR, KTENSOR/TTV, TENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTM requires at least two arguments.'); +end + +% Check for transpose option +isTranspose = false; +if numel(varargin) > 0 + if isnumeric(varargin{1}); + dims = varargin{1}; + end + isTranspose = (ischar(varargin{end}) && (varargin{end} == 't')); +end + +% Check for dims argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with V as a +% cell array with one element. +if ~iscell(V) + X = ttm(X,{V},dims,varargin{end}); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(X),numel(V)); + +% Determine correct size index +if isTranspose + j = 1; +else + j = 2; +end + +% Check that each multiplicand is the right size. +for i = 1:numel(dims) + if (ndims(V) ~= 2) || (size(V{vidx(i)},j) ~= size(X,dims(i))) +disp(size(V{vidx(i)})) +disp(size(X)) + + error('Multiplicand is wrong size'); + end +end + +% Do the multiplications in the specified modes. +for i = 1:numel(dims) + if isTranspose + X.u{dims(i)} = V{vidx(i)}'* X.u{dims(i)}; + else + X.u{dims(i)} = V{vidx(i)} * X.u{dims(i)}; + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttv.m new file mode 100644 index 0000000..8fbe907 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/ttv.m @@ -0,0 +1,77 @@ +function c = ttv(a,v,dims) +%TTV Tensor times vector for ktensor. +% +% Y = TTV(X,A,N) computes the product of Kruskal tensor X with a +% (column) vector A. The integer N specifies the dimension in X +% along which A is multiplied. If size(A) = [I,1], then X must have +% size(X,N) = I. Note that ndims(Y) = ndims(X) - 1 because the N-th +% dimension is removed. +% +% Y = TTV(X,{A1,A2,...}) computes the product of tensor X with a +% sequence of vectors in the cell array. The products are computed +% sequentially along all dimensions (or modes) of X. The cell array +% contains ndims(X) vectors. 
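+%
+% For example (an illustrative sketch; the sizes below are arbitrary):
+%   X = ktensor({rand(5,2),rand(3,2),rand(4,2)});
+%   Y = ttv(X, rand(3,1), 2)                  %<-- ktensor of size 5 x 4 (mode 2 removed)
+%   s = ttv(X, {rand(5,1),rand(3,1),rand(4,1)}) %<-- scalar (all modes removed)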
+% +% Y = TTV(X,{A1,A2,...},DIMS) computes the sequence tensor-vector +% products along the dimensions specified by DIMS. +% +% See also TENSOR/TTV, KTENSOR, KTENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTV requires at least two arguments.'); +end + +% Check for 3rd argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with v as a +% cell array with one element. +if ~iscell(v) + c = ttv(a,{v},dims); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(a),numel(v)); + +% Check that each multiplicand is the right size. +for i = 1:numel(dims) + if ~isequal(size(v{vidx(i)}),[size(a,dims(i)) 1]) + error('Multiplicand is wrong size'); + end +end + +% Figure out which dimensions will be left when we're done +remdims = setdiff(1:ndims(a),dims); + +% Collapse dimensions that are being multiplied out +newlambda = a.lambda; +for i = 1:numel(dims) + newlambda = newlambda .* ( a.u{dims(i)}' * v{vidx(i)} ); +end + +% Create final result +if isempty(remdims) + c = sum(newlambda); +else + c = ktensor(newlambda,a.u{remdims}); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uminus.m new file mode 100644 index 0000000..702a28c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus for ktensor. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.lambda = -t.lambda; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/update.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/update.m new file mode 100644 index 0000000..7e82795 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/update.m @@ -0,0 +1,61 @@ +function M = update(M,modes,data) +%UPDATE Update one or more modes of the ktensor with new data. +% +% K = update(K,N,U) updates the ktensor K in mode N with the data in U +% (in vector or matrix form). The value of N must be an integer between 1 +% and NDIMS(K). Further, NUMEL(U) must equal SZ(K,N) * NCOMPONENTS(K). +% +% K = update(K,0,LAMBDA) updates the ktensor's lambda vector with the +% data in LAMBDA, which must be a vector of length NDIMS(K). +% +% K = update(K, MODES, DATA) updates the modes of K that are specified by +% MODES with DATA. 
Here we assume that MODES is an ordered subset of +% {0,1,...,NDIMS(K)}. The vector DATA is a concatenation of vector data +% to be used to replace each mode. This mode is particularly useful when +% working with an optimization method that understands only vectors of +% unknowns. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. + +%% Error checking +if nargin < 3 + error('Update requires three arguments'); +end + +if ~isscalar(modes) + if ~isequal(modes,sort(modes,'ascend')) + error('Modes must be sorted'); + end +end + +%% +loc = 1; % Location in data array +sz = size(M); +r = ncomponents(M); +for k = modes + if k == 0 + endloc = loc + r - 1; + if length(data) < endloc + error('Data is too short'); + end + M.lambda = data(loc:endloc); + loc = endloc+1; + elseif k <= ndims(M) + endloc = loc + sz(k)*r - 1; + if length(data) < endloc + error('Data is too short'); + end + M.u{k} = reshape(data(loc:endloc),sz(k),r); + loc = endloc+1; + else + error('Invalid mode: %d', k); + end + +end + +%% Check that we used all the data +if loc ~= length(data)+1 + warning('Failed to consume all of the input data'); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uplus.m new file mode 100644 index 0000000..308dd49 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus for a ktensor. +% +% See also KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/viz.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/viz.m new file mode 100644 index 0000000..8ceb54d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ktensor/viz.m @@ -0,0 +1,283 @@ +function info = viz(K, varargin) +%VIZ Visualize a ktensor. +% +% VIZ(K) visualizes the components of an D-way ktensor with R components +% in an R x D arrangment of plots. Each column of plots represents the +% columns of the associated factor matrix, and each row represents a +% single rank-one component. +% +% INFO = VIZ(K, parameter, value...) takes optional parameters and +% returns additional information from the plot, including handles to all +% the axes. +% +% Primary optional parameters: +% +% 'PlotCommands' - Plot command, passed as name ('plot','bar', or'scatter') +% or a function handle of the form |@(x,y) plot(x,y);|. +% Can also be a cell array, one entry per mode. +% 'ModeTitles' - Cell array of mode titles. Use 'none' to disable. +% 'Figure' - Figure number. Default: [] (new figure). +% 'RelModeWidth' - Relative vertical width per mode. Default: ones(D,1). +% 'FactorTitles' - Choices are 'Weight' (relative), 'Number' or 'None'. +% 'Normalize' - Automatically normalizes the factor matrix columns and +% sorts the components by weight. Options: -1 (none), 1 +% (1-norm), 2 (2-norm). Can also be a function handle to an +% appropriate function. Default: 2. 
+% +% Detailed optional parameters: +% +% -- Spacing (all proportions in [0,1]) -- +% 'TopSpace' - Space at the top, for titles. Default: 0.05. +% 'BottomSpace' - Space at bottom, for xticklabels. Default: 0.05. +% 'LeftSpace' - Space at left, for component labels. Default: 0.025. +% 'RightSpace' - Space at right. Default: 0.025. +% 'VertSpace' - Vertical space inbetween factor axes. Default: 0.01. +% 'HorzSpace' - Horizontal space inbetween factor axes. Default: 0.01. +% 'YLims' - Choose one per mode: +% o 'same' - Same y-limits on all axes in the same mode +% o 'addzero' - Adjust limits so that zero is shown +% o [xl yl] - Specific limits +% o [] - No modification to what is done by the plot routine +% Default: repmat({'same'},[nd 1]). +% 'ShowZero' - Show dashed line at zero? Default: true(nd,1). +% 'YTicks' - Boolean for showing yticks or not. Default: false. (Note +% that if this is true, then need to increase 'HorzSpace'.) +% 'BaseFontSize' - Smallest font size. Default: 14. +% +% Return values: +% 'height' - Height of each plot (as a proportion in [0,1]). +% 'width' - Width of each plot (as a proportion in [0,1]). +% 'ModeTitles' - Handles for the D mode titles. +% 'GlobalAxis' - Handle for main axes in which all others are embedded. +% 'FactorAxes' - D x R array of handles for the subfigure axes. +% 'ModeTitleHandles' - D-array of the handles to the mode titles (on the top). +% 'CompTitleHandles' - R-array of handles to the factor titles (on the left). +% 'PlotHandles'- D x R array of handles to the figures for each factor. +% +% Examples: +% K = ktensor([3; 2], rand(40,2), rand(50,2), rand(30,2)); +% viz(K,'Figure',1,'Hspace',0.05,'Vspacebottom',0.075); +% +% Thanks to Alex Williams for the prototype for this functionality. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. + +% TGK: Need to add options around line at zero, marks to use, font sizes, etc. + + + +%% +nd = ndims(K); % Order +nc = ncomponents(K); % Rank + +%% Parse optional inputs + +params = inputParser; +% Normalize +params.addParameter('Normalize',2); +% Figure +params.addParameter('Figure', []); +% Spacing +params.addParameter('RelModeWidth', ones(nd,1)); % Horizontal space for each mode +params.addParameter('HorzSpace',0.01); % Horizontal space between axes +params.addParameter('RightSpace',0.025); % Horizontal space on left +params.addParameter('LeftSpace',0.075); % Horizontal space on right +params.addParameter('VertSpace',0.01); % Vertical space between axes +params.addParameter('TopSpace',0.05); % Vertical space at top +params.addParameter('BottomSpace',0.05); % Vertical space at bottom +% Titles +params.addParameter('ModeTitles', 'default'); +params.addParameter('FactorTitles', 'weight'); % Default is 'none'. 
Options are 'weight' or 'number' +% Plots +params.addParameter('PlotCommands', repmat({@(x,y) plot(x,y,'LineWidth',1,'Color','b');}, [nd 1])); +params.addParameter('YLims', repmat({'same'},[nd 1])); +params.addParameter('ShowZero',true(nd,1)); +params.addParameter('YTicks',false); +params.addParameter('BaseFontSize',14); + + +params.parse(varargin{:}); +res = params.Results; + +%% Clean up tensor +if isa(res.Normalize,'function_handle') + K = res.Normalize(K); +elseif res.Normalize > 0 + fprintf('ktensor/viz: Normalizing factors and sorting components according to the %d-norm.\n', res.Normalize); + K = normalize(K,'sort',res.Normalize); +end + +%% Create new figure or reset old figure +if isempty(res.Figure) + figure; +else + figure(res.Figure); + clf; +end + +%% Create number-of-modes (nd) x number-of-components (nc) axes + +% Calculate the amount of vertical space available for the plots themselves +% by subtracting off the top and bottom space as well as the inbetween +% space. +Vplotspace = 1 - res.TopSpace - res.BottomSpace - (nc - 1) * res.VertSpace; +height = Vplotspace / nc; + +% Do likewise for the horizontal space. +Hplotspace = 1 - res.LeftSpace - res.RightSpace - (nd - 1) * res.HorzSpace; +width = (res.RelModeWidth ./ sum(res.RelModeWidth)) .* Hplotspace; + +% Create the global axis +GlobalAxis = axes('Position',[0 0 1 1]); % Global Axes +axis off; + +% Create the nd x nc factor axes array +FactorAxes = gobjects(nd,nc); % Factor Axes +for k = 1 : nd + for j = 1 : nc + xpos = res.LeftSpace + (k-1) * res.HorzSpace + sum(width(1:k-1)); + ypos = 1 - res.TopSpace - height - (j-1) * (height + res.VertSpace); + FactorAxes(k,j) = axes('Position',[xpos ypos width(k) height]); + FactorAxes(k,j).FontSize = res.BaseFontSize; + end +end + + +%% Plot each factor +PlotCommands = res.PlotCommands; +if ~iscell(PlotCommands) + PlotCommands = repmat({PlotCommands}, [nd 1]); +end +for k = 1:nd + if isempty(PlotCommands{k}) || strcmpi(PlotCommands{k},'plot') || strcmpi(PlotCommands{k},'line') + PlotCommands{k} = @(x,y) plot(x,y,'Linewidth',1,'Color','b'); + elseif strcmpi(PlotCommands{k},'bar') + PlotCommands{k} = @(x,y) bar(x,y,'EdgeColor','b','FaceColor','b'); + elseif strcmpi(PlotCommands{k},'scatter') + PlotCommands{k} = @(x,y) scatter(x,y,10,'b','filled'); + end +end + +h = cell(nd,nc); +for k = 1 : nd + + % Extract component, no modifications + U = K.u{k}; + + % Add one extra at end of ticks + xl = [0 size(K,k)+1]; + + % Create y-axes that include zero + yl = [min( -0.01, min(U(:)) ), max( 0.01, max(U(:)) )]; + + for j = 1 : nc + + % Extract x & y data + xx = (1:size(K,k))'; + yy = U(:,j); + + % Set up plot + hold(FactorAxes(k,j), 'off'); + axes(FactorAxes(k,j)); + + % Do the plot command + hh = PlotCommands{k}(xx, yy); + + % Set x-axes + xlim(FactorAxes(k,j),xl); + + % Set y-axes + if isequal(res.YLims{k}, 'same') + ylim(FactorAxes(k,j),yl); + elseif isequal(res.YLims{k},'addzero') + % Create y-axes that include zero + tmpyl = [ min(-0.01, min(U(:,j))), max( 0.01, max(U(:,j))) ]; + ylim(FactorAxes(k,j),tmpyl); + elseif isnumeric(res.YLims{k}) && isequal(size(res.YLims{k}),[1 2]) + ylim(FactorAxes(k,j),res.YLims{k}); + else + fprintf('Do nothing to FactorAxes\n'); + end + + % Turn off y-label + set(FactorAxes(k,j), 'Ylabel', []); + + % Turn off y-ticks + if ~res.YTicks + set(FactorAxes(k,j),'Ytick',[]); + end + + % Draw a box around the axes + set(FactorAxes(k,j),'Box','on') + + % Turn of x-labels if not the bottom plot + if j < nc + set(FactorAxes(k,j),'XtickLabel',{}); + end + + % Draw 
dashed line at zero + if res.ShowZero(k) + hold(FactorAxes(k,j), 'on'); + plot(FactorAxes(k,j), xl, [0 0], 'k:', 'Linewidth', 1.5); + end + + % Save handle for main plot + h{k,j} = hh; + + % Make the fonts on the xtick labels big + set(FactorAxes(k,j),'FontSize',res.BaseFontSize) + end +end + +%% Title for each mode, along the top +if ( isa(res.ModeTitles,'char') && strcmpi(res.ModeTitles,'none') ) + ModeTitleHandles = repmat({[]},nd,1); +else + ModeTitleHandles = gobjects(nd,1); + if ( isa(res.ModeTitles,'char') && strcmpi(res.ModeTitles,'default') ) + ModeTitles = cell(nd,1); + for i = 1:nd + ModeTitles{i} = sprintf('Mode %d',i); + end + else + ModeTitles = res.ModeTitles; + end + + axes(GlobalAxis); + for k = 1:nd + xpos = res.LeftSpace + (k-1) * res.HorzSpace + sum(width(1:k-1)) + 0.5 * width(k); + %xpos = res.LeftSpace + (k-1) * (width + res.HorzSpace) + 0.5 * width; + ypos = 1 - res.TopSpace; + ModeTitleHandles(k) = text(xpos,ypos,ModeTitles{k},'VerticalAlignment','Bottom','HorizontalAlignment','Center'); + set(ModeTitleHandles(k),'FontSize',res.BaseFontSize+2) + set(ModeTitleHandles(k),'FontWeight','bold') + end +end + +%% Print component titles along the left side +CompTitleHandles = gobjects(nc,1); +if ~strcmpi(res.FactorTitles,'none') + axes(GlobalAxis); + rellambda = abs (K.lambda / K.lambda(1)); + for j = 1:nc + xpos = 0.1 * res.LeftSpace; + ypos = 1 - res.TopSpace - 0.5 * height - (j-1) * (height + res.VertSpace); + if strcmpi(res.FactorTitles,'weight') + txt = sprintf('%3.2f', rellambda(j)); + else + txt = sprintf('%d', j); + end + CompTitleHandles(j) = text(xpos,ypos,txt,'VerticalAlignment','Middle','HorizontalAlignment','Left'); + set(CompTitleHandles(j),'FontSize',res.BaseFontSize) + end +end +%% Save stuff to return +info.height = height; +info.width = width; +info.GlobalAxis = GlobalAxis; +info.FactorAxes = FactorAxes; +info.ModeTitleHandles = ModeTitleHandles; +info.CompTitleHandles = CompTitleHandles; +info.PlotHandles = h; + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/aatx.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/aatx.m new file mode 100644 index 0000000..d5e3b18 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/aatx.m @@ -0,0 +1,39 @@ +function z = aatx(a,x) +%AATX Implicitly compute A * A' * x for sptenmat. +% +% Z = AATX(A,X) takes a sptenmat object A and computes A * A' * +% X. This is done without converting A to a standard MATLAB sparse +% matrix. +% +% This function is likely most useful as an argument to a routine +% such as EIGS. +% +% See also SPTENMAT, SPTENSOR/EIGS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +subs = a.subs; +s1 = subs(:,1); +s2 = subs(:,2); +m = size(a,1); +n = size(a,2); +vals = a.vals; + +v1 = x(s1); +v1 = vals .* v1; +y = accumarray(s2, v1, [n 1]); + +v2 = y(s2); +v2 = vals .* v2; +z = accumarray(s1, v2, [m 1]); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/disp.m new file mode 100644 index 0000000..0dbd8d9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/disp.m @@ -0,0 +1,66 @@ +function disp(a,name) +%DISP Command window display of a sptenmat. +% +% DISP(T) displays the tensor without printing its name. +% +% DISP(T,NAME) displays the tensor with the given name. +% +% See also SPTENMAT/DISPLAY. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('name','var') + name = 'ans'; +end + +% Extract the number of nonzeros and number of dimensions +nz = size(a.vals,1); + +% Print an intro sentence giving the name and the size +if (nz == 0) + fprintf('%s is an all-zero sptenmat from an sptensor of size %s\n',... + name, tt_size2str(a.tsize)); +else + fprintf('%s is a sptenmat from an sptensor of size %s with %d nonzeros\n',... + name, tt_size2str(a.tsize), nz); +end + +fprintf(1,'\t%s.rindices = %s (modes of tensor corresponding to rows)\n',... + name,['[ ' num2str(a.rdims) ' ]'] ); +fprintf(1,'\t%s.cindices = %s (modes of tensor corresponding to columns)\n',... + name,['[ ' num2str(a.cdims) ' ]'] ); + + +% Stop insane printouts +if (nz > 1000) + r = input('Are you sure you want to print all nonzeros? (Y/N) ','s'); + if upper(r) ~= 'Y', return, end; +end + +% Return now if there are no nonzeros +if (nz == 0) + return; +end + +% Pre-allocate memory for the output +output = cell(nz,1); +spc = floor(log10(max(a.subs)))+1; +fmt = ['\t(%' num2str(spc(1)) 'd,%' num2str(spc(2)) 'd)\t%g']; + +for i = 1:nz + output{i} = sprintf(fmt, a.subs(i,:), a.vals(i)); +end +fprintf('%s\n',output{:}); + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/display.m new file mode 100644 index 0000000..c379ac0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/display.m @@ -0,0 +1,20 @@ +function display(t) +%DISPLAY Command window display of a sptenmat. +% +% DISPLAY(T) displays the tensor with its name. +% +% See also SPTENMAT/DISP. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/double.m new file mode 100644 index 0000000..5d74c8d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/double.m @@ -0,0 +1,26 @@ +function A = double(T) +%DOUBLE Convert a sptenmat to a sparse matrix. +% +% A = double(T) converts T stored as a SPTENMAT to a sparse matrix. +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +m = prod(T.tsize(T.rdims)); +n = prod(T.tsize(T.cdims)); +if isempty(T.subs) + A = sparse(m,n); +else + A = sparse(T.subs(:,1), T.subs(:,2), T.vals, m, n); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/end.m new file mode 100644 index 0000000..933de9d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/end.m @@ -0,0 +1,24 @@ +function e = end(X,k,n) +%END Last index of indexing expression for sptenmat. +% +% The expression X(end,:) will call END(X,1,2) to determine +% the value of the first index. +% +% See also SPTENMAT, SPTENMAT/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if n > 2 + error('Subscript out of range.'); +end +e = size(X,k); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/full.m new file mode 100644 index 0000000..0ed4a86 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/full.m @@ -0,0 +1,30 @@ +function B = full(A) +%FULL Convert a sptenmat to a (dense) tenmat. +% +% B = FULL(A) converts a sptenmat A to a (dense) tenmat B. +% +% See also SPTENMAT, TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% Extract the order and size of A +siz = size(A); + +% Create a dense zero tensor B that is the same size as A +B = tenmat(zeros([siz,1,1]), A.rdims, A.cdims, A.tsize); + +% Extract the linear indices of entries in A +idx = tt_sub2ind(siz,A.subs); + +% Copy the values of A into B using linear indices +B(idx) = A.vals; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/nnz.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/nnz.m new file mode 100644 index 0000000..f11150c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/nnz.m @@ -0,0 +1,20 @@ +function n = nnz(a) +%NNZ Return number of nonzeros in a sptenmat. +% +% nnz(A) returns the number of nonzeros in A. +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = length(a.vals); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/norm.m new file mode 100644 index 0000000..4ca7a31 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/norm.m @@ -0,0 +1,22 @@ +function nrm = norm(T) +%NORM Frobenius norm of a sptenmat. +% +% NORM(T) returns the Frobenius norm of a matricized sparse tensor. +% +% See also SPTENMAT, NORM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +nrm = norm(T.vals); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/size.m new file mode 100644 index 0000000..c1aa5d4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/size.m @@ -0,0 +1,33 @@ +function siz = size(a,idx) +%SIZE Return size of sptenmat. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = size(T,DIM) returns the sizes of the dimensions specified by DIM. +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if isempty(a.tsize) + siz = []; + return; +end + +m = prod(a.tsize(a.rdims)); +n = prod(a.tsize(a.cdims)); +siz = [m n]; + +if exist('idx','var') + siz = siz(idx); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/sptenmat.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/sptenmat.m new file mode 100644 index 0000000..5dda382 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/sptenmat.m @@ -0,0 +1,271 @@ +%SPTENMAT Store sparse tensor as a sparse matrix. +% +%SPTENMAT Methods: +% aatx - Implicitly compute A * A' * x for sptenmat. +% disp - Command window display of a sptenmat. +% display - Command window display of a sptenmat. +% double - Convert a sptenmat to a sparse matrix. +% end - Last index of indexing expression for sptenmat. +% full - Convert a sptenmat to a (dense) tenmat. +% nnz - Return number of nonzeros in a sptenmat. +% norm - Frobenius norm of a sptenmat. +% size - Return size of sptenmat. +% sptenmat - Matricized sparse tensor stored as a sparse 2D array. +% subsasgn - Subscripted assignment for sptenmat. +% subsref - Subscripted reference for a sptenmat. +% tsize - Tensor size of sptenmat. +% uminus - Unary minus (-) for sptenmat. +% uplus - Unary plus (+) for sptenmat. +% +% Documentation page for sparse tensor-as-matrix class +% +% See also TENSOR_TOOLBOX +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function a = sptenmat(varargin) +%SPTENMAT Matricized sparse tensor stored as a sparse 2D array. +% +% A = SPTENMAT(T, RDIMS) creates a sparse matrix representation of +% an sptensor T. The dimensions (or modes) specified in RDIMS map +% to the rows of the matrix, and the remaining dimensions (in +% ascending order) map to the columns. +% +% A = SPTENMAT(T, CDIMS, 't') does the same as above, but instead +% the column dimensions are specified, and the remaining dimensions +% (in ascending order) map to the rows. +% +% A = SPTENMAT(T, RDIMS, CDIMS) creates a sparse matrix +% representation of sptensor T. The dimensions specified in RDIMS +% map to the rows of the matrix, and the dimensions specified in +% CDIMS map to the columns, in the order given. +% +% A = SPTENMAT(T, RDIM, STR) creates the same matrix representation +% as above, except only one dimension in RDIM maps to the rows of +% the matrix, and the remaining dimensions span the columns in an +% order specified by the string argument STR as follows: +% +% 'fc' - Forward cyclic. Order the remaining dimensions in the +% columns by [RDIM+1:ndims(T), 1:RDIM-1]. This is the +% ordering defined by Kiers. +% +% 'bc' - Backward cyclic. Order the remaining dimensions in the +% columns by [RDIM-1:-1:1, ndims(T):-1:RDIM+1]. This is the +% ordering defined by De Lathauwer, De Moor, and Vandewalle. +% +% A = SPTENAMT(B,RDIMS,CDIMS,TSIZE) creates a sptenmat from a matrix B +% along with the mappings of the row (RDIMS) and column indices (CDIMS) +% and the size of the original tensor (TSIZE). +% +% A = SPTENMAT(SUBS, VALS, RDIMS, CDIMS, TSIZE) creates a sptenmat +% from a set of 2D subscripts (SUBS) and values (VALS) along with +% the mappings of the row (RDIMS) and column indices (CDIMS) and the +% size of the original tensor (TSIZE). +% +% A = SPTENMAT is the empty constructor. +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. 
+% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%---------- +% EMPTY CONSTRUCTOR +%---------- +if (nargin == 0) + a.tsize = []; + a.rdims = []; + a.cdims = []; + a.subs = []; + a.vals = []; + a = class(a, 'sptenmat'); + return; +end + +%---------- +% COPY CONSTRUCTOR +%---------- +if (nargin == 1) && isa(varargin{1},'sptenmat') + t = varargin{1}; + a.tsize = t.tsize; + a.rdims = t.rdims; + a.cdims = t.cdims; + a.subs = t.subs; + a.vals = t.vals; + a = class(a, 'sptenmat'); + return; +end + +%---------- +% CONVERT LIST OF SUBS/VALS +%---------- +if (nargin == 5) + + subs = varargin{1}; + vals = varargin{2}; + rdims = varargin{3}; + cdims = varargin{4}; + tsize = varargin{5}; + + % Error check + n = numel(tsize); + if ~isequal(1:n, sort([rdims cdims])) + error('Incorrect specification of dimensions'); + elseif ~isempty(subs) && prod(tsize(rdims)) < max(subs(:,1)) + error('Invalid row index'); + elseif ~isempty(subs) && prod(tsize(cdims)) < max(subs(:,2)) + error('Invalid column index'); + end + + % Sum any duplicates + if isempty(subs) + newsubs = []; + newvals = []; + else + % Identify only the unique indices + [newsubs,junk,loc] = unique(subs,'rows'); + + % Sum the corresponding values + newvals = accumarray(loc,vals,[size(newsubs,1) 1]); + end + + % Find the nonzero indices of the new values + nzidx = find(newvals); + newsubs = newsubs(nzidx,:); + newvals = newvals(nzidx); + + % Save class variables + a.tsize = tsize; + a.rdims = rdims; + a.cdims = cdims; + a.subs = newsubs; + a.vals = newvals; + a = class(a, 'sptenmat'); + return; + +end + +%---------- +% CONVERT SPARSE or DENSE MATLAB MATRIX +%---------- +if (nargin == 4) + + B = varargin{1}; + [i,j,vals] = find(B); + subs = [i j]; + rdims = varargin{2}; + cdims = varargin{3}; + tsize = varargin{4}; + + % Error check + n = numel(tsize); + if ~isequal(1:n, sort([rdims cdims])) + error('Incorrect specification of dimensions'); + elseif ~isempty(subs) && prod(tsize(rdims)) < max(subs(:,1)) + error('Invalid row index'); + elseif ~isempty(subs) && prod(tsize(cdims)) < max(subs(:,2)) + error('Invalid column index'); + end + + % Save class variables + a.tsize = tsize; + a.rdims = rdims; + a.cdims = cdims; + a.subs = subs; + a.vals = vals; + a = class(a, 'sptenmat'); + return; + +end + + +%---------- +% CONVERT SPTENSOR +%---------- + +if (nargin < 2) || (nargin > 3) + error('Incorrect number of arguments.'); +end + +% Save the size of T and the number of dimensions +T = varargin{1}; +tsize = size(T); +tsubs = T.subs; +tvals = T.vals; +n = ndims(T); + +% Figure out which dimensions get mapped where +if (nargin == 2) + rdims = varargin{2}; + cdims = setdiff(1:n, rdims); +elseif isa(varargin{3},'char') + switch varargin{3} + case 't' % Transpose + cdims = varargin{2}; + rdims = setdiff(1:n, cdims); + case 'fc' % Forward cyclic + rdims = varargin{2}; + if (numel(rdims) ~= 1) + error('Only one row dimension if third argument is ''fc''.'); + end + cdims = [rdims+1:n, 1:rdims-1]; + case 'bc' % Backward cyclic + rdims = varargin{2}; + if (numel(rdims) ~= 1) + error('Only one row dimension if third argument is ''bc''.'); + end + cdims = [rdims-1:-1:1, n:-1:rdims+1]; + otherwise + error('Unrecognized option'); + end +else + 
rdims = varargin{2}; + cdims = varargin{3}; +end + +% Error check +if ~isequal(1:n, sort([rdims cdims])) + error('Incorrect specification of dimensions'); +end + +% Extract the appropriate sizes +rsize = tsize(rdims); +csize = tsize(cdims); + +% Reshape by transforming the indices +if isempty(rsize) + ridx = ones(nnz(T),1); +elseif isempty(tsubs) + ridx = []; +else + ridx = tt_sub2ind(rsize,tsubs(:,rdims)); +end + +if isempty(csize) + cidx = ones(nnz(T),1); +elseif isempty(tsubs) + cidx = []; +else + cidx = tt_sub2ind(csize,tsubs(:,cdims)); +end + +% Save class variables +a.tsize = tsize; +a.rdims = rdims; +a.cdims = cdims; +a.subs = [ridx, cidx]; +a.vals = tvals; +a = class(a, 'sptenmat'); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsasgn.m new file mode 100644 index 0000000..fdc94c7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsasgn.m @@ -0,0 +1,70 @@ +function t = subsasgn(t,s,b) +%SUBSASGN Subscripted assignment for sptenmat. +% +% Examples +% X = sptenmat(sptenrand([3 4 2],10),1); +% X(1:2,1:2) = ones(2,2); <-- Calls SUBSASGN +% +% See also SPTENMAT, SPTENMAT/SUBSREF. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%TODO: Make implementation efficient. It's not right now. +%TODO: Add error checking. + +switch s.type + case '()' + rsubs = s.subs{1}; + csubs = s.subs{2}; + + % If called with single element in rhs, then create a vector + if numel(b) == 1 + b = repmat(b, numel(rsubs) * numel(csubs), 1); + end + + % Initialize some variables for new entries in the matrix + newsubs = []; + newvals = []; + + k = 0; + + % Loop over the row and column indices, finding the + % appropriate row index for the (i,j) subscript + for j = 1:length(csubs) + indxc = find(t.subs(:,2) == csubs(j)); + for i = 1:length(rsubs) + indxr = find(t.subs(indxc,1) == rsubs(i)); + indx = indxc(indxr); + + k = k + 1; % increment counter into b + if isempty(indx) + newsubs = [newsubs; rsubs(i) csubs(j)]; + newvals = [newvals; b(k)]; + else + %t.subs(indx,:); + t.vals(indx) = b(k); + end + end + end + + % If there are new values to append, then add them on and sort + if ~isempty(newvals) + t.subs = [t.subs; newsubs]; + t.vals = [t.vals; newvals]; + [t.subs,indx] = sortrows(t.subs); + t.vals = t.vals(indx); + end + + otherwise + error('Invalid assignment for sptenmat.') +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsref.m new file mode 100644 index 0000000..d65ac83 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/subsref.m @@ -0,0 +1,43 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for a sptenmat. 
+% +% Examples +% A.subs <-- returns the nonzero values as an array +% A.vals <-- returns the corresponding 2D subscripts +% A.tsize <-- returns the size original tensor +% A.rdims <-- tensor dimensions that were mapped to rows +% A.cdims <-- tensor dimensions that were mapped to columns +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' + switch s(1).subs + case 'vals' + a = tt_subsubsref(t.vals,s); + case 'tsize' + a = t.tsize; + case 'rdims' + a = t.rdims; + case 'cdims' + a = t.cdims; + case 'subs' + a = tt_subsubsref(t.subs,s); + otherwise + error(['No such field: ', s.subs]); + end + otherwise + error('Invalid subsref into tenmat.') +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/tsize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/tsize.m new file mode 100644 index 0000000..86cb1c7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/tsize.m @@ -0,0 +1,36 @@ +function sz = tsize(a,idx) +%TSIZE Tensor size of sptenmat. +% +% D = TSIZE(X) returns the size of the tensor being stored as a +% matrix. +% +% M = TSIZE(X,DIM) returns the length of the dimension(s) specified +% by DIM. For example, SIZE(X,1) returns the size of the first +% dimension of the tensor. +% +% See also SPTENMAT, SPTENMAT/SIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(a.tsize) + sz = []; + return; +end + +if exist('idx', 'var') + sz = a.tsize(idx); +else + sz = a.tsize; +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uminus.m new file mode 100644 index 0000000..bcaee40 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus (-) for sptenmat. +% +% See also SPTENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +t.vals = -t.vals; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uplus.m new file mode 100644 index 0000000..78d6562 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptenmat/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus (+) for sptenmat. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/and.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/and.m new file mode 100644 index 0000000..7f1435a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/and.m @@ -0,0 +1,58 @@ +function C = and(A,B) +%AND Logical AND (&) for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a & 5 is sparse. +% The result of a & 0 is sparse. +% The result of a & full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(B) + if B == 0 + C = sptensor([],[],size(A)); + else + C = sptensor(A.subs,true,size(A)); + end + return; +end + +% Call back with the arguments reversed. +if isscalar(A) + C = and(B,A); + return; +end + +%% Case 2: Both x and y are tensors of some sort +% Check that the sizes match +if ~isequal(size(A),size(B)) + error('Must be tensors of the same size'); +end + +if isa(A,'sptensor') && isa(B,'sptensor') + C = sptensor([A.subs; B.subs], [A.vals; B.vals], size(A), ... + @(x) length(x) == 2); + return; +end + +if isa(B,'tensor') + BB = sptensor(A.subs,B(A.subs),size(A)); + C = and(A,BB); + return; +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/collapse.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/collapse.m new file mode 100644 index 0000000..4f72259 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/collapse.m @@ -0,0 +1,75 @@ +function s = collapse(t,dims,fun) +%COLLAPSE Collapse sparse tensor along specified dimensions. +% +% S = COLLAPSE(T,DIMS) sums the entries of T along all dimensions +% specified in DIMS. If DIMS is negative, then T is summed across +% all dimensions *not* specified by -DIMS. +% +% S = COLLAPSE(T) is shorthand for S = COLLAPSE(T,1:ndims(T)). +% +% S = COLLAPSE(T,DIMS,FUN) accumulates the entries of T using the +% accumulation function @FUN. 
+% +% Examples +% subs = [1 1 1; 1 1 3; 2 2 4; 4 4 4] +% vals = [10.5; 1.5; 2.5; 3.5] +% X = sptensor(subs,vals,[4 4 4]); +% Y = collapse(X,[2 3]) %<-- sum of entries in each mode-1 slice +% Y = collapse(ones(X),[1 2]) %<-- nnz in each mode-3 slide +% Y = collapse(ones(X),[1 2],@max) %<-- 1 if mode-3 has any entry +% Y = collapse(ones(X),-3,@max); %<-- equivalent +% +% Documentation page for collapsing and scaling tensors +% +% See also SPTENSOR, SPTENSOR/SCALE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('fun', 'var') + fun = @sum; +end + +if ~exist('dims', 'var') + dims = 1:ndims(t); +end + +dims = tt_dimscheck(dims,ndims(t)); +remdims = setdiff(1:ndims(t),dims); + +% Check for the case where we accumulate over *all* dimensions +if isempty(remdims) + s = fun(t.vals); + return; +end + +% Calculate the size of the result +newsiz = size(t,remdims); + +% Check for the case where the result is just a dense vector +if numel(remdims) == 1 + if ~isempty(t.subs) + s = accumarray(t.subs(:,remdims), t.vals, [newsiz 1], fun); + else + s = zeros(newsiz,1); + end + return; +end + +% Create the result +if ~isempty(t.subs) + s = sptensor(t.subs(:,remdims), t.vals, newsiz, fun); +else + s = sptensor([],[],newsiz); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/contract.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/contract.m new file mode 100644 index 0000000..6e300ca --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/contract.m @@ -0,0 +1,60 @@ +function y = contract(x,i,j) +%CONTRACT Contract sparse tensor along two dimensions (array trace). +% +% Y = CONTRACT(X,I,J) contracts the entries of X along dimensions I +% and J. Contraction is a generalization of matrix trace. In other +% words, the trace is performed along the two-dimensional slices +% defined by dimensions I and J. It is possible to implement tensor +% multiplication as an outer product followed by a contraction. +% +% Examples +% X = sptenrand([4 3 2],10); Y = sptenrand([3 2 4],10); +% Z1 = ttt(X,Y,1,3); %<-- Normal tensor multiplication +% Z2 = contract(ttt(X,Y),1,6); %<-- Outer product + contract +% norm(Z1-Z2) %<-- Should be zero +% +% See also SPTENSOR, SPTENSOR/TTT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% Error checking +if x.size(i) ~= x.size(j) + error('Must contract along equally sized dimensions'); +end + +% Error checking +if i == j + error('Must contract along two different dimensions'); +end + +% Easy case - returns a scalar +if ndims(x) == 2 + tfidx = (x.subs(:,1) == x.subs(:,2)); % find diagonal entries + y = sum(x.vals(tfidx)); + return; +end + +% Remaining dimensions after contract +remdims = setdiff(1:ndims(x),[i j]); + +% Find index of values on diagonal +indx = find(x.subs(:,i) == x.subs(:,j)); + +% Let the constructor sum up the entries +y = sptensor(x.subs(indx,remdims),x.vals(indx),x.size(remdims)); + +% Check if result should be dense +if nnz(y) > 0.5 * prod(y.size) + % Final result is a *dense* tensor + y = tensor(y); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/disp.m new file mode 100644 index 0000000..5e77544 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/disp.m @@ -0,0 +1,85 @@ +function disp(X,name) +%DISP Command window display of a sparse tensor. +% +% DISP(X) displays the tensor without printing its name. +% +% DISP(X,NAME) displays the tensor with the given name. +% +% See also SPTENSOR, SPTENSOR/DISPLAY. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Extract the number of nonzeros and number of dimensions +nz = nnz(X); + +if ~exist('name','var') + name = 'ans'; +end + +if (nz == 0) + fprintf('%s is an all-zero sparse tensor of size %s\n',... + name, tt_size2str(X.size)); + return; +else + fprintf('%s is a sparse tensor of size %s with %d nonzeros\n',... + name, tt_size2str(X.size), nz); +end + +% Stop insane printouts +if (nz > 10000) + r = input('Are you sure you want to print all nonzeros? (Y/N) ','s'); + if upper(r) ~= 'Y', return, end; +end + +% preallocate +output = cell(nz,1); +%% +spc = floor(log10(max(double(X.subs),[],1)))+1; +if numel(spc) == 1 + fmt = ['\t(%' num2str(spc(1)) 'd)%s']; +else + fmt = ['\t(%' num2str(spc(1)) 'd,']; + for i = 2:numel(spc)-1 + fmt = [fmt '%' num2str(spc(i)) 'd,']; + end + fmt = [fmt '%' num2str(spc(end)) 'd)%s']; +end +%% +% Get values out so that they look nice +savefmt = get(0,'FormatSpacing'); +format compact +S = evalc('disp(X.vals)'); +set(0,'FormatSpacing',savefmt) +S = textscan(S,'%s','delimiter','\n','whitespace',''); +S = S{1}; +if ~isempty(strfind(S{1},'*')) + fprintf('%s\n',S{1}); + S = S(2:end); +end +%% +for i = 1:nz + output{i} = sprintf(fmt,X.subs(i,:),S{i}); +end +fprintf('%s\n',output{:}); + +% function y = fmt(s,v) +% % nested function has access to n from disp function workspace +% if n > 1 +% y = [sprintf('\t(') sprintf('%d,',s(1:n-1))... 
+% sprintf('%d) ',s(n)) sprintf('\t%g',v)]; +% else +% y = sprintf('\t(%d) \t%f',s,v); +% end +% end + +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/display.m new file mode 100644 index 0000000..a31f8dc --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/display.m @@ -0,0 +1,20 @@ +function display(t) +%DISPLAY Command window display of a sparse tensor. +% +% DISPLAY(T) displays the tensor with its name. +% +% See also SPTENSOR, SPTENSOR/DISP. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/divide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/divide.m new file mode 100644 index 0000000..49b5f0e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/divide.m @@ -0,0 +1,39 @@ +function Y = divide(X,K,epsilon) +%DIVIDE Divide an SPTENSOR by a nonnegative KTENSOR. +% +% Y = DIVIDE(X,K,EPSILON) divides the sparse tensor X by the +% nonnegative ktensor K. Avoids divide-by-zero errors by dividing +% by MIN(EPSILON,K-VALUE) at each nonzero of X. +% +% See also SPTENSOR, KTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Assumes K is a nonnegative ktensor + +Y = X; + +subs = Y.subs; +vals = zeros(size(Y.vals)); +R = numel(K.lambda); +N = ndims(Y); +for r = 1:R + tvals = ones(size(vals)) * K.lambda(r); + for n = 1:N + v = K{n}(:,r); + tvals = tvals .* v(subs(:,n)); + end + vals = vals + tvals; +end +Y.vals = Y.vals ./ max(epsilon, vals); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/double.m new file mode 100644 index 0000000..12c9499 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/double.m @@ -0,0 +1,21 @@ +function a = double(s) +%DOUBLE Converts a sparse tensor to a dense multidimensional array. +% +% See also SPTENSOR, SPTENSOR/FULL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +a = zeros([size(s) 1 1]); +if nnz(s) > 0 + a(tt_sub2ind(size(s),s.subs)) = s.vals; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/elemfun.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/elemfun.m new file mode 100644 index 0000000..25466f4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/elemfun.m @@ -0,0 +1,41 @@ +function a = elemfun(a,fun) +%ELEMFUN Manipulate the nonzero elements of a sparse tensor. +% +% X = ELEMFUN(X,@FUN) modifies the elements of X according to the +% function @FUN which should take and array and output an equally +% sized array. +% +% Examples +% X = sptenrand([10,10,10],10); +% X = elemfun(X,@sqrt) %<-- square root of every entry +% X = elemfun(X, @(x) x+1) %<-- increase every entry by 1 +% X = elemfun(X, @(x) x ~= 0) %<-- change every nonzero to be 1 +% +% See also SPTENSOR, SPFUN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +if ~isa(a,'sptensor') + error('First argument must be a sparse tensor.'); +end + +a.vals = fun(a.vals); +idx = find(a.vals); +if isempty(idx) + a.vals = []; + a.subs = []; +else + a.vals = a.vals(idx); + a.subs = a.subs(idx,:); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/end.m new file mode 100644 index 0000000..51a6918 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/end.m @@ -0,0 +1,28 @@ +function e = end(X,k,n) +%END Last index of indexing expression for sparse tensor. +% +% The expression X(end,:,:) will call END(X,1,3) to determine +% the value of the first index. +% +% See also SPTENSOR, SPTENSOR/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +switch n + case 1 %linear indexing + e = prod(X.size); + case ndims(X) %subscript indexing + e = X.size(k); + otherwise + error('Invalid subscripting'); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/eq.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/eq.m new file mode 100644 index 0000000..6f7d8aa --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/eq.m @@ -0,0 +1,88 @@ +function z = eq(x,y) +%EQ Equal (==) for sptensors. +% +% A == B compares the elements of A and B for equality. The arguments can +% be a pair of sptensors, an sptensor and a tensor, or an sptensor and a +% scalar. Regardless, the result is always returned as a sparse tensor. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a == 5 is sparse. +% The result of a == 0 is sparse. +% The result of a == full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + if y == 0 + z = ~x; + else + idx = (x.vals == y); + z = sptensor(x.subs(idx,:),true,size(x)); + end + return; +end + +% Call back with the arguments reversed. +if isscalar(x) + z = eq(y,x); + return; +end + +%% Case 2: Both x and y are tensors of some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + + % Find where their zeros intersect + xzerosubs = setdiff(allsubs(x),x.subs,'rows'); + yzerosubs = setdiff(allsubs(y),y.subs,'rows'); + zzerosubs = intersect(xzerosubs,yzerosubs,'rows'); + + % find where their nonzeros intersect + [nzsubs,ix,iy] = intersect(x.subs,y.subs,'rows'); + znzsubs = nzsubs(x.vals(ix) == y.vals(iy),:); + + % Build z + z = sptensor([zzerosubs;znzsubs],true,x.size); + + return; + +end + +% Case 2b: One dense tensor +if isa(y,'tensor') + + % Find where their zeros intersect + yzerosubs = find(y == 0); + zzerosubs = yzerosubs(extract(x,yzerosubs) == 0,:); + + % Find where their nonzeros intersect + yvals = y(x.subs); + znzsubs = x.subs(yvals == x.vals,:); + + % Build z + z = sptensor([zzerosubs;znzsubs],true,x.size); + + return; + +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/find.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/find.m new file mode 100644 index 0000000..199439a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/find.m @@ -0,0 +1,27 @@ +function [subs,vals] = find(t) +%FIND Find subscripts of nonzero elements in a sparse tensor. +% +% [SUBS,VALS] = FIND(T) returns the subscripts and corresponding +% values of the nonzero elements of T. +% +% Note that unlike the standard MATLAB find function for an array, +% find does not return linear indices. Instead, it returns an M x N +% array where M is the number of nonzero values and N = ndims(T). +% Thus, I(k,:) specifies the subscript of value V(k). +% +% See also SPTENSOR, FIND. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +subs = t.subs; +vals = t.vals; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/full.m new file mode 100644 index 0000000..efe3398 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/full.m @@ -0,0 +1,41 @@ +function B = full(A) +%FULL Convert a sparse tensor to a (dense) tensor. +% +% B = FULL(A) converts a sptensor A to a (dense) tensor B. +% +% See also SPTENSOR, TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Extract the order and size of A +siz = size(A); + +% Handle the completely empty (no size) case +if isempty(siz) + B = tensor; + return; +end + +% Create a dense zero tensor B that is the same size as A +B = tensor(zeros([siz,1,1]),siz); + +if isempty(A.subs) + return; +end + +% Extract the linear indices of entries in A +idx = tt_sub2ind(siz,A.subs); + +% Copy the values of A into B using linear indices +B(idx) = A.vals; + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ge.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ge.m new file mode 100644 index 0000000..5f65b08 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ge.m @@ -0,0 +1,71 @@ +function z = ge(x,y) +%GE Greater than or equal for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a >= 5 is sparse. +% The result of a >= 0 is sparse. +% The result of a >= full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + subs1 = x.subs((x.vals >= y),:); + if y <= 0 + subs2 = setdiff(allsubs(x),x.subs,'rows'); + else + subs2 = []; + end + z = sptensor([subs1;subs2],true,size(x)); + return; +end + +% Call back with the arguments reversed. 
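+% When the first argument is the scalar, x >= Y holds exactly where Y <= x,
+% so the work is delegated to LE with the arguments swapped. A minimal
+% illustrative check (assuming SPTENRAND is available on the path):
+%   Y = sptenrand([4 3 2],5);
+%   Z = 3 >= Y;   %<-- handled here and returned as le(Y,3)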
+if isscalar(x) + z = le(y,x); + return; +end + +%% Case 2: Both x and y are tensors of some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + z = le(y,x); + return; +end + +% Case 2b: One dense tensor +if isa(y,'tensor') + + % x zero + subs1 = find(y <= 0); + subs1 = setdiff(subs1,x.subs,'rows'); + + % x nonzero + subs2 = x.subs(x.vals >= y(x.subs,'extract'),:); + + % assemble + z = sptensor([subs1;subs2],true,size(x)); + + return; + +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/gt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/gt.m new file mode 100644 index 0000000..0bb5fe1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/gt.m @@ -0,0 +1,74 @@ +function z = gt(x,y) +%GT Greater than for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a > 5 is sparse. +% The result of a > 0 is sparse. +% The result of a > full(a) is sparse. +% The result of a > zeros(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + subs1 = x.subs((x.vals > y),:); + if y < 0 + subs2 = setdiff(allsubs(x),x.subs,'rows'); + else + subs2 = []; + end + z = sptensor([subs1;subs2],true,size(x)); + return; +end + +% Call back with the arguments reversed. +if isscalar(x) + z = lt(y,x); + return; +end + +%% Case 2: Both x and y are tensors of some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + z = lt(y,x); + return; +end + +% Case 2b: One dense tensor +if isa(y,'tensor') + + % x zero and y < 0 + subs1 = find(y < 0); + if ~isempty(subs1) + subs1 = setdiff(subs1,x.subs,'rows'); + end + + % x and y nonzero + subs2 = x.subs(x.vals > y(x.subs,'extract'),:); + + % assemble + z = sptensor([subs1;subs2],true,size(x)); + + return; + +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/innerprod.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/innerprod.m new file mode 100644 index 0000000..4942520 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/innerprod.m @@ -0,0 +1,68 @@ +function res = innerprod(X,Y) +%INNERPROD Efficient inner product with a sparse tensor. +% +% R = INNERPROD(X,Y) efficiently computes the inner product between +% two tensors X and Y. If Y is a tensor or sptensor, the inner +% product is computed directly and the computational complexity is +% O(min(nnz(X),nnz(Y))). If Y is a ktensor or a ttensor, the +% inner product method for that type of tensor is called. +% +% See also SPTENSOR, KTENSOR/INNERPROD, TTENSOR/INNERPROD. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% X is a sptensor +if nnz(X) == 0 %There are no nonzero terms in X. + res = 0; + return +end + +switch class(Y) + + case {'sptensor'} + if ~isequal(size(X),size(Y)) + error('X and Y must be the same size.'); + end + if nnz(Y) == 0 %There are no nonzero terms in Y. + res = 0; + return + end + if nnz(X) < nnz(Y); + [SX,VX] = find(X); + VY = extract(Y,SX); %<-----VY = Y(SX); + else + [SY,VY] = find(Y); + VX = extract(X,SY); %<-----VX = X(SY); + end + res = VY'*VX; + return; + + case {'tensor'} + if ~isequal(size(X),size(Y)) + error('X and Y must be the same size.'); + end + [SX,VX] = find(X); + VY = Y(SX,'extract'); + res = VY'*VX; + return; + + case {'ktensor','ttensor'} + % Reverse arguments to call ktensor/ttensor implementation + res = innerprod(Y,X); + return; + + otherwise + error(['Inner product not available for class ' class(Y)]); + +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isequal.m new file mode 100644 index 0000000..53152da --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isequal.m @@ -0,0 +1,32 @@ +function z = isequal(x,y) +%ISEQUAL Compare spares tensors for equality. +% +% ISEQUAL(A,B) compares the sparse tensors A and B for equality. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of isequal(a,full(a)) is true! + +%% +if ~isequal(x.size,y.size) + z = false; +elseif isa(x,'sptensor') && isa(y,'sptensor') + z = (nnz(x-y) == 0); +elseif isa(y,'tensor') + z = isequal(full(x),y); +else + z = false; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isscalar.m new file mode 100644 index 0000000..b547095 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for sptensors. +% ISSCALAR(S) returns logical 0 (false) if S is a sptensor. +% +% See also SPTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ldivide.m new file mode 100644 index 0000000..90f73f6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ldivide.m @@ -0,0 +1,21 @@ +function C = ldivide(A,B) +%LDIVIDE Array right division for sparse tensors. +% +% LDIVIDE(A,B) is called for the syntax 'A .\ B' when A or B is a sparse +% tensor. A and B must have the same size, unless one is a scalar. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +C = rdivide(B,A); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/le.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/le.m new file mode 100644 index 0000000..7814257 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/le.m @@ -0,0 +1,91 @@ +function z = le(x,y) +%LE Less than or equal for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a <= 5 is sparse. +% The result of a <= 0 is sparse. +% The result of a <= full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + subs1 = x.subs((x.vals <= y),:); + if y >= 0 + subs2 = setdiff(allsubs(x),x.subs,'rows'); + else + subs2 = []; + end + z = sptensor([subs1;subs2],true,size(x)); + return; +end + +% Call back with the arguments reversed. 
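+% In the two-sparse-tensor case handled further below, the true entries of
+% the result are assembled as the union of four subscript sets: X nonzero
+% and Y zero (requires X < 0), X zero and Y nonzero (requires Y > 0), both
+% nonzero with X <= Y, and both zero (since 0 <= 0 always holds).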
+if isscalar(x) + z = ge(y,x); + return; +end + +%% Case 2: Both x and y are tensors of some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + + % x not zero, y zero + subs1 = setdiff(x.subs,y.subs,'rows'); + subs1 = subs1(extract(x,subs1) < 0, :); + + % x zero, y not zero + subs2 = setdiff(y.subs,x.subs,'rows'); + subs2 = subs2(extract(y,subs2) > 0, :); + + % x and y not zero + subs3 = intersect(x.subs,y.subs,'rows'); + subs3 = subs3(extract(x,subs3) <= extract(y,subs3),:); + + % x and y zero + xzerosubs = setdiff(allsubs(x),x.subs,'rows'); + yzerosubs = setdiff(allsubs(y),y.subs,'rows'); + subs4 = intersect(xzerosubs,yzerosubs,'rows'); + + % assemble + z = sptensor([subs1;subs2;subs3;subs4],true,size(x)); + return; + +end + +% Case 2b: One dense tensor +if isa(y,'tensor') + + % x zero + subs1 = find(y >= 0); + subs1 = setdiff(subs1,x.subs,'rows'); + + % x nonzero + subs2 = x.subs(x.vals <= y(x.subs,'extract'),:); + + % assemble + z = sptensor([subs1;subs2],true,size(x)); + + return; + +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/lt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/lt.m new file mode 100644 index 0000000..b1495b6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/lt.m @@ -0,0 +1,88 @@ +function z = lt(x,y) +%LT Less than for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a < 5 is sparse. +% The result of a < 0 is sparse. +% The result of a < full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + subs1 = x.subs((x.vals < y),:); + if y > 0 + subs2 = setdiff(allsubs(x),x.subs,'rows'); + else + subs2 = []; + end + z = sptensor([subs1;subs2],true,size(x)); + return; +end + +% Call back with the arguments reversed. 
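+% The structure mirrors LE above, except that in the two-sparse-tensor case
+% the subscripts where both X and Y are zero are omitted (0 < 0 is false)
+% and the strict comparison X < Y is used where both are nonzero.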
+if isscalar(x) + z = gt(y,x); + return; +end + +%% Case 2: Both x and y are tensors or some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + + % x not zero, y zero + subs1 = setdiff(x.subs,y.subs,'rows'); + subs1 = subs1(extract(x,subs1) < 0, :); + + % x zero, y not zero + subs2 = setdiff(y.subs,x.subs,'rows'); + subs2 = subs2(extract(y,subs2) > 0, :); + + % x and y not zero + subs3 = intersect(x.subs,y.subs,'rows'); + subs3 = subs3(extract(x,subs3) < extract(y,subs3),:); + + % assemble + z = sptensor([subs1;subs2;subs3],true,size(x)); + return; + +end + +% Case 2b: y is a dense tensor +if isa(y,'tensor') + + % x zero and y > 0 + subs1 = find(y > 0); + if ~isemtpy(subs1) + subs1 = setdiff(subs1,x.subs,'rows'); + end + + % x and y nonzero + subs2 = x.subs(x.vals < y(x.subs,'extract'),:); + + % assemble + z = sptensor([subs1;subs2],true,size(x)); + + return; + +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mask.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mask.m new file mode 100644 index 0000000..4e66e1b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mask.m @@ -0,0 +1,24 @@ +function vals = mask(X,W) +%MASK Extract values as specified by a mask tensor. +% +% V = MASK(X,W) extracts the values in X that correspond to nonzero +% values in the mask tensor W. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. + +% Error check +if any(size(W) > size(X)) + error('Mask cannot be bigger than the data tensor') +end + +% Extract locations of nonzeros in W +wsubs = find(W); + +% Find which values in the mask match nonzeros in X +[tf,loc] = ismember(wsubs,X.subs,'rows'); + +% Assemble the final array +nvals = size(wsubs,1); +vals = zeros(nvals,1); +vals(tf) = X.vals(loc(tf)); \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/minus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/minus.m new file mode 100644 index 0000000..8cb369a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/minus.m @@ -0,0 +1,56 @@ +function C = minus(A,B) +%MINUS Binary subtraction for sparse tensors. +% +% MINUS(A,B) is called for the syntax 'A - B' when A or B is a sparse +% tensor. A and B must have the same size, unless one is a scalar. A +% scalar can be subtracted from a sparse tensor of any size. +% +% Examples +% A = sptenrand([4 3 2],5); B = sptenrand([4 3 2],3); +% A - B %<-- sparse +% A - 5 %<-- dense +% A - 0 %<-- dense +% A - full(A) %<-- dense +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a - 5 is dense! +% The result of a - 0 is dense! +% The result of a - full(a) is dense! +% The result of a - b (two sparse matrices) is sparse. 
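+% Case 2 below (two sparse tensors) relies on the SPTENSOR constructor
+% summing values at duplicate subscripts, so stacking the subscripts of A
+% and B with values [A.vals; -B.vals] yields the elementwise difference.
+% Illustrative sketch (assuming SPTENRAND is on the path):
+%   A = sptenrand([4 3 2],5); B = sptenrand([4 3 2],3);
+%   C = A - B;   %<-- internally sptensor([A.subs;B.subs],[A.vals;-B.vals],size(A))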
+ +%% Case 1: One argument is a scalar +% Emulating the sparse matrix case here, which creates and returns +% a dense result, even if the scalar is zero. + +% Case 1a: Second argument is a scalar or a dense tensor +if isscalar(B) || isa(B,'tensor') + C = full(A) - B; + return; +end + +% Case 1b: First argument is a scalar or a dense tensor +if isscalar(A) || isa(A,'tensor') + C = A - full(B); + return; +end + +%% Case 2: Both are sparse tensors +if ~isa(A,'sptensor') || ~isa(B,'sptensor') || ~isequal(size(A),size(B)) + error('Must be two sparse tensors of the same size'); +end + +C = sptensor([A.subs; B.subs], [A.vals; -B.vals], size(A)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mldivide.m new file mode 100644 index 0000000..4e7f6e0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mldivide.m @@ -0,0 +1,37 @@ +function C = mldivide(A,B) +%MLDIVIDE Slash left division for sparse tensors. +% +% MlDIVIDE(A,B) is called for the syntax 'A \ B' when A is a scalar and B +% is a sparse tensor. +% +% Example +% X = sptenrand([4 3 2],5); +% 3 \ X +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isscalar(A) + newsubs = B.subs; + newvals = B.vals / A; + if A == 0 + nansubs = setdiff(allsubs(A),newsubs,'rows'); + newsubs = [newsubs; nansubs]; + newvals = [newvals; repmat(NaN,size(nansubs,1),1)]; + end + C = sptensor(newsubs,newvals,B.size); + return; +end + +error('MLDIVIDE only supports the scalar case for sparse tensors'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mrdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mrdivide.m new file mode 100644 index 0000000..e61f5b1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mrdivide.m @@ -0,0 +1,37 @@ +function C = mrdivide(A,B) +%MRDIVIDE Slash right division for sparse tensors. +% +% MRDIVIDE(A,B) is called for the syntax 'A / B' when A is a sparse +% tensor and B is a scalar. +% +% Example +% X = sptenrand([4 3 2],5); +% X / 3 +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if isscalar(B) + newsubs = A.subs; + newvals = A.vals / B; + if B == 0 + nansubs = setdiff(allsubs(A),newsubs,'rows'); + newsubs = [newsubs; nansubs]; + newvals = [newvals; repmat(NaN,size(nansubs,1),1)]; + end + C = sptensor(newsubs,newvals,A.size); + return; +end + +error('MRDIVIDE only supports the scalar case for sparse tensors'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mtimes.m new file mode 100644 index 0000000..be719bb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mtimes.m @@ -0,0 +1,35 @@ +function C = mtimes(A,B) +%MTIMES sptensor-scalar multiplication. +% +% C = MTIMES(A,B) is called for the syntax 'A * B' when A or B is a +% sparse tensor and the other argument is a scalar. +% +% For tensor-matrix multiplication, use TTM. +% For tensor-tensor multiplication, use TTT. +% For tensor-tensor array multiplication, use TIMES or 'A .* B'. +% +% See also SPTENSOR, SPTENSOR/TTM, SPTENSOR/TTT, SPTENSOR/TIMES +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isscalar(B) + C = sptensor(A.subs, A.vals * B, size(A)); + return; +end + +if isscalar(A) + C = sptensor(B.subs, B.vals * A, size(B)); + return; +end + +error('MTIMES only supports the scalar case for sparse tensors'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mttkrp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mttkrp.m new file mode 100644 index 0000000..0261f99 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/mttkrp.m @@ -0,0 +1,72 @@ +function V = mttkrp(X,U,n) +%MTTKRP Matricized tensor times Khatri-Rao product for sparse tensor. +% +% V = MTTKRP(X,U,n) efficiently calculates the matrix product of the +% n-mode matricization of X with the Khatri-Rao product of all +% entries in U, a cell array of matrices, except the nth. How to +% most efficiently do this computation depends on the type of tensor +% involved. +% +% V = MTTKRP(X,K,N) instead uses the Khatri-Rao product formed by the +% matrices and lambda vector stored in the ktensor K. As with the cell +% array, it ignores the Nth factor matrix. The lambda vector is absorbed +% into one of the factor matrices. +% +% Examples +% S = sptensor([3 3 3; 1 3 3; 1 2 1], 4, [3, 4, 3]); %<-Declare sptensor +% mttkrp(S, {rand(3,3), rand(3,3), rand(3,3)}, 2) +% +% See also TENSOR/MTTKRP, SPTENSOR/TTV, SPTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% In the sparse case, it is most efficient to do a series of TTV operations +% rather than forming the Khatri-Rao product. + +N = ndims(X); + +if isa(U,'ktensor') + % Absorb lambda into one of the factors, but not the one that's skipped + if n == 1 + U = redistribute(U,2); + else + U = redistribute(U,1); + end + % Extract the factor matrices + U = U.u; +end + +if (length(U) ~= N) + error('Cell array is the wrong length'); +end + +if ~iscell(U) + error('Second argument should be a cell array or a ktensor'); +end + +if (n == 1) + R = size(U{2},2); +else + R = size(U{1},2); +end + +V = zeros(size(X,n),R); +for r = 1:R + % Set up cell array with appropriate vectors for ttv multiplication + Z = cell(N,1); + for i = [1:n-1,n+1:N] + Z{i} = U{i}(:,r); + end + % Perform ttv multiplication + V(:,r) = double(ttv(X, Z, -n)); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ndims.m new file mode 100644 index 0000000..af74ddf --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ndims.m @@ -0,0 +1,24 @@ +function n = ndims(t) +%NDIMS Number of dimensions of a sparse tensor. +% +% NDIMS(T) returns the number of dimensions of sparse tensor T. +% +% Examples: +% T = sptenrand([3 2 2],5); +% ndims(T) %<-- should return 3 +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = size(t.size,2); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ne.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ne.m new file mode 100644 index 0000000..aded4c2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ne.m @@ -0,0 +1,82 @@ +function z = ne(x,y) +%NE Not equal (~=) for sptensors. +% +% A ~= B compares the elements of A and B for equality. The arguments can +% be a pair of sptensors, an sptensor and a tensor, or an sptensor and a +% scalar. Regardless, the result is always returned as a sparse tensor. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a ~= 5 is sparse. +% The result of a ~= 0 is sparse. +% The result of a ~= full(a) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(y) + if y == 0 + z = sptensor(x.subs,true,size(x)); + else + subs1 = x.subs(x.vals ~= y,:); + subs2 = setdiff(allsubs(x),x.subs,'rows'); + z = sptensor([subs1;subs2],true,size(x)); + end + return; +end + +% Call back with the arguments reversed. 
+if isscalar(x) + z = ne(y,x); + return; +end + +%% Case 2: Both x and y are tensors or some sort +% Check that the sizes match +if ~isequal(x.size,y.size) + error('Size mismatch'); +end + +% Case 2a: Two sparse tensors +if isa(x,'sptensor') && isa(y,'sptensor') + + % find entries where either x *or* y is nonzero, but not both + subs1 = setxor(x.subs,y.subs,'rows'); + % find entries where both are nonzero, but inequal + subs2 = intersect(x.subs,y.subs,'rows'); + subs2 = subs2(extract(x,subs2) ~= extract(y,subs2),:); + % put it all together + z = sptensor([subs1;subs2],true,size(x)); + return; + +end + +% Case 2b: y is a dense tensor +if isa(y,'tensor') + + % find entries where x is zero but y is nonzero + subs1 = setdiff(allsubs(x),union(x.subs,find(y == 0),'rows'),'rows'); + + % find entries where x is nonzero but not equal to y + subs2 = x.subs(x.vals ~= y(x.subs,'extract'),:); + + % put it all together + z = sptensor([subs1;subs2],true,size(x)); + return; + +end + + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nnz.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nnz.m new file mode 100644 index 0000000..0d88571 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nnz.m @@ -0,0 +1,24 @@ +function a = nnz(t) +%NNZ Number of nonzeros in sparse tensor. +% +% NNZ(T) is the number of nonzero elements in T. +% +% See also SPTENSOR, SPTENSOR/FIND. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(t.subs) + a = 0; +else + a = size(t.subs,1); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/norm.m new file mode 100644 index 0000000..49e6032 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/norm.m @@ -0,0 +1,22 @@ +function nrm = norm(T) +%NORM Frobenius norm of a sparse tensor. +% +% NORM(T) returns the Frobenius norm of a sparse tensor. +% +% See also SPTENSOR, NORM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +nrm = norm(T.vals); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/not.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/not.m new file mode 100644 index 0000000..13d7eb9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/not.m @@ -0,0 +1,28 @@ +function y = not(x) +%NOT Logical NOT (~) for sptensors. +% +% ~X performs a logical not on the input tensor X. The result always +% returned as a sparse tensor. 
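+%
+%   Example (illustrative)
+%   X = sptenrand([3 3 3],4);
+%   Y = ~X;        %<-- true exactly at the zero positions of X
+%   nnz(Y)         %<-- equals prod(size(X)) - nnz(X)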
+% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of ~a is sparse. + +%% Then compute those indicies that are not in x +subs = setdiff(allsubs(x),x.subs,'rows'); + +%% Assemble final result +y = sptensor(subs,true,x.size); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nvecs.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nvecs.m new file mode 100644 index 0000000..ac6b087 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/nvecs.m @@ -0,0 +1,70 @@ +function u = nvecs(t,n,r,opts) +%NVECS Compute the leading mode-n vectors for a sparse tensor. +% +% U = NVECS(X,n,r) computes the r leading eigenvalues of Xn*Xn' +% (where Xn is the mode-n matricization of X), which provides +% information about the mode-n fibers. In two-dimensions, the r +% leading mode-1 vectors are the same as the r left singular vectors +% and the r leading mode-2 vectors are the same as the r right +% singular vectors. +% +% U = NVECS(X,n,r,OPTS) specifies options: +% OPTS.eigsopts: options passed to the EIGS routine [struct('disp',0)] +% OPTS.flipsign: make each column's largest element positive [true] +% +% Examples +% S = sptensor([3 3 3; 1 3 2; 1 1 3], 1, [3,3,3]); %<--Declare an sptensor +% nvecs(S,3,2) +% +% Documentation page for n-vecs +% +% See also SPTENSOR, SPTENMAT, EIGS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('opts','var') + opts = struct; +end + + +if isfield(opts,'eigsopts') + eigsopts = opts.eigsopts; +else + eigsopts.disp = 0; +end + +tnt = double(sptenmat(t,n,'t')); +y = tnt' * tnt; +opts.disp = 0; +[u,d] = eigs(y,r,'LM',eigsopts); + +%tn = sptenmat(t,n); +%[u,d] = eigs(@(x)aatx(tn,x), size(t,n), r, 'LM', eigsopts); + +if isfield(opts,'flipsign') + flipsign = opts.flipsign; +else + flipsign = true; +end + +if flipsign + % Make the largest magnitude element be positive + [val,loc] = max(abs(u)); + for i = 1:r + if u(loc(i),i) < 0 + u(:,i) = u(:,i) * -1; + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ones.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ones.m new file mode 100644 index 0000000..2fe41f7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ones.m @@ -0,0 +1,21 @@ +function t = ones(t) +%ONES Replace nonzero elements of sparse tensor with ones. +% +% S = ONES(T) generates a sparse tensor with the same sparsity +% structure as T, but with ones in the nonzero position. +% +% See also SPTENSOR, SPONES. +% +%MATLAB Tensor Toolbox. 
+%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.vals = ones(size(t.vals)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/or.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/or.m new file mode 100644 index 0000000..a33e10b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/or.m @@ -0,0 +1,45 @@ +function C = or(A,B) +%OR Logical OR (|) for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a | 5 is dense! +% The result of a | 0 is dense! +% The result of a | full(a) is dense! +% The result of a | a is sparse. + +%% Case 1: One argument is a scalar +if isscalar(B) || isa(B,'tensor') + C = full(A) | B; + return; +end +if isscalar(A) + C = A | full(B); + return; +end + +%% Case 2: Both A and B are sparse tensors +if ~isequal(size(A),size(B)) + error('Must be tensors of the same size'); +end + +if isa(A,'sptensor') && isa(B,'sptensor') + C = sptensor([A.subs; B.subs], 1, size(A), @(x) length(x) >= 1); + return; +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/permute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/permute.m new file mode 100644 index 0000000..2500709 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/permute.m @@ -0,0 +1,37 @@ +function t = permute(t,order) +%PERMUTE Rearrange the dimensions of a sparse tensor. +% +% B = PERMUTE(A,ORDER) rearranges the dimensions of A so that they +% are in the order specified by the vector ORDER. The result has the +% same values of A, but the order of the subscripts needed to access +% any particular element are rearranged as specified by ORDER. +% +% See also SPTENSOR, PERMUTE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% Error checking +if (ndims(order) ~= 2) || (size(order,1) ~= 1) + error('ORDER must be a row vector'); +end + +% Check that the permuation is valid +if ~isequal(sort(order),1:ndims(t)) + error('Invalid permutation.'); +end + +% Do the permutation +if ~isempty(t.subs) + t.subs = t.subs(:,order); +end +t.size = t.size(order); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/plus.m new file mode 100644 index 0000000..6a9be26 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/plus.m @@ -0,0 +1,62 @@ +function C = plus(A,B) +%PLUS Binary addition for sparse tensors. +% +% PLUS(A,B) is called for the syntax 'A + B' when A or B is a sparse +% tensor. A and B must have the same size, unless one is a scalar. A +% scalar can be added to a sparse tensor of any size. +% +% Examples +% A = sptenrand([4 3 2],5); B = sptenrand([4 3 2],3); +% A + B %<-- sparse +% A + 5 %<-- dense +% A + 0 %<-- dense +% A + full(A) %<-- dense +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%% Observations for sparse matrix case. +% The result of a + 5 is dense! +% The result of a + 0 is dense! +% The result of a + full(a) is dense! +% The result of a + b (two sparse matrices) is sparse. + +%% Case 1: One argument is a scalar +% Emulating the sparse matrix case here, which creates and returns +% a dense result, even if the scalar is zero. + +% Case 1a: Second argument is a scalar or a dense tensor +if isscalar(B) || isa(B,'tensor') + C = full(A) + B; + return; +end + +% Case 1b: First argument is a scalar +if isscalar(A) + C = A + full(B); + return; +end + +%% Case 2: B is a sumtensor +if isa(B,'sumtensor') + C = plus(B,A); % Call sumtensor's plus. + return +end + +%% Case 3: Both are sparse tensors + +if ~isa(A,'sptensor') || ~isa(B,'sptensor') || ~isequal(size(A),size(B)) + error('Must be two sparse tensors of the same size'); +end + +C = sptensor([A.subs; B.subs], [A.vals; B.vals], size(A)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/allsubs.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/allsubs.m new file mode 100644 index 0000000..270eef2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/allsubs.m @@ -0,0 +1,34 @@ +function s = allsubs(x) +%ALLSUBS Generate all possible subscripts for a sparse tensor X. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% Generate all possible indicies + +% Preallocate (discover any memory issues here!) +s = zeros(prod(x.size),ndims(x)); + +% Generate appropriately sized ones vectors. +o = cell(ndims(x),1); +for n = 1:ndims(x) + o{n} = ones(size(x,n),1); +end + +% Generate each column of the subscripts in turn +for n = 1:ndims(x) + i = o; + i{n} = (1:size(x,n))'; + s(:,n) = khatrirao(i); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/extract.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/extract.m new file mode 100644 index 0000000..9d109e2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/extract.m @@ -0,0 +1,47 @@ +function a = extract(t,srchsubs) +%EXTRACT Extract value for a sptensor. +% +% EXTRACT(X,SUBS) returns a list of values. +% +% See also SPTENSOR/SUBSREF. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +% Check range of requested subscripts +p = size(srchsubs,1); + +% Check that all subscripts are positive and less than the max +invalid = (srchsubs < 0) | (srchsubs > ones(p,1)*t.size); +badloc = find(sum(invalid,2)); +if ~isempty(badloc) + fprintf('The following subscripts are invalid: \n'); + badsubs = srchsubs(badloc,:); + badidx = tt_sub2ind(size(t),badsubs); + for i = 1:numel(badloc) + fprintf('\tsubscript = %s (linear index = %d)\n',... + tt_intvec2str(badsubs(i,:)), badidx(i)); + end + error('Invalid subscripts'); +end + +% Set the default answer to zero +a = zeros(p,1); + +% Find which indices already exist and their locations +[tf,loc] = ismember(srchsubs,t.subs,'rows'); + +% Fill in the non-zero elements in the answer +nzsubs = find(tf); +a(nzsubs,1) = t.vals(loc(nzsubs)); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/irenumber.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/irenumber.m new file mode 100644 index 0000000..186dddb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/irenumber.m @@ -0,0 +1,35 @@ +function newsubs = irenumber(t, sz, range) +%RENUMBER indices for sptensor subsasgn +% +% See also SPTENSOR/SUBSASGN +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +nz = nnz(t); +if (nz == 0) + newsubs = []; + return; +end + +newsubs = t.subs; +for i = 1 : numel(range) + r = range{i}; + if ischar(r) && r == ':' + continue; + elseif numel(r) == 1 + newsubs = [newsubs(:,1:i-1), r*ones(nz,1), newsubs(:,i:end)]; + else + newsubs(:,i) = r(newsubs(:,i)); + end +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/renumber.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/renumber.m new file mode 100644 index 0000000..37f61a4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/renumber.m @@ -0,0 +1,52 @@ +function [newsubs, newsz] = renumber(subs, sz, range) +%RENUMBER indices for sptensor subsref +% +% [NEWSUBS,NEWSZ] = RENUMBER(SUBS,SZ,RANGE) takes a set of +% original subscripts SUBS with entries from a tensor of size +% SZ. All the entries in SUBS are assumed to be within the +% specified RANGE. These subscripts are then renumbered so that, +% in dimension i, the numbers range from 1:numel(RANGE(i)). +% +% See also SPTENSOR/SUBSREF +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +newsz = sz; +newsubs = subs; +for i = 1 : size(sz,2) + if ~(ischar(range{i}) && range{i} == ':') + if (isempty(subs)) + newsz(i) = numel(range{i}); + else + [newsubs(:,i), newsz(i)] = ... + renumberdim(subs(:,i), sz(i), range{i}); + end + end +end + +%------------------------------------------------------ +function [newidx, newsz] = renumberdim(idx, sz, range) +%RENUMBERDIM helper function for RENUMBER +% See also SPTENSOR/PRIVATE/RENUMBER + +% Determine the size of the new range +newsz = numel(range); + +% Create a map from the old range to the new range +map = zeros(1, sz); +for i = 1 : newsz + map(range(i)) = i; +end + +% Do the mapping +newidx = map(idx); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/subdims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/subdims.m new file mode 100644 index 0000000..ad76405 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/private/subdims.m @@ -0,0 +1,63 @@ +function loc = subdims(subd, t) +%SUBDIMS Compute the locations of subscripts within a subdimension. +% +% LOC = SUBDIMS(SUBD,T) finds the locations of the subscripts in T +% that are within the range specified by the cell array SUBD. For +% example, if SUBD = {1, [1,2], [1,2]}, then the locations of +% all elements of T that have a first subscript equal to 1, a +% second subscript equal to 1 or 2, and a third subscript equal to +% 1 or 2 is returned. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% Error check that subd is the right size +if size(subd,2) ~= ndims(t) + error('Number of subdimensions must equal number of dimensions'); +end + +% Error check that range is valid +for i = 1 : ndims(t) + r = subd{i}; + okcolon = ischar(r) && r == ':'; + oknumeric = isreal(r) && ~any(isnan(r(:))) && ~any(isinf(r(:))) ... + && isequal(r,round(r)) && all(r(:) > 0); + if ~(okcolon || oknumeric) + error('Invalid subdimension.'); + end +end + +% Copy out the subscripts from t +subs = t.subs; + +if isempty(subs) + loc = []; + return; +end + +% Compute the indices of the subscripts that are within the +% specified range. We start with all indices in loc and +% pare it down to a final list. + +loc = (1:size(subs,1))'; +for i = 1:ndims(t) + if ~(ischar(subd{i}) && subd{i} == ':') + + % Find the subscripts that match in dimension i + tf = ismember(subs(loc,i), subd{i}); + + % Pare down the list of indices + loc = loc(tf); + + end +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/rdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/rdivide.m new file mode 100644 index 0000000..af24db2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/rdivide.m @@ -0,0 +1,107 @@ +function C = rdivide(A,B) +%RDIVIDE Array right division for sparse tensors. +% +% RDIVIDE(A,B) is called for the syntax 'A ./ B' when A or B is a sparse +% tensor. A and B must have the same size, unless one is a scalar. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% a ./ 5 -> sparse +% 5 ./ a -> dense! +% a ./ full(a) -> sparse! +% full(a) ./ a -> dense + +% Divide by a scalar -> result is sparse +if isscalar(B) + C = mrdivide(A,B); + return; +end + +% Scalar divided by a tensor -> result is dense +if isscalar(A) + C = A ./ full(B); + return; +end + +% Tensor divided by a tensor +if ~isequal(size(A),size(B)) + error('Must be two tensors of the same size'); +end + +% Two sparse tensors +if isa(A,'sptensor') && isa(B,'sptensor') + + % Find where their zeros are + if isempty(A.subs) + Azerosubs = allsubs(A); + else + Azerosubs = setdiff(allsubs(A),A.subs,'rows'); + end + if isempty(B.subs) + Bzerosubs = allsubs(B); + else + Bzerosubs = setdiff(allsubs(B),B.subs,'rows'); + end + + % Both nonzero + [newsubs,ia,ib] = intersect(A.subs,B.subs,'rows'); + newvals = A.vals(ia) ./ B.vals(ib); + + % A nonzero and B zero + moresubs = intersect(A.subs,Bzerosubs,'rows'); + morevals = repmat(Inf, size(moresubs,1),1); + newsubs = [newsubs; moresubs]; + newvals = [newvals; morevals]; + + % Both zero + moresubs = intersect(Azerosubs,Bzerosubs,'rows'); + morevals = repmat(NaN, size(moresubs,1),1); + newsubs = [newsubs; moresubs]; + newvals = [newvals; morevals]; + + C = sptensor(newsubs,newvals,size(A)); + return; +end + +% Some other tensor type! 
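+% For a KTENSOR divisor, the branch below evaluates B only at the nonzero
+% subscripts of A: for each rank-one component r, lambda(r) is multiplied by
+% the matching factor-matrix entries along every mode and the components are
+% summed, so the cost scales with nnz(A)*R*ndims(A) rather than with forming
+% full(B).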
+switch class(B) + case {'tensor'} + csubs = A.subs; + cvals = A.vals ./ B(csubs); + C = sptensor(csubs, cvals, size(A)); + return; + case {'ktensor'} + R = numel(B.lambda); + N = ndims(A); + NZ = nnz(A); + csubs = A.subs; + avals = A.vals; + bvals = zeros(NZ,1); + for r = 1:R + tvals = B.lambda(r) * ones(NZ,1); + for n = 1:N + v = B{n}(:,r); + tvals = tvals .* v(csubs(:,n)); + end + bvals = bvals + tvals; + end + cvals = avals ./ bvals; + C = sptensor(csubs, cvals, size(A)); + return; +end + +error('Invalid arguments for RDIVIDE.'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/reshape.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/reshape.m new file mode 100644 index 0000000..79942fe --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/reshape.m @@ -0,0 +1,48 @@ +function a = reshape(a,new_size,old_modes) +%RESHAPE Reshape sparse tensor. +% +% RESHAPE(X,SIZ) reshapes the sparse tensor to the given size. PROD(SIZ) +% must be the same as PROD(SIZE(X)). +% +% RESHAPE(X,SIZ,MODES) reshapes only the specifies modes and appends the +% new reshaped modes to the end of the indices. +% +% See also SPTENSOR, SPTENSOR/PERMUTE, RESHAPE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('old_modes','var') + old_modes = 1:ndims(a); + keep_modes = []; +else + keep_modes = setdiff(1:ndims(a),old_modes); +end +old_size = a.size(old_modes); +keep_size = a.size(keep_modes); + + +if prod(new_size) ~= prod(old_size) + error('prod(SIZ) must be the same size of prod(SIZE(X,MODES))'); +end + +if isempty(a.subs) + a.size = [keep_size new_size]; + a.subs = []; +else + inds = tt_sub2ind(old_size,a.subs(:,old_modes)); + new_subs = tt_ind2sub(new_size,inds); + + a.size = [keep_size new_size]; + a.subs = [a.subs(:,keep_modes) new_subs]; +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/scale.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/scale.m new file mode 100644 index 0000000..31067d2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/scale.m @@ -0,0 +1,54 @@ +function t = scale(t,s,dims) +%SCALE Scale along specified dimensions for sparse tensors. +% +% Y = SCALE(X,S,DIMS) scales the sparse tensor X along the +% dimension(s) specified in DIMS using the scaling data in S. If +% DIMS contains only one dimensions, then S can be a column +% vector. Otherwise, S should be a tensor or sparse tensor. +% +% Examples +% X = ones(sptenrand([3 4 5], 10)) +% S = 10 * [1:5]'; Y = scale(X,S,3) +% S = tensor(10 * [1:5]',5); Y = scale(X,S,3) +% S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) +% S = tensor(1:12,[3 4]); Y = scale(X,S,-3) +% +% Documentation page for collapsing and scaling tensors +% +% See also SPTENSOR, SPTENSOR/COLLAPSE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +dims = tt_dimscheck(dims,ndims(t)); + +switch(class(s)) + case {'tensor'} + if ~isequal(size(s), t.size(dims)) + error 'Size mismatch'; + end + t.vals = t.vals .* s(t.subs(:,dims),'extract'); + case {'sptensor'} + if ~isequal(s.size, t.size(dims)) + error 'Size mismatch'; + end + t.vals = t.vals .* extract(s,(t.subs(:,dims))); + case {'double'} + if size(s,1) ~= t.size(dims) + error 'Size mismatch'; + end + t.vals = t.vals .* s(t.subs(:,dims)); + otherwise + error('Invalid scaling factor'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/size.m new file mode 100644 index 0000000..8d22dd7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/size.m @@ -0,0 +1,27 @@ +function m = size(t,idx) +%SIZE Sparse tensor dimensions. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = size(T,DIM) returns the sizes of the dimensions specified by DIM, +% which is either a scalar or a vector of dimensions. +% +% See also SPTENSOR, SPTENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if exist('idx','var') + m = t.size(idx); +else + m = t.size; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spmatrix.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spmatrix.m new file mode 100644 index 0000000..5f21ef8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spmatrix.m @@ -0,0 +1,30 @@ +function s = spmatrix(a) +%SPMATRIX Converts a two-way sparse tensor to sparse matrix. +% +% SPMATRIX(X) converts a sparse tensor to a sparse matrix. The sparse +% tensor must be two-dimensional. +% +% See also SPTENSOR, SPTENSOR/RESHAPE, SPTENMAT +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if ndims(a) ~= 2 + error('Sparse tensor must be two dimensional.'); +end + + +if isempty(a.subs) + s = sparse(a.size(1), a.size(2)); +else + s = sparse(a.subs(:,1), a.subs(:,2), a.vals, a.size(1), a.size(2)); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spones.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spones.m new file mode 100644 index 0000000..7424524 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/spones.m @@ -0,0 +1,20 @@ +function X = spones(X) +%SPONES Replace nonzero sparse tensor elements with ones. +% +% Y = SPONES(X) generates a tensor with the same sparsity structure as X, +% but with ones in the nonzero positions. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2019, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +X.vals = ones(size(X.vals)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/sptensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/sptensor.m new file mode 100644 index 0000000..2aa115b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/sptensor.m @@ -0,0 +1,367 @@ +%SPTENSOR Class for sparse tensors. +% +%SPTENSOR Methods: +% and - Logical AND (&) for sptensors. +% collapse - Collapse sparse tensor along specified dimensions. +% contract - Contract sparse tensor along two dimensions (array trace). +% disp - Command window display of a sparse tensor. +% display - Command window display of a sparse tensor. +% divide - Divide an SPTENSOR by a nonnegative KTENSOR. +% double - Converts a sparse tensor to a dense multidimensional array. +% elemfun - Manipulate the nonzero elements of a sparse tensor. +% end - Last index of indexing expression for sparse tensor. +% eq - Equal (==) for sptensors. +% find - Find subscripts of nonzero elements in a sparse tensor. +% full - Convert a sparse tensor to a (dense) tensor. +% ge - Greater than or equal for sptensors. +% gt - Greater than for sptensors. +% innerprod - Efficient inner product with a sparse tensor. +% isequal - Compare spares tensors for equality. +% isscalar - False for sptensors. +% ldivide - Array right division for sparse tensors. +% le - Less than or equal for sptensors. +% lt - Less than for sptensors. +% mask - Extract values as specified by a mask tensor. +% minus - Binary subtraction for sparse tensors. +% mldivide - Slash left division for sparse tensors. +% mrdivide - Slash right division for sparse tensors. +% mtimes - sptensor-scalar multiplication. +% mttkrp - Matricized tensor times Khatri-Rao product for sparse tensor. +% ndims - Number of dimensions of a sparse tensor. +% ne - Not equal (~=) for sptensors. +% nnz - Number of nonzeros in sparse tensor. +% norm - Frobenius norm of a sparse tensor. +% not - Logical NOT (~) for sptensors. +% nvecs - Compute the leading mode-n vectors for a sparse tensor. +% ones - Replace nonzero elements of sparse tensor with ones. +% or - Logical OR (|) for sptensors. +% permute - Rearrange the dimensions of a sparse tensor. 
+% plus - Binary addition for sparse tensors. +% rdivide - Array right division for sparse tensors. +% reshape - Reshape sparse tensor. +% scale - Scale along specified dimensions for sparse tensors. +% size - Sparse tensor dimensions. +% spmatrix - Converts a two-way sparse tensor to sparse matrix. +% spones - Replace nonzero sparse tensor elements with ones. +% sptensor - Create a sparse tensor. +% squeeze - Remove singleton dimensions from a sparse tensor. +% subsasgn - Subscripted assignment for sparse tensor. +% subsref - Subscripted reference for a sparse tensor. +% times - Array multiplication for sparse tensors. +% ttm - Sparse tensor times matrix. +% ttt - Sparse tensor times sparse tensor. +% ttv - Sparse tensor times vector. +% uminus - Unary minus (-) for sptensor. +% uplus - Unary plus (+) for sptensor. +% xor - Logical XOR for sptensors. +% +% Documentation page for Sparse Tensor Class +% +% See also TENSOR_TOOLBOX +% +% How to cite the sptensor class: +% * BW Bader and TG Kolda. Efficient MATLAB Computations with Sparse +% and Factored Tensors, SIAM J Scientific Computing 30:205-231, 2007. +% DOI: 10.1137/060676489. [BibTeX] +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function t = sptensor(varargin) +%SPTENSOR Create a sparse tensor. +% +% X = SPTENSOR(SUBS, VALS, SZ, FUN) uses the rows of SUBS and VALS +% to generate a sparse tensor X of size SZ = [m1 m2 ... mn]. SUBS is +% an p x n array specifying the subscripts of the values to be +% inserted into S. The k-th row of SUBS specifies the subscripts for +% the k-th value in VALS. The values are accumulated at repeated +% subscripts using the function FUN, which is specified by a +% function handle. +% +% There are several simplifications of this four argument call. +% +% X = SPTENSOR(SUBS,VALS,SZ) uses FUN=@SUM. +% +% X = SPTENSOR(SUBS,VALS) uses SM = max(SUBS,[],1). +% +% X = SPTENSOR(SZ) abbreviates X = SPTENSOR([],[],SZ). +% +% X = SPTENSOR(Y) copies/converts Y if it is an sptensor, an sptenmat, or +% a dense tensor or MDA (the zeros are squeezed out), an sptensor3, or a +% sparse matrix. Note that a row-vector, integer MDA is interpreted as a +% size (see previous constructor). +% +% X = SPTENSOR is the empty constructor. +% +% X = SPTENSOR(FH,SZ,NZ) creates a random sparse tensor of the specified +% size with NZ nonzeros (this can be an explit value or a proportion). +% The function handle FH is used to create the nonzeros. +% +% The argument VALS may be scalar, which is expanded to be the +% same length as SUBS, i.e., it is equivalent to VALS*(p,1). +% +% Examples +% subs = [1 1 1; 1 1 3; 2 2 2; 4 4 4; 1 1 1; 1 1 1] +% vals = [0.5; 1.5; 2.5; 3.5; 4.5; 5.5] +% siz = [4 4 4]; +% X = sptensor(subs,vals,siz) %<-- sparse 4x4x4, repeats summed +% X = sptensor(subs,1,siz) %<-- scalar 2nd argument +% X = sptensor(subs,vals,siz,@max) %<-- max for accumulation +% myfun = @(x) sum(x) / 3; +% X = sptensor(subs,vals,siz,myfun) %<-- custom accumulation +% +% See also SPTENSOR, SPTENRAND. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% EMPTY Constructor +if (nargin == 0) || ((nargin == 1) && isempty(varargin{1})) + t.subs = []; + t.vals = []; + t.size = []; + t = class(t,'sptensor'); + return; +end + +% SINGLE ARGUMENT +if (nargin == 1) + + source = varargin{1}; + + switch(class(source)) + + % COPY CONSTRUCTOR + case 'sptensor', + t.subs = source.subs; + t.vals = source.vals; + t.size = source.size; + t = class(t, 'sptensor'); + return; + + % CONVERT SPTENMAT + case 'sptenmat', + + % Extract the tensor size and order + siz = source.tsize; + + if isempty(source.subs) %There are no nonzero terms + subs = []; + else % Convert the 2d-subscipts into nd-subscripts + if ~isempty(source.rdims) + subs(:,source.rdims) = ... + tt_ind2sub(siz(source.rdims),source.subs(:,1)); + end + if ~isempty(source.cdims) + subs(:,source.cdims) = ... + tt_ind2sub(siz(source.cdims),source.subs(:,2)); + end + end + % Copy the values (which do not need to be modified) + vals = source.vals; + + % Store everything + t.subs = subs; + t.vals = vals; + t.size = siz; + t = class(t, 'sptensor'); + return; + + % CONVERT TENSOR + case 'tensor', + [subs,vals] = find(source); + t.subs = subs; + t.vals = vals; + t.size = size(source); + t = class(t, 'sptensor'); + return; + + % CONVERT SPTENSOR3 + case 'sptensor3', + K = size(source,3); + [I,J] = size(source{1}); + nz = nnz(K); + bigsubs = []; + bigvals = []; + for k = 1:K + [subs,vals] = find(source{k}); + if isempty(bigsubs) + bigsubs = [subs, k*ones(size(subs,1),1)]; + bigvals = [vals]; + else + bigsubs = [bigsubs; subs, k*ones(size(subs,1),1)]; + bigvals = [bigvals; vals]; + end + end + t.subs = bigsubs; + t.vals = bigvals; + t.size = [ I J K ]; + t = class(t,'sptensor'); + return; + + % SPARSE MATRIX, SIZE, or MDA + case {'numeric','logical','double'}, + + % Case 1: SPARSE MATRIX + if issparse(source) + [i,j,s] = find(source); + siz = size(source); + t.subs = [i,j]; + t.vals = s; + t.size = siz; + t = class(t,'sptensor'); + return; + end + + % Case 2: SPECIFYING THE SIZE + if tt_sizecheck(source) + t.subs = []; + t.vals = []; + t.size = source; + t = class(t, 'sptensor'); + return; + end + + % Case 3: An MDA + t = sptensor(tensor(source)); + return; + + end % switch + +end % nargin == 1 + +% SPECIAL CASE for INTERACTION WITH MEX FILES OR DIRECT CREATION OF +% SPTENSOR WITHOUT ANY SORTING OR OTHER STANDARD CHECKS +if (nargin == 4) && (isnumeric(varargin{4})) && (varargin{4} == 0) + + % Store everything + t.subs = varargin{1}; + t.vals = varargin{2}; + t.size = varargin{3}; + + % Create the tensor + t = class(t, 'sptensor'); + + return; + +end + +% RANDOM TENSOR +if (nargin == 3) && isa(varargin{1},'function_handle') + fh = varargin{1}; + sz = varargin{2}; + nz = varargin{3}; + + if (nz < 0) || (nz >= prod(sz)) + error('Requested number of nonzeros must be positive and less than the total size') + elseif (nz < 1) + nz = ceil(prod(sz) * nz); + else + nz = floor(nz); + end + + % Keep iterating until we find enough unique nonzeros or we give up + subs = []; + cnt = 0; + while (size(subs,1) < nz) && (cnt < 10) + subs = ceil( rand(nz, size(sz,2)) * diag(sz) ); + subs = unique(subs, 'rows'); + cnt = cnt + 1; + end + + nz = min(nz, size(subs,1)); + subs = subs(1:nz,:); + vals = fh(nz,1); + + % Store everything + t.subs = subs; + t.vals = vals; + t.size = sz; + + % Create the tensor + t = class(t, 'sptensor'); + return; +end + +% CONVERT A SET OF INPUTS +if (nargin == 2) || (nargin == 3) || (nargin == 4) + + % Extract the subscripts and values + 
subs = varargin{1}; + vals = varargin{2}; + + tt_subscheck(subs); + tt_valscheck(vals); + if ~isempty(vals) && (numel(vals) ~= 1) && (size(vals,1) ~= size(subs,1)) + error('Number of subscripts and values must be equal'); + end + + % Extract the size + if (nargin > 2) + siz = varargin{3}; + tt_sizecheck(siz); + else + siz = max(subs,[],1); + end + + % Check for wrong input + if size(subs,2) > size(siz,2) + error('More subscripts than specified by size') + end + + % Check for subscripts out of range + for j = 1:numel(siz) + if ~isempty(subs) && max(subs(:,j)) > siz(j) + error('Subscript exceeds sptensor size') + end + end + + % Extract the 'combiner' function handle + if (nargin == 4) + fun = varargin{4}; + else + fun = @sum; + end + + if isempty(subs) + newsubs = []; + newvals = []; + else + % Identify only the unique indices + [newsubs,junk,loc] = unique(subs,'rows'); + + % Sum the corresponding values + newvals = accumarray(loc,vals,[size(newsubs,1) 1],fun); + end + + % Find the nonzero indices of the new values + nzidx = find(newvals); + newsubs = newsubs(nzidx,:); + newvals = newvals(nzidx); + + % Store everything + t.subs = newsubs; + t.vals = newvals; + t.size = siz; + + % Create the tensor + t = class(t, 'sptensor'); + + return; +end + +error('Unsupported use of function SPTENSOR.'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/squeeze.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/squeeze.m new file mode 100644 index 0000000..d654a5b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/squeeze.m @@ -0,0 +1,45 @@ +function Y = squeeze(X) +%SQUEEZE Remove singleton dimensions from a sparse tensor. +% +% Y = SQUEEZE(X) returns a sparse tensor Y with the same elements as +% X but with all the singleton dimensions removed. A singleton +% is a dimension such that size(X,dim)==1. +% +% If X has *only* singleton dimensions, then Y is a scalar. +% +% Examples +% squeeze( sptenrand([2,1,3],0.5) ) %<-- returns a 2-by-3 sptensor +% squeeze( sptensor([1 1 1],1,[1 1 1]) ) %<-- returns a scalar +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if all(X.size > 1) + % No singleton dimensions to squeeze + Y = X; +else + idx = find(X.size > 1); + if numel(idx) == 0 + % Scalar case - only singleton dimensions + Y = X.vals; + else + siz = X.size(idx); + if isempty(X.vals) + Y = sptensor([],[],siz); + else + Y = sptensor(X.subs(:,idx), X.vals, siz); + end + end +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsasgn.m new file mode 100644 index 0000000..79f6e6f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsasgn.m @@ -0,0 +1,346 @@ +function t = subsasgn(t,s,rhs) +%SUBSASGN Subscripted assignment for sparse tensor. +% +% We can assign elements to a sptensor in three ways. +% +% Case 1: X(R1,R2,...,RN) = Y, in which case we replace the +% rectangular subtensor (or single element) specified by the ranges +% R1,...,RN with Y. 
The right-hand-side can be a scalar or an +% sptensor. +% +% Case 2: X(S) = V, where S is a p x n array of subscripts and V is +% a scalar value or a vector containing p values. +% +% Linear indexing is not supported for sparse tensors. +% +% Examples +% X = sptensor([30 40 20]) %<-- Create an emtpy 30 x 40 x 20 sptensor +% X(30,40,20) = 7 %<-- Assign a single element to be 7 +% X([1,1,1;2,2,2]) = 1 %<-- Assign a list of elements to the same value +% X(11:20,11:20,11:20) = sptenrand([10,10,10],10) %<-- subtensor! +% X(31,41,21) = 7 %<-- grows the size of the tensor +% X(111:120,111:120,111:120) = sptenrand([10,10,10],10) %<-- grows +% X(1,1,1,1) = 4 %<-- increases the number of dimensions from 3 to 4 +% +% X = sptensor([30]) %<-- empty one-dimensional tensor +% X([4:6]) = 1 %<-- set subtensor to ones (does not increase dimension) +% X([10;12;14]) = (4:6)' %<-- set three elements +% X(31) = 7 %<-- grow the first dimension +% X(1,1) = 0 %<-- add a dimension, but no nonzeros +% +% Note regarding singleton dimensions: It is not possible to do, for +% instance, X(1,1:10,1:10) = sptenrand([1 10 10],5). However, it is okay +% to do X(1,1:10,1:10) = squeeze(sptenrand([1 10 10],5)). +% +% See also SPTENSOR, TENSOR/SUBSASGN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +switch s.type + + case '.' + error(['Cannot change field ', s.subs, ' directly.']); + + case '()' + + % Do nothing if both subscripts and RHS are empty + if isempty(s.subs{1}) && isempty(t.vals) + return; + end + + % Figure out if we are doing a subtensor or a list of subscripts... + type = tt_assignment_type(t,s.subs,rhs); + + %% Case I: Replace a sub-tensor + if isequal(type,'subtensor') + + %% Case I(a): RHS is another sparse tensor + if isa(rhs,'sptensor') + + %% First, Resize the tensor and check the size match with + %% the tensor that's being inserted. + m = 1; + for n = 1:numel(s.subs) + if ischar(s.subs{n}) && (s.subs{n} == ':') + if ndims(t) < n + newsz(1,n) = rhs.size(m); + else + newsz(1,n) = max([t.size(n), rhs.size(m)]); + end + m = m + 1; + elseif numel(s.subs{n}) == 1 + if ndims(t) < n + newsz(1,n) = s.subs{n}; + else + newsz(1,n) = max([t.size(n) s.subs{n}]); + end + else + if numel(s.subs{n}) ~= rhs.size(m) + error('RHS does not match range size'); + end + if ndims(t) < n + newsz(1,n) = max(s.subs{n}); + else + newsz(1,n) = max([t.size(n) s.subs{n}]); + end + m = m + 1; + end + end + t.size = newsz; + + % Expand subs array if there are new modes, i.e., if the order + % has increased. + if ~isempty(t.subs) && (size(t.size,2) > size(t.subs,2)) + t.subs(:,end+1:size(t.size,2)) = 1; + end + + % Delete what currently occupies the specified range + rmloc = subdims(s.subs,t); + kploc = setdiff(1:nnz(t),rmloc); + newsubs = t.subs(kploc,:); + newvals = t.vals(kploc); + + % Renumber the subscripts + addsubs = irenumber(rhs, t.size, s.subs); + t.subs = [newsubs; addsubs]; + t.vals = [newvals; rhs.vals]; + + return; + end + + %% Case I(b): RHS is zero or scalar + + % First, Resize the tensor. 
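+            % (Subscripts beyond the current size grow the tensor: existing
+            % modes expand to max(t.size(n), s.subs{n}), and any new trailing
+            % modes are created with size max(s.subs{n}).)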
+ % Determine new size of existing modes + for n = 1:ndims(t) + if ischar(s.subs{n}) && (s.subs{n} == ':') + newsz(1,n) = t.size(n); + else + newsz(1,n) = max([t.size(n) s.subs{n}]); + end + end + % Determine size of new modes, if any + for n = ndims(t)+1:numel(s.subs) + newsz(1,n) = max(s.subs{n}); + end + t.size = newsz; + + % Expand subs array if there are new modes, i.e., if the order + % has increased. + if ~isempty(t.subs) && (size(t.size,2) > size(t.subs,2)) + t.subs(:,end+1:size(t.size,2)) = 1; + end + + % Case I(b)i: Zero right-hand-side + if numel(rhs) == 1 && rhs == 0 + + % Delete what currently occupies the specified range + rmloc = subdims(s.subs,t); + kploc = setdiff(1:nnz(t),rmloc); + t.subs = t.subs(kploc,:); + t.vals = t.vals(kploc); + return; + end + + % Case I(b)ii: Scalar right-hand-side + if numel(rhs) == 1 + + % Determine number of dimensions (may be larger than + % current number) + N = numel(s.subs); + + % Figure out how many indices are in each dimension + nssubs = zeros(N,1); + for n = 1:N + if ischar(s.subs{n}) && s.subs{n} == ':' + s.subs{n} = 1:size(t,n); + end + nssubs(n) = numel(s.subs{n}); + end + + % Preallocate (discover any memory issues here!) + addsubs = zeros(prod(nssubs),N); + + % Generate appropriately sized ones vectors. + o = cell(N,1); + for n = 1:N + o{n} = ones(nssubs(n),1); + end + + % Generate each column of the subscripts in turn + for n = 1:N + i = o; + i{n} = s.subs{n}'; + addsubs(:,n) = khatrirao(i); + end + + if ~isempty(t.subs) + % replace existing values + [junk,loc] = intersect(t.subs,addsubs,'rows'); + t.vals(loc) = rhs; + % pare down list of subscripts to add + addsubs = setdiff(addsubs,t.subs,'rows'); + end + t.subs = [t.subs; addsubs]; + t.vals = [t.vals; rhs*ones(size(addsubs,1),1)]; + return; + end + + error('Invalid RHS') + + end + + % Case II: Subscripts + if isequal(type,'subscripts') + + % Case II: Replacing values at specified indices + + newsubs = [s.subs{1}]; + tt_subscheck(newsubs); + + % Error check on subscripts + if size(newsubs,2) < ndims(t) + error('Invalid subscripts'); + end + + % Check for expanding the order + if size(newsubs,2) > ndims(t) + t.size(end+1:size(newsubs,2)) = 1; + if ~isempty(t.subs) + t.subs(:,end+1:size(newsubs,2)) = 1; + end + end + + % Copy rhs to newvals + newvals = rhs; + + % Error check the RHS is a column vector. We do not bother to + % handle any other type of RHS with the sparse tensor. + tt_valscheck(newvals); + + % Determine number of nonzeros being inserted. (This is + % determined by the number of subscripts. Later we will check + % to see that it matches the size of the RHS.) + newnnz = size(newsubs,1); + + % Error check on size of newvals + if numel(newvals) == 1 + + % Special case where newvals is a single element to be + % assigned to multiple RHS. Fix to be correct size. + newvals = newvals * ones(newnnz,1); + + elseif size(newvals,1) ~= newnnz + + % Sizes don't match! + error('Number of subscripts and number of values do not match!'); + + end + + % Remove duplicates & print warning if any duplicates were + % removed. 
+ [newsubs,idx] = unique(newsubs,'rows'); + if size(newsubs,1) ~= newnnz + warning('Duplicate assignments discarded.'); + end + newvals = newvals(idx); + + % Find which subscripts already exist and their locations + [tf,loc] = ismember(newsubs,t.subs,'rows','legacy'); + + % Split into three groups for processing: + % + % Group A: Elements that already exist and need to be changed + % Group B: Elements that already exist and need to be removed + % Group C: Elements that do not exist and need to be added + % + % Note that we are ignoring any new zero elements, because + % those obviously do not need to be added. Also, it's + % important to process Group A before Group B because the + % processing of Group B may change the locations of the + % remaining elements. + + idxa = find((tf .* newvals) ~= 0); + idxb = find((tf .* ~abs(newvals)) ~= 0); + idxc = find((~tf .* newvals) ~= 0); + + % Process Group A: Changing values + if ~isempty(idxa) + t.vals(loc(idxa)) = newvals(idxa); + end + + % Process Group B: Removing values + if ~isempty(idxb) + removesubs = loc(idxb); + keepsubs = setdiff(1:nnz(t),removesubs); + t.subs = t.subs(keepsubs,:); + t.vals = t.vals(keepsubs); + end + + % Process Group C: Adding new, nonzero values + if ~isempty(idxc) + t.subs = [t.subs; newsubs(idxc,:)]; + t.vals = [t.vals; newvals(idxc)]; + end + + % Resize the tensor! + for n = 1:length(t.size) + smax = max(newsubs(:,n)); + t.size(n) = max(t.size(n), smax); + end + + return; + + end + + error('Invalid call to sptensor/subsasgn'); + + case '{}' + error('Subscript cell reference not supported for sptensor.'); + + otherwise + error('Incorrect indexing into sptensor.') + +end + +function type = tt_assignment_type(x,subs,rhs) +%TT_ASSIGNMENT_TYPE What type of subsasgn is this? +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isequal(class(x),class(rhs)) + type = 'subtensor'; + return; +end + +if (numel(subs) >= 2) + type = 'subtensor'; + return; +end + +type = 'subscripts'; + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsref.m new file mode 100644 index 0000000..9b17a66 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/subsref.m @@ -0,0 +1,159 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for a sparse tensor. +% +% We can extract elements or subtensors from a sparse tensor in the +% following ways. +% +% Case 1a: y = X(i1,i2,...,iN), where each in is an index, returns a +% scalar. +% +% Case 1b: Y = X(R1,R2,...,RN), where one or more Rn is a range and +% the rest are indices, returns a sparse tensor. The elements are +% renumbered here as appropriate. +% +% Case 2a: V = X(S) or V = X(S,'extract'), where S is a p x n array +% of subscripts, returns a vector of p values. +% +% Case 2b: V = X(I) or V = X(I,'extract'), where I is a set of p +% linear indices, returns a vector of p values. +% +% Any ambiguity results in executing the first valid case. This +% is particularily an issue if ndims(X)==1. 
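+%   (For example, if ndims(X)==1 then X(I) with a column of indices I is
+%   treated as Case 1b and returns a subtensor; append 'extract' to force
+%   Case 2b and obtain the values instead.)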
+% +% S = X.subs returns the subscripts of the nonzero entries in X. +% +% V = X.vals returns the values of the nonzero entries in X. +% +% Examples +% X = sptensor([4,4,4;2,2,1;2,3,2],[3;5;1],[4 4 4]); +% X(1,2,1) %<-- returns zero +% X(4,4,4) %<-- returns 3 +% X(3:4,:,:) %<-- returns 2 x 4 x 4 sptensor +% X(2,:,:) %<-- returns a 2 x 2 tensor +% X([1,1,1;2,2,1]) %<-- returns a vector of 2 elements +% X = sptensor([6;16;26],[1;1;1],30); +% X([1:6]') %<-- extracts a subtensor +% X([1:6]','extract') %<-- extracts a vector of 6 elements +% +% See also SPTENSOR, SPTENSOR/FIND, TENSOR/SUBSREF. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + + case '{}' + error('Subscript cell reference cannot be used with sptensor.') + + case '.' + switch s(1).subs + case {'subs','indices'} + a = tt_subsubsref(t.subs, s); + case {'vals','values'} + a = tt_subsubsref(t.vals, s); + case 'size' + a = tt_subsubsref(t.size, s); + otherwise + error(['No such field: ', s(1).subs]); + end + return; + + case '()' + + % *** CASE 1: Rectangular Subtensor *** + if (numel(s(1).subs) == ndims(t)) && ... + ~isequal(s(1).subs{end},'extract') + + % Extract the subdimensions to be extracted from t + region = s(1).subs; + + % Pare down the list of subscripts (and values) to only + % those within the subdimensions specified by region. 
+ loc = subdims(region, t); + subs = t.subs(loc,:); + vals = t.vals(loc); + + % Find the size of the subtensor and renumber the + % subscripts + [subs, sz] = renumber(subs, size(t), region); + + % Determine the subscripts + newsiz = []; % (future) new size + kpdims = []; % dimensions to keep + rmdims = []; % dimensions to remove + + % Determine the new size and what dimensions to keep + for i = 1:length(region) + if ischar(region{i}) && (region{i} == ':') + newsiz = [newsiz size(t,i)]; + kpdims = [kpdims i]; + elseif numel(region{i}) > 1 + newsiz = [newsiz numel(region{i})]; + kpdims = [kpdims i]; + else + rmdims = [rmdims i]; + end + end + + % Return a single double value for a zero-order sub-tensor + if isempty(newsiz) + if isempty(vals) + a = 0; + else + a = vals; + end + return; + end + + % Assemble the resulting sparse tensor + if isempty(subs) + a = sptensor([],[],sz(kpdims)); + else + a = sptensor(subs(:,kpdims), vals, sz(kpdims)); + end + return; + end + + % Case 2: EXTRACT + + % *** CASE 2a: Subscript indexing *** + if size(s(1).subs{1},2) == ndims(t) + + % extract array of subscripts + srchsubs = s(1).subs{1}; + + % *** CASE 2b: Linear indexing *** + else + + % Error checking + if numel(s(1).subs) ~= 1 + error('Invalid indexing'); + end + + idx = s(1).subs{1}; + if ndims(idx) ~=2 || size(idx,2) ~= 1 + error('Expecting a column index'); + end + + % extract linear indices and convert to subscripts + srchsubs = tt_ind2sub(size(t),idx); + + end + + a = extract(t,srchsubs); + a = tt_subsubsref(a,s); + + return; + + otherwise + error('Incorrect indexing into sptensor.') +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/times.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/times.m new file mode 100644 index 0000000..7906c5f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/times.m @@ -0,0 +1,70 @@ +function C = times(A,B) +%TIMES Array multiplication for sparse tensors. +% +% TIMES(A,B) is called for the syntax 'A .* B' when A or B is a +% sparse tensor. A and B must have the same size, unless one is a scalar. +% A scalar can be multiplied by a sparse tensor of any size. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of a .* 5 is sparse. +% The result of a .* 0 is sparse. +% The result of a .* full(a) is sparse. 
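+% (Illustrative example, not part of the original help: for two sparse
+% tensors, only subscripts that are nonzero in both operands survive.)
+%    A = sptensor([1 1;2 2],[2;3],[2 2]);
+%    B = sptensor([1 1;1 2],[5;7],[2 2]);
+%    A .* B  %<-- sparse 2x2 tensor with a single nonzero, 10, at (1,1)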
+ +%% +if isscalar(B) + C = sptensor(A.subs, A.vals * B, size(A)); + return; +end + +if isscalar(A) + C = sptensor(B.subs, B.vals * A, size(B)); + return; +end + +if ~isequal(size(A),size(B)) + error('Must be two tensors of the same size'); +end + +switch class(B) + case {'sptensor'} + [csubs,ia,ib] = intersect(A.subs,B.subs,'rows'); + cvals = A.vals(ia) .* B.vals(ib); + C = sptensor(csubs, cvals, size(A)); + return; + case {'tensor'} + csubs = A.subs; + cvals = A.vals .* B(csubs); + C = sptensor(csubs, cvals, size(A)); + return; + case {'ktensor'} + csubs = A.subs; + cvals = zeros(size(A.vals)); + R = numel(B.lambda); + N = ndims(A); + for r = 1:R + tvals = B.lambda(r) * A.vals; + for n = 1:N + v = B{n}(:,r); + tvals = tvals .* v(csubs(:,n)); + end + cvals = cvals + tvals; + end + C = sptensor(csubs, cvals, size(A)); + return; + otherwise + error('Invalid second argument for sptensor/times'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttm.m new file mode 100644 index 0000000..60fae12 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttm.m @@ -0,0 +1,138 @@ +function Y = ttm(X,V,varargin) +%TTM Sparse tensor times matrix. +% +% Y = TTM(X,A,N) computes the n-mode product of the sptensor X with +% a dense matrix A; i.e., X x_N A. The integer N specifies the +% dimension (or mode) of X along which A should be multiplied. If +% size(A) = [J,I], then X must have size(X,N) = I. The result will +% will be a (dense) tensor or sptensor of the same order and size as X +% except that size(Y,N) = J. +% +% Y = TTM(X,{A,B,C,...}) computes the n-mode product of the sptensor +% X with a sequence of matrices in the cell array. The n-mode +% products are computed sequentially along all dimensions (or modes) +% of X. The cell array contains ndims(X) matrices. +% +% Y = TTM(X,{A,B,C,...},DIMS) computes the sequence tensor-matrix +% products along the dimensions specified by DIMS. +% +% Y = TTM(...,'t') performs the same computations as above except +% the matrices are transposed. +% +% In all cases, the result Z is a sparse tensor if it has 50% or +% fewer nonzeros; otherwise the result is returned as a dense +% tensor. +% +% Examples +% X = sptenrand([5 3 4 2], 10); +% A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2); +% Y = ttm(X, A, 1) %<-- computes X times A in mode-1 +% Y = ttm(X, {A,B,C,D}, 1) %<-- same as above +% Y = ttm(X, A', 1, 't') %<-- same as above +% Y = ttm(X, {A,B,C,D}, [1 2 3 4]) %<-- 4-way multiply +% Y = ttm(X, {D,C,B,A}, [4 3 2 1]) %<-- same as above +% Y = ttm(X, {A,B,C,D}) %<-- same as above +% Y = ttm(X, {A',B',C',D'}, 't') %<-- same as above +% Y = ttm(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4 +% Y = ttm(X, {A,B,C,D}, [3 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, [1 2 4]) %<-- 3-way multiply +% Y = ttm(X, {A,B,C,D}, [1 2 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, -3) %<-- same as above +% Y = ttm(X, {A,B,C,D}, -3) %<-- same as above +% +% See also SPTENSOR, TENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% Check the number of arguments +if (nargin < 2) + error('TTM requires at least two arguments.'); +end + +%% Create 'n' and 'tflag' arguments from varargin +n = 1:ndims(X); +tflag = ''; +if numel(varargin) == 1 + if ischar(varargin{1}) + tflag = varargin{1}; + else + n = varargin{1}; + end +elseif numel(varargin) == 2 + n = varargin{1}; + tflag = varargin{2}; +end + +%% Handle cell array +if iscell(V) + % Copy n into dims + dims = n; + % Check that the dimensions are valid + [dims,vidx] = tt_dimscheck(dims,ndims(X),numel(V)); + % Calculate individual products + Y = ttm(X, V{vidx(1)}, dims(1), tflag); + for i = 2 : numel(dims) + Y = ttm(Y, V{vidx(i)}, dims(i), tflag); + end + % All done + return; +end + +%% Check the second argument +if ndims(V) ~= 2 + error('tensor/ttm: 2nd argument must be a matrix.'); +end + +%% Flip V is transposed +if tflag == 't' + V = V'; +end + +%% Check n +if numel(n) ~= 1 || (n < 0) || (n > ndims(X)) + error('Dimension N must be between 1 and NDIMS(X).'); +end + +%% Compute the product + +% Check that sizes match! +if size(X,n) ~= size(V,2) + error('Size mismatch on V'); +end + +% Compute the new size +siz = size(X); +siz(n) = size(V,1); + +% Compute Xn' +Xnt = sptenmat(X,n,'t'); + +% Extract the dimensions +rdims = Xnt.rdims; +cdims = Xnt.cdims; + +% Convert to sparse matrix and do the multiplication; result is generally a +% dense matrix +Z = double(Xnt) * V'; + +if nnz(Z) <= 0.5 * prod(siz) + % Final result is a *sparse* tensor + Ynt = sptenmat(Z, rdims, cdims, siz); + Y = sptensor(Ynt); +else + % Final result is a *dense* tensor + Ynt = tenmat(full(Z), rdims, cdims, siz); + Y = tensor(Ynt); +end + +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttt.m new file mode 100644 index 0000000..caff728 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttt.m @@ -0,0 +1,173 @@ +function c = ttt(varargin) +%TTT Sparse tensor times sparse tensor. +% +% Z = TTT(X,Y) computes the outer product of tensors X and Y. +% +% Z = TTT(X,Y,XDIMS,YDIMS) computes the contracted product of +% tensors X and Y in the dimensions specified by the row vectors +% XDIMS and YDIMS. The sizes of the dimensions specified by XDIMS +% and YDIMS must match; that is, size(X,XDIMS) must equal +% size(Y,YDIMS). +% +% Z = TTT(X,Y,DIMS) computes the inner product of tensors X and Y in +% the dimensions specified by the vector DIMS. The sizes of the +% dimensions specified by DIMS must match; that is, size(X,DIMS) +% must equal size(Y,DIMS). +% +% In all cases, the result Z is a sparse tensor if it has 50% or +% fewer nonzeros; otherwise the result is returned as a dense +% tensor. +% +% Examples +% X = sptenrand([4 2 3], 10); +% Y = sptenrand([3 4 2], 10); +% Z = ttt(X,Y) %<-- outer product of X and Y +% Z = ttt(X,X,1:3) %<-- inner product of X with itself +% Z = ttt(X,Y,[1 2 3],[2 3 1]) %<-- inner product of X & Y +% Z = ttt(X,Y,[1 3],[2 1]) %<-- product of X & Y along specified dims +% +% See also SPTENSOR, TENSOR/TTT, SPTENSOR/TTV, SPTENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTT requires at least two arguments.'); +end + +% Check the first argument +if ~isa(varargin{1}, 'sptensor') + error('First argument must be a sptensor.'); +else + a = varargin{1}; +end + +% Check the second argument +if ~isa(varargin{2}, 'sptensor') + error('Second argument must be a sptensor.'); +else + b = varargin{2}; +end + +% Optional 3rd argument +if nargin >= 3 + adims = varargin{3}; +else + adims = []; +end + +% Optional 4th argument +if nargin >= 4 + bdims = varargin{4}; +else + bdims = adims; +end + +if ~isempty(adims) + tt_dimscheck(adims,ndims(a)); +end +if ~isempty(bdims) + tt_dimscheck(bdims,ndims(b)); +end + +asiz = size(a); +bsiz = size(b); +if ~isequal(asiz(adims),bsiz(bdims)) + error('Specified dimensions do not match.'); +end + +%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% COMPUTE THE PRODUCT %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Remaining dimensions +aremdims = setdiff(1:ndims(a),adims); +bremdims = setdiff(1:ndims(b),bdims); + +if (nnz(a) == 0) || (nnz(b) == 0) + if isempty(aremdims) && isempty(bremdims) + c = 0; + else + c = sptensor([],[],[a.size(aremdims) b.size(bremdims)]); + end + return; +end + +if isempty(adims) && isempty(bdims) + aii = ones(nnz(a),1); + bii = ones(nnz(b),1); + m = 1; +else + innersubs = [a.subs(:,adims); b.subs(:,bdims)]; + [junk1,junk2,loc] = unique(innersubs,'rows'); + aii = loc(1:nnz(a)); + bii = loc(nnz(a)+1:end); + m = length(junk2); +end + +if isempty(aremdims) + ajj = ones(nnz(a),1); + asubs = []; + n = 1; +else + [asubs,junk,ajj] = unique(a.subs(:,aremdims),'rows'); + n = length(junk); +end +if isempty(bremdims) + bjj = ones(nnz(b),1); + bsubs = []; + p = 1; +else + [bsubs,junk,bjj] = unique(b.subs(:,bremdims),'rows'); + p = length(junk); +end + +aa = sparse(aii,ajj,a.vals,m,n); +bb = sparse(bii,bjj,b.vals,m,p); +% We don't use aa' here because it makes mistakes in the complex case. We +% really just want transpose, not the conjugate transpose. +cc = transpose(aa)*bb; + +% Check for a scalar result, corresponding to an inner product. +if isempty(aremdims) && isempty(bremdims) + c = sum(nonzeros(cc)); + return; +end + +% If cc is a row vector, then transpose to work as a column vector +% (otherwise 'find' returns row vectors) +if size(cc,1) == 1 + [jj,ii,newvals] = find(cc'); +else + [ii,jj,newvals] = find(cc); +end + +if isempty(asubs) && ~isempty(bsubs) + newsubs = bsubs(jj,:); +elseif ~isempty(asubs) && isempty(bsubs) + newsubs = asubs(ii,:); +else + newsubs = [asubs(ii,:), bsubs(jj,:)]; +end + +c = sptensor(newsubs,newvals,[a.size(aremdims) b.size(bremdims)]); + +% Convert the result to dense if it has more than 50% nonzeros. +if nnz(c) > 0.5 * prod(c.size) + c = tensor(c); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttv.m new file mode 100644 index 0000000..4fd66b3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/ttv.m @@ -0,0 +1,107 @@ +function c = ttv(a,v,dims) +%TTV Sparse tensor times vector. +% +% Y = TTV(X,V,N) computes the product of a sparse tensor X with a +% (column) vector V. The integer N specifies the dimension in X +% along which V is multiplied. If size(V) = [I,1], then X must have +% size(X,N) = I. 
Note that ndims(Y) = ndims(X) - 1 because the N-th +% dimension is removed. +% +% Y = TTV(X,U) computes the product of a sparse tensor X with a +% sequence of vectors in the cell array U. The products are +% computed sequentially along all dimensions (or modes) of X. The +% cell array U contains ndims(X) vectors. +% +% Y = TTV(X,U,DIMS) computes the sequence tensor-vector products +% along the dimensions specified by DIMS. +% +% In all cases, the result Y is a sparse tensor if it has 50% or +% fewer nonzeros; otherwise the result is returned as a dense +% tensor. +% +% See also SPTENSOR, SPTENSOR/TTM, TENSOR, TENSOR/TTV. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Check the number of arguments +if (nargin < 2) + error('TTV requires at least two arguments.'); +end + +% Check for 3rd argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with v as a +% cell array with one element. +if ~iscell(v) + c = ttv(a,{v},dims); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(a),numel(v)); +remdims = setdiff(1:ndims(a),dims); + +% Check that each multiplicand is the right size. +for i = 1:numel(dims) + if ~isequal(size(v{vidx(i)}),[size(a,dims(i)) 1]) + error('Multiplicand is wrong size'); + end +end + +% Multiply each value by the appropriate elements of the +% appropriate vector +newvals = a.vals; +subs = a.subs; +if isempty(subs) %There are no nonzero terms in a + newsubs = []; +else + for n = 1:length(dims) + idx = subs(:,dims(n)); % extract indices for dimension n + w = v{vidx(n)}; % extract nth vector + bigw = w(idx); % stretch out the vector + newvals = newvals .* bigw; + end + newsubs = subs(:,remdims); +end +% Case 0: If all dimensions were used, then just return the sum +if isempty(remdims) + c = sum(newvals); + return; +end + +% Otherwise, figure new subscripts and accumulate the results. +newsiz = a.size(remdims); + +% Case I: Result is a vector +if numel(remdims) == 1 + c = accumarray(newsubs,newvals,[newsiz 1]); + if nnz(c) <= 0.5 * newsiz + c = sptensor((1:newsiz)',c,newsiz); + else + c = tensor(c,newsiz); + end + return; +end + +% Case II: Result is a multiway array +c = sptensor(newsubs, newvals, newsiz); + +% Convert to a dense tensor if more than 50% of the result is nonzero. +if nnz(c) > 0.5 * prod(c.size) + c = tensor(c); +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uminus.m new file mode 100644 index 0000000..c4e24aa --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus (-) for sptensor. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.vals = -t.vals; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uplus.m new file mode 100644 index 0000000..d97152d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus (+) for sptensor. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/xor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/xor.m new file mode 100644 index 0000000..555c693 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sptensor/xor.m @@ -0,0 +1,52 @@ +function C = xor(A,B) +%XOR Logical XOR for sptensors. +% +% See also SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Observations for sparse matrix case. +% The result of xor(a,5) is dense! +% The result of xor(a,0) is dense! +% The result of xor(a,full(a)) is dense! +% The result of xor(a,b) is sparse. + +%% Case 1: One argument is a scalar +if isscalar(B) || isa(B,'tensor') + C = xor(full(A),B); + return; +end +if isscalar(A) + C = xor(A,full(B)); + return; +end + + +%% Case 2: Both x and y are tensors of some sort +if ~isequal(size(A),size(B)) + error('Must be tensors of the same size'); +end + +if isa(A,'sptensor') && isa(B,'sptensor') + C = sptensor([A.subs; B.subs], 1, size(A), @(x) length(x) == 1); + return; +end + +if isa(B,'tensor') + Bsubs = find(B ~= 0); + C = sptensor([A.subs; Bsubs], 1, size(A), @(x) length(x) == 1); + return; +end + +%% Otherwise +error('The arguments must be two sptensors or an sptensor and a scalar.'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/disp.m new file mode 100644 index 0000000..4067eea --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/disp.m @@ -0,0 +1,36 @@ +function disp(t,name) +%DISP Command window display of a sumtensor. +% +% DISP(T) displays a sumtensor with no name. +% +% DISP(T,NAME) display a sumtensor with the given name. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if ~exist('name','var') + name = 'ans'; +end + +if isempty(t.part) + fprintf(1,'%s is an empty sumtensor\n', name); + return; +end + +fprintf(1,'%s is a sumtensor of size %s with %d parts\n', name, tt_size2str(size(t)), length(t.part)); +for i = 1:length(t.part) + subname = sprintf('%s.part{%d}',name,i); + disp(t.part{i},subname); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/display.m new file mode 100644 index 0000000..ccb3ecb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/display.m @@ -0,0 +1,17 @@ +function display(t) +%DISPLAY Command window display of a sumtensor. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +disp(t, inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/double.m new file mode 100644 index 0000000..a6f2032 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/double.m @@ -0,0 +1,19 @@ +function A = double(T) +%DOUBLE Convert sumtensor to double array. +% +% A = double(T) converts T to a standard multidimensional array. +% +% See also SUMTENSOR, SUMTENSOR/FULL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +A = double(full(T)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/full.m new file mode 100644 index 0000000..cdbad2f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/full.m @@ -0,0 +1,30 @@ +function X = full(T) +%FULL Convert a sumtensor to a (dense) tensor. +% +% X = FULL(T) converts sumtensor T to (dense) tensor X. This may be an +% expensive operation for large-scale tensors. +% +% See also SUMTENSOR, TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if isempty(T.part) + X = tensor; + return; +end + +X = full(T.part{1}); +for i = 2:length(T.part) + X = X + full(T.part{i}); +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/innerprod.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/innerprod.m new file mode 100644 index 0000000..cf5228a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/innerprod.m @@ -0,0 +1,29 @@ +function res = innerprod(X,Y) +%INNERPROD Efficient inner product with a sumtensor. +% +% R = INNERPROD(X,Y) efficiently computes the inner product between +% two tensors X and Y, where X is a sumtensor. +% +% See also TENSOR/INNERPROD, SPTENSOR/INNERPROD, TTENSOR/INNERPROD, +% KTENSOR/INNERPROD +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% X is a sumtensor + +res = innerprod(X.part{1},Y); +for i = 2:length(X.part) + res = res + innerprod(X.part{i},Y); +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/isscalar.m new file mode 100644 index 0000000..8ce3ac5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for sumtensors. +% ISSCALAR(S) returns logical 0 (false) if S is a sumtensor. +% +% See also SUMTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/mttkrp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/mttkrp.m new file mode 100644 index 0000000..7aa45b8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/mttkrp.m @@ -0,0 +1,34 @@ +function V = mttkrp(X,U,n) +%MTTKRP Matricized tensor times Khatri-Rao product for sumtensor. +% +% V = MTTKRP(X,U,n) efficiently calculates the matrix product of the +% n-mode matricization of X with the Khatri-Rao product of all +% entries in U, a cell array of matrices, except the nth. How to +% most efficiently do this computation depends on the type of tensor +% involved. 
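+%   (In matrix form this corresponds to V = X_(n) * khatrirao(U{N},...,
+%   U{n+1},U{n-1},...,U{1}), i.e. the mode-n unfolding of X times the
+%   Khatri-Rao product of the remaining factor matrices in reverse order.)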
+% +% Examples +% T1 = tensor(rand(3,4,3)); +% T2 = sptensor([2 1 1; 3 4 2; 1 2 3], 1, [3,4,3]); +% T = sumtensor(T1, T2); %<--Declaring a sumtensor +% +% mttkrp(T, {rand(3,2), rand(4,3), rand(3,2)}, 2) +% +% See also SUMTENSOR, TENSOR/MTTKRP, SPTENSOR/MTTKRP, KTENSOR/MTTKRP, +% TTENSOR/MTTKRP +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +V = mttkrp(X.part{1},U,n); +for i = 2:length(X.part) + V = V + mttkrp(X.part{i},U,n); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ndims.m new file mode 100644 index 0000000..1b0be8e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ndims.m @@ -0,0 +1,19 @@ +function n = ndims(t) +%NDIMS Return the number of dimensions for a sumtensor. +% +% NDIMS(T) returns the number of dimensions of tensor T. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +n = ndims(t.part{1}); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/norm.m new file mode 100644 index 0000000..119c0b6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/norm.m @@ -0,0 +1,26 @@ +function n = norm(T) +%NORM Frobenius norm of a sumtensor. +% +% It is not possible to efficiently compute the NORM of sumtensor. We +% therefore just return zero and print a warning. This function is +% included for compatibility with certain routines that expect to compute +% the norm but don't *really* need it. +% +% NORM(X) returns 0. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +warning('The NORM function is not supported by SUMTENSOR. Returning zero.'); +n = 0; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/plus.m new file mode 100644 index 0000000..8c702c4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/plus.m @@ -0,0 +1,31 @@ +function t = plus( t, x ) +%PLUS Plus for sumtensor. 
+% +% T = T + A adds A to the sumtensor parts, where T is a sumtensor and +% A is any valid sumtensor component. +% +% T = T + {A,B,C} adds A, B, and C to the sumtensor parts, assuming they +% are valid sumtensor components. +% +% Note that new parts are appended to the end of T, even if A + T is called +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.t + + + +if iscell(x) + t = sumtensor(t.part{:}, x{:}); +else + t = sumtensor(t.part{:}, x); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/size.m new file mode 100644 index 0000000..5b772c5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/size.m @@ -0,0 +1,29 @@ +function m = size(t,idx) +%SIZE Size of a sumtensor. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = size(T,DIM) returns the size of the dimension specified by +% the scalar DIM. +% +% See also SUMTENSOR, SUMTENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if isempty(t.part) + m = []; +elseif exist('idx','var') + m = size(t.part{1},idx); +else + m = size(t.part{1}); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/subsref.m new file mode 100644 index 0000000..4c8558f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/subsref.m @@ -0,0 +1,26 @@ +function B = subsref(A, S) +%SUBSREF Subscript reference for sumtensor. +% +% T.part{i} returns the ith part of the sumtensor T +% +% Examples +% T1 = tensor(rand(3,3,3)); +% T2 = sptensor([1 1 1; 3 1 2; 1 1 3], 1, [3,3,3]); +% T = sumtensor(T1,T2); +% T.part{2} %<--Returns the symmetric tensor +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +B = builtin('subsref', A, S); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/sumtensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/sumtensor.m new file mode 100644 index 0000000..d2ea3d0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/sumtensor.m @@ -0,0 +1,99 @@ +%SUMTENSOR Class for implicit sum of other tensors. +% +%SUMTENSOR Methods: +% disp - Command window display of a sumtensor. +% display - Command window display of a sumtensor. +% double - Convert sumtensor to double array. +% full - Convert a sumtensor to a (dense) tensor. +% innerprod - Efficient inner product with a sumtensor. +% isscalar - False for sumtensors. +% mttkrp - Matricized tensor times Khatri-Rao product for sumtensor. +% ndims - Return the number of dimensions for a sumtensor. +% norm - Frobenius norm of a sumtensor. +% plus - Plus for sumtensor. +% size - Size of a sumtensor. +% subsref - Subscript reference for sumtensor. +% sumtensor - Tensor stored as sum of tensors. +% ttv - Tensor times vector for sumtensor. +% uminus - Unary minus for sumtensor. +% uplus - Unary plus for sumtensor. +% +% Documentation page for sum of tensors class +% +% See also TENSOR_TOOLBOX +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function t = sumtensor(varargin) +%SUMTENSOR Tensor stored as sum of tensors. +% +% The SUMTENSOR class is limited to certain operations that easily +% decompose as sums: INNERPROD, MTTKRP, TTV. Note that the NORM function +% is not easily computed for a SUMTENSOR. +% +% T = SUMTENSOR(T1,T2,...) creates a tensor that is the sum of its +% constituent parts. The tensor is stored implicitly, i.e., each +% component is retained. This may lead to storage and computation +% efficiency. All input tensors must be the same size, but they can be +% any type of tensor. +% +% T = SUMTENSOR(S) creates a SUMTENSOR by copying an existing +% SUMTENSOR. +% +% T = SUMTENSOR is the empty constructor. +% +% Examples +% T1 = tensor(rand(4,3,3)); +% T2 = sptensor([1 1 1; 3 1 2; 4 3 3], 1, [4,3,3]); +% T = sumtensor(T1,T2); %<--A sumtensor with parts T1 and T2 +% +% See also TENSOR, SPTENSOR, TTENSOR, KTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Empty constructor +if (nargin == 0) + t.part = cell(0); + t = class(t, 'sumtensor'); + return; +end + +% Copy constructor +if (nargin == 1) && isa(varargin{1}, 'sumtensor') + t.part = varargin{1}.part; + t = class(t, 'sumtensor'); + return; +end + +% Multiple arguments constructor +t.part = cell(nargin,1); +for i = 1:nargin + cl = class(varargin{i}); + if ismember(cl,'double') % Convert an MDA + varargin{i} = tensor(varargin{i}); + elseif ~ismember(cl, {'tensor','sptensor','ktensor','ttensor'}) + error('Inputs must be tensors. 
Symtensors are not supported.'); + end + + if (i > 1) + if ~isequal(size(varargin{i}), size(varargin{1})) + error('All inputs must be the same size.'); + end + end + + t.part{i} = varargin{i}; + +end +t = class(t, 'sumtensor'); +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ttv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ttv.m new file mode 100644 index 0000000..d359536 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/ttv.m @@ -0,0 +1,66 @@ +function c = ttv(a,v,dims) +%TTV Tensor times vector for sumtensor. +% +% Y = TTV(X,A,N) computes the product of sumtensor X with a +% (column) vector A. The integer N specifies the dimension in X +% along which A is multiplied. If size(A) = [I,1], then X must have +% size(X,N) = I. Note that ndims(Y) = ndims(X) - 1 because the N-th +% dimension is removed. +% +% Y = TTV(X,{A1,A2,...}) computes the product of sumtensor X with a +% sequence of vectors in the cell array. The products are computed +% sequentially along all dimensions (or modes) of X. The cell array +% contains ndims(X) vectors. +% +% Y = TTV(X,{A1,A2,...},DIMS) computes the sequence tensor-vector +% products along the dimensions specified by DIMS. +% +% Examples +% T1 = tensor(rand(3,3,3)); +% T2 = sptensor([1 1 1; 3 1 2; 1 1 3], 1, [3,3,3]); +% T = sumtensor(T1, T2); %<--Declaring a sumtensor +% +% ttv(T, [1 1 1]', 3) %<--Multiply ones vector along mode 3 +% ttv(T, {[1 1 1]', [1 1 1]', [1 1 1]'}) %<--ones vector along all modes +% ttv(T, {[1 1 1]', [1 1 1]'}, [1 3]) %<--ones vector along modes 1 and 3 +% +% See also TENSOR/TTV, SUMTENSOR, SUMTENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTV requires at least two arguments.'); +end + +% Check for 3rd argument +if ~exist('dims','var') + dims = []; +end + +tmp = cell(length(a.part),1); +for i = 1:length(a.part) + tmp{i} = ttv(a.part{i},v,dims); +end + +if isscalar(tmp{1}) + c = sum(cell2mat(tmp)); +else + c = sumtensor(tmp{:}); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uminus.m new file mode 100644 index 0000000..9478377 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uminus.m @@ -0,0 +1,21 @@ +function t = uminus(t) +%UMINUS Unary minus for sumtensor. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +for i = 1:length(t.part) + t.part{i} = -t.part{i}; +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uplus.m new file mode 100644 index 0000000..8a833b8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@sumtensor/uplus.m @@ -0,0 +1,17 @@ +function t = uplus(t) +%UPLUS Unary plus for sumtensor. +% +% See also SUMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% This function does nothing! diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/arrange.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/arrange.m new file mode 100644 index 0000000..713768a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/arrange.m @@ -0,0 +1,39 @@ +function X = arrange(X,perm) +%ARRANGE Arranges the rank-1 components of a symktensor. +% +% ARRANGE(X) normalizes the columns of the factor matrices and then sorts +% the components by magnitude, greatest to least. +% +% ARRANGE(X,P) rearranges the components of X according to the +% permutation P. P should be a permutation of 1 to NCOMPONENTS(X). +% +% See also SYMKTENSOR, NCOMPONENTS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Just rearrange and return if second argument is a permutation +if exist('perm','var') && isvector(perm) + X.lambda = X.lambda(perm); + X.u = X.u(:,perm); + return; +end + +%% Ensure that matrices are normalized +X = normalize(X); + +%% Sort +[~, idx] = sort(abs(X.lambda), 1, 'descend'); +X.lambda = X.lambda(idx); +X.u = X.u(:,idx); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/disp.m new file mode 100644 index 0000000..3c47be4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/disp.m @@ -0,0 +1,30 @@ +function disp(t, name) +%DISP Command window display for a symktensor. +% +% DISP(T) displays a symmetric Kruskal tensor with no name. +% +% DISP(T,NAME) displays a symmetric Kruskal tensor with the given name. +% +% See also DISP, SYMKTENSOR/DISPLAY, SYMKTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government.
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('name','var') + name = 'ans'; +end + +fprintf('%s is a symktensor of order %d and dimension %d\n', name, t.m, size(t.u,1)); +fprintf('\t%s.lambda = %s\n',name, ['[ ' num2str(t.lambda') ' ]'] ); +fprintf('\t%s.U = \n', name); +output = tt_matrix2cellstr(t.u); +fprintf('\t\t%s\n',output{:}); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/display.m new file mode 100644 index 0000000..eec5cab --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/display.m @@ -0,0 +1,20 @@ +function display(t) +%DISPLAY Command window display for a symktensor. +% +% DISPLAY(T) displays a symmetric Kruskal tensor with its name. +% +% See also DISPLAY, SYMKTENSOR/DISP, SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/double.m new file mode 100644 index 0000000..520f13f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/double.m @@ -0,0 +1,25 @@ +function A = double(X) +%DOUBLE Convert a symktensor to a double array. +% +% A = double(X) converts X to a standard multidimensional array. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(X.lambda) % check for empty tensor + A = []; + return; +end + +A = double(ktensor(X)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/end.m new file mode 100644 index 0000000..5db6dca --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/end.m @@ -0,0 +1,32 @@ +function e = end(X,k,n) +%END Last index of indexing expression for symktensor. +% +% The expression X(end,:,:) will call END(X,1,3) to determine +% the value of the first index. +% +% See also SYMKTENSOR, SYMKTENSOR/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%TODO (after 2.0 release): Resolve ambiguity w.r.t X{end}and X(end,1,1) +%for 1st-order tensors. + +if n > ndims(X) + error('Subscript out of range.'); +end + +if (n ~= 1) + e = size(X,k); +else + e = ndims(X); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/entry.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/entry.m new file mode 100644 index 0000000..4a3e305 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/entry.m @@ -0,0 +1,47 @@ +function value = entry(model,I) +%ENTRY Extract a single entry from a symktensor. +% +% V = ENTRY(M,I) returns the value of M at entry I. M is a symktensor and +% I is a matrix with rows which are indexes to query. +% This value is not stored explicitly and so must be computed. +% +% Examples +% S = symtenrand(3,4); <-- Declare a random symtensor of size [4,4,4] +% M = cp_sym(S,2); <-- Decompose S into a symktensor with rank 2 +% I = [1,2,4; 4,4,4]; <-- Matrix of indices to query +% entry(S,I) <-- Query elements [1,2,4] and [4,4,4] +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Extract +lambda = model.lambda; +X = model.u; + +%% Case I: I = row +if isrow(I) + value = dot(lambda,prod(X(I,:))); + return; +end + + +%% Case II: list of indices +q = size(I,1); +m = size(I,2); +p = size(X,2); +foo = X(I,:); +foo = reshape(foo, [q m p]); +% squeeze(foo(q,:,:)) = X(I(q,:),:) +bar = squeeze(prod(foo,2)); +value = bar * lambda; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg.m new file mode 100644 index 0000000..058bdeb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg.m @@ -0,0 +1,163 @@ +function [F,G] = fg(Model,Data) +%FG Master objective function for optimization of symmetric Kruskal model. +% +% [F,G] = FG(MODEL,DATA) computes the function (F) and gradient (G) for +% the given MODEL and DATA. The input DATA should be computed by calling +% the FG_SETUP function. The function value F is a scalar and the +% gradient G is returned as a column vector of length Q = P*(N+1) (or +% Q=P*N if 'nolambda' is true in FG_SETUP). Here, P is the rank of the +% decomposition and N is the number of modes in MODEL. +% +% Example: +% A = symmetrize(tenrand(2,2,2)); +% P = 3; +% model = symktensor(P,A); %<- Create random symtensor of size 2x2x2 +% data = fg_setup(model,A,'unique',false); +% [f,g] = fg(model,data); +% f - norm(A - full(model)).^2 % Should be zero or close to it +% +% See also symktensor, sym_cp, symktensor/fg_setup, symktensor/tovec. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%% F/G computation + +% Extract model +X = Model.u; +lambda = Model.lambda; + +% Extract options +fgopts = Data.fgopts; + +if fgopts.fast + + % Extract data + M = Data.M; + N = Data.N; + P = Data.P; + A = Data.A; + + % Precomputation + XtX = X'*X; + XtXMM = XtX.^(M-1); + XtXM = XtXMM .* XtX; + %XtXM = XtX.^M; + Y = zeros(size(X)); + Z = zeros(P,1); + for p = 1:P + Y(:,p) = ttsv(A,X(:,p),-1); % Ax^(m-1) + Z(p) = dot(X(:,p),Y(:,p)); % Ax^m + end + + % Compute F + F1 = Data.normAsqr; + F2 = -2 * dot(lambda,Z); + F3 = lambda' * XtXM * lambda; + F = F1 + F2 + F3; + + % Compute G + + % Compute G wrt Lambda + if ~fgopts.nolambda + G.lambda = -2 * Z + 2 * XtXM * lambda; + end + + % Compute G wrt X + G.X = -2 * M * Y * diag(lambda) + 2 * M * X*diag(lambda)*XtXMM*diag(lambda); + + % Convert G to vector + G = stackgrad(G,fgopts.nolambda); + + +else + + % Extract data + M = Data.M; + N = Data.N; + P = Data.P; + Q = Data.Q; + %R = Data.R; + avals = Data.avals; + I = Data.I; + C = Data.C; + W = Data.W; + onesidx = Data.onesidx; + zerosidx = Data.zerosidx; + %lb = Data.lb; + %ub = Data.ub; + + % --- Compute F/G + + % Compute diffs + foo = X(I,:); + foo = reshape(foo, [Q M P]); + xprods = squeeze(prod(foo,2)); % q x p matrix + vals = xprods * lambda; + diffs = avals - vals; + + % Compute F + F = sum(W.*(diffs.^2)); + + % Compute G wrt Lambda + if ~fgopts.nolambda + for k = 1:P + G.lambda(k,1) = -2 * sum( W .* diffs .* xprods(:,k) ); + end + end + + % Compute G wrt X + for n = 1:N + bar = foo; + bar = reshape(bar,Q*M,P); + bar(onesidx{n},:) = 1; + bar = reshape(bar,Q,M,P); + bar(zerosidx{n},:,:) = 0; + xprodsjm1 = squeeze(prod(bar,2)); + for k = 1:P + G.X(n,k) = -2 * lambda(k) * sum( C(:,n) .* W .* diffs .* xprodsjm1(:,k) ); + end + end + + % Convert G to vector + G = stackgrad(G,fgopts.nolambda); +end +% --- Penalties --- + +% Norm weight +if (fgopts.l2weight > 0) + tmp = bsxfun(@dot,X,X)-1; + ftmp = fgopts.l2weight * sum(tmp.^2); + gtmp.lambda = zeros(P,1); + gtmp.X = fgopts.l2weight * 4 * bsxfun(@mtimes,X,tmp); + F = F + ftmp; + G = G + stackgrad(gtmp); +end + +% Lambda weight +if (fgopts.l1weight > 0) + alpha = fgopts.l1param; + lambda_alpha = (1/alpha) * (log(1 + exp(-alpha*lambda)) + log(1+exp(alpha*lambda))); + ftmp = fgopts.l1weight * sum(lambda_alpha); + grad_alpha = 1./(1 + exp(-alpha*lambda)) - 1./(1 + exp(alpha*lambda)); + gtmp.lambda = fgopts.l1weight * grad_alpha; + gtmp.X = zeros(N,P); + F = F + ftmp; + G = G + stackgrad(gtmp); +end + + +function g = stackgrad(G,nolambda) +if exist('nolambda','var') && nolambda + g = reshape(G.X,[],1); +else + g = [G.lambda; reshape(G.X,[],1)]; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg_setup.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg_setup.m new file mode 100644 index 0000000..a62c293 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/fg_setup.m @@ -0,0 +1,201 @@ +function data = fg_setup(Model, A, varargin) +%FG_SETUP Setup for optimization of symmetric Kruskal model. +% +% DATA = FG_SETUP(MODEL,A,'param',value,...) is a setup routine +% for computing an objective function that compares a symmetric +% Kruskal tensor, MODEL, with a symmetric tensor, A. Parameter-value +% pairs control the exact definition of the objective function and +% constraints. 
For an M-way, N-dimensional tensor A, a symmetric Kruskal +% tensor model is defined by a P-vector LAMBDA and an NxP matrix X, where +% P is the number of components. The number of optimization variables is +% Q = P*(N+1). The DATA produced by this setup function can be used +% repeatedly for the same tensor, A, and *any* model that is the same +% size as MODEL, i.e., the same number of components, same number of +% modes, and same size. The objective function is generically defined as +% the sum of the squared differences between the model and the tensor at +% each element. The following parameter-value pairs define the objective +% function and constraint details... +% +% o 'unique' - In an M-way symmetric tensor, an element may appear up +% to M! times. This parameter controls whether or not to +% give each unique index equal weight. Otherwise, each +% unique index is weighted by the number of appearances +% in the symmetric tensor. Default: True. +% o 'fast' - Use fast version of code if 'unique' is false. Not +% compatible with weights. Default: True. +% o 'l2weight' - Weight for the penalty term that is defined by +% sum_k (norm(X(:,k))^2 - 1)^2. Encourages column norms in +% X to be 1. Default: 0. +% o 'l1weight' - Weight for the penalty term that is defined by +% sum(LAMBDA). Encourages sparsity in LAMBDA. +% Default: 0. +% o 'l1param' - Alpha-term in the L1 approximation used for +% encouraging sparsity in LAMBDA. Default: 10. +% o 'nonneg' - Specify nonnegativity constraints: X >= 0, LAMBDA >= 0. +% Default: False. +% o 'nolambda' - Remove LAMBDA from the optimization. Changes number of +% optimization variables to Q = P*N. Default: False. +% o 'weights' - Specify weight tensor for the optimization. Must also +% be symmetric. Default: []. +% +% Example: +% A = symmetrize(tenrand(2,2,2)); P = 3; Model = symktensor(P,A); +% data = fg_setup(Model,A); +% +% See also SYMKTENSOR, CP_SYM, SYMKTENSOR/FG. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government.
+% The full license terms can be found in the file LICENSE.txt + + +% --- Process inputs --- +params = inputParser; +params.addParameter('unique', true, @islogical); +params.addParameter('fast', true, @islogical); +params.addParameter('l2weight', 0, @(x) isscalar(x) && x >= 0); +params.addParameter('l1weight', 0, @(x) isscalar(x) && x >= 0); +params.addParameter('l1param', 10, @(x) isscalar(x) && x >= 0); +params.addParameter('nonneg', false); +params.addParameter('nolambda', false, @islogical); +params.addParameter('weights',[]); +params.parse(varargin{:}); +fgopts = params.Results; + +% Check tensor +if ~(isa(A,'tensor') && issymmetric(A)) && ~isa(A,'symtensor') + error('A must be a symmetric tensor'); +end + +% Check weights +if ~isempty(fgopts.weights) + if ~issymmetric(fgopts.weights) + error('Weights must be symmetric'); + end + if ~isequal(size(A),size(fgopts.weights)) + error('Weight tensor must be the same size as A'); + end +end + +% Force fast option to be false if unique is true +if fgopts.unique + fgopts.fast = false; +end + +% Check fast option +if fgopts.fast + if ~isempty(fgopts.weights) + error('Cannot specify weights when ''fast'' is true'); + end +end + +% Extract sizes +M = ndims(A); +N = size(A,1); +P = ncomponents(Model); + +% Check lambda options +if fgopts.nolambda + if (mod(M,2) == 0) && ~fgopts.nonneg + error('Cannot have nolamdba=true and nonneg=false'); + end + if fgopts.l2weight + error('Cannot have nolambda=true and l2weight>0'); + end + if fgopts.l1weight + error('Cannot have nolambda=true and l1weight>0'); + end +end + +if fgopts.nolambda + R = N*P; +else + R = (N+1)*P; % Number of free variables in model +end + +% --- Bounds from constraints --- + +% Variable bounds +lb = -Inf * ones(R,1); +ub = Inf * ones(R,1); + +% Nonnegative +if (fgopts.nonneg) + lb = zeros(R,1); +end + +% --- Compute index sets and weights --- + +if fgopts.fast + + % Convert to dense tensor + A = full(A); + + % Compute squared norm + normAsqr = norm(A)^2; + + % --- Assemble results & return --- + data = var2struct(fgopts,M,N,P,A,normAsqr,lb,ub); + +else + A = symtensor(A); + [I,C,W,Q] = indices(A); + if fgopts.unique + W = ones(size(W)); + end + + % --- Extract A values --- + avals = A.val; + + % --- Incorporate weights if specified --- + if ~isempty(fgopts.weights) + if isa(fgopts.weights,'symtensor') + W = W .* fgopts.weights.val; + else + W = W .* fgopts.weights(I); + end + end + + % --- Indices for gradient computation: onesidx and zeroidx --- + % IDX(q,n) = mode of index that equal n in row q; 0 if DNE + idx = zeros(Q,N); + for n = 1:N + [II,JJ] = find(I == n); + idx(:,n) = accumarray(II,JJ,[Q 1],@min,0); + end + tf = idx > 0; + + len = zeros(N,1); + idx1 = zeros(Q,N); + idx2 = zeros(Q,N); + for n = 1:N + len(n) = sum(tf(:,n) > 0); + idx1(1:len(n),n) = find(tf(:,n)); + idx1(len(n)+1:end,n) = find(~tf(:,n)); + idx2(1:len(n),n) = idx(tf(:,n),n); + end + + onesidx = cell(n,1); + zerosidx = cell(n,1); + for n = 1:N + onesidx{n} = tt_sub2ind([Q M],[idx1(1:len(n),n),idx2(1:len(n),n)]); + zerosidx{n} = idx1(len(n)+1:end,n); + end + + % --- Assemble results & return --- + data = var2struct(fgopts,M,N,P,Q,R,avals,I,C,W,onesidx,zerosidx,lb,ub); +end + +function s = var2struct(varargin) +%VAR2STRUCT +%http://stackoverflow.com/questions/3470654/how-can-i-move-variables-into-and-out-of-a-structure-akin-to-load-and-save-in-ma +names = arrayfun(@inputname,1:nargin,'UniformOutput',false); +s = cell2struct(varargin,names,2); + diff --git 
a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/full.m new file mode 100644 index 0000000..80850b3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/full.m @@ -0,0 +1,44 @@ +function t = full(t) +%FULL Convert a symktensor to a symtensor. +% +% T = FULL(C) converts a symktensor to a symtensor. +% +% Examples +% X = symktensor([3; 2], ones(4,2)); +% Y = full(X) %<-- equivalent dense tensor +% +% See also SYMKTENSOR, TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +n = size(t,1); +m = ndims(t); +sz = nchoosek(m+n-1,m); + +I = zeros(sz,m); +for loc = 1:sz + if loc == 1 + I(loc,:) = ones(1,m); + else + I(loc,:) = I(loc-1,:); + j = m; + while (I(loc,j) == n) + j = j - 1; + end + I(loc,j:m) = I(loc,j)+1; + end +end + +tnew = symtensor(@ones,m,n); +vals = entry(t,I); +tnew(I) = vals; +t = tnew; \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isequal.m new file mode 100644 index 0000000..50c0139 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isequal.m @@ -0,0 +1,26 @@ +function [tf, tf_lambda, tf_U] = isequal(A,B) +%ISEQUAL True if each component of two symktensors is numerically equal. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +tf = false; +tf_lambda = false; +tf_U = false; + +if ~isa(B,'symktensor') + return; +end + +tf_lambda = isequal(A.lambda, B.lambda); +tf_U = isequal(A.u, B.u); +tf = tf_lambda & tf_U; + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isscalar.m new file mode 100644 index 0000000..2b210ab --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for symktensors. +% ISSCALAR(S) returns logical 0 (false) if S is a symktensor. +% +% See also SYMKTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government.
+% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/issymmetric.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/issymmetric.m new file mode 100644 index 0000000..745611d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/issymmetric.m @@ -0,0 +1,22 @@ +function tf = issymmetric(X) +%ISSYMMETRIC Rhetorical function for a symktensor. +% +% TF = ISSYMMETRIC(X) returns true. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%T. Kolda, June 2014. + + +tf = true; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/mtimes.m new file mode 100644 index 0000000..17ab465 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/mtimes.m @@ -0,0 +1,29 @@ +function C = mtimes(A,B) +%MTIMES Implement A*B (scalar multiply) for symktensor. +% +% C = mtimes(A,B) computes A * B where A is a symktensor and B is +% a scalar (or vice versa). The result C is the same size as A. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Note: We can do scalar times a tensor, but anything more complex is +% an error. + +if isa(B,'numeric') && isequal(size(B),[1 1]) + C = symktensor(B * A.lambda, A.u, A.m); +elseif isa(A,'numeric') && isequal(size(A),[1 1]) + C = symktensor(A * B.lambda, B.u, B.m); +else + error('Use mtimes(full(A),full(B)).'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ncomponents.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ncomponents.m new file mode 100644 index 0000000..2d9df16 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ncomponents.m @@ -0,0 +1,25 @@ +function n = ncomponents(t) +%NCOMPONENTS Number of components for a symktensor. +% +% NCOMPONENTS(T) returns the number of components in the symktensor T. +% This is the size of the lambda vector, equivalently the number of +% columns in the factor matrix. +% +% S = symktensor(3, symtensor(@rand,4,3)); %<--Random symktensor +% ncomponents(S) %<--Returns 3 +% +% See also SYMKTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = length(t.lambda); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ndims.m new file mode 100644 index 0000000..043c6b8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/ndims.m @@ -0,0 +1,20 @@ +function n = ndims(t) +%NDIMS Number of modes for a symktensor. +% +% NDIMS(T) returns the number of modes of symktensor T. +% +% See also SYMKTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = t.m; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/norm.m new file mode 100644 index 0000000..26cab6e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/norm.m @@ -0,0 +1,30 @@ +function nrm = norm(A) +%NORM Frobenius norm of a symktensor. +% +% NORM(T) returns the Frobenius norm of a symktensor. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Retrieve the factors of A +U = A.u; +UtU = A.u'*A.u; + +% Compute the matrix of correlation coefficients +coefMatrix = A.lambda * A.lambda'; +coefMatrix = coefMatrix .* ((UtU).^(A.m)); + +nrm = sqrt(abs(sum(coefMatrix(:)))); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/normalize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/normalize.m new file mode 100644 index 0000000..38fa0f9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/normalize.m @@ -0,0 +1,82 @@ +function X = normalize(X,N,normtype) +%NORMALIZE Normalizes the columns of the factor matrix. +% +% NORMALIZE(X) normalizes the columns of the factor matrix using the +% vector 2-norm, absorbing the excess weight into lambda. +% +% NORMALIZE(X,0) absorbs the weight into the factor matrix. +% (All the lambda values are +/-1.) +% +% NORMALIZE(X,[]) is equivalent to NORMALIZE(X). +% +% NORMALIZE(X,'sort') is the same as the above except it sorts the +% components by lambda value, from greatest magnitude to least. +% +% NORMALIZE(X,V,1) normalizes using the vector one norm (sum(abs(x))) +% rather than the two norm (sqrt(sum(x.^2))), where V can be any of the +% second arguments described above. +% +% See also SYMKTENSOR, SYMKTENSOR/ARRANGE.
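+%
+% Example (an illustrative sketch only; the random model below is just a
+% placeholder):
+%   S = symktensor(3, symtensor(@rand,4,3)); %<--Random 3-component model
+%   S2 = normalize(S);      %<--Unit 2-norm columns, weights moved into lambda
+%   S1 = normalize(S,[],1); %<--Same, but using the vector 1-norm
+%   S0 = normalize(S,0);    %<--Weights absorbed into the factor matrix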
+% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% +if ~exist('N','var') + N = -1; +end + +if isempty(N) + N = -1; +end + +if isequal(N,'sort') + N = -2; +end + +if N > 0 + error('Invalid second argument'); +end + +if ~exist('normtype','var') + normtype = 2; +end + +%% Ensure that matrix is normalized +for r = 1:length(X.lambda) + tmp = norm(X.u(:,r),normtype); + if (tmp > 0) + X.u(:,r) = X.u(:,r) / tmp; + end + X.lambda(r) = X.lambda(r) * tmp.^(X.m); + + % Odd-ordered tensors should not have negative lambda values + if (X.lambda(r) < 0) && (mod(X.m,2) == 1) + X.u(:,r) = -X.u(:,r); + X.lambda(r) = -X.lambda(r); + end +end + +%% Absorb the weight into one factor, if requested +if (N == 0) + D = diag(nthroot(abs(X.lambda),X.m)); + X.u = X.u * D; + X.lambda = sign(X.lambda) .* ones(size(X.lambda)); +elseif (N == -2) + if ncomponents(X) > 1 + [~,p] = sort(abs(X.lambda),'descend'); + X = arrange(X,p); + end +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/permute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/permute.m new file mode 100644 index 0000000..831b604 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/permute.m @@ -0,0 +1,26 @@ +function b = permute(a,order) +%PERMUTE Permute dimensions of a symktensor. +% +% B = PERMUTE(A,ORDER) rearranges the dimensions of A so that they +% are in the order specified by the vector ORDER. For a symmetric tensor, +% this changes nothing, so this function is a no-op. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +b = a; + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/score.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/score.m new file mode 100644 index 0000000..176d7d4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/score.m @@ -0,0 +1,32 @@ +function [best_score, A, flag, best_perm] = score(A,B,varargin) +%SCORE Checks if two symktensors match except for permutation. +% +% SCORE(A,B) returns the score of the match between A and B where +% A is trying to be matched against B. It converts both to single-mode +% ktensors and calls the ktensor SCORE function. +% +% See also SYMKTENSOR, KTENSOR/SCORE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%% Make sure A and B are symmetric ktensors +if ~isa(A,'symktensor') || ~isa(B,'symktensor') + error('Both arguments must be symktensors'); +end + +A = normalize(A); +B = normalize(B); + +AA = ktensor(A.lambda, A.u); +BB = ktensor(B.lambda, B.u); + +[best_score, A, flag, best_perm] = score(AA,BB,varargin{:}); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/size.m new file mode 100644 index 0000000..3b1de7c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/size.m @@ -0,0 +1,31 @@ +function sz = size(t,idx) +%SIZE Size of symktensor. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = SIZE(T,DIM) returns the size of the dimension specified by +% the scalar DIM. +% +% See also SYMKTENSOR, SYMKTENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(t.lambda) + sz = []; +end + +if exist('idx','var') + sz = size(t.u, 1); +else + sz = size(t.u, 1) * ones(1,t.m); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsasgn.m new file mode 100644 index 0000000..3bdb9b7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsasgn.m @@ -0,0 +1,58 @@ +function t = subsasgn(t,s,b) +%SUBSASGN Subscripted assignment for symktensor. +% +% Subscript assignment can be used to change the order of a symmetric +% tensor decomposition, the weight vector, or the factor matrix. +% +% Examples +% S = symktensor(2, symtensor('rand', 4,3)); %<--Declare a symtensor +% S.lambda = [2; 1]; %<-- Change the weight vector +% S.X = rand(3,2); %<-- Change the factor matrix +% S.U = rand(3,2); %<-- Same as above +% S.m = 5; %<-- Change the order of the decomposition. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' 
+ switch s(1).subs + case {'m','M'} + if isscalar(b) + t.m = b; + else + error('M must be a scalar'); + end + case 'lambda' + if length(b) == length(t.lambda) + t.lambda = b; + else + error('Incorrect size for weight vector assignment'); + end + case {'X','U'} + if all(size(b) == size(t.u)) + t.u = b; + else + error('Incorrect size for factor matrix assignment'); + end + otherwise + error(['Field not writable or does not exist: ', s(1).subs]); + end + case '()' + error('Cannot change individual entries in a symktensor.') + otherwise + error('Invalid subsasgn.'); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsref.m new file mode 100644 index 0000000..6157723 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/subsref.m @@ -0,0 +1,55 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for a symktensor. +% +% Subscripted reference for a symtensor can be used to return the weight +% vector, factor matrix, or expanded components of a symktensor. +% +% Examples +% S = symktensor(3, symtensor(@rand,4,3)); % <-- Declare a symktensor +% S.lambda % <-- Returns the weight vector. +% S.X % <-- Returns the factor matrix. +% S.M % <-- Returns the order (same as ndims(X)). +% S(2,3,1) % <-- Calculates and returns that single element of X. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' + switch s(1).subs + case 'lambda' + a = tt_subsubsref(t.lambda,s); + case {'u','U','X','x'} + a = tt_subsubsref(t.u,s); + case {'m','M'} + a = tt_subsubsref(t.m,s); + otherwise + error(['No such field: ', s(1).subs]); + end + case '()' + if length(s.subs) == t.m %Needs to be polished. + a = 0; + for k = 1 : length(t.lambda) + b = t.lambda(k); + for i = 1 : length(s.subs) + b = b * t.u(s.subs{i},k); + end + a = a + b; + end + else + error('Incorrect index length'); + end + otherwise + error('Invalid subsref.'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/symktensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/symktensor.m new file mode 100644 index 0000000..ad5911a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/symktensor.m @@ -0,0 +1,256 @@ +%SYMKTENSOR Class for storing symmetric Kruskal tensor (decomposed). +% +%SYMKTENSOR Methods: +% arrange - Arranges the rank-1 components of a symktensor. +% disp - Command window display for a symktensor. +% display - Command window display for a symktensor. +% double - Convert a symktensor to a double array. +% end - Last index of indexing expression for symktensor. +% entry - Extract a single entry from a symktensor. +% fg - Master objective function for optimization of symmetric Kruskal model. +% fg_setup - Setup for optimization of symmetric Kruskal model. +% full - Convert a symktensor to a symtensor. +% isequal - True if each component of two symktensors is numerically equal. +% isscalar - False for symktensors. 
+% issymmetric - Rhetorical function for a symktensor. +% mtimes - Implement A*B (scalar multiply) for symktensor. +% ncomponents - Number of components for a symktensor. +% ndims - Number of modes for a symktensor. +% norm - Frobenius norm of a symktensor. +% normalize - Normalizes the columns of the factor matrix. +% permute - Permute dimensions of a symktensor. +% score - Checks if two symktensors match except for permutation. +% size - Size of symktensor. +% subsasgn - Subscripted assignment for symktensor. +% subsref - Subscripted reference for a symktensor. +% symktensor - Tensor stored as a symmetric Kruskal operator (decomposed). +% tovec - Convert symktensor to vector representation. +% uminus - Unary minus for symktensor. +% uplus - Unary plus for a symktensor. +% +% Documentation page for symmetric Kruskal tensor class +% +% See also TENSOR_TOOLBOX, SYMTENSOR +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function t = symktensor(varargin) +%SYMKTENSOR Tensor stored as a symmetric Kruskal operator (decomposed). +% +% A symmetric Kruskal tensor is used to build a model of an M-way +% N-dimensional symmetric tensor. We have to specify the number of +% components, denoted by P. Each component comprises a scalar weight and +% the M-way symmetric outer product of a vector. We store the P weights +% in the length-P vector LAMBDA and the vectors in the NxP matrix X. The +% number of modes, M, is stored as a scalar. +% +% S = SYMKTENSOR(LAMBDA,U,M) creates a symmetric M-way Kruskal tensor +% from its constituent parts. Here lambda is a K-vector and U is a +% matrix with K columns. +% +% S = SYMKTENSOR(K) creates a symmetric Kruskal tensor by "symmetrizing" +% a ktensor, i.e., averaging the constituent factor matrices and taking +% care to get the signs aligned. +% +% S = SYMKTENSOR(S0) creates a symktensor by copying an existing +% symktensor. +% +% *** Below are specialized constructors for use in optimization. They +% are not recommended for general use. *** +% +% S = SYMKTENSOR(P,A) creates a symmetric Kruskal tensor with P components +% that is sized to match the tensor A. The LAMBDA is set to be all ones, +% and the X is initialized to uniform random values in [0,1]. +% +% S = SYMKTENSOR(V,A) creates a symmetric Kruskal tensor from a +% vectorized version V. The second argument is a tensor of the size that +% is being modeled. +% +% S = SYMKTENSOR(V,A,NOLAMBDA) creates a symmetric Kruskal tensor from a +% vectorized version V. The second argument is a tensor of the size that +% is being modeled. The third argument indicates whether or not LAMBDA is +% stored in V. If NOLAMBDA=TRUE, then only X is stored (i.e., LAMBDA +% values are set to 1). +% +% S = SYMKTENSOR(V,M,P) creates an M-way symmetric Kruskal +% tensor with P components from a vectorized version V. +% +% S = SYMKTENSOR(V,M,P,NOLAMBDA) uses the NOLAMBDA parameter as described +% above. +% +% Examples +% LAMBDA = randn(2, 1); %<-- Lambda vector (should be a column) +% U = randn(4, 2); %<-- Factor matrix +% S = symktensor(LAMBDA, U, 3) %<--Declare symktensor +% +% See also KTENSOR, TOVEC +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government.
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% EMPTY CONSTRUCTOR +if nargin == 0 + t.lambda = []; + t.u = []; + t.m = 0; + t = class(t,'symktensor'); + return; +end + +% Copy CONSTRUCTOR +if (nargin == 1) && isa(varargin{1}, 'symktensor') + t.lambda = varargin{1}.lambda; + t.u = varargin{1}.u; + t.m = varargin{1}.m; + t = class(t, 'symktensor'); + return; +end + +% Symmetrize a KTENSOR +if (nargin == 1) && isa(varargin{1}, 'ktensor') + K = varargin{1}; + if ~issymmetric(K) + K = symmetrize(K); + end + t.lambda = K.lambda; + t.u = K.U{1}; + t.m = length(K.U); + t = class(t, 'symktensor'); + return; +end + +% Create a random SYMKTENSOR from the number of components and a tensor. +if (nargin == 2) && isscalar(varargin{1}) + P = varargin{1}; + A = varargin{2}; + if ~isa(A,'tensor') && ~isa(A,'symtensor') + error('Second argument should be a tensor when the first argument is a scalar'); + end + M = ndims(A); + N = size(A,1); + t.lambda = ones(P,1); %Column vector + t.u = rand(N,P); %Robert development note: Document this better + t.m = M; + t = class(t,'symktensor'); + return; +end + +% Convert from vector representation, given tensor to determine shape +if nargin == 2 + v = varargin{1}; + A = varargin{2}; + if ~isvector(v) || ~(isa(varargin{2},'tensor') || isa(varargin{2},'symtensor')) + error('Wrong arguments'); + end + n = size(A,1); + p = length(v) / (n+1); + if round(p) ~= p + error('Length of input vector is not evenly divisible by (n+1)'); + end + t.lambda = v(1:p); + u = v(p+1:end); + t.u = reshape(u,n,p); + t.m = ndims(A); + t = class(t, 'symktensor'); + return; +end + + +% Convert from vector representation, given tensor to determine shape +if (nargin == 3) && (isa(varargin{2},'tensor') || isa(varargin{2},'symtensor')) + + tf = varargin{3}; %No lambda option + + if ~tf + t = symktensor(varargin{1},varargin{2}); + return; + end + + v = varargin{1}; + A = varargin{2}; + if ~isvector(v) + error('Wrong arguments'); + end + n = size(A,1); + p = length(v) / n; + if round(p) ~= p + error('Length of input vector is not evenly divisible by n'); + end + t.lambda = ones(p,1); %Column vector + u = v; + t.u = reshape(u,n,p); + t.m = ndims(A); + t = class(t, 'symktensor'); + return; +end + +% Convert from vector representation, given sizes +if (nargin == 3) && isscalar(varargin{2}) + v = varargin{1}; + m = varargin{2}; + p = varargin{3}; + t.lambda = v(1:p); + u = v(p+1:end); + t.u = reshape(u,[],p); + t.m = m; + t = class(t, 'symktensor'); + return; +end + +% Convert from vector representation, given sizes and nolambda! 
+if (nargin == 4) + tf = varargin{4}; + + if ~tf + t = symktensor(varargin{1},varargin{2},varargin{3}); + return; + end + + v = varargin{1}; + m = varargin{2}; + p = varargin{3}; + t.lambda = ones(p,1); %Column vector + u = v; + t.u = reshape(u,[],p); + t.m = m; + t = class(t, 'symktensor'); + return; +end + + +if nargin ~= 3 + error('Check arguments to create symktensor'); +end + +t.lambda = varargin{1}; +t.u = varargin{2}; +t.m = varargin{3}; + +if ~isa(t.lambda,'numeric') || ~iscolumn(t.lambda) + error('LAMBDA must be a column vector.'); +end + +% Check that each Um is indeed a matrix +if ~ismatrix(t.u) + error(['Matrix U is not a matrix!']); +end + +% Size error checking +k = length(t.lambda); +if size(t.u,2) ~= k + error(['Matrix U does not have ' int2str(k) ' columns.']); +end + +t = class(t, 'symktensor'); +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/tovec.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/tovec.m new file mode 100644 index 0000000..aaf4f6f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/tovec.m @@ -0,0 +1,28 @@ +function x = tovec(S,nolambda) +%TOVEC Convert symktensor to vector representation. +% +% V = TOVEC(S) converts a symktensor to a vector. It stacks the LAMBDA +% vector on top of a vectorized version of the matrix X. +% +% V = TOVEC(S,TRUE) just returns a vectorized version of the matrix +% X. It requires LAMBDA=1. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if exist('nolambda','var') && nolambda + if any(S.lambda ~= 1) + error('Not all lambda values are 1.') + end + x = reshape(S.u,[],1); +else + x = [S.lambda; reshape(S.u,[],1)]; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uminus.m new file mode 100644 index 0000000..3df49f3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus for symktensor. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.lambda = -t.lambda; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uplus.m new file mode 100644 index 0000000..0671fb4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symktensor/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus for a symktensor. +% +% See also SYMKTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
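A short sketch of the vector round-trip described in TOVEC above, paired with the SYMKTENSOR(V,M,P) constructor form (both assumed available from this diff; the sizes are illustrative):

S = symktensor(ones(3,1), rand(5,3), 4);   % 4-way model, dimension 5, 3 components
v = tovec(S);                              % stacks lambda on top of X(:), length 3 + 5*3 = 18
S2 = symktensor(v, 4, 3);                  % rebuild from the vector: order 4, 3 components
w = tovec(S, true);                        % X(:) only; valid here since all lambda equal 1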
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/and.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/and.m new file mode 100644 index 0000000..a3b6f6e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/and.m @@ -0,0 +1,18 @@ +function Z = and(X,Y) +%AND Logical AND (&) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@and,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/disp.m new file mode 100644 index 0000000..7b6022f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/disp.m @@ -0,0 +1,70 @@ +function disp(X, name) +%DISP Command window display for a symtensor. +% +% DISP(X) displays a symtensor with no name. +% +% DISP(X,NAME) displays a tensor with the given name. +% +% See also SYMTENSOR, SYMTENSOR/DISPLAY. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if ~exist('name','var') + name = 'ans'; +end + +% preallocate +n=X.n; +m=X.m; +sz=length(X.val); +if sz==0 %empty array + fprintf('%s is an empty symmetric tensor\n', name); + return +end +output = cell(sz,1); + +fprintf('%s is a symmetric tensor with %s modes of dimension %s\n',... 
+ name, num2str(X.m), num2str(X.n)); + +I=indices(X); + +spc = floor(log10(max(double(I),[],1)))+1; +if numel(spc) == 1 + fmt = ['\t(%' num2str(spc(1)) 'd)%s']; +else + fmt = ['\t(%' num2str(spc(1)) 'd,']; + for i = 2:numel(spc)-1 + fmt = [fmt '%' num2str(spc(i)) 'd,']; + end + fmt = [fmt '%' num2str(spc(end)) 'd)%s']; +end +%% +% Get values out so that they look nice +savefmt = get(0,'FormatSpacing'); +format compact +S = evalc('disp(X.val)'); +set(0,'FormatSpacing',savefmt) +S = textscan(S,'%s','delimiter','\n','whitespace',''); +S = S{1}; +if ~isempty(strfind(S{1},'*')) + fprintf('%s\n',S{1}); + S = S(2:end); +end +%% +for i = 1:sz + output{i} = sprintf(fmt, I(i,:) ,S{i}); +end +fprintf('%s\n',output{:}); + +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/display.m new file mode 100644 index 0000000..7b3216b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/display.m @@ -0,0 +1,21 @@ +function display(t) +%DISPLAY Command window display of a symtensor. +% +% DISPLAY(X) displays a tensor with its name. +% +% See also SYMTENSOR, SYMTENSOR/DISP. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +disp(t, inputname(1)); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/eq.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/eq.m new file mode 100644 index 0000000..191db7a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/eq.m @@ -0,0 +1,18 @@ +function Z = eq(X,Y) +%EQ Equal (==) for tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@eq,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/full.m new file mode 100644 index 0000000..04bca36 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/full.m @@ -0,0 +1,70 @@ +function Y = full(X,ver) +%FULL Convert symtensor to a tensor. +% +% FULL(S) returns a tensor from a symmetric tensor S. +% +% See also SYMTENSOR, TENSOR. +% +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
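A minimal sketch of the conversion documented in FULL above (assuming the symtensor constructor from this diff):

X = symtensor(@rand, 3, 4);     % order-3 symmetric tensor of dimension 4
T = full(X);                    % expand to a dense 4x4x4 tensor object
% every permutation of a subscript maps to the same stored value
isequal(T(1,2,3), T(3,1,2))     % returns true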
+% The full license terms can be found in the file LICENSE.txt + +%The idx option allows us to pass indexsets as an argument so we don't call +%it each time. Useful if full is called repeatedly on same sized tensors. +%Should probably be deleted because it was broken in the previous release +%anyway (see git repo before 9-1-16) + + +% Default to new version +if ~exist('ver', 'var'); + ver = 0; +end + + +switch ver + case 0 % New version + + n = X.n; + m = X.m; + idx = tt_ind2sub(size(X), (1:n^m)'); + classidx = sort(idx, 2); %Sort indices + symidx = indices(X); + [~,refidx] = ismember(classidx, symidx, 'rows'); + newdata = X.val(refidx); + Y = tensor(reshape(newdata, [size(X) 1]), size(X)); + return; + + case 1 + + I = indices(X); + sz = X.n * ones(1,X.m); + Y = tenzeros(sz); + + Q = size(I,1); + + for q = 1:Q + i = I(q,:); + pi = perms(i); + Y(pi) = X.val(q); + end + return; + + otherwise; + error('Incorrect version specification'); +end + +end + + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ge.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ge.m new file mode 100644 index 0000000..46dfc62 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ge.m @@ -0,0 +1,18 @@ +function Z = ge(X,Y) +%GE Greater than or equal (>=) for tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ge,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/gt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/gt.m new file mode 100644 index 0000000..489af8e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/gt.m @@ -0,0 +1,18 @@ +function Z = gt(X,Y) +%GT Greater than (>) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@gt,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/indices.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/indices.m new file mode 100644 index 0000000..db5ecb7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/indices.m @@ -0,0 +1,84 @@ +function [I,C,W,Q] = indices(varargin) +%INDICES Compute unique indices of a symmetric tensor. +% +% [I,C,W,Q] = INDICES(A) returns all unique indices for a +% symmetric tensor. Each row of I is an index listed in increasing order. +% Each row of C is the corresponding monomial representation, and W +% is the count of how many times that index appears in the symmetric +% tensor. Q is the number of rows of I, the number of unique indices. 
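To make the four outputs concrete, a sketch with an order-3 symtensor of dimension 3 (the closing norm identity is an inference from W being the entry multiplicities, not a statement taken from this file):

A = symtensor(@rand, 3, 3);
[I, C, W, Q] = indices(A);     % Q = nchoosek(3+3-1,3) = 10 unique indices
% I(q,:) is a nondecreasing subscript, C(q,:) its monomial representation,
% and W(q) is how many entries of the full tensor share that subscript, so
sum(W .* A.val.^2)             % matches norm(full(A))^2 up to round-off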
+% + +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if nargin == 0 + error('INDICES requires at least one input argument'); +elseif nargin == 2 % Specify m and n + m = varargin{1}; + n = varargin{2}; +elseif nargin == 1 + A = varargin{1}; + if ~(isa(A,'symktensor') || isa(A,'tensor') || isa(A,'symtensor')) + error('First argument must be a scalar or a tensor-like class'); + end + m = ndims(A); + n = size(A,1); +else + error('Wrong number of input arguments'); +end + +%% Determine size +sz = nchoosek(m+n-1,m); + +%% Create I +% Following from function UpdateIndex (Figure 4) in +% G. Ballard, T. G. Kolda and T. Plantenga, Efficiently Computing Tensor +% Eigenvalues on a GPU, IPDPSW'11: Proceedings of the 2011 IEEE +% International Symposium on Parallel and Distributed Processing Workshops +% and PhD Forum, 12th IEEE International Workshop on Parallel and +% Distributed Scientific and Engineering Computing (PDSEC-11), Anchorage, +% Alaska (2011-05-16 to 2011-05-20), IEEE Computer Society, pp. 1340-1348, +% May 2011, doi:10.1109/IPDPS.2011.287 + +I = zeros(sz,m); + +for loc = 1:sz + if loc == 1 + I(loc,:) = ones(1,m); + else + I(loc,:) = I(loc-1,:); + j = m; + while (I(loc,j) == n) + j = j - 1; + end + I(loc,j:m) = I(loc,j)+1; + end +end + +if nargout==1 %Function can be called without monomials or weights + return +end + +%% Compute C from I +C = zeros(sz,n); +for i = 1:n + C(:,i) = sum(I == i,2); +end + +%% COMPUTE W (weights) from C +W = zeros(sz,1); +for i = 1:sz + W(i) = multinomial(m,C(i,:)); +end +Q=sz; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isequal.m new file mode 100644 index 0000000..2c1c36c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isequal.m @@ -0,0 +1,27 @@ +function z = isequal(x,y) +%ISEQUAL for symmetric tensors. +% +% ISEQUAL(A,B) compares the symmetric tensors A and B for equality. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% +if ~isequal(size(x),size(y)) + z = false; +elseif isa(x,'symtensor') && isa(y,'symtensor') + z = isequal(x.val,y.val); +else + z = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isscalar.m new file mode 100644 index 0000000..8b71d95 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for symtensors. 
+% ISSCALAR(S) returns logical 0 (false) if S is a symtensor. +% +% See also SYMTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/issymmetric.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/issymmetric.m new file mode 100644 index 0000000..c5d3540 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/issymmetric.m @@ -0,0 +1,19 @@ +function tf = issymmetric(t) +%ISSYMMETRIC Checks if tensor is symmetric (always true for symtensor). +% +% ISSYMMETRIC(S) returns true when S is a symtensor. +% +% See also SYMTENSOR, TENSOR/ISSYMMETRIC. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +tf = true; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ldivide.m new file mode 100644 index 0000000..61379e6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ldivide.m @@ -0,0 +1,26 @@ +function Z = ldivide(X,Y) +%LDIVIDE Left array divide (.\) for symmetric tensors. +% +% LDIVIDE(A,B) is called for the syntax 'A .\ B' when A or B is a symmetric +% tensor. A and B must have the same size, unless one is a scalar. +% +% Examples +% X = symtenrand([4 4 4]); +% X .\ 3 +% X .\ X +% +% See also SYMTENSOR, SYMTENSOR/RDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ldivide,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/le.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/le.m new file mode 100644 index 0000000..dba30b7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/le.m @@ -0,0 +1,18 @@ +function Z = le(X,Y) +%LE Less than or equal (<=) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@le,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/lt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/lt.m new file mode 100644 index 0000000..7577c16 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/lt.m @@ -0,0 +1,18 @@ +function Z = lt(X,Y) +%LT Less than (<) for symmetric tensor. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@lt,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/minus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/minus.m new file mode 100644 index 0000000..890eec1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/minus.m @@ -0,0 +1,22 @@ +function Z = minus(X,Y) +%MINUS Binary subtraction (-) for symmetric tensors. +% +% MINUS(A,B) is called for the syntax 'A - B' when A or B is a symmetric +% tensor. A and B must have the same size, unless one is a scalar. +% A scalar can be subtracted from a tensor of any size. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@minus,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mldivide.m new file mode 100644 index 0000000..34c63f1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mldivide.m @@ -0,0 +1,31 @@ +function Z = mldivide(X,Y) +%MLDIVIDE Slash left division for symmetric tensors. +% +% MLDIVIDE(A,B) is called for the syntax 'A \ B' when A is a scalar and B +% is a symmetric tensor. +% +% Example +% X = symtenrand([4 4 4]); +% 3 \ X +% +% See also TENSOR, TENSOR/LDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if isscalar(X) + Z = tenfun(@ldivide,X,Y); + return; +end + +error('MLDIVIDE only supports the scalar case for symmetric tensors'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mrdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mrdivide.m new file mode 100644 index 0000000..36187bb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mrdivide.m @@ -0,0 +1,31 @@ +function Z = mrdivide(X,Y) +%MRDIVIDE Slash right division for symmetric tensors. +% +% MRDIVIDE(A,B) is called for the syntax 'A / B' when A is a symmetric +% tensor and B is a scalar. +% +% Example +% X = tenrand([4 4 4]); +% X / 3 +% +% See also SYMTENSOR, SYMTENSOR/RDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isscalar(Y) + Z = tenfun(@rdivide,X,Y); + return; +end + +error('MRDIVIDE only supports the scalar case for symmetric tensors'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mtimes.m new file mode 100644 index 0000000..1759b03 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/mtimes.m @@ -0,0 +1,45 @@ +function C = mtimes(A,B) +%MTIMES tensor-scalar multiplication. +% +% C = MTIMES(A,B) is called for the syntax 'A * B' when A or B is a +% symtensor and the other argument is a scalar. +% +% For symtensor-symtensor array multiplication, use TIMES or 'A .* B'. +% +% Examples +% X = symtenrand([4,4,4]) +% W = 5 * X +% +% See also SYMTENSOR, SYMTENSOR/TIMES +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% +if isscalar(B) + C = A; + C.val = B * C.val; + return; +end + +if isscalar(A) + C = B; + C.val = A * C.val; + return; +end + +error('Mtimes only supports a symtensor times a scalar'); + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ndims.m new file mode 100644 index 0000000..11c7880 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ndims.m @@ -0,0 +1,23 @@ +function n = ndims(t) +%NDIMS Number of dimensions for a symtensor. +% +% NDIMS(S) returns the number of dimensions of a symtensor S. +% +% Examples +% X = symtensor(rand([4,4,4])); ndims(X) %<-- Returns 3 +% +% See also SYMTENSOR, TENSOR/NDIMS +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. 
+% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = t.m; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ne.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ne.m new file mode 100644 index 0000000..830e518 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/ne.m @@ -0,0 +1,18 @@ +function Z = ne(X,Y) +%NE Not equal (~=) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ne,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/not.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/not.m new file mode 100644 index 0000000..2ebb7a8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/not.m @@ -0,0 +1,18 @@ +function B = not(A) +%NOT Logical NOT (~) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +B = A; +B.val = not(A.val); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/or.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/or.m new file mode 100644 index 0000000..f4a3b5c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/or.m @@ -0,0 +1,18 @@ +function Z = or(X,Y) +%OR Logical OR (|) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@or,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/plus.m new file mode 100644 index 0000000..36b4777 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/plus.m @@ -0,0 +1,26 @@ +function Z = plus( X, Y ) +%PLUS Binary addition (+) for symtensors. +% +% PLUS(A,B) is called for the syntax 'X + Y' when X and Y are symtensors. 
+% A and B must be the same size, unless one is a scalar. A scalar can be +% added to a symtensor of any size. +% +% See also SYMTENSOR, TENSOR/PLUS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +Z = tenfun(@plus,X,Y); + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/power.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/power.m new file mode 100644 index 0000000..5010b3a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/power.m @@ -0,0 +1,17 @@ +function Z = power(X,Y) +%POWER Elementwise power (.^) operator for a symmetric tensor. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +Z = tenfun(@power,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/private/multinomial.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/private/multinomial.m new file mode 100644 index 0000000..6861f8d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/private/multinomial.m @@ -0,0 +1,65 @@ +function c = multinomial(n,varargin) +% MULTINOMIAL Multinomial coefficients. +% +% MULTINOMIAL(N, K1, K2, ..., Km) where N and Ki are floating point +% arrays of non-negative integers satisfying N = K1 + K2 + ... + Km, +% returns the multinomial coefficient N!/( K1!* K2! ... *Km!). +% +% MULTINOMIAL(N, [K1 K2 ... Km]) when Ki's are all scalar, is the +% same as MULTINOMIAL(N, K1, K2, ..., Km) and runs faster. +% +% Non-integer input arguments are pre-rounded by FLOOR function. 
+% +% EXAMPLES: +% multinomial(8, 2, 6) returns 28 +% binomial(8, 2) returns 28 +% +% multinomial(8, 2, 3, 3) returns 560 +% multinomial(8, [2, 3, 3]) returns 560 +% +% multinomial([8 10], 2, [6 8]) returns [28 45] + +% Mukhtar Ullah +% November 1, 2004 +% mukhtar.ullah@informatik.uni-rostock.de + +nIn = nargin; +%error(nargchk(2, nIn, nIn)) + +if ~isreal(n) || ~isfloat(n) || any(n(:)<0) + error('Inputs must be floating point arrays of non-negative reals') +end + +arg2 = varargin; +dim = 2; + +if nIn < 3 + k = arg2{1}(:).'; + if isscalar(k) + error('In case of two arguments, the 2nd cannot be scalar') + end +else + [arg2{:},sizk] = sclrexpnd(arg2{:}); + if sizk == 1 + k = [arg2{:}]; + else + if ~isscalar(n) && ~isequal(sizk,size(n)) + error('Non-scalar arguments must have the same size') + end + dim = numel(sizk) + 1; + k = cat(dim,arg2{:}); + end +end + +if ~isreal(k) || ~isfloat(k) || any(k(:)<0) + error('Inputs must be floating point arrays of non-negative reals') +end + +n = floor(n); +k = floor(k); + +if any(sum(k,dim)~=n) + error('Inputs must satisfy N = K1 + K2 ... + Km ') +end + +c = floor(exp(gammaln(n+1) - sum(gammaln(k+1),dim)) + .5); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/rdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/rdivide.m new file mode 100644 index 0000000..57835d5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/rdivide.m @@ -0,0 +1,25 @@ +function Z = rdivide(X,Y) +%RDIVIDE Right array divide (./) for tensors. +% +% RDIVIDE(A,B) is called for the syntax 'A ./ B' when A or B is a tensor. +% A and B must have the same size, unless one is a scalar. +% +% Examples +% X = tenrand([4 4 4]); +% X ./ 3 +% X ./ X +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +Z = tenfun(@rdivide,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/size.m new file mode 100644 index 0000000..01add2d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/size.m @@ -0,0 +1,33 @@ +function sz = size(t,idx) +%SIZE Dimensions of a symmetric tensor. +% +% D = SIZE(T) returns the size of a symtensor T. +% +% I = SIZE(T,DIM) returns the size of symtensor T in the dimension +% specified by the scalar DIM. +% +% See also SYMTENSOR, SYMTENSOR/NDIMS, SIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if (t.m == 0) + sz = []; +elseif exist('idx','var') + if 1<=idx && idx<=t.m %Bounds check + sz = t.n; + else + error('Index exceeds tensor dimensions'); + end +else + sz = t.n * ones(1,t.m); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsasgn.m new file mode 100644 index 0000000..33b6f1a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsasgn.m @@ -0,0 +1,72 @@ +function A = subsasgn(A, S, B) +%SUBSASGN Subassignment for symtensor. +% +% Examples +% X = symtensor(1:20,3,4); +% X(:) = (20:-1:1)'; %<- Assign all values +% X.val = (1:20)'; %<- Another way to assign values +% X((1:2)') = [-1 -2] %<- Set the first two elements in array +% X(1,2,3) = -6 %<- Set the value at index (1,2,3) to -6 +% X([4 3 4; 4 4 4]) = -1 * X([4 3 4; 4 4 4]); %<- Reverse the sign +% +% Note: It is not recommended to assign the same element twice, e.g., +% X([5;5]) = [7;8] will result in X(5) = 8. But this behavior is not +% guaranteed nor tested. +% +% See also SYMTENSOR, TENSOR/SUBSASGN +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +switch S(1).type + case '{}' + error('Cell contents reference from a non-cell array object.') + + case '.' + if isequal(S(1).subs,'val') + if ~isequal(size(B),size(A.val)) + error('Cannot change the size of ''val'' array'); + end + A.val = B; + else + error('Invalid assignment'); + end + + case '()' + if (A.m > 1) && (numel(S.subs) == A.m) + if ~isscalar(B) + error('Can only assign scalars when using subindex'); + end + if ~all(cellfun(@(x) isscalar(x) && isnumeric(x), S.subs)) + error('Invalid indexing for symktensor'); + end + newS = S; + newS.subs = cell(1,1); + newS.subs{1} = cell2mat(S.subs); + A = subsasgn(A,newS,B); + elseif numel(S.subs) == 1 + if size(S.subs{1},2) == 1 + A.val = subsasgn(A.val, S, B); + elseif size(S.subs{1},2) == A.m + qsubs = sort(S.subs{1},2); %Sort the indices + asubs = indices(A); + [~,loca] = ismember(qsubs,asubs,'rows'); + A.val(loca) = B; + else + error('Invalid Indexing') + end + else + error('Invalid indexing'); + end + otherwise + %error('Subassignment only allowed for values.'); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsref.m new file mode 100644 index 0000000..7c21c59 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/subsref.m @@ -0,0 +1,81 @@ +function B = subsref(A, S) +%SUBSREF Subreference function for symtensor. +% +% Examples: +% X = symtensor(1:20,3,4); +% X.val %<- Returns the distinct values +% X.m %<- Tensor order +% X.n %<- Tensor dimension +% X(5) %<- Fifth distinct element +% X((1:4)') %<- Linear indexing of distinct values +% X(1,2,1) %<- Returns X(1,2,1) = X(1,1,2) = 2nd element +% X([1 1 2;3 2 1]) %<- Return two elements +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. 
Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt +sz=size(S); + +switch S(1).type + case '{}' + error('Cell contents reference from a non-cell array object.') + case '.' + fieldname = S(1).subs; + switch fieldname + case {'val','vals'} + if sz(2) == 2 % Can query into the val array + B = subsref(A.val, S(2)); + else + B = A.val; + end + + case 'm' + B = A.m; % Query the number of modes + case 'n' + B = A.n; % Query the dimension + otherwise + error(['No such field in symtensor: ', fieldname]); + end + case '()' + if (A.m > 1) && (numel(S.subs) == A.m) + if ~all(cellfun(@(x) isscalar(x) && isnumeric(x), S.subs)) + error('Invalid indexing for symktensor'); + end + newS = S; + newS.subs = cell(1,1); + newS.subs{1} = cell2mat(S.subs); + B = subsref(A,newS); + return; + elseif numel(S.subs) == 1 + if size(S.subs{1},2) == 1 % Linear indexing (into value array) + B = subsref(A.val,S); + return + end + if size(S.subs{1},2) == A.m % Query is a matrix whose rows are indices + qsubs = sort(S.subs{1},2); % Sort the indices + asubs = indices(A); + [tf,loca] = ismember(qsubs,asubs,'rows'); + if ~all(tf) + error('TTB:Symtensor:BadIdx', 'Invalid Indexing'); + end + B = A.val(loca); + return + end + error('Invalid Indexing') + else + error('Invalid indexing'); + end + otherwise + error('Invalid indexing for symktensor'); +end + + +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/symtensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/symtensor.m new file mode 100644 index 0000000..2e70da6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/symtensor.m @@ -0,0 +1,182 @@ +%SYMTENSOR Class for storing only unique entries of symmetric tensor. +% +%SYMTENSOR Methods: +% and - Logical AND (&) for symmetric tensors. +% disp - Command window display for a symtensor. +% display - Command window display of a symtensor. +% eq - Equal (==) for tensors. +% full - Convert symtensor to a tensor. +% ge - Greater than or equal (>=) for tensors. +% gt - Greater than (>) for symmetric tensors. +% indices - Compute unique indices of a symmetric tensor. +% isequal - for symmetric tensors. +% isscalar - False for symtensors. +% issymmetric - Checks if tensor is symmetric (always true for symtensor). +% ldivide - Left array divide (.\) for symmetric tensors. +% le - Less than or equal (<=) for symmetric tensors. +% lt - Less than (<) for symmetric tensor. +% minus - Binary subtraction (-) for symmetric tensors. +% mldivide - Slash left division for symmetric tensors. +% mrdivide - Slash right division for symmetric tensors. +% mtimes - tensor-scalar multiplication. +% ndims - Number of dimensions for a symtensor. +% ne - Not equal (~=) for symmetric tensors. +% not - Logical NOT (~) for symmetric tensors. +% or - Logical OR (|) for symmetric tensors. +% plus - Binary addition (+) for symtensors. +% power - Elementwise power (.^) operator for a symmetric tensor. +% rdivide - Right array divide (./) for tensors. +% size - Dimensions of a symmetric tensor. +% subsasgn - Subassignment for symtensor. +% subsref - Subreference function for symtensor. +% symtensor - Symmetric tensor that stores only the unique values. 
+% tenfun - Apply a function to each element in a symmetric tensor. +% times - Array multiplication (.*) for symmetric tensors. +% uminus - Unary minus (-) for tensors. +% uplus - Unary plus (+) for symmetric tensors. +% xor - Logical EXCLUSIVE OR for symmetric tensors. +% +% Documentation page for symmetric tensor class +% +% See also TENSOR_TOOLBOX, TENSOR/SYMMETRIZE, SYMKTENSOR, CP_SYM +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function [t,I] = symtensor(varargin) +%SYMTENSOR Symmetric tensor that stores only the unique values. +% +% S = SYMTENSOR(X) creates a symmetric tensor from a given tensor X. If X +% is not symmetric, then it is symmetrized. +% +% [S,I] = SYMTENSOR(X) when X is a tensor also returns I, a matrix of +% indices of X which define the symmetric tensor. (This is the only case +% where two arguments are returned.) +% +% S = SYMTENSOR(S0) copies the symtensor S0. +% +% S = SYMTENSOR(VALS,M,N) constructs a symmetric tensor with M modes, +% dimension N, and values specified by VALS. +% +% S = SYMTENSOR(FUN,M,N) constructs a symmetric tensor with M +% modes, dimension N, and values generated with function FUN. The +% function FUN must return a matrix of values given arguments of row and +% column size. +% +% Examples +% X = tenrand([3 3 3]); +% S = symtensor(X); % Symmetrize X +% S = symtensor(1:15,4,3); % Specify unique values, order 4, dim 3 +% S = symtensor(@rand,3,7); % Random, order 3, dim 7 +% S = symtensor(@ones,4,7); % Ones of order 4, dim 7 +% +% See also TENSOR/SYMMETRIZE, SYMKTENSOR, CP_SYM +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government.
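A brief usage sketch combining these constructor forms with the subscripted reference documented earlier in this diff (values are illustrative):

S = symtensor(1:15, 4, 3);   % order 4, dimension 3, 15 distinct values
vals = S.val;                % the 15 stored values as a column vector
x1 = S(2);                   % linear indexing into the distinct values
x2 = S(1,2,3,3);             % one element by subscript; equals S(3,3,2,1)
x3 = S([1 1 2 2; 3 3 3 3]);  % two elements, one subscript per row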
+% The full license terms can be found in the file LICENSE.txt + +% Set up --- the same for all +t = init_fields; +t = class(t,'symtensor'); + +% EMPTY CONSTRUCTOR +if nargin == 0 + error('TTB:BadInputs', 'Not enough input arguments.'); +end + +% COPY CONSTRUCTOR +if nargin == 1 && isa(varargin{1}, 'symtensor') + t.val = varargin{1}.val; + t.m = varargin{1}.m; + t.n = varargin{1}.n; + return; +end + +% CREATE FROM TENSOR +if nargin == 1 && isa(varargin{1}, 'tensor') + + src = varargin{1}; + src = symmetrize(src); + + m = ndims(src); + n = size(src,1); + t.m = m; + t.n = n; + + % Generate distinct indices + sz = nchoosek(m+n-1,m); + I = zeros(sz,m); + for loc = 1:sz + if loc == 1 + I(loc,:) = ones(1,m); + else + I(loc,:) = I(loc-1,:); + j = m; + while (I(loc,j) == n) + j = j - 1; + end + I(loc,j:m) = I(loc,j)+1; + end + end + + % Query symmetric indices from tensor, then save into val array + t.val = double(src(I)); + return; +end + +% For constructing a symtensor from a function handle +if nargin==3 && isa(varargin{1},'function_handle') + m = varargin{2}; + n = varargin{3}; + t.m = m; + t.n = n; + sz = nchoosek(m+n-1,m); + try + t.val = feval(varargin{1}, sz, 1); + t.val = double(t.val); % Convert to double, if possible + catch + error('TTB:BadInputs','Bad generating function'); + end + if ~isequal(size(t.val),[nchoosek(n+m-1,m),1]) + error('TTB:BadInputs','Bad generating function'); + end + return; +end + +% For constructing a symtensor from a value array +if nargin==3 + val = varargin{1}; + m = varargin{2}; + n = varargin{3}; + sz = nchoosek(m+n-1,m); + if ~isvector(val) || numel(val) ~= sz + error('TTB:BadInputs', 'Value array is the wrong size'); + end + t.m = m; + t.n = n; + if iscolumn(val) % Needs to be a column array + t.val = val; + else + t.val = val'; + end + return; +end + +error('TTB:BadInputs','Too many input arguments'); +end + +function t = init_fields() +t.val = []; +t.m = 0; +t.n = 0; +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/tenfun.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/tenfun.m new file mode 100644 index 0000000..eec763f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/tenfun.m @@ -0,0 +1,93 @@ +function Z = tenfun(fun,varargin) +%TENFUN Apply a function to each element in a symmetric tensor. +% +% TENFUN(F,X,...) applies the function specified by the function handle F +% to the given arguments. All arguments must be symtensors or scalars. +% The functions are applied to the value arrays. If there are more than +% two arguments, then the functions are applied elementwise. +% +% Examples +% Z = TENFUN(@(x)(x+1),X) %<-- increase every element by one +% Z = TENFUN(@eq,X,1) %<-- logical comparison of X with scalar +% Z = TENFUN(@plus,X,Y) %<-- adds the two symmetric tensors X and Y +% Z = TENFUN(@max,X,Y,Z) %<-- max over all elements in X,Y,Z +% +% See also SYMTENSOR, TENSOR/TENFUN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
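Most of the elementwise operators added earlier in this diff (plus, times, the comparisons, and so on) are thin wrappers around TENFUN; a small sketch under that reading:

X = symtensor(@rand, 3, 4);
Y = symtensor(@ones, 3, 4);
Z1 = X + Y;                    % plus.m dispatches to tenfun(@plus, X, Y)
Z2 = tenfun(@plus, X, Y);      % same result, called directly
Z3 = tenfun(@(v) 2*v + 1, X);  % unary case: applied to the stored value array
isequal(Z1, Z2)                % returns true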
+% The full license terms can be found in the file LICENSE.txt + + +if nargin < 2 + error('TTB:Symtensor:FunFail','Not enough input arguments.'); +end + +if ~isa(fun, 'function_handle') + error('TTB:Symtensor:FunFail','First argument must be a function handle.'); +end + +%% Unary +if nargin == 2 + X = varargin{1}; % Must be a symtensor! + Zvals = fun(X.val); + Z = symtensor(Zvals, ndims(X), size(X,1)); + return; +end + +%% Otherwise, sort out scalars and symtensors +tfscalar = cellfun(@isscalar, varargin); +tfsymtensor = cellfun(@(x) isa(x,'symtensor'), varargin); +sz = cellfun(@size, varargin(tfsymtensor), 'UniformOutput', false); + +if ~all(tfscalar | tfsymtensor) + error('TTB:BadInputs','All inputs must be either symtensors or scalars'); +end + +if length(sz) > 1 && ~isequal(sz{:}) + error('TTB:BadInputs','All tensor inputs must be the same size'); +end + +m = length(sz{1}); +n = sz{1}(1); + +%% Binary Function +if nargin == 3 + X = cell(2,1); + for j = 1:2 + if tfscalar(j) + X{j} = varargin{j}; + else + X{j} = varargin{j}.val; + end + end + Zvals = fun(X{1},X{2}); + Z = symtensor(Zvals, m, n); + return; +end + +%% More than two inputs --- handle elementwise, if possible +p = nchoosek(m+n-1,m); +X = zeros(p,nargin-1); +for j = 1:nargin-1 + if tfscalar(j) + X(:,j) = varargin{j}; + else + X(:,j) = varargin{j}.val; + end +end +Zvals = zeros(p,1); +for i = 1:p + Zvals(i) = fun(X(i,:)); +end +Z = symtensor(Zvals, m, n); +return; + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/times.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/times.m new file mode 100644 index 0000000..67c588f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/times.m @@ -0,0 +1,21 @@ +function Z = times(X,Y) +%TIMES Array multiplication (.*) for symmetric tensors. +% +% TIMES(A,B) is called for the syntax 'A .* B' when A or B is a +% symmetric tensor. A and B must have the same size, unless one is a +% scalar. A scalar can be multiplied by a symmetric tensor of any size. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +Z = tenfun(@times,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uminus.m new file mode 100644 index 0000000..c9ac19a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus (-) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +t.val = -t.val; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uplus.m new file mode 100644 index 0000000..c077396 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus (+) for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/xor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/xor.m new file mode 100644 index 0000000..6f3f88d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@symtensor/xor.m @@ -0,0 +1,18 @@ +function Z = xor(X,Y) +%XOR Logical EXCLUSIVE OR for symmetric tensors. +% +% See also SYMTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@xor,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/ctranspose.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/ctranspose.m new file mode 100644 index 0000000..8fb6932 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/ctranspose.m @@ -0,0 +1,23 @@ +function a = ctranspose(a) +%CTRANSPOSE Complex conjugate transpose for tenmat. +% +% C = CTRANSPOSE(A) swaps the row and column indices of A. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +tmp = a.rindices; +a.rindices = a.cindices; +a.cindices = tmp; +a.data = ctranspose(a.data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/disp.m new file mode 100644 index 0000000..7661de2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/disp.m @@ -0,0 +1,41 @@ +function disp(t,name) +%DISP Command window display of a matricized tensor (tenmat). +% +% DISP(T) displays a tensor as matrix with no name. +% +% DISP(T,NAME) display a tensor as matrix with the given name. +% +% See also TENMAT, TENMAT/DISPLAY. 
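A short sketch of the transpose behavior documented above; tenmat(T,RDIMS) is assumed to be the usual constructor of the class (it is referenced but not added in this hunk), and the concrete sizes are illustrative:

T = tensor(rand(3,4,2));
A = tenmat(T, 1);      % mode-1 unfolding: rows from mode 1 (3 x 8 matrix)
B = A';                % ctranspose swaps the row and column modes: 8 x 3
double(B)              % the underlying data, conjugate-transposed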
+% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('name','var') + name = 'ans'; +end + +fprintf('%s is a matrix corresponding to a tensor of size %s\n',... + name,tt_size2str(t.tsize)); +fprintf('\t%s.rindices = %s (modes of tensor corresponding to rows)\n',... + name,['[ ' num2str(t.rindices) ' ]']); +fprintf('\t%s.cindices = %s (modes of tensor corresponding to columns)\n',... + name,['[ ' num2str(t.cindices) ' ]']); + +if isempty(t.data) + fprintf('\t%s.data = []\n',name); +else + fprintf('\t%s.data = \n',name); + output = tt_matrix2cellstr(t.data); + fprintf('\t\t%s\n',output{:}); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/display.m new file mode 100644 index 0000000..1c3e178 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/display.m @@ -0,0 +1,18 @@ +function display(t) +%DISPLAY Command window display of a tenmat. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/double.m new file mode 100644 index 0000000..ef9031a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/double.m @@ -0,0 +1,18 @@ +function a = double(t) +%DOUBLE Convert tenmat to double array. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +a = t.data; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/end.m new file mode 100644 index 0000000..d651b72 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/end.m @@ -0,0 +1,24 @@ +function e = end(X,k,n) +%END Last index of indexing expression for tenmat. +% +% The expression X(end,:) will call END(X,1,2) to determine +% the value of the first index. +% +% See also TENMAT, TENMAT/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. 
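A brief usage sketch for the tenmat disp, double, and end methods in this stretch of the diff (illustrative values; the tenmat constructor itself is added further down):

X = tensor(rand(3,4,2));
A = tenmat(X, [1 2]);     % modes [1 2] -> rows, mode 3 -> columns, so A is 12-by-2
disp(A, 'A')              % prints tsize, rindices, cindices and the 12-by-2 data block
M = double(A);            % plain 12-by-2 double array
a = A(end, end);          % end.m resolves 'end' via size(A.data,k), so this is A(12,2)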
+% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if n > ndims(X) + error('Subscript out of range.'); +end +e = size(X.data,k); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/minus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/minus.m new file mode 100644 index 0000000..0fa004b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/minus.m @@ -0,0 +1,38 @@ +function Z = minus(X,Y) +%MINUS Binary subtraction (-) for tenmat. +% +% See also TENMAT, TENMAT/TENMATFUN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +fun = @minus; + +% One argument is a scalar +if ((prod(size(X)) == 1 || prod(size(Y)) == 1)) + if (prod(size(Y)) == 1) && isa(X,'tenmat') + Z = X; + Z.data = fun(Z.data,Y); + else + Z = Y; + Z.data = fun(X,Z.data); + end + return; +end + + +% Both arguments are tenmats +Z = tenmat(Y); +if ~(isequal(size(Y),size(Z))) + error('Tenmat size mismatch.') +end +Z.data = fun(X.data,Z.data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/mtimes.m new file mode 100644 index 0000000..6026d21 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/mtimes.m @@ -0,0 +1,62 @@ +function C = mtimes(A,B) +%MTIMES Multiplies two tenmat objects. +% +% C = MTIMES(A,B) computes the product of A and B. The result is a +% TENMAT object and can be transformed into a tensor. +% +% C = MTIMES(A,B) is called for the syntax 'A * B' when A or B is a +% TENMAT object. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Handle scalar input +if ~isa(B,'tenmat') && numel(B) == 1 + C = A; + C.data = C.data * B; + return; +end +if ~isa(A,'tenmat') && numel(A) == 1 + C = B; + C.data = C.data * A; + return; +end + +% Handle matrix input +if ~isa(A,'tenmat') + A = tenmat(A,1); +end + +if ~isa(B,'tenmat') + B = tenmat(B,1); +end + +% Error check +if size(A,2) ~= size(B,1) + error(['Size mismatch: Number of columns in A is not equal to' ... 
+ ' the number of rows in B']); +end + +tsiz = [A.tsize(A.rindices) B.tsize(B.cindices)]; + +if ~isempty(tsiz) + C = tenmat; + C.tsize = tsiz; + C.rindices = 1:length(A.rindices); + C.cindices = (1:length(B.cindices)) + length(A.rindices); + C.data = A.data * B.data; +else + C = A.data * B.data; +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/norm.m new file mode 100644 index 0000000..be2b1f7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/norm.m @@ -0,0 +1,21 @@ +function n = norm(T) +%NORM Frobenius norm of a tenmat. +% +% NORM(X) returns the Frobenius norm of a tenmat. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +v = reshape(T.data, numel(T.data), 1); +n = norm(v); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/plus.m new file mode 100644 index 0000000..eee61f3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/plus.m @@ -0,0 +1,38 @@ +function Z = plus(X,Y) +%PLUS Binary addition (+) for tenmat. +% +% See also TENMAT, TENMAT/TENMATFUN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +fun = @plus; + +% One argument is a scalar +if ((prod(size(X)) == 1 || prod(size(Y)) == 1)) + if (prod(size(Y)) == 1) && isa(X,'tenmat') + Z = X; + Z.data = fun(Z.data,Y); + else + Z = Y; + Z.data = fun(X,Z.data); + end + return; +end + + +% Both arguments are tenmats +Z = tenmat(Y); +if ~(isequal(size(Y),size(Z))) + error('Tenmat size mismatch.') +end +Z.data = fun(X.data,Z.data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/size.m new file mode 100644 index 0000000..52fe447 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/size.m @@ -0,0 +1,31 @@ +function sz = size(a,idx) +%SIZE Size of tenmat. +% +% D = SIZE(X) returns the two-element row vector D = [M N] +% containing the number of rows and columns in the matrix. +% +% M = SIZE(X,DIM) returns the length of the dimension specified by +% the scalar DIM. For example, SIZE(X,1) returns the number of +% rows. +% +% See also TENMAT, TENMAT/TSIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(a.data) + sz = []; +elseif exist('idx', 'var') + sz = size(a.data, idx); +else + sz = size(a.data); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsasgn.m new file mode 100644 index 0000000..77ee78e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsasgn.m @@ -0,0 +1,33 @@ +function t = subsasgn(t,s,b) +%SUBSASGN Subscripted assignment for tenmat. +% +% Examples +% X = tenmat(rand(3,4,2),1); +% X(1:2,1:2) = ones(2,2); <-- Calls SUBSASGN +% +% See also TENMAT, TENMAT/SUBSREF. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s.type + case '()' + [m n] = size(t.data); + t.data(s.subs{:}) = b; + if ~isequal([m n],size(t.data)) + error('Ambiguous change in size') + end + otherwise + error('Invalid assignment for tenmat.') +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsref.m new file mode 100644 index 0000000..4fb39ad --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/subsref.m @@ -0,0 +1,44 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for tenmat. +% +% Examples +% X(i,j) <-- returns the (i,j) entry in X +% X.data <-- returns a 2D array of the data +% X.tsize <-- returns the size original tensor +% X.rdims <-- tensor dimensions that were mapped to rows +% X.cdims <-- tensor dimensions that were mapped to columns +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '.' + switch s(1).subs + case 'data' + a = tt_subsubsref(t.data,s); + case 'tsize' + a = tt_subsubsref(t.tsize,s); + case {'rindices','rdims'} + a = tt_subsubsref(t.rindices,s); + case {'cindices','cdims'} + a = tt_subsubsref(t.cindices,s); + otherwise + error(['No such field: ', s.subs]); + end + case '()' + a = t.data(s.subs{:}); + otherwise + error('Invalid subsref into tenmat.') +end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tenmat.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tenmat.m new file mode 100644 index 0000000..707fa9c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tenmat.m @@ -0,0 +1,210 @@ +%TENMAT Store tensor as a matrix. 
+% +%TENMAT Methods: +% ctranspose - Complex conjugate transpose for tenmat. +% disp - Command window display of a matricized tensor (tenmat). +% display - Command window display of a tenmat. +% double - Convert tenmat to double array. +% end - Last index of indexing expression for tenmat. +% minus - Binary subtraction (-) for tenmat. +% mtimes - Multiplies two tenmat objects. +% norm - Frobenius norm of a tenmat. +% plus - Binary addition (+) for tenmat. +% size - Size of tenmat. +% subsasgn - Subscripted assignment for tenmat. +% subsref - Subscripted reference for tenmat. +% tenmat - Create a matricized tensor. +% tsize - Tensor size of tenmat. +% uminus - Unary minus (-) for tenmat. +% uplus - Unary plus (+) for tenmat. +% +% Documentation page for tensor-as-matrix class +% +% See also TENSOR_TOOLBOX +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function A = tenmat(varargin) +%TENMAT Create a matricized tensor. +% +% A = TENMAT(T, RDIMS) creates a matrix representation of a tensor +% T. The dimensions (or modes) specified in RDIMS map to the rows +% of the matrix, and the remaining dimensions (in ascending order) +% map to the columns. +% +% A = TENMAT(T, CDIMS, 't') does the same as above, but instead the +% column dimensions are specified, and the remaining dimensions (in +% ascending order) map to the rows. +% +% A = TENMAT(T, RDIMS, CDIMS) creates a matrix representation of +% tensor T. The dimensions specified in RDIMS map to the rows of +% the matrix, and the dimensions specified in CDIMS map to the +% columns, in the order given. +% +% A = TENMAT(T, RDIM, STR) creates the same matrix representation as +% above, except only one dimension in RDIM maps to the rows of the +% matrix, and the remaining dimensions span the columns in an order +% specified by the string argument STR as follows: +% +% 'fc' - Forward cyclic. Order the remaining dimensions in the +% columns by [RDIM+1:ndims(T), 1:RDIM-1]. This is the +% ordering defined by Kiers. +% +% 'bc' - Backward cyclic. Order the remaining dimensions in the +% columns by [RDIM-1:-1:1, ndims(T):-1:RDIM+1]. This is the +% ordering defined by De Lathauwer, De Moor, and Vandewalle. +% +% A = TENMAT(A, RDIMS, CDIMS, TSIZE) creates a tenmat from a matrix +% A along with the mappings of the row (RDIMS) and column indices +% (CDIMS) and the size of the original tensor (TSIZE). +% +% A = TENMAT(B) is the copy constructor for B also a tenmat. +% +% A = TENMAT is the empty constructor. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% +% Includes improvements offered by Marcus Brubaker. 
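Pulling the constructor variants together with the mtimes, norm, size, and tsize methods above, a hypothetical end-to-end sketch (illustrative only; the final conversion back to a tensor relies on the tensor(tenmat) constructor used by collapse.m further down in this diff):

X  = tensor(rand(4,3,2));
A1 = tenmat(X, [1 2]);        % rows = modes [1 2] (12 rows), columns = mode 3 (2 columns)
A2 = tenmat(X, 3, 't');       % same mapping, specified through the column dimensions
A3 = tenmat(X, 2, 'fc');      % forward cyclic: columns ordered as modes [3 1]
size(A3)                      % matrix size, here [3 8]
tsize(A3)                     % size of the underlying tensor, [4 3 2]
B  = tenmat(X, 1);            % 4-by-6 matricization
C  = B * B';                  % tenmat times tenmat: a 4-by-4 tenmat with tsize [4 4]
norm(C)                       % Frobenius norm of the product
Y  = tensor(C);               % back to a 4-by-4 tensor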
+ + +%---------- +% Case 0a: Empty Contructor +%---------- +if (nargin == 0) + A.tsize = []; + A.rindices = []; + A.cindices = []; + A.data = []; + A = class(A, 'tenmat'); + return; +end + +%---------- +% Case 0b: Copy Contructor +%---------- +if (nargin == 1) + B = varargin{1}; + A.tsize = B.tsize; + A.rindices = B.rindices; + A.cindices = B.cindices; + A.data = B.data; + A = class(A, 'tenmat'); + return; +end + +%---------- +% Case I: Called to convert a matrix to a tenmat +%---------- +if (nargin == 4) + + data = varargin{1}; + if ~isnumeric(data) || (ndims(data) ~= 2) + error('A must be a matrix.'); + end + rdims = varargin{2}; + cdims = varargin{3}; + tsize = varargin{4}; + + % Error check + n = numel(tsize); + if ~isequal(1:n, sort([rdims cdims])) + error('Incorrect specification of dimensions'); + elseif (prod(tsize(rdims)) ~= size(data,1)) + error('SIZE(A,1) does not match size specified by RDIMS and SIZE.'); + elseif (prod(tsize(cdims)) ~= size(data,2)) + error('SIZE(A,2) does not match size specified by CDIMS and SIZE.'); + end + + % Save class variables + A.tsize = tsize; + A.rindices = rdims; + A.cindices = cdims; + A.data = data; + A = class(A, 'tenmat'); + return; + +end + +%---------- +% Case II: Called to convert an MDA to a tenmat --- recall after +% converting MDA to a tensor. +%---------- +if isa(varargin{1},'double') + A = tenmat(tensor(varargin{1}),varargin{2:nargin}); + return; +end + +%---------- +% Case III: Convert a tensor to a tenmat +%---------- + +if (nargin < 2) || (nargin > 3) + error('Incorrect number of arguments.'); +end + +% Save the size of T and the number of dimensions +T = varargin{1}; +tsize = size(T); +n = ndims(T); + +% Figure out which dimensions get mapped where +if (nargin == 2) + rdims = varargin{2}; + tmp = true(1,n); + tmp(rdims) = false; + cdims = find(tmp); % i.e., cdims = setdiff(1:n, rdims); +elseif isa(varargin{3},'char') + switch varargin{3} + case 't' % Transpose + cdims = varargin{2}; + tmp = true(1,n); + tmp(cdims) = false; + rdims = find(tmp); % i.e., rdims = setdiff(1:n, cdims); + case 'fc' % Forward cyclic + rdims = varargin{2}; + if (numel(rdims) ~= 1) + error('Only one row dimension if third argument is ''fc''.'); + end + cdims = [rdims+1:n, 1:rdims-1]; + case 'bc' % Backward cyclic + rdims = varargin{2}; + if (numel(rdims) ~= 1) + error('Only one row dimension if third argument is ''bc''.'); + end + cdims = [rdims-1:-1:1, n:-1:rdims+1]; + otherwise + error('Unrecognized option'); + end +else + rdims = varargin{2}; + cdims = varargin{3}; +end + +% Error check +if ~isequal(1:n, sort([rdims cdims])) + error('Incorrect specification of dimensions'); +end + +% Permute T so that the dimensions specified by RDIMS come first +data = reshape(double(permute(T,[rdims cdims])), prod(tsize(rdims)), prod(tsize(cdims))); + +% Save class variables +A.tsize = tsize; +A.rindices = rdims; +A.cindices = cdims; +A.data = data; +A = class(A, 'tenmat'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tsize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tsize.m new file mode 100644 index 0000000..e1babf8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/tsize.m @@ -0,0 +1,33 @@ +function sz = tsize(a,idx) +%TSIZE Tensor size of tenmat. +% +% D = TSIZE(X) returns the size of the tensor being stored as a +% matrix. +% +% M = TSIZE(X,DIM) returns the length of the dimension(s) specified +% by DIM. For example, SIZE(X,1) returns the size of the first +% dimension of the tensor. 
+% +% See also TENMAT, TENMAT/SIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(a.data) + sz = []; +elseif exist('idx', 'var') + sz = a.tsize(idx); +else + sz = a.tsize; +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uminus.m new file mode 100644 index 0000000..09cc44c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus (-) for tenmat. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.data = -t.data; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uplus.m new file mode 100644 index 0000000..5cbc6d8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tenmat/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus (+) for tenmat. +% +% See also TENMAT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/and.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/and.m new file mode 100644 index 0000000..eb89fea --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/and.m @@ -0,0 +1,18 @@ +function Z = and(X,Y) +%AND Logical AND (&) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@and,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/collapse.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/collapse.m new file mode 100644 index 0000000..76bed6a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/collapse.m @@ -0,0 +1,80 @@ +function Y = collapse(X,dims,fun) +%COLLAPSE Collapse tensor along specified dimensions. +% +% Y = COLLAPSE(X,DIMS) sums the entries of X along all dimensions +% specified in DIMS. If DIMS is negative, then X is summed across +% all dimensions *not* specified by -DIMS. +% +% Y = COLLAPSE(X) is shorthand for S = COLLAPSE(X,1:ndims(X)). +% +% Y = COLLAPSE(X,DIMS,FUN) accumulates the entries of T using the +% accumulation function @FUN. +% +% Examples +% X = tenrand([4 4 4]); +% Y = collapse(X,[2 3]) %<-- sum of entries in each mode-1 slice +% Y = collapse(X,[1 2],@max) %<-- max entry in each mode-3 slice +% +% Documentation page for collapsing and scaling tensors +% +% See also TENSOR, TENSOR/SCALE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isempty(X.data) + Y = []; + return; +end + +if ~exist('dims', 'var') + dims = 1:ndims(X); +end + +if isempty(dims) + Y = X; + return; +end + +if ~exist('fun', 'var') + fun = @sum; +end + +dims = tt_dimscheck(dims,ndims(X)); +remdims = setdiff(1:ndims(X),dims); + +% Check for the case where we accumulate over *all* dimensions +if isempty(remdims) + Y = fun(X.data(:)); + return; +end + +% Calculate the size of the result +newsiz = size(X,remdims); + +% Convert to a matrix where each row is going to be collapsed +A = double(tenmat(X,remdims,dims)); + +% Apply the collapse function +B = zeros(size(A,1),1); +for i = 1:size(A,1) + B(i) = fun(A(i,:)); +end + +% Form and return the final result +Y = tensor(tenmat(B,1:numel(remdims),[],newsiz)); + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/contract.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/contract.m new file mode 100644 index 0000000..6b1870c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/contract.m @@ -0,0 +1,75 @@ +function y = contract(x,i,j) +%CONTRACT Contract tensor along two dimensions (array trace). +% +% Y = CONTRACT(X,I,J) contracts the entries of X along dimensions I +% and J. Contraction is a generalization of matrix trace. In other +% words, the trace is performed along the two-dimensional slices +% defined by dimensions I and J. It is possible to implement tensor +% multiplication as an outer product followed by a contraction. +% +% Examples +% X = tensor(rand(4,3,2)); Y = tensor(rand(3,2,4)); +% Z1 = ttt(X,Y,1,3); %<-- Normal tensor multiplication +% Z2 = contract(ttt(X,Y),1,6); %<-- Outer product + contract +% norm(Z1-Z2) %<-- Should be zero +% +% See also TENSOR, TENSOR/TTT. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. 
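As a quick illustration of the collapse semantics described above (negative DIMS and a custom accumulation function), a usage sketch on a small random tensor:

X   = tensor(rand(4,3,2));
s1  = collapse(X, [2 3]);        % sum over modes 2 and 3: one value per mode-1 slice
s2  = collapse(X, -1);           % negative DIMS: collapse every mode except mode 1, same as s1
m3  = collapse(X, [1 2], @max);  % custom accumulator: max entry of each mode-3 slice
tot = collapse(X);               % collapse over all modes returns a plain scalar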
+% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Error checking +if x.size(i) ~= x.size(j) + error('Must contract along equally sized dimensions'); +end + +% Error checking +if i == j + error('Must contract along two different dimensions'); +end + +% Easy case - returns a scalar +if ndims(x) == 2 + y = trace(x.data); + return; +end + +% Remaining dimensions after trace +remdims = setdiff(1:ndims(x),[i j]); + +% Size for y +newsize = x.size(remdims); + +% Total size of remainder +m = prod(newsize); + +% Number of items to add for trace +n = x.size(i); + +% Permute trace dimensions to the end +x = permute(x, [remdims i j]); + +% Reshape data to be 3D +data = reshape(x.data, m, n, n); + +% Add diagonal entries for each slice +newdata = zeros(m,1); +for i = 1:n + newdata = newdata + data(:,i,i); +end + +% Reshape result +if numel(newsize) > 1 + newdata = reshape(newdata,newsize); +end +y = tensor(newdata,newsize); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/disp.m new file mode 100644 index 0000000..48556d9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/disp.m @@ -0,0 +1,51 @@ +function disp(X,name) +%DISP Command window display of a tensor. +% +% DISP(X) displays a tensor with no name. +% +% DISP(X,NAME) displays a tensor with the given name. +% +% See also TENSOR, TENSOR/DISPLAY. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('name','var') + name = 'ans'; +end + +fprintf(1,'%s is a tensor of size %s\n',name,tt_size2str(X.size)); + +if isempty(X.data) + fprintf(1,'\t%s = []\n',name); + return +end + +s = shiftdim(num2cell(X.data,1:2),2); + +for i = 1:numel(s) + fprintf('\t%s',name); + if ndims(X) == 1 + fprintf('(:)'); + elseif ndims(X) == 2 + fprintf('(:,:)'); + elseif ndims(X) > 2 + fprintf('(:,:'); + fprintf(',%d',tt_ind2sub(X.size(3:end),i)); + fprintf(')'); + end + fprintf(' = \n'); + output = tt_matrix2cellstr(s{i}); + fprintf('\t%s\n',output{:}); +end + +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/display.m new file mode 100644 index 0000000..fe6faf3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/display.m @@ -0,0 +1,20 @@ +function display(t) +%DISPLAY Command window display of a tensor. +% +% DISPLAY(X) displays a tensor with its name. +% +% See also TENSOR, TENSOR/DISP. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/double.m new file mode 100644 index 0000000..11b1dea --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/double.m @@ -0,0 +1,20 @@ +function A = double(X) +%DOUBLE Convert tensor to double array. +% +% A = double(X) converts X to a standard multidimensional array. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +A = double(X.data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/end.m new file mode 100644 index 0000000..10a0a4c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/end.m @@ -0,0 +1,29 @@ +function e = end(X,k,n) +%END Last index of indexing expression for tensor. +% +% The expression X(end,:,:) will call END(X,1,3) to determine +% the value of the first index. +% +% See also TENSOR, TENSOR/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if n > ndims(X) + error('Subscript out of range.'); +end +if n > 1 %For subscripted indexing + e = X.size(k); %For subscripted indexing +else %Linear indexing, or X is a vector + e = prod(size(X)); %if X is a vector, this equals X.size(1) so works +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/eq.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/eq.m new file mode 100644 index 0000000..9b41264 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/eq.m @@ -0,0 +1,18 @@ +function Z = eq(X,Y) +%EQ Equal (==) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@eq,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/exp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/exp.m new file mode 100644 index 0000000..63bc37f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/exp.m @@ -0,0 +1,19 @@ +function t = exp(t) +%EXP Exponential for tensors. +% +% EXP(X) is the exponential of the elements of X. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +t.data = exp(t.data); \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/find.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/find.m new file mode 100644 index 0000000..e521c64 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/find.m @@ -0,0 +1,40 @@ +function [subs,vals] = find(t) +%FIND Find subscripts of nonzero elements in a tensor. +% +% S = FIND(X) returns the subscripts of the nonzero values in X. +% +% [S,V] = FIND(X) also returns a column vector of the values. +% +% Examples: +% X = tensor(rand(3,4,2)); +% subs = find(X > 0.5) %<-- find subscripts of values greater than 0.5 +% vals = X(subs) %<-- extract the actual values +% +% See also TENSOR/SUBSREF, TENSOR/SUBSASGN +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% Find the *linear* indices of the nonzero elements +idx = find(t.data); + +% Convert the linear indices to subscripts +subs = tt_ind2sub(t.size,idx); + +% Extract the corresponding values and return as a column vector +if nargout > 1 + if isempty(subs) + vals = []; + else + vals = reshape(t.data(idx), length(idx), 1); + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/full.m new file mode 100644 index 0000000..6972499 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/full.m @@ -0,0 +1,20 @@ +function X = full(X) +%FULL Convert to a (dense) tensor. +% +% FULL(X) returns X, i.e., does nothing. +% +% See also TENSOR, SPTENSOR/FULL, KTENSOR/FULL, TTENSOR/FULL. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
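The relational operators and find compose naturally with tensor subscripting; a brief sketch in the spirit of the find docstring above (gt itself appears just below):

X    = tensor(rand(3,4,2));
M    = (X > 0.5);         % a tensor of 0/1 values, same size as X
subs = find(M);           % subscripts of the entries above the threshold
vals = X(subs);           % extract those entries via subscript-array indexing
E    = exp(X);            % elementwise exponential, still a tensor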
+% The full license terms can be found in the file LICENSE.txt + + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ge.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ge.m new file mode 100644 index 0000000..f985a05 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ge.m @@ -0,0 +1,18 @@ +function Z = ge(X,Y) +%GE Greater than or equal (>=) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ge,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/gt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/gt.m new file mode 100644 index 0000000..ccb44f3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/gt.m @@ -0,0 +1,18 @@ +function Z = gt(X,Y) +%GT Greater than (>) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@gt,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/innerprod.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/innerprod.m new file mode 100644 index 0000000..23c2532 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/innerprod.m @@ -0,0 +1,50 @@ +function res = innerprod(X,Y) +%INNERPROD Efficient inner product with a tensor. +% +% R = INNERPROD(X,Y) efficiently computes the inner product between +% two tensors X and Y. If Y is a tensor, then the inner product is +% computed directly. Otherwise, the inner product method for +% that type of tensor is called. +% +% Examples +% X = tensor(rand(3,4,3)); +% Y = tensor(randi(10,3,4,3)); +% innerprod(X,Y) +% +% See also TENSOR, SPTENSOR/INNERPROD, KTENSOR/INNERPROD, TTENSOR/INNERPROD +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% X is a tensor +switch class(Y) + + case {'tensor'} + % No need for same size check because it is implicit in the inner + % product below. 
+ if ~isequal(size(X), size(Y)) + error('TTB:UnequalSize', 'X and Y must be the same size'); + end + x = reshape(X.data, 1, numel(X.data)); + y = reshape(Y.data, numel(Y.data), 1); + res = x*y; + + case {'sptensor','ktensor','ttensor'} + % Reverse arguments to call specialized code + res = innerprod(Y,X); + + otherwise + disp(['Inner product not available for class ' class(Y)]); + +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isequal.m new file mode 100644 index 0000000..149bf04 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isequal.m @@ -0,0 +1,29 @@ +function z = isequal(x,y) +%ISEQUAL for tensors. +% +% ISEQUAL(A,B) compares the tensors A and B for equality. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% +if ~isequal(x.size,y.size) + z = false; +elseif isa(x,'tensor') && isa(y,'tensor') + z = isequal(x.data,y.data); +elseif isa(y,'sptensor') + z = isequal(x,full(y)); +else + z = false; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isscalar.m new file mode 100644 index 0000000..e25b37f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for tensors. +% ISSCALAR(S) returns logical 0 (false) if S is a tensor. +% +% See also TENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/issymmetric.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/issymmetric.m new file mode 100644 index 0000000..558c42d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/issymmetric.m @@ -0,0 +1,146 @@ +function [tf,all_diffs,all_perms] = issymmetric(X,grps,ver) +%ISSYMMETRIC Verify that a tensor X is symmetric in specified modes. +% +% TF = ISSYMMETRIC(X) returns true if X is exactly symmetric for every +% permutation of its modes. +% +% [TF,DIFFS,PERMS] = ISSYMMETRIC(X) also returns that maximum difference +% in DIFFS for each permutation in PERMS (one permutation per row). +% +% [...] = ISSYMMETRIC(X,IDX) checks symmetry with respect to the modes +% specified in IDX, which can be an array of indices or a cell array of +% arrays of symmetric indices. +% +% Examples +% W = tensor(rand(3,3,3)); +% issymmetric(W,[1 2]) %<--Checks for symmetry in modes [1 2]. False here +% +% See also TENSOR, SYMMETRIZE. 
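A short check of the inner product above, plus the deliberately overloaded isscalar (which returns false even for a one-element tensor); only methods appearing in this diff are assumed:

X   = tensor(rand(3,4,3));
Y   = tensor(rand(3,4,3));
r   = innerprod(X, Y);                            % vec(X)' * vec(Y)
chk = sum(reshape(double(X).*double(Y), [], 1));  % same quantity on plain arrays
abs(r - chk) < 1e-12                              % expected true, up to round-off
isscalar(X)                                       % false: tensor objects are never MATLAB scalars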
+% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +n = ndims(X); +sz = size(X); + +%ver is an optional argument specifying the version to use. +if ~exist('ver', 'var') + ver = 0; %By default use new, faster version of issymmetric +end + +% Check that grps exists; if not, create it. +if ~exist('grps','var') + grps = 1:n; +end + +if nargout > 1 + ver = 1; % User requested permutation and difference information. +end + +% Check that grps is a cell array. +if ~iscell(grps) + grps = {grps}; +end + +%Substantially different routines are called depending on whether the user +%requests the permutation information. If permutation is required (or requested) +%the algorithm is much slower +switch ver + + case 0 %use new algorithm + + for i = 1:length(grps) + + % Extract current group + thisgrp = grps{i}; + + % Check tensor dimensions first + if ~all( sz(thisgrp(1)) == sz(thisgrp) ) + tf = false; + return; + end + + % Construct matrix ind where each row is the multi-index for + % one element of X + idx = tt_ind2sub(size(X), (1:numel(X.data))'); + + % Find reference index for every element in the tensor - this + % is to its index in the symmetrized tensor. This puts every + % element into a 'class' of entries that will be the same under + % symmetry. + classidx = idx; + classidx(:,thisgrp) = sort(idx(:,thisgrp),2); + linclassidx = tt_sub2ind(size(X), classidx); + + % Compare each element to its class exemplar + if any(X.data(:) ~= X.data(linclassidx)); + tf = false; + return + end + end + + % We made it past all the tests! + tf = true; + return + + case 1 %Use older algorithm + + % Check tensor dimensions for compatibility with symmetrization + for i = 1:length(grps) + dims = grps{i}; + for j = dims(2:end) + if sz(j) ~= sz(dims(1)) + tf = false; + return; + end + end + end + + % Check actual symmetry. + cnt = sum(cellfun(@(x) factorial(length(x)), grps)); + all_diffs = zeros(cnt,1); + all_perms = zeros(cnt,n); + idx = 1; + for i = 1:length(grps) + + % Compute the permutations for this group of symmetries + p = perms(grps{i}); + + for j = 1:size(p,1) + + % Create the permutation to check + q = 1:n; + q(grps{i}) = p(j,:); + + % Save the permutation + all_perms(idx,:) = q; + + % Do the permutation and see if it's a match. If it's not a match, + % record the difference. + Y = permute(X,q); + if isequal(X.data,Y.data) + all_diffs(idx) = 0; + else + all_diffs(idx) = max(abs(X.data(:)-Y.data(:))); + end + + % Increment the index + idx = idx + 1; + + end + + end + + otherwise + error('Incorrect version specification'); +end +tf = all(all_diffs == 0); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ldivide.m new file mode 100644 index 0000000..e0ba8b9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ldivide.m @@ -0,0 +1,26 @@ +function Z = ldivide(X,Y) +%LDIVIDE Left array divide for tensor. +% +% LDIVIDE(A,B) is called for the syntax 'A .\ B' when A or B is a tensor. +% A and B must have the same size, unless one is a scalar. 
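To illustrate the symmetry check above: the fast path sorts each multi-index within the symmetric group and compares every entry against its class representative. A sketch, assuming the companion @tensor/symmetrize method referenced in the 'See also' line is available:

W = tensor(rand(3,3,3));
issymmetric(W)                    % almost surely false for random data
S = symmetrize(W);                % one value per symmetry class
issymmetric(S)                    % expected true
[tf, diffs, P] = issymmetric(W);  % slower path: max deviation for every mode permutation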
+% +% Examples +% X = tenrand([4 3 2],5); +% X .\ 3 +% X .\ X +% +% See also TENSOR, TENSOR/RDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ldivide,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/le.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/le.m new file mode 100644 index 0000000..01b15b1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/le.m @@ -0,0 +1,18 @@ +function Z = le(X,Y) +%LE Less than or equal (<=) for tensor. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@le,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/lt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/lt.m new file mode 100644 index 0000000..5680865 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/lt.m @@ -0,0 +1,18 @@ +function Z = lt(X,Y) +%LT Less than (<) for tensor. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@lt,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mask.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mask.m new file mode 100644 index 0000000..c04d00e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mask.m @@ -0,0 +1,20 @@ +function vals = mask(X,W) +%MASK Extract values as specified by a mask tensor. +% +% V = MASK(X,W) extracts the values in X that correspond to nonzero +% values in the mask tensor W. +% +%MATLAB Tensor Toolbox. +%Copyright 2017, Sandia Corporation. 
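Ahead of the implementation below, a small usage sketch for mask; W here is just a hypothetical 0/1 indicator tensor of the same size as the data:

X = tensor(rand(3,4,2));
W = tensor(double(rand(3,4,2) > 0.7));   % indicator tensor marking the entries to keep
v = mask(X, W);                          % column of X values at the nonzero positions of W
% on plain arrays this matches: x = double(X); v2 = x(logical(double(W)));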
+ +% Error check +if any(size(W) > size(X)) + error('Mask cannot be bigger than the data tensor') +end + +% Extract locations of nonzeros in W +wsubs = find(W); + +% Extract values from X +idx = tt_sub2ind(X.size,wsubs); +vals = X.data(idx); \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/minus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/minus.m new file mode 100644 index 0000000..68ee717 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/minus.m @@ -0,0 +1,22 @@ +function Z = minus(X,Y) +%MINUS Binary subtraction (-) for tensors. +% +% MINUS(A,B) is called for the syntax 'A - B' when A or B is a tensor. A +% and B must have the same size, unless one is a scalar. A scalar can be +% subtracted from a tensor of any size. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@minus,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mldivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mldivide.m new file mode 100644 index 0000000..88327a5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mldivide.m @@ -0,0 +1,31 @@ +function Z = mldivide(X,Y) +%MLDIVIDE Slash left division for tensors. +% +% MLDIVIDE(A,B) is called for the syntax 'A \ B' when A is a scalar and B +% is a tensor. +% +% Example +% X = tenrand([4 3 2],5); +% 3 \ X +% +% See also TENSOR, TENSOR/LDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isscalar(X) + Z = tenfun(@ldivide,X,Y); + return; +end + +error('MLDIVIDE only supports the scalar case for tensors'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mrdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mrdivide.m new file mode 100644 index 0000000..f843589 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mrdivide.m @@ -0,0 +1,31 @@ +function Z = mrdivide(X,Y) +%MRDIVIDE Slash right division for tensors. +% +% MRDIVIDE(A,B) is called for the syntax 'A / B' when A is a tensor and B +% is a scalar. +% +% Example +% X = tenrand([4 3 2],5); +% X / 3 +% +% See also TENSOR, TENSOR/RDIVIDE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isscalar(Y) + Z = tenfun(@rdivide,X,Y); + return; +end + +error('MRDIVIDE only supports the scalar case for tensors'); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mtimes.m new file mode 100644 index 0000000..0e46f3a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mtimes.m @@ -0,0 +1,47 @@ +function C = mtimes(A,B) +%MTIMES tensor-scalar multiplication. +% +% C = MTIMES(A,B) is called for the syntax 'A * B' when A or B is a +% tensor and the other argument is a scalar. +% +% For tensor-matrix multiplication, use TTM. +% For tensor-tensor multiplication, use TTT. +% For tensor-tensor array multiplication, use TIMES or 'A .* B'. +% +% Examples +% X = tenrand([3,4,2]) +% W = 5 * X +% +% See also TENSOR, TENSOR/TTM, TENSOR/TTT, TENSOR/TIMES +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% +if isscalar(B) + C = A; + C.data = B * C.data; + return; +end + +if isscalar(A) + C = B; + C.data = A * C.data; + return; +end + +error('Mtimes only supports a tensor times a scalar'); + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrp.m new file mode 100644 index 0000000..5cc3886 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrp.m @@ -0,0 +1,116 @@ +function V = mttkrp(X,U,n,vers) +%MTTKRP Matricized tensor times Khatri-Rao product for tensor. +% +% V = MTTKRP(X,U,N) efficiently calculates the matrix product of the +% n-mode matricization of X with the Khatri-Rao product of all +% entries in U, a cell array of matrices, except the Nth. How to +% most efficiently do this computation depends on the type of tensor +% involved. +% +% V = MTTKRP(X,K,N) instead uses the Khatri-Rao product formed by the +% matrices and lambda vector stored in the ktensor K. As with the cell +% array, it ignores the Nth factor matrix. The lambda vector is absorbed +% into one of the factor matrices. +% +% NOTE: Updated to use BSXFUN per work of Phan Anh Huy. See Anh Huy Phan, +% Petr Tichavský, Andrzej Cichocki, On Fast Computation of Gradients for +% CANDECOMP/PARAFAC Algorithms, arXiv:1204.1586, 2012. +% +% Examples +% mttkrp(tensor(rand(3,3,3)), {rand(3,3), rand(3,3), rand(3,3)}, 2) +% mttkrp(tensor(rand(2,4,5)), {rand(2,6), rand(4,6), rand(5,6)}, 3) +% +% See also TENSOR, TENMAT, KHATRIRAO +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
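Before the implementation, a compact sketch of what MTTKRP computes, comparing the efficient routine against the matricized definition; it uses only tenmat and khatrirao, both of which appear in this diff, and the check is illustrative rather than part of the toolbox:

X    = tensor(rand(4,3,2));
U    = {rand(4,5), rand(3,5), rand(2,5)};   % factor matrices with R = 5 columns
V2   = mttkrp(X, U, 2);                     % efficient computation for mode 2
Xn   = double(tenmat(X, 2));                % mode-2 matricization, 3-by-8
K    = khatrirao(U{[1 3]}, 'r');            % Khatri-Rao product of all factors except the 2nd
norm(V2 - Xn*K, 'fro')                      % expected ~0, up to round-off
Vall = mttkrps(X, U);                       % all modes at once; Vall{2} should match V2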
+% The full license terms can be found in the file LICENSE.txt + +% Multiple versions supported... +if ~exist('vers','var') + vers = 1; +end + +N = ndims(X); +if (N < 2) + error('MTTKRP is invalid for tensors with fewer than 2 dimensions'); +end + +if isa(U,'ktensor') + % Absorb lambda into one of the factors, but not the one that's skipped + if n == 1 + U = redistribute(U,2); + else + U = redistribute(U,1); + end + % Extract the factor matrices + U = U.u; +end + +if ~iscell(U) + error('Second argument should be a cell array or a ktensor'); +end + +if (length(U) ~= N) + error('Cell array is the wrong length'); +end + +if n == 1 + R = size(U{2},2); +else + R = size(U{1},2); +end + +for i = 1:N + if i == n, continue; end + if (size(U{i},1) ~= size(X,i)) || (size(U{i},2) ~= R) + error('Entry %d of cell array is wrong size', i); + end +end + +%% Computation + +if vers == 0 % Old version of the code + Xn = permute(X,[n 1:n-1,n+1:N]); + Xn = reshape(Xn.data, size(X,n), []); + Z = khatrirao(U{[1:n-1,n+1:N]},'r'); + V = Xn*Z; + return; +end + +szl = prod(size(X,1:n-1)); %#ok<*PSIZE> +szr = prod(size(X,n+1:N)); +szn = size(X,n); + +if n == 1 + Ur = khatrirao(U{2:N},'r'); + Y = reshape(X.data,szn,szr); + V = Y * Ur; +elseif n == N + Ul = khatrirao(U{1:N-1},'r'); + Y = reshape(X.data,szl,szn); + V = Y' * Ul; +else + Ul = khatrirao(U{n+1:N},'r'); + Ur = reshape(khatrirao(U{1:n-1},'r'), szl, 1, R); + Y = reshape(X.data,[],szr); + Y = Y * Ul; + Y = reshape(Y,szl,szn,R); + if vers == 2 + V = bsxfun(@times,Ur,Y); + V = reshape(sum(V,1),szn,R); + else % default (vers == 1) + V = zeros(szn,R); + for r =1:R + V(:,r) = Y(:,:,r)'*Ur(:,:,r); + end + end +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrps.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrps.m new file mode 100644 index 0000000..33f2ec0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/mttkrps.m @@ -0,0 +1,150 @@ +function V = mttkrps(X,U) +%MTTKRPS Sequence of MTTKRP calculations for a tensor. +% +% V = MTTKRPS(X,U) computes a cell array V such that +% V{k} = mttkrp(X, U, k) for k = 1,...,ndims(X). +% +% See also MTTKRP. +% +% MATLAB Tensor Toolbox. +% Copyright 2018, Sandia Corporation. + +% Written by J. Duersch, 2018. +% +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2018) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Obtain dimensions and optimal splitting. +sz = size(X); +d = length(sz); +s = min_split(sz); + +% Output sequence V{k} = mttkrp(X,U,k) +V = cell(d,1); + +% KRP over modes s+1:d. +K = khatrirao(U{s+1:d},'r'); +% Partial MTTKRP with remaining modes +W = reshape(X.data,[],size(K,1)) * K; + +for k=1:s-1 + % Loop entry invariant: W has modes + V{k} = mttv_mid(W, U(k+1:s)); + % Satisfy invariant. + W = mttv_left(W, U{k}); +end + +% Exit state: W has modes . +V{s} = W; + +% KRP over modes 1:s. +K = khatrirao(U{1:s},'r'); +% Partial MTTKRP with remaining modes +W = reshape(X.data,size(K,1),[])' * K; + +for k=s+1:d-1 + % Loop entry invariant: W has modes + V{k} = mttv_mid(W, U(k+1:d)); + % Satisfy invariant. + W = mttv_left(W, U{k}); +end + +% Exit state: W has modes . 
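+% (Illustrative consistency check, assuming U is a cell array of factor
+% matrices with compatible sizes: each entry of the output should agree
+% with a direct MTTKRP call up to round-off, e.g.
+%   X = tensor(rand(3,4,5)); U = {rand(3,2), rand(4,2), rand(5,2)};
+%   V = mttkrps(X,U);
+%   norm(V{2} - mttkrp(X,U,2))   %<-- close to zero
+% )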
+V{d} = W; +end + + + + +function W_out = mttv_left(W_in, U1) +% W_out = mttv_left(W_in, U_left) +% Contract leading mode in partial MTTKRP W_in using the matching factor +% matrix U1. The leading mode is defined as the mode for which consecutive +% increases in the corresponding index address elements at consecutive +% increases in the memory offset. +% +% W_in has modes in natural descending order: . +% Mode m1 is either the first mode or an intermediate mode of the +% original tensor. Mode m2 through mN are subsequence original modes. +% The last mode C is the component mode (indexed over rank-1 components +% 1:r) corresponding to columns in factor matrices. +% U1 is the corresponding factor matrix with modes . +% W_out has modes: + +r = size(U1,2); +W_in = reshape(W_in, size(U1,1), [], r); +W_out = zeros(size(W_in,2), r); +for j=1:r + W_out(:,j) = W_in(:,:,j)' * U1(:,j); +end +end + + + + +function V = mttv_mid(W_in, U_mid) +% V = mttv_mid(W_in, U_mid) +% Contract all intermediate modes in partial MTTKRP W_in using the matching +% cell array U_mid. +% +% W_in has modes in natural descending order: . +% Mode m1 is either the first mode or an intermediate mode of the +% original tensor. Mode m2 through mN are subsequence original modes. +% The last mode C is the component mode (indexed over rank-1 components +% 1:r) corresponding to columns in factor matrices. +% U_mid is the corresponding cell array of factor matrices. That is, +% U_mid{1} has modes , U_mid{2} has modes , etc. The cell +% array must exactly match all intermediate uncontracted modes. +% V is the final MTTKRP with modes: . +if isempty(U_mid) + V = W_in; +else + K = khatrirao(U_mid,'r'); + r = size(K,2); + W_in = reshape(W_in, [], size(K,1), r); + V = zeros(size(W_in,1), r); + for j=1:r + V(:,j) = W_in(:,:,j)*K(:,j); + end +end +end + + + + +function [s_min]=min_split(sz) +% [s_min]=min_split(sz) +% Scan for optimal splitting with minimal memory footprint. +% +% sz gives sizes of each dimension in the original tensor in natural +% descending order. +% s_min gives optimal splitting to minimize partial MTTKRP memory +% footprint. Modes 1:s_min will contract in left-partial computation and +% modes s_min+1:d will contract in right-partial. + +m_left=sz(1); +m_right=prod(sz(2:end)); +s_min=1; + +% Minimize: m_left + m_right. +for s=2:length(sz)-1 + % Peel mode s off right and test placement. + m_right = m_right/sz(s); + if (m_left < m_right) + % The sum is reduced by placing mode s on the left. + s_min = s; + m_left = m_left*sz(s); + else + % The sum would be reduced by placing mode s back on the right. + % There is no further benefit to collecting modes on the left. + break; + end +end +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ndims.m new file mode 100644 index 0000000..18ac223 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ndims.m @@ -0,0 +1,25 @@ +function n = ndims(t) +%NDIMS Return the number of dimensions of a tensor. +% +% NDIMS(X) returns the number of dimensions of tensor X. +% +% Examples +% A = rand(4,3,1); ndims(A) %<-- Returns 2 +% X = tensor(A); ndims(X) %<-- Returns 2 +% X = tensor(A,[4 3 1]); ndims(X) %<-- Returns 3 +% +% See also TENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = numel(t.size); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ne.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ne.m new file mode 100644 index 0000000..9ce772d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ne.m @@ -0,0 +1,18 @@ +function Z = ne(X,Y) +%NE Not equal (~=) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@ne,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nnz.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nnz.m new file mode 100644 index 0000000..ae5a3b3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nnz.m @@ -0,0 +1,18 @@ +function n = nnz(x) +%NNZ Number of nonzeros for tensors. +% +% See also TENSOR, SPTENSOR/NNZ. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +n = nnz(x.data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/norm.m new file mode 100644 index 0000000..9de8ecc --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/norm.m @@ -0,0 +1,21 @@ +function n = norm(T) +%NORM Frobenius norm of a tensor. +% +% NORM(X) returns the Frobenius norm of a tensor. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +v = reshape(T.data, numel(T.data), 1); +n = norm(v); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/not.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/not.m new file mode 100644 index 0000000..459b37b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/not.m @@ -0,0 +1,18 @@ +function B = not(A) +%NOT Logical NOT (~) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +B = tensor(not(A.data), size(A)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nvecs.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nvecs.m new file mode 100644 index 0000000..76c87d7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/nvecs.m @@ -0,0 +1,79 @@ +function u = nvecs(X,n,r,opts) +%NVECS Compute the leading mode-n vectors for a tensor. +% +% U = NVECS(X,n,r) computes the r leading eigenvalues of Xn*Xn' +% (where Xn is the mode-n matricization of X), which provides +% information about the mode-n fibers. In two-dimensions, the r +% leading mode-1 vectors are the same as the r left singular vectors +% and the r leading mode-2 vectors are the same as the r right +% singular vectors. By default, this method computes the top r +% eigenvectors of the matrix Xn*Xn'. This behavior can be changed per the +% options below. +% +% U = NVECS(X,n,r,OPTS) specifies options: +% OPTS.eigsopts: options passed to the EIGS routine [struct('disp',0)] +% OPTS.flipsign: make each column's largest element positive [true] +% OPTS.svds: use svds on Xn rather than eigs on Xn*Xn' [false] +% +% Examples +% X = tensor(randn(3,2,3)); +% nvecs(X,3,2) +% +% Documentation page for n-vecs +% +% See also TENSOR, TENMAT, EIGS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ~exist('opts','var') || isempty(opts) + opts = struct; +end + +if isfield(opts,'eigsopts') + eigsopts = opts.eigsopts; +else + eigsopts.disp = 0; +end + +if isfield(opts,'svds') + flag = opts.svds; +else + flag = false; +end + +Xn = double(tenmat(X,n)); + +if flag + [u,~,~] = svds(Xn, r); +else + Y = Xn*Xn'; + [u,~] = eigs(Y, r, 'LM', eigsopts); +end + +if isfield(opts,'flipsign') + flipsign = opts.flipsign; +else + flipsign = true; +end + +if flipsign + % Make the largest magnitude element be positive + [~,loc] = max(abs(u)); + for i = 1:r + if u(loc(i),i) < 0 + u(:,i) = u(:,i) * -1; + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/or.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/or.m new file mode 100644 index 0000000..e4c9d4c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/or.m @@ -0,0 +1,18 @@ +function Z = or(X,Y) +%OR Logical OR (|) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@or,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/permute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/permute.m new file mode 100644 index 0000000..e19a3cd --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/permute.m @@ -0,0 +1,50 @@ +function T = permute(T,order) +%PERMUTE Permute tensor dimensions. +% +% B = PERMUTE(A,ORDER) rearranges the dimensions of A so that they +% are in the order specified by the vector ORDER. The result has the +% same values of A, but the order of the subscripts needed to access +% any particular element are rearranged as specified by ORDER. +% +% Examples +% T = tensor(rand(3,2,4)); +% permute(T,[1 3 2]) +% +% See also TENSOR, TENSOR/SIZE, TENSOR/NDIMS, PERMUTE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if ndims(T) ~= numel(order) + error('Invalid permutation order'); +end + +% Check for special case of permuting an order-1 object (which has +% no effect but confuses MATLAB's permute command which doesn't +% think that there is such a thing as a 1D-array). +if isequal(order,1) + return; +end + +% Check for special case of empty object (which has +% no effect but confuses MATLAB's permute command which doesn't +% think that there is such a thing as an empty array). +if isempty(order) + return; +end + +% Note that permute does error checking on order, so we don't worry +% about it. +T.data = permute(T.data,order); +T.size = T.size(order); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/plus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/plus.m new file mode 100644 index 0000000..7e744da --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/plus.m @@ -0,0 +1,25 @@ +function Z = plus(X,Y) +%PLUS Binary addition (+) for tensors. +% +% PLUS(A,B) is called for the syntax 'A + B' when A or B is a tensor. A +% and B must have the same size, unless one is a scalar. A scalar can be +% added to a tensor of any size. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +if isa(Y,'sumtensor') %If the 2nd component is a sumtensor, treat as such + Z=plus(Y,X) +else + Z = tenfun(@plus,X,Y); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/power.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/power.m new file mode 100644 index 0000000..6b4f132 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/power.m @@ -0,0 +1,18 @@ +function Z = power(X,Y) +%POWER Elementwise power (.^) operator for a tensor. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@power,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/rdivide.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/rdivide.m new file mode 100644 index 0000000..5ba0bb2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/rdivide.m @@ -0,0 +1,26 @@ +function Z = rdivide(X,Y) +%RDIVIDE Right array divide for tensors. +% +% RDIVIDE(A,B) is called for the syntax 'A ./ B' when A or B is a tensor. +% A and B must have the same size, unless one is a scalar. +% +% Examples +% X = tenrand([4 3 2],5); +% X ./ 3 +% X ./ X +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@rdivide,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/reshape.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/reshape.m new file mode 100644 index 0000000..b81f4d7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/reshape.m @@ -0,0 +1,29 @@ +function t = reshape(t,siz) +%RESHAPE Change tensor size. +% RESHAPE(X,SIZ) returns the tensor whose elements +% have been reshaped to the appropriate size. +% +% Examples +% X = tensor(rand(2,3,4)); +% reshape(X,[4,3,2]) +% +% See also TENSOR, TENSOR/SQUEEZE, TENSOR/PERMUTE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +if prod(t.size) ~= prod(siz) + error('Number of elements cannot change'); +end + +t.data = reshape(t.data,siz); +t.size = siz; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/scale.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/scale.m new file mode 100644 index 0000000..8191bf8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/scale.m @@ -0,0 +1,75 @@ +function Y = scale(X,S,dims) +%SCALE Scale along specified dimensions of tensor. +% +% Y = SCALE(X,S,DIMS) scales the tensor X along the dimension(s) +% specified in DIMS using the scaling data in S. If DIMS contains +% only one dimension, then S can be a column vector. Otherwise, S +% should be a tensor. +% +% Examples +% X = tenones([3,4,5]); +% S = 10 * [1:5]'; Y = scale(X,S,3) +% S = tensor(10 * [1:5]',5); Y = scale(X,S,3) +% S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) +% S = tensor(1:12,[3 4]); Y = scale(X,S,-3) +% S = tensor(1:60,[3 4 5]); Y = scale(X,S,1:3) +% +% Documentation page for collapsing and scaling tensors +% +% See also TENSOR, TENSOR/COLLAPSE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +dims = tt_dimscheck(dims,ndims(X)); +remdims = setdiff(1:ndims(X),dims); + +% Convert to a matrix so that each column of A can be scaled by a +% vectorized version of S. +A = double(tenmat(X,dims,remdims)); + +switch(class(S)) + case {'tensor'} + if ~isequal(size(S), X.size(dims)) + error 'Size mismatch'; + end + % Vectorize S. + S = double(tenmat(S,1:ndims(S),[])); + case {'double'} + if size(S,1) ~= X.size(dims) + error 'Size mismatch'; + end + otherwise + error('Invalid scaling factor'); +end + +[m,n] = size(A); + +% If the size of S is pretty small, we can convert it to a diagonal matrix +% and multiply by A. Otherwise, we scale A column-by-column. +if (m <= n) + B = diag(S) * A; +else + B = zeros(size(A)); + for j = 1:n + B(:,j) = S .* A(:,j); + end +end + +% Convert the matrix B back into a tensor and return. +Y = tensor(tenmat(B,dims,remdims,X.size)); + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/size.m new file mode 100644 index 0000000..eb08fc8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/size.m @@ -0,0 +1,38 @@ +function m = size(t,idx) +%SIZE Tensor dimensions. +% +% D = SIZE(T) returns the sizes of each dimension of tensor X in a +% vector D with ndims(X) elements. +% +% I = size(T,DIM) returns the size of the dimension specified by +% the scalar DIM. +% +% Examples +% A = rand(3,4,2,1); T = tensor(A,[3 4 2 1]); +% size(A) %<-- returns a length-3 vector +% size(T) %<-- returns a length-4 vector +% size(A,2) %<-- returns 4 +% size(T,2) %<-- same +% size(A,5) %<-- returns 1 +% size(T,5) %<-- ERROR! +% +% See also TENSOR, TENSOR/NDIMS, SIZE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. 
+% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +if exist('idx','var') + m = t.size(idx); +else + m = t.size; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/squeeze.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/squeeze.m new file mode 100644 index 0000000..ce3ecaa --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/squeeze.m @@ -0,0 +1,42 @@ +function Y = squeeze(X) +%SQUEEZE Remove singleton dimensions from a tensor. +% +% Y = SQUEEZE(X) returns a tensor Y with the same elements as +% X but with all the singleton dimensions removed. A singleton +% is a dimension such that size(X,dim)==1. +% +% If X has *only* singleton dimensions, then Y is a scalar. +% +% Examples +% squeeze( tenrand([2,1,3]) ) %<-- returns a 2-by-3 tensor +% squeeze( tenrand([1 1]) ) %<-- returns a scalar +% +% See also TENSOR, TENSOR/RESHAPE, TENSOR/PERMUTE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if all(X.size > 1) + % No singleton dimensions to squeeze + Y = X; +else + idx = find(X.size > 1); + if numel(idx) == 0 + % Scalar case - only singleton dimensions + Y = X.data; + else + siz = X.size(idx); + Y = tensor(squeeze(X.data),siz); + end +end + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsasgn.m new file mode 100644 index 0000000..e642031 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsasgn.m @@ -0,0 +1,140 @@ +function x = subsasgn(x,s,b) +%SUBSASGN Subscripted assignment for a tensor. +% +% We can assign elements to a tensor in three ways. +% +% Case 1: X(R1,R2,...,RN) = Y, in which case we replace the +% rectangular subtensor (or single element) specified by the ranges +% R1,...,RN with Y. The right-hand-side can be a scalar, a tensor, or an +% MDA. +% +% Case 2a: X(S) = V, where S is a p x n array of subscripts and V is +% a scalar or a vector containing p values. +% +% Case 2b: X(I) = V, where I is a set of p linear indices and V is a +% scalar or a vector containing p values. Resize is not allowed in this +% case. +% +% Examples +% X = tensor(rand(3,4,2)) +% X(1:2,1:2,1) = ones(2,2) %<-- replaces subtensor +% X([1 1 1;1 1 2]) = [5;7] %<-- replaces two elements +% X([1;13]) = [5;7] %<-- does the same thing +% X(1,1,2:3) = 1 %<-- grows tensor +% X(1,1,4) = 1 %<- grows the size of the tensor +% +% See also TENSOR, TENSOR/SUBSREF. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s.type + case '.' + error(['Cannot change field ', s.subs, ' directly.']); + + case '{}' + error('Subscript cell reference not supported for tensor.'); + + case '()' + + % We don't allow sub-subscripting for tensors. + if numel(s) ~= 1 + error('Invalid subscripting'); + end + + % Figure out if we are doing a subtensor, a list of subscripts, or + % a list of linear indices... + type = 'error'; + if ndims(x) <= 1 + if (numel(s.subs) > 1) || isvector(s.subs{1}) + type = 'subtensor'; + elseif ismatrix(s.subs{1}) + type = 'subscripts'; + end + else + if numel(s.subs) >= ndims(x) + type = 'subtensor'; + elseif ismatrix(s.subs{1}) + if size(s.subs{1},2) >= ndims(x) + type = 'subscripts'; + elseif iscolumn(s.subs{1}) + type = 'linear indicies'; + end + end + end + + + % *** CASE 1: Rectangular Subtensor *** + if isequal(type,'subtensor') + if isa(b,'tensor') + x.data(s.subs{:},1) = b.data; + else + x.data(s.subs{:},1) = b; + end + % Check if the size has grown! + % Can't vectorize this due to possible trailing 1's + for i = 1:numel(x.size) + x.size(i) = max(x.size(i),size(x.data,i)); + end + % Check if order has grown + for i = numel(x.size)+1:numel(s.subs) + x.size(i) = size(x.data,i); + end + return; + end + + % *** CASE 2a: Subscript indexing *** + if isequal(type,'subscripts'); + + % extract array of subscripts + subs = s.subs{1}; + + % will the size change? if so, we first need to resize x + n = ndims(x); + bsiz = max(subs,[],1); + newsiz = [max([x.size;bsiz(1:n)]) bsiz(n+1:end)]; + if ~isequal(newsiz,x.size) + % We need to enlarge x.data. A trick is to assign its last + % "new" element to zero. This resizes the array correctly. + if numel(newsiz) == 1 + str = sprintf('x.data(%d)=0;',newsiz); + else + str = [sprintf('x.data(') ... + sprintf('%d,',newsiz(1:end-1)) ... + sprintf('%d)=0;', newsiz(end)) ]; + end + eval(str); + x.size = newsiz; + end + + % finally, we can copy in the new data + x.data(tt_sub2ind(newsiz,subs)) = b; + return; + end + + % *** CASE 2b: Linear indexing *** + if isequal(type,'linear indicies'); + idx = s.subs{1}; + if any(idx > prod(x.size)) + error('TTB:BadIndex','In assignement X(I) = Y, a tensor X cannot be resized'); + end + x.data(idx) = b; + return + end + + error('Invalid use of tensor/subsasgn'); +end + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsref.m new file mode 100644 index 0000000..2182ec0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/subsref.m @@ -0,0 +1,135 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for tensors. +% +% We can extract elements or subtensors from a tensor in the +% following ways. +% +% Case 1a: y = X(i1,i2,...,iN), where each in is an index, returns a +% scalar. +% +% Case 1b: Y = X(R1,R2,...,RN), where one or more Rn is a range and +% the rest are indices, returns a sparse tensor. +% +% Case 2a: V = X(S) or V = X(S,'extract'), where S is a p x n array +% of subscripts, returns a vector of p values. +% +% Case 2b: V = X(I) or V = X(I,'extract'), where I is a set of p +% linear indices, returns a vector of p values. +% +% Any ambiguity results in executing the first valid case. 
This +% is particularly an issue if ndims(X)==1. +% +% Examples +% X = tensor(rand(3,4,2,1),[3 4 2 1]); +% X.data %<-- returns multidimensional array +% X.size %<-- returns size +% X(1,1,1,1) %<-- produces a scalar +% X(1,1,1,:) %<-- produces a tensor of order 1 and size 1 +% X(:,1,1,:) %<-- produces a tensor of size 3 x 1 +% X(1:2,[2 4],1,:) %<-- produces a tensor of size 2 x 2 x 1 +% X(1:2,[2 4],1,1) %<-- produces a tensor of size 2 x 2 +% X([1,1,1,1;3,4,2,1]) %<-- returns a vector of length 2 +% X = tensor(rand(10,1),10); +% X([1:6]') %<-- extracts a subtensor +% X([1:6]','extract') %<-- extracts a vector of 6 elements +% +% See also TENSOR, TENSOR/FIND. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +switch s(1).type + case '{}' + error('Cell contents reference from a non-cell array object.') + case '.' + fieldname = s(1).subs; + switch fieldname + case 'data' + a = tt_subsubsref(t.data,s); + case 'size' + a = tt_subsubsref(t.size,s); + otherwise + error(['No such field: ', fieldname]); + end + case '()' + + % *** CASE 1: Rectangular Subtensor *** + if (numel(s(1).subs) == ndims(t)) && ~isequal(s(1).subs{end},'extract') + + % Copy the subscripts + region = s(1).subs; + + if numel(region) ~= ndims(t) + error('Invalid number of subscripts'); + end + + % Extract the data + newdata = t.data(region{:}); + + % Determine the subscripts + newsiz = []; % (future) new size + kpdims = []; % dimensions to keep + rmdims = []; % dimensions to remove + + % Determine the new size and what dimensions to keep + for i = 1:length(region) + if ischar(region{i}) && (region{i} == ':') + newsiz = [newsiz size(t,i)]; + kpdims = [kpdims i]; + elseif numel(region{i}) > 1 + newsiz = [newsiz numel(region{i})]; + kpdims = [kpdims i]; + else + rmdims = [rmdims i]; + end + end + + % If the size is zero, then the result is returned as a scalar; + % otherwise, we convert the result to a tensor. 
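+ % Illustrative: for T = tensor(rand(3,4,2)), T(2,3,1) hits the empty-size
+ % branch below and returns a scalar, T(2,:,1) keeps only mode 2 and
+ % returns a tensor of order 1 and size 4, and T(:,[2 4],1) returns a
+ % 3 x 2 tensor.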
+ if isempty(newsiz) + a = newdata; + else + if isempty(rmdims) + a = tensor(newdata,newsiz); + else + a = tensor(permute(newdata,[kpdims rmdims]),newsiz); + end + end + a = tt_subsubsref(a,s); + return; + end + + % *** CASE 2a: Subscript indexing + if size(s(1).subs{1},2) == ndims(t) + % extract array of subscripts + subs = s(1).subs{1}; + a = squeeze(t.data(tt_sub2ind(t.size,subs))); + if isrow(a), a = a'; end + a = tt_subsubsref(a,s); + return; + end + + % *** CASE 2b: Linear indexing *** + if (numel(s(1).subs) > 2) || ((numel(s(1).subs) == 2) && ~isequal(s(1).subs{end},'extract')) + error('Invalid indexing'); + end + + idx = s(1).subs{1}; + if ~iscolumn(idx) + error('Expecting a column index'); + end + + a = squeeze(t.data(idx)); + if isrow(a), a = a'; end + a = tt_subsubsref(a,s); + return; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/symmetrize.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/symmetrize.m new file mode 100644 index 0000000..f855c9a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/symmetrize.m @@ -0,0 +1,175 @@ +function Y = symmetrize(X,grps,ver) +%SYMMETRIZE Symmetrize a tensor X in specified modes. +% +% Y = symmetrize(X) will symmetrize a tensor X with respect to all +% modes so that Y is symmetric with respect to any permutation of +% indices. The dimensions of Y must be equal accross all modes. The +% resulting symmetrized tensor is formed by computing the average over +% all elements in a permutation class. +% +% Y = symmetrize(X,MODES) will symmetrize a tensor X with respect to the +% modes specified by the vector MODES of mode indices. The second +% argument may alternatively be a cell array of vectors of modes for +% symmetrization. +% +% NOTE: It is *the same or less* work to just call X = symmetrize(X) then +% to first check if X is symmetric and then symmetrize it, even if X is +% already symmetric. +% +% Examples +% W = tensor(rand(2,2,2)); +% X = tensor(rand(4,3,3,4)); +% symmetrize(W) +% symmetrize(X,{[1 4], [2,3]}) %<--Symmetrize in modes [1 4] then [2 3] +% +% See also TENSOR, TENSOR/ISSYMMETRIC. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +n = ndims(X); +sz = size(X); + +%ver is an optional argument specifying the version to use. +if ~exist('ver', 'var') + ver=0; %By default use new, faster version of issymmetric +end + +% Check that grps exists; if not, create it. +if ~exist('grps','var') + grps = 1:n; +end + +% Check that grps is a cell array. +if ~iscell(grps) + grps = {grps}; +end + +switch ver + + case 0 % New version (default!) 
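+ % Sketch of the approach used in this branch: every element is mapped to
+ % a canonical "class" index by sorting its subscripts within the
+ % symmetrized modes, and all elements of a class are replaced by their
+ % average. For instance, when a cubical tensor X = tensor(rand(2,2,2)) is
+ % symmetrized over all modes, the entries X(1,2,2), X(2,1,2) and X(2,2,1)
+ % share the sorted subscript (1,2,2) and so all receive their common mean.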
+ + ngrps = length(grps); + for i = 1:ngrps + + thisgrp = grps{i}; + + % Check tensor dimensions for compatibility with symmetrization + if ~all( sz(thisgrp(1)) == sz(thisgrp) ) + error('TTB:Tensor:BadModes','Dimension mismatch for symmetrization'); + end + + % Check for no overlap in the sets + if i < ngrps + if ~all(isempty(intersect(thisgrp,grps{i+1:end}))) + error('TTB:Tensor:BadModes','Cannot have overlapping symmetries'); + end + end + + % Construct matrix ind where each row is the multi-index for + % one element of X + idx = tt_ind2sub(size(X), (1:numel(X.data))'); + + % Find reference index for every element in the tensor - this + % is to its index in the symmetrized tensor. This puts every + % element into a 'class' of entries that will be the same under + % symmetry. + classidx = idx; + classidx(:,thisgrp) = sort(idx(:,thisgrp),2); + linclassidx = tt_sub2ind(size(X), classidx); + + % Skip if its already symmetric + if all(X.data(:) == X.data(linclassidx)); + continue; + end + + % Take average over all elements in the same class + classsum = accumarray(linclassidx, X.data(:)); + classnum = accumarray(linclassidx, 1); + avg = classsum ./ classnum; + + % Fill in each entry with its new symmetric version + newdata = avg(linclassidx); + X.data = reshape(newdata,[size(X) 1]); + end + + % Final result + Y = X; + + case 1 % The original version of the algorithm + + % Check tensor dimensions for compatibility with symmetrization + ngrps = length(grps); + for i = 1:ngrps + dims = grps{i}; + for j = dims(2:end) + if sz(j) ~= sz(dims(1)) + error('Dimension mismatch for symmetrization'); + end + end + end + + % Check for no overlap in the sets + for i = 1:ngrps + for j = i+1:ngrps + if ~isempty(intersect(grps{i},grps{j})) + error('Cannot haver overlapping symmetries'); + end + end + end + + % Create the combinations for each symmetrized subset + combos = cell(ngrps,1); + for i = 1:ngrps + combos{i} = perms(grps{i}); + end + + % Create all the permuations to be averaged + total_perms = prod(cellfun(@length,combos)); + sym_perms = repmat(1:n, total_perms, 1); + for i = 1:ngrps + ntimes = prod(cellfun(@length,combos(1:i-1))); + ncopies = prod(cellfun(@length,combos(i+1:end))); + nelems = length(combos{i}); + + idx = 1; + for j = 1:ntimes + for k = 1:nelems + for l = 1:ncopies + sym_perms(idx,grps{i}) = combos{i}(k,:); + idx = idx + 1; + end + end + end + end + + % Create an average tensor + Y = tenzeros(size(X)); + for i = 1:total_perms + Y = Y + permute(X,sym_perms(i,:)); + end + Y = Y / total_perms; + + % It's not *exactly* symmetric due to oddities in differently ordered + % summations and so on, so let's fix that. + % Idea borrowed from Gergana Bounova: + % http://www.mit.edu/~gerganaa/downloads/matlab/symmetrize.m + for i = 1:total_perms + Z = permute(Y,sym_perms(i,:)); + Y.data(:) = max(Y.data(:),Z.data(:)); + end + + otherwise + error('incorrect version specification'); +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tenfun.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tenfun.m new file mode 100644 index 0000000..9ca9417 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tenfun.m @@ -0,0 +1,100 @@ +function Z = tenfun(fun,varargin) +%TENFUN Apply a function to each element in a tensor. +% +% TENFUN(F,X,...) applies the function specified by the function +% handle F to the given arguments. Either both arguments +% must be tensors, or one is a tensor and the other is a scalar/MDA. 
+% +% Examples +% Z = tenfun(@(x)(x+1),X) %<-- increase every element by one +% Z = tenfun(@eq,X,1) %<-- logical comparison of X with scalar +% Z = tenfun(@plus,X,Y) %<-- adds the two tensors X and Y. +% Z = tenfun(@max,X,Y,Z) %<-- elementwise max over all elements in X,Y,Z +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if nargin < 2 + % error('TENFUN requires at least two input arguments') + error('Not enough input arguments.'); +end + +if ~isa(fun, 'function_handle') + error('First argument must be a function handle.'); +end + +%% Convert inputs to tensors if they aren't already +for i = 1:nargin-1 + if isscalar(varargin{i}) || isa(varargin{i},'tensor') + continue; + elseif isnumeric(varargin{i}) + varargin{i} = tensor(varargin{i}); + elseif ismember(class(varargin{i}), {'symtensor','sptensor','ktensor','ttensor'}) + varargin{i} = full(varargin{i}); + else + error('Invalid input'); + end +end +%% It's ok if there are two inputs and one is a scalar; otherwise, all inputs have to be the same size +if (nargin == 3) && isscalar(varargin{1}) && isa(varargin{2},'tensor') + sz = size(varargin{2}); +elseif (nargin == 3) && isscalar(varargin{2}) && isa(varargin{1},'tensor') + sz = size(varargin{1}); +else + for i = 1:(nargin-1) + if isscalar(varargin{i}) + error('Argument %d is a scalar, but expected a tensor', i+1); + elseif i == 1 + sz = size(varargin{i}); + elseif ~isequal(sz,size(varargin{i})) + error('Tensor %d is not the same size as the first tensor input', i); + end + end +end + +%% Number of inputs for function handle +nfunin = nargin(fun); + +%% Case I: Binary function +if (nargin == 3) && (nfunin == 2) + X = varargin{1}; + Y = varargin{2}; + if ~isscalar(X) + X = X.data; + end + if ~isscalar(Y) + Y = Y.data; + end + data = fun(X,Y); + Z = tensor(data, sz); + return; +end + + +%% Case II: Expects input to be matrix and applies operation on each column +if nargin == 2 + X = varargin{1}.data; + X = reshape(X,1,[]); +else + X = zeros(nargin-1,prod(sz)); + for i = 1:nargin-1 + X(i,:) = varargin{i}.data(:); + end +end +data = fun(X); +Z = tensor(data,sz); +return; + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tensor.m new file mode 100644 index 0000000..94e89d9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/tensor.m @@ -0,0 +1,227 @@ +%TENSOR Class for dense tensors. +% +%TENSOR Methods: +% and - Logical AND (&) for tensors. +% collapse - Collapse tensor along specified dimensions. +% contract - Contract tensor along two dimensions (array trace). +% disp - Command window display of a tensor. +% display - Command window display of a tensor. +% double - Convert tensor to double array. +% end - Last index of indexing expression for tensor. +% eq - Equal (==) for tensors. +% exp - Exponential for tensors. +% find - Find subscripts of nonzero elements in a tensor. +% full - Convert to a (dense) tensor. +% ge - Greater than or equal (>=) for tensors. +% gt - Greater than (>) for tensors. 
+% innerprod - Efficient inner product with a tensor. +% isequal - for tensors. +% isscalar - False for tensors. +% issymmetric - Verify that a tensor X is symmetric in specified modes. +% ldivide - Left array divide for tensor. +% le - Less than or equal (<=) for tensor. +% lt - Less than (<) for tensor. +% mask - Extract values as specified by a mask tensor. +% minus - Binary subtraction (-) for tensors. +% mldivide - Slash left division for tensors. +% mrdivide - Slash right division for tensors. +% mtimes - tensor-scalar multiplication. +% mttkrp - Matricized tensor times Khatri-Rao product for tensor. +% mttkrps - Sequence of MTTKRP calculations for dense tensor. +% ndims - Return the number of dimensions of a tensor. +% ne - Not equal (~=) for tensors. +% nnz - Number of nonzeros for tensors. +% norm - Frobenius norm of a tensor. +% not - Logical NOT (~) for tensors. +% nvecs - Compute the leading mode-n vectors for a tensor. +% or - Logical OR (|) for tensors. +% permute - Permute tensor dimensions. +% plus - Binary addition (+) for tensors. +% power - Elementwise power (.^) operator for a tensor. +% rdivide - Right array divide for tensors. +% reshape - Change tensor size. +% scale - Scale along specified dimensions of tensor. +% size - Tensor dimensions. +% squeeze - Remove singleton dimensions from a tensor. +% subsasgn - Subscripted assignment for a tensor. +% subsref - Subscripted reference for tensors. +% symmetrize - Symmetrize a tensor X in specified modes. +% tenfun - Apply a function to each element in a tensor. +% tensor - Create tensor. +% times - Array multiplication for tensors. +% transpose - is not defined on tensors. +% ttm - Tensor times matrix. +% ttsv - Tensor times same vector in multiple modes. +% ttt - Tensor mulitplication (tensor times tensor). +% ttv - Tensor times vector. +% uminus - Unary minus (-) for tensors. +% uplus - Unary plus (+) for tensors. +% xor - Logical EXCLUSIVE OR for tensors. +% +% Documentation page for tensor class +% +% See also TENSOR_TOOLBOX +% +% Reference: +% * BW Bader and TG Kolda. Algorithm 862: MATLAB Tensor Classes for Fast +% Algorithm Prototyping, ACM Trans Mathematical Software 32:635-653, 2006. +% DOI:10.1145/1186785.1186794. [BibTeX] +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + + +function t = tensor(varargin) +%TENSOR Create tensor. +% +% X = TENSOR(A,SIZ) creates a tensor from the multidimensional array A. +% The SIZ argument is a size vector specifying the desired shape +% of A. +% +% X = TENSOR(F,SIZ) createa a tensor of size SIZ using the function +% handle F to create the data. The function F must take a size vector as +% input. +% +% X = TENSOR(A) creates a tensor from the multidimensional array A, using +% SIZ = size(A). +% +% X = TENSOR(S) copies a tensor S. +% +% X = TENSOR(A) converts an sptensor, ktensor, ttensor, or tenmat object +% to a tensor. +% +% X = TENSOR creates an empty, dense tensor object. +% +% Examples +% X = tensor(rand(3,4,2)) %<-- Tensor of size 3 x 4 x 2 +% X = tensor(@rand, [3 4 2]) %<-- Equivalent +% Y = tensor(zeros(3,1),3) %<-- Tensor of size 3 +% Y = tensor(@zeros, [3 1]); +% Z = tensor(ones(12,1),[3 4 1]) %<-- Tensor of size 3 x 4 x 1 +% Z = tensor(@ones, [3 4 1]) %<-- Equivalent +% +% See also TENSOR, TENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% EMPTY/DEFAULT CONSTRUCTOR +if nargin == 0 + t.data = []; + t.size = []; + t = class(t, 'tensor'); + return; +end + +% CONVERSION/COPY CONSTRUCTORS +% Note that we pass through this if/switch statement if the first argument +% is not any of these cases. +if (nargin == 1) + v = varargin{1}; + switch class(v) + case 'tensor', + % COPY CONSTRUCTOR + t.data = v.data; + t.size = v.size; + t = class(t, 'tensor'); + return; + case {'ktensor','ttensor','sptensor','sumtensor','symtensor','symktensor'}, + % CONVERSION + t = full(v); + return; + case 'tenmat', + % RESHAPE TENSOR-AS-MATRIX + % Here we just reverse what was done in the tenmat constructor. + % First we reshape the data to be an MDA, then we un-permute + % it using ipermute. + sz = tsize(v); + order = [v.rdims,v.cdims]; + data = reshape(v.data, [sz(order) 1 1]); + if numel(order) >= 2 + t.data = ipermute(data,order); + else + t.data = data; + end + t.size = sz; + t = class(t, 'tensor'); + return; + end +end + +% FUNCTION HANDLE AND SIZE +if (nargin <= 2) && isa(varargin{1},'function_handle') + fh = varargin{1}; + siz = varargin{2}; + + % Check size + if ~isrow(siz) + error('TTB:BadInput', 'Size must be a row vector'); + end + + % Generate data + data = fh([siz 1 1]); + + % Create the tensor + t.data = data; + t.size = siz; + t = class(t, 'tensor'); + return; +end + +% CONVERT A MULTIDIMENSIONAL ARRAY +if (nargin <= 2) + + % Check first argument + data = varargin{1}; + if ~isa(data,'numeric') && ~isa(data,'logical') + error('First argument must be a multidimensional array.') + end + + % Create or check second argument + if nargin == 1 + siz = size(data); + else + siz = varargin{2}; + if ~isempty(siz) && ndims(siz) ~= 2 && size(siz,1) ~= 1 + error('Second argument must be a row vector.'); + end + end + + % Make sure the number of elements matches what's been specified + if isempty(siz) + if ~isempty(data) + error('Empty tensor cannot contain any elements'); + end + elseif prod(siz) ~= numel(data) + error('TTB:WrongSize', 'Size of data does not match specified size of tensor'); + end + + % Make sure the data is indeed the right shape + if ~isempty(data) && ~isempty(siz) + data = reshape(data,[siz 1 1]); + end + + % Create the tensor + t.data = data; + t.size = siz; + t = class(t, 'tensor'); + return; + +end + + +error('Unsupported use of function TENSOR.'); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/times.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/times.m new file mode 100644 index 0000000..7155216 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/times.m @@ -0,0 +1,26 @@ +function Z = times(X,Y) +%TIMES Array multiplication for tensors. +% +% TIMES(A,B) is called for the syntax 'A .* B' when A or B is a +% tensor. A and B must have the same size, unless one is a scalar. A +% scalar can be multiplied by a tensor of any size. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if isa(Y,'ktensor') || isa(Y,'ttensor') || isa(Y,'sptensor') + Y = full(Y); +end + +Z = tenfun(@times,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/transpose.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/transpose.m new file mode 100644 index 0000000..8f8c262 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/transpose.m @@ -0,0 +1,18 @@ +function transpose(x) +%TRANSPOSE is not defined on tensors. +% +% See also TENSOR, TENSOR/PERMUTE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +error('Transpose on tensor is not defined'); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttm.m new file mode 100644 index 0000000..3cf5100 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttm.m @@ -0,0 +1,171 @@ +function Y = ttm(X,V,varargin) +%TTM Tensor times matrix. +% +% Y = TTM(X,A,N) computes the n-mode product of tensor X with a +% matrix A; i.e., X x_N A. The integer N specifies the dimension +% (or mode) of X along which A should be multiplied. If size(A) = +% [J,I], then X must have size(X,N) = I. The result will be the +% same order and size as X except that size(Y,N) = J. +% +% Y = TTM(X,{A,B,C,...}) computes the n-mode product of tensor X +% with a sequence of matrices in the cell array. The n-mode +% products are computed sequentially along all dimensions (or modes) +% of X. The cell array contains ndims(X) matrices. +% +% Y = TTM(X,{A,B,C,...},DIMS) computes the sequence tensor-matrix +% products along the dimensions specified by DIMS. +% +% Y = TTM(...,'t') performs the same computations as above except +% the matrices are transposed. +% +% Examples +% X = tensor(rand(5,3,4,2)); +% A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2); +% Y = ttm(X, A, 1) %<-- computes X times A in mode-1 +% Y = ttm(X, {A,B,C,D}, 1) %<-- same as above +% Y = ttm(X, A', 1, 't') %<-- same as above +% Y = ttm(X, {A,B,C,D}, [1 2 3 4]) %<-- 4-way multiply +% Y = ttm(X, {D,C,B,A}, [4 3 2 1]) %<-- same as above +% Y = ttm(X, {A,B,C,D}) %<-- same as above +% Y = ttm(X, {A',B',C',D'}, 't') %<-- same as above +% Y = ttm(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4 +% Y = ttm(X, {A,B,C,D}, [3 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, [1 2 4]) %<-- 3-way multiply +% Y = ttm(X, {A,B,C,D}, [1 2 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, -3) %<-- same as above +% Y = ttm(X, {A,B,C,D}, -3) %<-- same as above +% +% See also TENSOR, TENSOR/TTT, TENSOR/TTV. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + +%% Check the number of arguments +if (nargin < 2) + error('TTM requires at least two arguments.'); +end + +%% Create 'n' and 'tflag' arguments from varargin +n = 1:ndims(X); +tflag = ''; +ver = 0; +if numel(varargin) == 1 + if ischar(varargin{1}) + tflag = varargin{1}; + else + n = varargin{1}; + end +elseif numel(varargin) == 2 + n = varargin{1}; + tflag = varargin{2}; +elseif numel(varargin) == 3 + n = varargin{1}; + tflag = varargin{2}; + ver = varargin{3}; +end + +%% Handle cell array +if iscell(V) + % Copy n into dims + dims = n; + % Check that the dimensions are valid + [dims,vidx] = tt_dimscheck(dims,ndims(X),numel(V)); + % Calculate individual products + Y = ttm(X, V{vidx(1)}, dims(1), tflag); + for k = 2 : numel(dims) + Y = ttm(Y, V{vidx(k)}, dims(k), tflag); + end + % All done + return; +end + +%% Check the second argument +if ~ismatrix(V) + error('tensor/ttm: 2nd argument must be a matrix.'); +end + +%% Check n +if (numel(n) ~= 1 || (n < 0) || n > ndims(X)) + error('Dimension N must be between 1 and NDIMS(X).'); +end + +%% COMPUTE SINGLE N-MODE PRODUCT + +N = ndims(X); +sz = size(X); + +if ver == 0 %old verion + order = [n,1:n-1,n+1:N]; + newdata = double(permute(X,order)); + newdata = reshape(newdata,sz(n),prod(sz([1:n-1,n+1:N]))); + if tflag == 't' + newdata = V' * newdata; + p = size(V,2); + else + newdata = V*newdata; + p = size(V,1); + end + newsz = [p,sz(1:n-1),sz(n+1:N)]; + Y = tensor(newdata,newsz); + Y = ipermute(Y,order); +else % new version + + if tflag == 't' + p = size(V, 2); + else + p = size(V, 1); + end + + + if n == 1 + A = reshape(X.data, sz(n), []); + if tflag == 't' + B = V' * A; + else + B = V * A; + end + elseif n == N + At = reshape(X.data, [], sz(n)); + if tflag == 't' + B = At * V; + else + B = At * V'; + end + else + nblocks = prod(sz(n+1:N)); + ncols = prod(sz(1:n-1)); % Per block + nAk = sz(n) * ncols; % Number entries in each block of A + nBk = p * ncols; + B = zeros(p * nblocks * ncols, 1); + for k = 1 : nblocks + % Extract k-th sub-block of A (in row-major orientation) + Akt = reshape(X.data((k-1) * nAk + 1: k * nAk), ncols, sz(n)); + if tflag == 't' + Bkt = Akt * V; + else + Bkt = Akt * V'; + end + B((k-1) * nBk + 1: k * nBk) = Bkt(:); + end + + end + newsz = sz; + newsz(n) = p; + Y = tensor(B, newsz); + + +end + +return; + +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttsv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttsv.m new file mode 100644 index 0000000..adee508 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttsv.m @@ -0,0 +1,82 @@ +function y = ttsv(A,x,n,ver) +%TTSV Tensor times same vector in multiple modes. +% +% Y = TTSV(A,X) multiples the tensor A by the vector X in all modes. +% +% Y = TTSV(A,X,-1) multiplies the tensor A by the vector X in all modes +% but the first. Returns the answer as a normal MATLAB array (not a +% tensor). +% +% Y = TTSV(A,X,-2) multiplies the tensor A by the vector X in all modes +% but the first two. Returns the answer as a normal MATLAB matrix (not a +% tensor). +% +% Y = TTSV(A,X,-N) multiplies the tensor A by the vector X is all modes +% but the first N. +% +% See also TENSOR, TENSOR/TTV. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
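+% Example (illustrative; TTSV assumes a cubical tensor, i.e. all modes of
+% equal size, and a matching column vector):
+%   A = tensor(rand(4,4,4)); x = rand(4,1);
+%   ttsv(A,x)      %<-- scalar: x multiplied into all three modes
+%   ttsv(A,x,-1)   %<-- 4 x 1 MATLAB array (all modes but the first)
+%   ttsv(A,x,-2)   %<-- 4 x 4 MATLAB matrix (all modes but the first two)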
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%% Process inputs (only two simple cases are supported) +if ~exist('n','var') + n = 0; +elseif (n > 0) + error('Invalid usage'); +end + +if ~exist('ver','var') + ver = 2; +end + + +%% Calculate - old way +if ver == 1 + P = ndims(A); + [X{1:P}] = deal(x); + if (n == 0) + y = ttv(A,X); + elseif (n == -1) || (n == -2) + y = double(ttv(A,X,-(1:-n))); + else + y = ttv(A,X,-(1:-n)); + end + return +end + +%% Calculate - new way +d = ndims(A); % Number of modes in tensor +sz = size(A,1); % Size of one mode (they're all the same) + +dnew = -n; % Number of modes in result +drem = d - dnew; % Number of modes being multiplied out + +%[X{1:drem}] = deal(x); +% Xkr = khatrirao(X); +% Ars = reshape(A.data, sz.^dnew, sz.^drem); +% y = Ars * Xkr; + +y = A.data; +for i = drem: -1 : 1 + yy = reshape(y,sz.^(dnew + i - 1),sz); + y = yy * x; +end + +% Convert to matrix if 2-way or convert back to a tensor if the result is +% 3-way or higher. Leave scalar or vector result alone. +if (dnew == 2) + y = reshape(y, [sz sz]); +elseif dnew > 2 + y = tensor(y, sz * ones(dnew,1)); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttt.m new file mode 100644 index 0000000..879f8fe --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttt.m @@ -0,0 +1,94 @@ +function c = ttt(varargin) +%TTT Tensor mulitplication (tensor times tensor). +% +% TTT(X,Y) computes the outer product of tensors X and Y. +% +% TTT(X,Y,XDIMS,YDIMS) computes the contracted product of tensors +% X and Y in the dimensions specified by the row vectors XDIMS and +% YDIMS. The sizes of the dimensions specified by XDIMS and YDIMS +% must match; that is, size(X,XDIMS) must equal size(Y,YDIMS). +% +% TTT(X,Y,DIMS) computes the inner product of tensors X and Y in the +% dimensions specified by the vector DIMS. The sizes of the +% dimensions specified by DIMS must match; that is, size(X,DIMS) must +% equal size(Y,DIMS). +% +% Examples +% X = tensor(rand(4,2,3)); +% Y = tensor(rand(3,4,2)); +% Z = ttt(X,Y) %<-- outer product of X and Y +% Z = ttt(X,X,1:3) %<-- inner product of X with itself +% Z = ttt(X,Y,[1 2 3],[2 3 1]) %<-- inner product of X & Y +% Z = ttt(X,Y,[1 3],[2 1]) %<-- product of X & Y along specified dims +% +% See also TENSOR, TENSOR/TTM, TENSOR/TTV. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTT requires at least two arguments.'); +end + +% Check the first argument +if ~isa(varargin{1}, 'tensor') + error('First argument must be a tensor.'); +else + a = varargin{1}; +end + +% Check the second argument +if ~isa(varargin{2}, 'tensor') + error('Second argument must be a tensor.'); +else + b = varargin{2}; +end + +% Optional 3rd argument +if nargin >= 3 + adims = varargin{3}; +else + adims = []; +end + +% Optional 4th argument +if nargin >= 4 + bdims = varargin{4}; +else + bdims = adims; +end + +if ~isequal(size(a,adims),size(b,bdims)) + error('Specified dimensions do not match.'); +end + +%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% COMPUTE THE PRODUCT %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Avoid transpose by reshaping A and computing C = A * B +amatrix = tenmat(a,adims,'t'); +bmatrix = tenmat(b,bdims); +cmatrix = amatrix * bmatrix; + +% Check whether or not the result is a scalar. +if isa(cmatrix,'tenmat') + c = tensor(cmatrix); +else + c = cmatrix; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttv.m new file mode 100644 index 0000000..b0395db --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/ttv.m @@ -0,0 +1,105 @@ +function c = ttv(a,v,dims) +%TTV Tensor times vector. +% +% Y = TTV(X,A,N) computes the product of tensor X with a (column) +% vector A. The integer N specifies the dimension in X along which +% A is multiplied. If size(A) = [I,1], then X must have size(X,N) = +% I. Note that ndims(Y) = ndims(X) - 1 because the N-th dimension +% is removed. +% +% Y = TTV(X,{A,B,C,...}) computes the product of tensor X with a +% sequence of vectors in the cell array. The products are computed +% sequentially along all dimensions (or modes) of X. The cell array +% contains ndims(X) vectors. +% +% Y = TTV(X,{A,B,C,...},DIMS) computes the sequence of tensor-vector +% products along the dimensions specified by DIMS. +% +% Examples +% X = tensor(rand(5,3,4,2)); +% A = rand(5,1); B = rand(3,1); C = rand(4,1); D = rand(2,1); +% Y = ttv(X, A, 1) %<-- X times A in mode 1 +% Y = ttv(X, {A,B,C,D}, 1) %<-- same as above +% Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply +% Y = ttv(X, {D,C,B,A}, [4 3 2 1]) %<-- same as above +% Y = ttv(X, {A,B,C,D}) %<-- same as above +% Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4 +% Y = ttv(X, {A,B,C,D}, [3 4]) %<-- same as above +% Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way mutplication +% Y = ttv(X, {A,B,C,D}, [1 2 4]) %<-- same as above +% Y = ttv(X, {A,B,D}, -3) %<-- same as above +% Y = ttv(X, {A,B,C,D}, -3) %<-- same as above +% +% See also TENSOR, TENSOR/TTT, TENSOR/TTM. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +% Check the number of arguments +if (nargin < 2) + error('TTV requires at least two arguments.'); +end + +% Check for 3rd argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with v as a +% cell array with one element. +if ~iscell(v) + c = ttv(a,{v},dims); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(a),numel(v)); + +% Check that each multiplicand is the right size. +for i = 1:numel(dims) + if ~isequal(size(v{vidx(i)}),[size(a,dims(i)) 1]) + error('Multiplicand is wrong size'); + end +end + +if exist('tensor/ttv_single','file') == 3 + c = a; + for i = numel(dims) : -1 : 1 + c = ttv_single(c,v{vidx(i)},dims(i)); + end + return; +end + +% Extract the MDA +c = a.data; + +% Permute it so that the dimensions we're working with come last +remdims = setdiff(1:ndims(a),dims); +if (ndims(a) > 1) + c = permute(c,[remdims dims]); +end + +% Do each multiply in sequence, doing the highest index first, +% which is important for vector multiplies. +n = ndims(a); +sz = a.size([remdims dims]); +for i = numel(dims) : -1 : 1 + c = reshape(c,prod(sz(1:n-1)),sz(n)); + c = c * v{vidx(i)}; + n = n-1; +end + +% If needed, convert the final result back to a tensor +if (n > 0) + c = tensor(c,sz(1:n)); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uminus.m new file mode 100644 index 0000000..47011a9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uminus.m @@ -0,0 +1,18 @@ +function t = uminus(t) +%UMINUS Unary minus (-) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +t.data = -t.data; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uplus.m new file mode 100644 index 0000000..a85680d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/uplus.m @@ -0,0 +1,19 @@ +function t = uplus(t) +%UPLUS Unary plus (+) for tensors. +% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +% This function does nothing! + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/xor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/xor.m new file mode 100644 index 0000000..83ec3c5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@tensor/xor.m @@ -0,0 +1,18 @@ +function Z = xor(X,Y) +%XOR Logical EXCLUSIVE OR for tensors. 
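+%
+%   Z = XOR(X,Y) applies logical exclusive or elementwise (via TENFUN) and
+%   returns the result as a tensor.
+%
+%   Illustrative example (a sketch, not from the upstream help; the values
+%   are arbitrary):
+%     X = tensor([1 0; 1 1]);
+%     Y = tensor([0 0; 1 0]);
+%     Z = xor(X,Y)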
+% +% See also TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +Z = tenfun(@xor,X,Y); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/disp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/disp.m new file mode 100644 index 0000000..d98a862 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/disp.m @@ -0,0 +1,34 @@ +function disp(t,name) +%DISP Command window display of a ttensor. +% +% DISP(T) displays a ttensor with no name. +% +% DISP(T,NAME) display a ttensor with the given name. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if ~exist('name','var') + name = 'ans'; +end + +fprintf(1,'%s is a ttensor of size %s\n', name, tt_size2str(size(t))); +disp(t.core, sprintf('\t%s.core',name)); + +for j = 1 : ndims(t) + fprintf('\t%s.U{%d} = \n', name, j); + output = tt_matrix2cellstr(t.u{j}); + fprintf('\t\t%s\n',output{:}); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/display.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/display.m new file mode 100644 index 0000000..d142c37 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/display.m @@ -0,0 +1,17 @@ +function display(t) +%DISPLAY Command window display of a ttensor. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +disp(t,inputname(1)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/double.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/double.m new file mode 100644 index 0000000..0cbbb51 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/double.m @@ -0,0 +1,19 @@ +function A = double(T) +%DOUBLE Convert ttensor to double array. +% +% A = double(T) converts T to a standard multidimensional array. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +A = double(full(T)); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/end.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/end.m new file mode 100644 index 0000000..eec6183 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/end.m @@ -0,0 +1,22 @@ +function e = end(X,k,n) +%END Last index of indexing expression for ttensor. +% +% The expression X(end,:,:) will call END(X,1,3) to determine +% the value of the first index. +% +% See also TTENSOR, TTENSOR/SUBSREF, END. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Note that this only works with {} because () is not supported by +% subsref. +e = ndims(X); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/full.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/full.m new file mode 100644 index 0000000..db52aee --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/full.m @@ -0,0 +1,29 @@ +function X = full(T) +%FULL Convert a ttensor to a (dense) tensor. +% +% X = FULL(T) converts ttensor T to (dense) tensor X. +% +% See also TTENSOR, TENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Preallocate to ensure there is enough space +X = tenzeros(size(T)); + +% Now do the calculation +X = ttm(T.core,T.u); + +% Make sure that X is a dense tensor (small chance it could be a sparse +% tensor). +X = tensor(X); + +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/innerprod.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/innerprod.m new file mode 100644 index 0000000..3ea6397 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/innerprod.m @@ -0,0 +1,66 @@ +function res = innerprod(X,Y) +%INNERPROD Efficient inner product with a ttensor. +% +% R = INNERPROD(X,Y) efficiently computes the inner product between +% two tensors X and Y. This inner product is the standard inner product, +% if the tensors were treated as vectors. How to do this most efficiently +% depends on the tensor Y. +% +% See also TENSOR/INNERPROD, TTENSOR, KTENSOR/INNERPROD +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. 
Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% X is a ttensor +switch class(Y) + + case {'ttensor'} + if ~isequal(size(X),size(Y)) + error('X and Y must be the same size.'); + end + if prod(size(X.core)) > prod(size(Y.core)) + % Reverse argument and call this function again so that the + % tensor with the smaller core is the first argument. + res = innerprod(Y,X); + return + end + W = cell(ndims(X),1); + for n = 1:ndims(X) + W{n} = X.u{n}'*Y.u{n}; + end + J = ttm(Y.core, W); + res = innerprod(X.core,J); + return + + case {'tensor','sptensor'} + if ~isequal(size(X),size(Y)) + error('X and Y must be the same size.'); + end + if (prod(size(X)) < prod(size(X.core))) + Z = full(X); + res = innerprod(Z,Y); + return; + end + Z = ttm(Y,X.u,'t'); + res = innerprod(Z, X.core); + return + + case {'ktensor'} + % Reverse arguments to call ktensor implementation + res = innerprod(Y,X); + return + + otherwise + disp(['Inner product not available for class ' class(Y)]); + return +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isequal.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isequal.m new file mode 100644 index 0000000..0700f34 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isequal.m @@ -0,0 +1,39 @@ +function [tf, tf_core, tf_U] = isequal(A,B) +%ISEQUAL True if the part of two ttensor's are numerically equal. +% +% TF = ISEQUAL(A,B) returns true if each factor matrix and the core +% are equal for A and B. +% +% [TF, TF_CORE, TF_FACTORS] = ISEQUAL(A,B) returns also the result of +% comparing the core (TF_CORE) and an array with the results of comparing +% the factor matrices (TF_FACTORS). +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +tf = false; +tf_core = false; +tf_U = false; + +if ~isa(B,'ttensor') + return; +end + +if ndims(A) ~= ndims(B) + return; +end + +tf_core = isequal(A.core, B.core); +tf_U = cellfun(@isequal, A.u, B.u); +tf = tf_core & all(tf_U); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isscalar.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isscalar.m new file mode 100644 index 0000000..bbdc8bf --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/isscalar.m @@ -0,0 +1,19 @@ +function output = isscalar(~) +%ISSCALAR False for ttensors. +% ISSCALAR(S) returns logical 0 (false) if S is a ttensor. +% +% See also TTENSOR +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +output = false; +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mtimes.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mtimes.m new file mode 100644 index 0000000..c8e3dad --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mtimes.m @@ -0,0 +1,23 @@ +function C = mtimes(A,B) +%MTIMES Implement scalar multiplication for a ttensor. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +if ~isa(B,'ttensor') && numel(B) == 1 + C = ttensor(B * A.core, A.u); +elseif ~isa(A,'ttensor') && numel(A) == 1 + C = ttensor(A * B.core, B.u); +else + error('Use mtimes(full(A),full(B)).'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mttkrp.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mttkrp.m new file mode 100644 index 0000000..b2cc2d4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/mttkrp.m @@ -0,0 +1,43 @@ +function V = mttkrp(X,U,n) +%MTTKRP Matricized tensor times Khatri-Rao product for ttensor. +% +% V = MTTKRP(X,U,n) efficiently calculates the matrix product of the +% n-mode matricization of X with the Khatri-Rao product of all +% entries in U, a cell array of matrices, except the nth. How to +% most efficiently do this computation depends on the type of tensor +% involved. +% +% Examples +% T = ttensor(tensor(rand(2,3,4)), {rand(5,2), rand(2,3), rand(4,4)}); +% mttkrp(T , {rand(5,6), rand(2,6), rand(4,6)}, 3) +% +% See also TENSOR/MTTKRP, TTENSOR, TTENSOR/TTV +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +N = ndims(X); + +if (n==1) + R = size(U{2},2); +else + R = size(U{1},2); +end + +% Compute cell array of weights to multiply into core +W = cell(N,1); +for i = [1:n-1,n+1:N] + W{i} = (X.u{i}' * U{i}); +end +Y = mttkrp(X.core,W,n); + +% Find each column of answer by multiplying columns of X.u{n} with weights +V = X.u{n} * Y; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ndims.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ndims.m new file mode 100644 index 0000000..107921c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ndims.m @@ -0,0 +1,19 @@ +function n = ndims(t) +%NDIMS Return the number of dimensions for a ttensor. +% +% NDIMS(T) returns the number of dimensions of tensor T. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. 
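+%
+%   Illustrative example (a sketch, not from the upstream help; the sizes
+%   are arbitrary):
+%     T = ttensor(tensor(rand(2,2,2)), {rand(4,2), rand(5,2), rand(3,2)});
+%     ndims(T)   %<-- returns 3, one dimension per factor matrix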
+ +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +n = numel(t.u); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/norm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/norm.m new file mode 100644 index 0000000..d5afbd6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/norm.m @@ -0,0 +1,28 @@ +function nrm = norm(X) +%NORM Norm of a ttensor. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +if prod(size(X)) > prod(size(X.core)) + V = cell(ndims(X),1); + for n = 1:ndims(X) + V{n} = X.u{n}'*X.u{n}; + end + Y = ttm(X.core,V); + tmp = innerprod(Y, X.core); + nrm = sqrt(tmp); +else + nrm = norm(full(X)); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/nvecs.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/nvecs.m new file mode 100644 index 0000000..56bf360 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/nvecs.m @@ -0,0 +1,92 @@ +function u = nvecs(X,n,r,opts) +%NVECS Compute the leading mode-n vectors for a ttensor. +% +% U = NVECS(X,n,r) computes the r leading eigenvalues of Xn*Xn' +% (where Xn is the mode-n matricization of X), which provides +% information about the mode-n fibers. In two-dimensions, the r +% leading mode-1 vectors are the same as the r left singular vectors +% and the r leading mode-2 vectors are the same as the r right +% singular vectors. +% +% U = NVECS(X,n,r,OPTS) specifies options: +% OPTS.eigsopts: options passed to the EIGS routine [struct('disp',0)] +% OPTS.flipsign: make each column's largest element positive [true] +% +% Examples +% X = ttensor(tensor(ones(2,3,4)), 2*ones(1,2), 3*ones(2,3), 4*ones(4,4)); +% nvecs(X, 2, 2) +% +% Documentation page for n-vecs +% +% See also TTENSOR, TENMAT, EIGS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + +if ~exist('opts','var') + opts = struct; +end + +if isfield(opts,'eigsopts') + eigsopts = opts.eigsopts; +else + eigsopts.disp = 0; +end + +% Compute inner product of all n-1 factors +V = cell(ndims(X),1); +for i = 1:ndims(X) + if i == n, + V{i} = X.u{i}; + else + V{i} = X.u{i}' * X.u{i}; + end +end + +% Form H +H = ttm(X.core,V); + +if isa(H,'sptensor') + HnT = double(sptenmat(H,n,'t')); +else + H = full(H); + HnT = double(tenmat(H,n,'t')); +end +G = X.core; +if isa(G,'sptensor') + GnT = double(sptenmat(G,n,'t')); +else + G = full(G); + GnT = double(tenmat(G,n,'t')); +end + +% Compute Xn * Xn' +Y = HnT'*GnT*X.u{n}'; + +[u,d] = eigs(Y, r, 'LM', eigsopts); + +if isfield(opts,'flipsign') + flipsign = opts.flipsign; +else + flipsign = true; +end + +if flipsign + % Make the largest magnitude element be positive + [val,loc] = max(abs(u)); + for i = 1:r + if u(loc(i),i) < 0 + u(:,i) = u(:,i) * -1; + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/permute.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/permute.m new file mode 100644 index 0000000..637fa7a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/permute.m @@ -0,0 +1,35 @@ +function b = permute(a,order) +%PERMUTE Permute dimensions for a ttensor. +% +% Y = PERMUTE(X,ORDER) rearranges the dimensions of X so that they +% are in the order specified by the vector ORDER. The tensor +% produced has the same values of X but the order of the subscripts +% needed to access any particular element are rearranged as +% specified by ORDER. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +N = ndims(a); + +if ~isequal(1:N,sort(order)) + error('Invalid permuation'); +end + +newcore = permute(a.core,order); +newu = a.u(order); +b = ttensor(newcore,newu); + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/size.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/size.m new file mode 100644 index 0000000..cc61b26 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/size.m @@ -0,0 +1,32 @@ +function m = size(t,idx) +%SIZE Size of a ttensor. +% +% D = SIZE(T) returns the size of the tensor. +% +% I = size(T,DIM) returns the size of the dimension specified by +% the scalar DIM. +% +% See also TTENSOR, TTENSOR/NDIMS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
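+%
+% Illustrative example (a sketch, not from the upstream help; the sizes are
+% arbitrary):
+%   T = ttensor(tensor(rand(2,2,2)), {rand(4,2), rand(5,2), rand(3,2)});
+%   size(T)     %<-- returns [4 5 3], the row counts of the factor matrices
+%   size(T,2)   %<-- returns 5
+%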
+% The full license terms can be found in the file LICENSE.txt + +if ndims(t) == 0 + m = []; +end + +if exist('idx','var') + m = size(t.u{idx}, 1); +else + for i = 1 : ndims(t) + m(i) = size(t.u{i}, 1); + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsasgn.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsasgn.m new file mode 100644 index 0000000..95b18da --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsasgn.m @@ -0,0 +1,58 @@ +function t = subsasgn(t,s,b) +%SUBSASGN Subscripted assignment for a ttensor. +% +% Subscripted assignment can be used to alter the core tensor or the +% factor matrices in a ttensor. The entire factor matrix or tensor must be +% provided. +% +% Examples +% X = ttensor(tensor(rand(2,2,2)), rand(4,2), rand(5,2), rand(3,2)); +% X.core = tensor(ones(2,2,2)) %<--Change core tensor to all ones +% X.U{2} = zeros(4,2) %<--Change 2nd factor matrix to zeros +% X.U = {zeros(4,2),ones(5,2), randn(3,2)} %<--Change all matrices at once +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +switch s(1).type + case '.' + switch s(1).subs + case {'core','lambda'} + if length(s) == 1 + t = ttensor(b, t.u); + else + tmpcore = subsasgn(t.core, s(2:end), b); + t = ttensor(tmpcore, t.u); + end + case {'u','U'} + if length(s) == 1 + t = ttensor(t.core, b); + else + tmpu = subsasgn(t.u, s(2:end), b); %Refine in U + t = ttensor(t.core, tmpu); + end + otherwise + error(['Cannot change field ', s.subs, ' directly.']); + end + case '()' + error('Cannot change individual entries in ttensor.') + case '{}' + new_s(1).type = '.'; + new_s(1).subs = 'u'; + new_s(2:length(s)+1) = s; + t = subsasgn(t, new_s, b); + otherwise + error('Invalid subsasgn.'); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsref.m new file mode 100644 index 0000000..82b3804 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/subsref.m @@ -0,0 +1,45 @@ +function a = subsref(t,s) +%SUBSREF Subscripted reference for a ttensor. +% +% Subscripted reference is used to query the components of a ttensor. +% +% Examples +% core = tensor(rand(2,2,2)); +% X = ttensor(core, rand(4,2), rand(5,2), rand(3,2)); +% X.core %<-- returns core array +% X.U %<-- returns a cell array of three matrices +% X.U{1} %<-- returns the matrix corresponding to the first mode. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +switch s(1).type + case '.' 
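+        % Dot access: '.core' (or '.lambda') returns the core tensor and
+        % '.U' / '.u' returns the cell array of factor matrices; any deeper
+        % indexing (e.g. X.U{1}) is forwarded to tt_subsubsref. Brace
+        % indexing X{n} is rewritten below as X.u{n}.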
+ switch s(1).subs + case {'core','lambda'} + a = tt_subsubsref(t.core,s); + case {'U','u'} + a = tt_subsubsref(t.u,s); + otherwise + error(['No such field: ', s.subs]); + end + case '()' + error('Subsref with () not supported for ttensor.'); + case '{}' + new_s(1).type = '.'; + new_s(1).subs = 'u'; + new_s(2:length(s)+1) = s; + a = subsref(t, new_s); + otherwise + error('Invalid subsref.'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttensor.m new file mode 100644 index 0000000..9d69463 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttensor.m @@ -0,0 +1,125 @@ +%TTENSOR Class for Tucker tensors (decomposed). +% +%TTENSOR Methods: +% disp - Command window display of a ttensor. +% display - Command window display of a ttensor. +% double - Convert ttensor to double array. +% end - Last index of indexing expression for ttensor. +% full - Convert a ttensor to a (dense) tensor. +% innerprod - Efficient inner product with a ttensor. +% isequal - True if the part of two ttensor's are numerically equal. +% isscalar - False for ttensors. +% mtimes - Implement scalar multiplication for a ttensor. +% mttkrp - Matricized tensor times Khatri-Rao product for ttensor. +% ndims - Return the number of dimensions for a ttensor. +% norm - Norm of a ttensor. +% nvecs - Compute the leading mode-n vectors for a ttensor. +% permute - Permute dimensions for a ttensor. +% size - Size of a ttensor. +% subsasgn - Subscripted assignment for a ttensor. +% subsref - Subscripted reference for a ttensor. +% ttensor - Tensor stored as a Tucker operator (decomposed). +% ttm - Tensor times matrix for ttensor. +% ttv - Tensor times vector for ttensor. +% uminus - Unary minus for ttensor. +% uplus - Unary plus for ttensor. +% +% Documentation page for Tucker tensor class +% +% See also TENSOR_TOOLBOX +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +function t = ttensor(varargin) +%TTENSOR Tensor stored as a Tucker operator (decomposed). +% +% T = TTENSOR(G,U1,U2,...,UM) creates a TUCKER tensor from its +% constituent parts. Here G is a tensor of size K1 x K2 x ... x KM +% and each Um is a matrix with Km columns. +% +% T = TTENSOR(G,U) is the same as above except that U is a cell +% array containing matrix Um in cell m. +% +% The core tensor G can be any type of tensor that supports the +% following functions: +% - size +% - uminus +% - disp (with 2 arguments; see, e.g., TENSOR/DISP) +% - ttv +% - ttm +% - mtimes (scalar multiplication only) +% - permute +% - subsasgn +% - subsref +% +% T = TTENSOR(S) creates a TUCKER tensor by copying an existing +% TUCKER tensor. +% +% T = TTENSOR is the empty constructor. +% +% See also TTENSOR, TUCKER_ALS. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
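+%
+% Illustrative example (a sketch, not from the upstream help; the sizes are
+% arbitrary):
+%   G = tensor(rand(2,3,2));                % core of size K1 x K2 x K3
+%   U = {rand(4,2), rand(5,3), rand(3,2)};  % size(U{m},2) must equal Km
+%   T = ttensor(G,U);                       % same as ttensor(G,U{1},U{2},U{3})
+%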
+% The full license terms can be found in the file LICENSE.txt + +% Empty constructor +if (nargin == 0) + t.core = tensor; % empty tensor + t.u = []; + t = class(t, 'ttensor'); + return; +end + +% Copy CONSTRUCTOR +if (nargin == 1) && isa(varargin{1}, 'ttensor') + t.core = varargin{1}.core; + t.u = varargin{1}.u; + t = class(t, 'ttensor'); + return; +end + +% Core can be basically anything that supports certain functions. +t.core = varargin{1}; + +if isa(varargin{2},'cell') + t.u = varargin{2}; +else + for i = 2 : nargin + t.u{i-1} = varargin{i}; + end +end + +% Check that each Um is indeed a matrix +for i = 1 : length(t.u) + if ndims(t.u{i}) ~= 2 + error(['Matrix U' int2str(i) ' is not a matrix!']); + end +end + +% Size error checking +k = size(t.core); + +if length(k) ~= length(t.u) + error(['CORE has order ', int2str(length(k)), ... + ' but there are ', int2str(length(t.u)), ' matrices.']); +end + +for i = 1 : length(t.u) + if size(t.u{i},2) ~= k(i) + error(['Matrix U' int2str(i) ' does not have ' int2str(k(i)) ... + ' columns.']); + end +end + +t = class(t, 'ttensor'); +return; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttm.m new file mode 100644 index 0000000..8bfd684 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttm.m @@ -0,0 +1,106 @@ +function X = ttm(X,V,varargin) +%TTM Tensor times matrix for ttensor. +% +% Y = TTM(X,A,N) computes the n-mode product of the ttensor X with a +% matrix A; i.e., X x_N A. The integer N specifies the dimension +% (or mode) of X along which A should be multiplied. If size(A) = +% [J,I], then X must have size(X,N) = I. The result will be a +% ttensor of the same order and size as X except that size(Y,N) = J. +% +% Y = TTM(X,{A,B,C,...}) computes the n-mode product of the ttensor +% X with a sequence of matrices in the cell array. The n-mode +% products are computed sequentially along all dimensions (or modes) +% of X. The cell array contains ndims(X) matrices. +% +% Y = TTM(X,{A,B,C,...},DIMS) computes the sequence tensor-matrix +% products along the dimensions specified by DIMS. +% +% Y = TTM(...,'t') performs the same computations as above except +% the matrices are transposed. +% +% Examples +% X = ttensor(tensor(rand(2,2,2,2)),{rand(5,2),rand(3,2),rand(4,2),rand(2,2)}); +% A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2); +% Y = ttm(X, A, 1) %<-- computes X times A in mode-1 +% Y = ttm(X, {A,B,C,D}, 1) %<-- same as above +% Y = ttm(X, A', 1, 't') %<-- same as above +% Y = ttm(X, {A,B,C,D}, [1 2 3 4]) %<-- 4-way multiply +% Y = ttm(X, {D,C,B,A}, [4 3 2 1]) %<-- same as above +% Y = ttm(X, {A,B,C,D}) %<-- same as above +% Y = ttm(X, {A',B',C',D'}, 't') %<-- same as above +% Y = ttm(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4 +% Y = ttm(X, {A,B,C,D}, [3 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, [1 2 4]) %<-- 3-way multiply +% Y = ttm(X, {A,B,C,D}, [1 2 4]) %<-- same as above +% Y = ttm(X, {A,B,D}, -3) %<-- same as above +% Y = ttm(X, {A,B,C,D}, -3) %<-- same as above +% +% See also TTENSOR, TTENSOR/ARRANGE, TENSOR/TTM +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. 
Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTM requires at least two arguments.'); +end + +% Check for transpose option +isTranspose = false; +if numel(varargin) > 0 + if isnumeric(varargin{1}); + dims = varargin{1}; + end + isTranspose = (ischar(varargin{end}) && (varargin{end} == 't')); +end + +% Check for dims argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with V as a +% cell array with one element. +if ~iscell(V) + X = ttm(X,{V},dims,varargin{end}); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(X),numel(V)); + +% Determine correct size index +if isTranspose + j = 1; +else + j = 2; +end + +% Check that each multiplicand is the right size. +for i = 1:numel(dims) + if (ndims(V) ~= 2) || (size(V{vidx(i)},j) ~= size(X,dims(i))) + error('Multiplicand is wrong size'); + end +end + +% Do the multiplications in the specified modes. +for i = 1:numel(dims) + if isTranspose + X.u{dims(i)} = V{vidx(i)}'* X.u{dims(i)}; + else + X.u{dims(i)} = V{vidx(i)} * X.u{dims(i)}; + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttv.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttv.m new file mode 100644 index 0000000..e80b964 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/ttv.m @@ -0,0 +1,80 @@ +function c = ttv(a,v,dims) +%TTV Tensor times vector for ttensor. +% +% Y = TTV(X,A,N) computes the product of ttensor X with a +% (column) vector A. The integer N specifies the dimension in X +% along which A is multiplied. If size(A) = [I,1], then X must have +% size(X,N) = I. Note that ndims(Y) = ndims(X) - 1 because the N-th +% dimension is removed. +% +% Y = TTV(X,{A1,A2,...}) computes the product of ttensor X with a +% sequence of vectors in the cell array. The products are computed +% sequentially along all dimensions (or modes) of X. The cell array +% contains ndims(X) vectors. +% +% Y = TTV(X,{A1,A2,...},DIMS) computes the sequence tensor-vector +% products along the dimensions specified by a vector DIMS. +% +% Examples +% X = ttensor(tensor(rand(2,3,2)), rand(3,2), rand(2,3), rand(2,2)); +% ttv(X, [1:3]', 1) +% ttv(X, {[1:3]', [1:2]', [1:2]'}) +% ttv(X, {[1:3]', [1:2]'}, [1 3]) +% +% Documentation page for multiplying tensors +% +% See also TENSOR/TTV, TTENSOR, TTENSOR/TTM. +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + +%%%%%%%%%%%%%%%%%%%%%% +%%% ERROR CHECKING %%% +%%%%%%%%%%%%%%%%%%%%%% + +% Check the number of arguments +if (nargin < 2) + error('TTV requires at least two arguments.'); +end + +% Check for 3rd argument +if ~exist('dims','var') + dims = []; +end + +% Check that 2nd argument is cell array. If not, recall with v as a +% cell array with one element. +if ~iscell(v) + c = ttv(a,{v},dims); + return; +end + +% Get sorted dims and index for multiplicands +[dims,vidx] = tt_dimscheck(dims,ndims(a),numel(v)); + +% Check that each multiplicand is the right size. 
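+% (Each v{vidx(i)} must be a column vector whose length equals
+% size(a,dims(i)), the size of the mode of a that it multiplies.)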
+for i = 1:numel(dims) + if ~isequal(size(v{vidx(i)}),[size(a,dims(i)) 1]) + error('Multiplicand is wrong size'); + end +end + +% Figure out which dimensions will be left when we're done +remdims = setdiff(1:ndims(a),dims); + +% Create w to be multiplied with a.core +w = cell(ndims(a),1); +for i = 1:numel(dims) + w{dims(i)} = a.u{dims(i)}' * v{vidx(i)}; +end + +% Create new core +newcore = ttv(a.core,w,dims); + +% Create final result +if isempty(remdims) + c = newcore; +else + c = ttensor(newcore,a.u{remdims}); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uminus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uminus.m new file mode 100644 index 0000000..e9b11a4 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uminus.m @@ -0,0 +1,17 @@ +function t = uminus(t) +%UMINUS Unary minus for ttensor. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +t.core = -t.core; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uplus.m b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uplus.m new file mode 100644 index 0000000..e9c49ae --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/@ttensor/uplus.m @@ -0,0 +1,17 @@ +function t = uplus(t) +%UPLUS Unary plus for ttensor. +% +% See also TTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% This function does nothing! diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTION_GUIDE.md b/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTION_GUIDE.md new file mode 100644 index 0000000..358ced8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTION_GUIDE.md @@ -0,0 +1,35 @@ +# Tensor Toolbox Contribution Guide + +## Checklist + +- [ ] **Issue** Before the merge request, submit an issue for the change, providing as much detailed information as possible. For bug reports, please provide enough information to reproduce the problem. + +- [ ] **Fork** Create a branch or fork of the code and make your changes. + +- [ ] **Help Comments** Create or update comments for the m-files, following the style of the existing files. Be sure to explain all code options. + +- [ ] **HTML Documentation** For any major new functionality, please follow the following steps. 
+ - [ ] Add HTML documentation in the `doc\html` directory with the name `XXX_doc.html` + - [ ] Use the MATLAB `publish` command to create a new file in `doc\html` + - [ ] Add a pointer to this documentation file in `doc\html\helptoc.xml` + - [ ] Add pointers in any related higher-level files, e.g., a new method for CP should be referenced in the `cp.html` file + - [ ] Add link to HTML documentation from help comments in function + - [ ] Update search database by running: builddocsearchdb('[full path to tensor_toolbox/doc/html directory]') + +- [ ] **Tests** Create or update tests in the `tests` directory, especially for bug fixes or strongly encouraged for new code. + +- [ ] **Contents** If new functions were added to a class, go to the `maintenance` directory and run `update_classlist('Class',XXX)` to add the new functions to the class XXX help information. If new functions were added at +top level, go to `maintenance` and run `update_topcontents` to update the Contents.m file at the top level. + +- [ ] **Release Notes** +Update `RELEASE_NOTES.txt` with any significant bug fixes or additions. + +- [ ] **Contributors List** +Update `CONTRIBUTORS.md` with your name and a brief description of the contributions. + +- [ ] **Pass All Tests** +Confirm that all tests (including existing tests) pass in `tests` directory. + +- [ ] **Merge Request** At any point, create a work-in-progress merge request, referencing the issue number and with this checklist and WIP in the header. + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTORS.md b/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTORS.md new file mode 100644 index 0000000..aad9f6c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/CONTRIBUTORS.md @@ -0,0 +1,18 @@ +Primary POC: Tamara G. Kolda, tgkolda@sandia.gov + +* Evrim Acar - CP with optimization and missing data (`cp_opt`, `cp_wopt`) +* Woody Austin - Various fixes +* Brett W. Bader - Original author +* Grey Ballard - Various fixes +* Robert Bassett - Cleaning up symmetric tensor methods and classes (`cp_sym`, `symtensor`, `symktensor`) +* Casey Battaglino - Randomized CP (`cp_arls`) +* Eric C. Chi - Alternating Poisson Regression with Mulitplicative Updates (`cp_apr`) +* Jed Duersch - Improved K-tensor full ('ktensor/full') and Generalized CP (`gcp_opt`) +* Daniel M. Dunlavy - CP with optimization and missing data (`cp_opt`, `cp_wopt`), various fixes +* Samantha Hansen - Alternating Poisson Regression with QN or Newton Updates (`cp_apr`) +* David Hong - Generalized CP (`gcp_opt`) +* Jackson Mayo - Eigenvalue methods (`eig_sshopm`, `eig_sshopmc`, `eig_geap`) +* Todd Plantenga - Alternating Poisson Regression with QN or Newton Updates (`cp_apr`) +* Jimeng Sun - Memory-efficient Tucker (distributed with version 2.6 or earlier) +* Alex Williams - Ktensor/viz method (`ktensor/viz`) +* Shaden Smith - Faster fitness computation in `cp_als`. diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/Contents.m b/ext/YetAnotherFEcode/external/tensor_toolbox/Contents.m new file mode 100644 index 0000000..91905d9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/Contents.m @@ -0,0 +1,51 @@ +% Tensor Toolbox (Sandia National Labs) +% Version 3.1 04-Jun-2019 +% Tensor Toolbox for dense, sparse, and decomposed n-way arrays. +% +% Tensor Toolbox Classes: +% tensor - Dense tensor. +% sptensor - Sparse tensor. +% symtensor - Symmetric tensor. +% ktensor - Kruskal decomposed tensor. +% symktensor - Kruskal decomposed symmetric tensor. 
+% sumtensor - Sum of different types of tensors. +% ttensor - Tucker decomposed tensor. +% tenmat - Tensor as matrix. +% sptenmat - Sparse tensor as matrix. +% +% Tensor Toolbox Functions: +% cp_als - Compute a CP decomposition of any type of tensor. +% cp_apr - Compute nonnegative CP with alternating Poisson regression. +% cp_arls - CP decomposition of dense tensor via randomized least squares. +% cp_nmu - Compute nonnegative CP with multiplicative updates. +% cp_opt - Fits a CP model to a tensor via optimization. +% cp_sym - Fit a symmetric P model to the symmetric input tensor. +% cp_wopt - Fits a weighted CP model to a tensor via optimization. +% create_guess - Creates initial guess for CP or Tucker fitting. +% create_problem - Create test problems for tensor factorizations. +% create_problem_binary - Creates random low-rank 0/1 tensor. +% eig_geap - Shifted power method for generalized tensor eigenproblem. +% eig_sshopm - Shifted power method for finding real eigenpair of real tensor. +% eig_sshopmc - Shifted power method for real/complex eigenpair of tensor. +% export_data - Export tensor-related data to a file. +% gcp_opt - Fits Generalized CP decomposition with user-specified function. +% hosvd - Compute sequentially-truncated higher-order SVD (Tucker). +% import_data - Import tensor-related data to a file. +% khatrirao - Khatri-Rao product of matrices. +% matrandcong - Create a random matrix with a fixed congruence. +% matrandnorm - Normalizes columns of X so that each is unit 2-norm. +% matrandorth - Generates random n x n orthogonal real matrix. +% sptendiag - Creates a sparse tensor with v on the diagonal. +% sptenrand - Sparse uniformly distributed random tensor. +% tendiag - Creates a tensor with v on the diagonal. +% teneye - Create identity tensor of specified size. +% tenones - Ones tensor. +% tenrand - Uniformly distributed pseudo-random tensor. +% tenrandblk - Generate nearly block diagonal tensor. +% tenzeros - Create zeros tensor. +% tt_ind2sub - Multiple subscripts from linear indices. +% tt_sub2ind - Converts multidimensional subscripts to linear indices. +% tucker_als - Higher-order orthogonal iteration. +% tucker_sym - Symmetric Tucker approximation. +% +% Documentation page for Tensor Toolbox diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/INSTALL.txt b/ext/YetAnotherFEcode/external/tensor_toolbox/INSTALL.txt new file mode 100644 index 0000000..a104a20 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/INSTALL.txt @@ -0,0 +1,43 @@ +Please remember to cite the appropriate references when you have used +Tensor Toolbox in your work. See "How to Cite" under +http://www.sandia.gov/~tgkolda/TensorToolbox/ for more details. + +To install the Tensor Toolbox for MATLAB: + +1. Unpack the compressed file. In a linux environment, for example, this can + be done from the command line via: + + unzip tensor_toolbox_2.6.zip + + *or* + + gunzip -c tensor_toolbox_2.6.tar.gz | tar xvf - + + This should create a directory named *tensor_toolbox_2.6*. + +2. Rename the root directory from *tensor_toolbox_2.6* + to *tensor_toolbox*. + +3. Start MATLAB. + +4. Within MATLAB, cd to the *tensor_toolbox* directory and execute the + following commands: + + addpath(pwd) %<-- Add the tensor toolbox to the MATLAB path + cd met; addpath(pwd) %<-- [OPTIONAL] Also add the met directory + savepath %<-- Save for future MATLAB sessions + +5. For help, type + + help tensor_toolbox + + To see the documentation, go to the help home screen. 
+ Click "Supplemental Software" in the lower left. + Choose "Tensor Toolbox" and browse the pages. + + For help on MET, be sure that the met directory is on your path and + then get a directory listing with explanation by typing + + help met + + \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/LICENSE.txt b/ext/YetAnotherFEcode/external/tensor_toolbox/LICENSE.txt new file mode 100644 index 0000000..67b48fc --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/LICENSE.txt @@ -0,0 +1,34 @@ +************************************************************************ + + Tensor Toolbox for MATLAB + by Sandia National Laboratories + +Copyright (2018) Sandia Corporation. Under the terms of Contract +DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains +certain rights in this software. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +************************************************************************ + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/README.md b/ext/YetAnotherFEcode/external/tensor_toolbox/README.md new file mode 100644 index 0000000..6351b82 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/README.md @@ -0,0 +1,295 @@ +# MATLAB Tensor Toolbox, Version 3.1 + +Tensors (also known as multidimensional arrays or N-way arrays) are used in a +variety of applications ranging from chemometrics to network analysis. +The Tensor Toolbox provides the following classes for manipulating dense, +sparse, and structured tensors using MATLAB's object-oriented features: + +* `tensor` - Dense tensors, extending MATLAB's native mutlidimensional array capabilities. +* `sptensor` - Sparse tensors, only stores the nonzeros and their indices. +* `symtensor` - Symmetric tensor, only stores the unique entries. +* `ttensor` - Tucker decomposed tensor, stored as a core and factor matrices. +* `ktensor` - Kruskal decomposed tensor, stored as weight and factor matrices. +* `symktensor` - Kruskal decomposed _symmetric_ tensor, stored as weight and factor matrix. +* `sumtensor` - Sum of different types of tensors, never formed explicitly. +* `tenmat` - Tensor as a matrix, with extra information so that it can be converted back into a tensor. 
+* `sptenmat` - Store an sptensor as a sparse matrix in coordinate format, with extra information so that it can be converted back into an sptensor. + +The Tensor Toolbox for MATLAB is open source, but we ask that you please cite +the appropriate references ([listed below](#how-to-cite)) so that we can continue to show the +relevance of this work. Your contributions are warmly welcomed as well; please +see the [contribution guide](CONTRIBUTION_GUIDE.md). +Previous contributors are listed in [contributors](CONTRIBUTORS.md). +Full details of the license can be found in [license](LICENSE.txt). + +### What's new in Version 3.0? + +Version 3.0 adds + +* New classes and functions for symmetric tensors: `symtensor`, `symktensor`, `cp_sym` +* New class for sums of different tensor types: `sumtensor` +* Function to compute HOSVD and ST-HOSVD: `hosvd` + +We have also fixed many bugs. View the [RELEASE_NOTES.txt](RELEASE_NOTES.txt) file for details. + +## How to cite + +If you have used the Tensor Toolbox in your work in any way, +please cite the software itself along with at least one publication or preprint. +Thanks very much for your support. + +__General software reference, should always be cited:__ +Brett W. Bader, Tamara G. Kolda and others. +MATLAB Tensor Toolbox Version 3.1, +Available online, June 2019. +URL: https://gitlab.com/tensors/tensor_toolbox. +_Consider adding the short hash for the exact version that was used. +If you clone the repository, use the command +`git log --pretty=format:'%h' -n 1`. +If you download, the long hash is baked into the filename, but you need only use +the first 8 characters._ + +``` bibtex +@misc{TTB_Software, + author = {Brett W. Bader and Tamara G. Kolda and others}, + title = {MATLAB Tensor Toolbox Version 3.0-dev}, + howpublished = {Available online}, + month = aug, + year = {2017}, + url = {https://gitlab.com/tensors/tensor_toolbox} +} +``` + +__Default citation for dense computations:__ +B. W. Bader and T. G. Kolda. +Algorithm 862: MATLAB tensor classes for fast algorithm prototyping, +ACM Transactions on Mathematical Software 32(4):635-653, December 2006. +DOI: 10.1145/1186785.1186794. + +``` bibtex +@article{TTB_Dense, + author = {Brett W. Bader and Tamara G. Kolda}, + title = {Algorithm 862: {MATLAB} tensor classes for fast algorithm prototyping}, + journal = {ACM Transactions on Mathematical Software}, + month = dec, + year = {2006}, + volume = {32}, + number = {4}, + pages = {635--653}, + doi = {10.1145/1186785.1186794} +} +``` + +__Default citation for sparse computations:__ +B. W. Bader and T. G. Kolda. +Efficient MATLAB computations with sparse and factored tensors, +SIAM Journal on Scientific Computing 30(1):205-231, December 2007. DOI: 10.1137/060676489. + +``` bibtex +@article{TTB_Sparse, + author = {Brett W. Bader and Tamara G. Kolda}, + title = {Efficient {MATLAB} computations with sparse and factored tensors}, + journal = {SIAM Journal on Scientific Computing}, + month = dec, + year = {2007}, + volume = {30}, + number = {1}, + pages = {205--231}, + doi = {10.1137/060676489} +} +``` + +__Citation for all-at-once optimization for CP factorization (`cp_opt`):__ +E. Acar, D. M. Dunlavy and T. G. Kolda. +A Scalable Optimization Approach for Fitting Canonical Tensor Decompositions, +Journal of Chemometrics 25(2):67-86, February 2011. DOI: 10.1002/cem.1335. + +``` bibtex +@article{TTB_CPOPT, + author = {Evrim Acar and Daniel M. Dunlavy and Tamara G. 
Kolda}, + title = {A Scalable Optimization Approach for Fitting Canonical Tensor Decompositions}, + journal = {Journal of Chemometrics}, + month = feb, + year = {2011}, + volume = {25}, + number = {2}, + pages = {67--86}, + doi = {10.1002/cem.1335} +} +``` + +__Citation for CP factorization with missing data (`cp_wopt`):__ +E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup. +Scalable Tensor Factorizations for Incomplete Data, +Chemometrics and Intelligent Laboratory Systems 106(1):41-56, March 2011. +DOI: 10.1016/j.chemolab.2010.08.004. + +``` bibtex +@article{TTB_CPWOPT, + author = {Evrim Acar and Daniel M. Dunlavy and Tamara G. Kolda and Morten M{\o}rup}, + title = {Scalable Tensor Factorizations for Incomplete Data}, + journal = {Chemometrics and Intelligent Laboratory Systems}, + month = mar, + year = {2011}, + volume = {106}, + number = {1}, + pages = {41--56}, + doi = {10.1016/j.chemolab.2010.08.004} +} +``` + +__Citation for Shifted Symmetric Higher-Order Power Method for Tensor Eigenvalues (`eig_sshopm`):__ +T. G. Kolda and J. R. Mayo. +Shifted Power Method for Computing Tensor Eigenpairs, +SIAM Journal on Matrix Analysis and Applications 32(4):1095-1124, October 2011. DOI: 10.1137/100801482. + +``` bibtex +@article{TTB_SSHOPM, + author = {Tamara G. Kolda and Jackson R. Mayo}, + title = {Shifted Power Method for Computing Tensor Eigenpairs}, + journal = {SIAM Journal on Matrix Analysis and Applications}, + month = oct, + year = {2011}, + volume = {32}, + number = {4}, + pages = {1095-1124}, + doi = {10.1137/100801482} +} +``` + +__Citation for Method for Generalized Tensor Eigenvalues (`eig_geap`):__ +T. G. Kolda and J. R. Mayo. +An Adaptive Shifted Power Method for Computing Generalized Tensor Eigenpairs, +SIAM Journal on Matrix Analysis and Applications 35(4):1563-1581, December 2014. +DOI: 10.1137/100801482. + +``` bibtex +@Article{TTB_EIGGEAP, + title = {An Adaptive Shifted Power Method for Computing Generalized Tensor Eigenpairs}, + author = {Tamara G. Kolda and Jackson R. Mayo}, + doi = {10.1137/140951758}, + journal = {SIAM Journal on Matrix Analysis and Applications}, + number = {4}, + volume = {35}, + year = {2014}, + month = dec, + pages = {1563--1581}, + url = {http://epubs.siam.org/toc/sjmael/35/4}, +} +``` + +__Citations for on Poisson Tensor Factorization (`cp_apr`)__ +1. E. C. Chi and T. G. Kolda. +On Tensors, Sparsity, and Nonnegative Factorizations, +SIAM Journal on Matrix Analysis and Applications 33(4):1272-1299, December 2012. +2. S. Hansen, T. Plantenga and T. G. Kolda. +Newton-Based Optimization for Kullback-Leibler Nonnegative Tensor Factorizations, +Optimization Methods and Software 30(5):1002-1029, April 2015. + +``` bibtex +@Article{TTB_CPAPR, + title = {On Tensors, Sparsity, and Nonnegative Factorizations}, + author = {Eric C. Chi and Tamara G. Kolda}, + doi = {10.1137/110859063}, + journal = {SIAM Journal on Matrix Analysis and Applications}, + number = {4}, + volume = {33}, + year = {2012}, + month = dec, + pages = {1272-1299}, +} + +@Article{TTB_CPAPRB, + author = {Samantha Hansen and Todd Plantenga and Tamara G. Kolda}, + title = {Newton-Based Optimization for {Kullback-Leibler} Nonnegative Tensor Factorizations}, + journal = {Optimization Methods and Software}, + volume = {30}, + number = {5}, + pages = {1002-1029}, + month = {April}, + year = {2015}, + doi = {10.1080/10556788.2015.1009977}, +} +``` +__Citation for Symmetric CP (`cp_sym`):__ +T. G. 
Kolda, +Numerical Optimization for Symmetric Tensor Decomposition, +Mathematical Programming B 151(1):225-248, April 2015, doi:10.1007/s10107-015-0895-0 +``` bibtex +@article{TTB_CPSYM, +author = {Tamara G. Kolda}, +title = {Numerical Optimization for Symmetric Tensor Decomposition}, +journal = {Mathematical Programming B}, +volume = {151}, +number = {1}, +pages = {225-248}, +month = apr, +year = {2015}, +doi = {10.1007/s10107-015-0895-0}, +} +``` + +__Citation for CP with randomized least squares (`cp_rals`):__ +C. Battaglino, G. Ballard and T. G. Kolda, +A Practical Randomized CP Tensor Decomposition, +arXiv:1701.06600, January 2017 + +```bibtex +@misc{TTB_CPRALS, +author = {Casey Battaglino and Grey Ballard and Tamara G. Kolda}, +title = {A Practical Randomized {CP} Tensor Decomposition}, +month = jan, +year = {2017}, +eprint = {1701.06600}, +eprintclass = {cs.NA}, +} +``` +__Citation for Memory-Efficient Tucker (`tucker_me` and `ttm_me`):__ +T. G. Kolda and J. Sun. +Scalable Tensor Decompositions for Multi-aspect Data Mining, +ICDM 2008: Proceedings of the 8th IEEE International Conference on Data Mining, +pp. 363-372, December 2008. DOI: 10.1109/ICDM.2008.89. +_This code is no longer included with the toolbox but can be found in +Version 2.6._ + +``` bibtex +@inproceedings{TTB_MET, + author = {Tamara G. Kolda and Jimeng Sun}, + title = {Scalable Tensor Decompositions for Multi-aspect Data Mining}, + booktitle = {ICDM 2008: Proceedings of the 8th IEEE International Conference on Data Mining}, + month = dec, + year = {2008}, + pages = {363--372}, + doi = {10.1109/ICDM.2008.89} +} +``` + + +## Getting started and using the software + +### Download + +The latest release can be obtained by clicking +[Releases](https://gitlab.com/tensors/tensor_toolbox/releases) at left. +The latest development version can be obtained here by cloning or +downloading using the buttons above. +Version 2.6 and earlier can be obtained +[here](http://www.sandia.gov/~tgkolda/TensorToolbox/index-2.6.html). + +### Installation +1. Unpack the files, if necessary +2. Start MATLAB +3. Within MATLAB, navigate to the `tensor_toolbox` directory and execute the following commands: + 1. `addpath(pwd)` + 2. `savepath` + +### Getting help +At any time, type `help tensor_toolbox` for help on classes or functions. +You can also find a getting started guide via MATLAB's help system. Launch help +by pressing the question mark button and look for Tensor Toolbox under supplemental +software, as highlighted in the image below. + +![Navigating MATLAB Help Screen](doc/images/helpscreen.PNG "Navigating MATLAB Help Screen") + +Copyright 2019, Sandia Corporation. + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/RELEASE_NOTES.txt b/ext/YetAnotherFEcode/external/tensor_toolbox/RELEASE_NOTES.txt new file mode 100644 index 0000000..480cc77 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/RELEASE_NOTES.txt @@ -0,0 +1,355 @@ +Tensor Toolbox version 3.1 +by Brett W. Bader, Tamara G. Kolda, Evrim Acar, Daniel M. Dunlavy, et al. +Copyright 2019, Sandia National Laboratories. + +============================================ +Changes from Version 2.6 (February 6, 2015) +============================================ +- Changed license conditions: now open source BSD license. +- New KTENSOR/VIZ function for visualizing the factors produced by the + CP decomposition. +- Added new CP_SYM and TUCKER_SYM functions for symmetric tensor + decompositions. 
Added new SYMTENSOR and SYMKTENSOR classes with + limited functionality to support symmetric tensors. +- Added new SUMTENSOR class that works with an implicit sum of tensors + without actually forming the result. +- Added new CP-ARLS method that does alternating *RANDOMIZED* least + squares fitting for the CP decomposition per Battaglino et al. +- New GCP_OPT method for generalized CP. +- New CREATE_PROBLEM_BINARY method for generating problems where the + low-rank model corresponds to the odds of a 1. +- Improve KTENSOR/FULL function. +- Added SPTENSOR/SPONES function that replaces nonzero sparse tensor + elements with ones. +- Removed memory-efficient Tucker (met) code. +- Fixed formatting of lambda in ktensor/disp. +- Fixed type of subs in import_data for sptensor data. +- Made call to fixsigns in cp_als optional. + +============================================ +Changes from Version 2.5 (February 1, 2012) +============================================ +Top Level +- Added new EIG_GEAP function for computing generalized tensor + eigenpairs. Renamed SSHOPM to EIG_SSHOPM and added support for + adaptive shift (now the default). Renamed SSHOPMC to EIG_SSHOPMC. +- Major updates to CP_APR, including changing the default to use + 2nd-order optimization per paper of Hansen, Plantenga, & Kolda. See + method help for more information. +- Minor changes to CP_ALS: (1) Fixed bug in normalization step. (2) + Updated some calculations per work of Phan Anh Huy. (3) Forced + printing of last iteration so long as printitn > 0. +- Updated MTTKRP and KHATRIRAO, per work of Phan Anh Huy. +- Fixed bug in CREATE_PROBLEM for 'Sparse_Generation'. +- Added SPTENSOR support EXPORT_DATA and IMPORT_DATA. Added KTENSOR + support to IMPORT_DATA. +- Updated random number generator references to the new MATLAB + implementsion in CREATE_PROBLEM and CREATE_GUESS. +- Added instructions for adding MET to the pat in INSTALL.txt. +- Fixed function name for TT_FAC_TO_VEC per Evrim Acar bug report. +- Added new function MATRANDNORM. +- Renamed TT_CCONG to MATRANDCONG, TT_RANDORTHMATH to MATRANDORTH. +- Removed TT_ASSIGNMENT_TYPE, TT_COMBINATOR, TT_CP_W*, + TT_CREATE_MISSING_DATA_PATTERN. +- Modernized documentation with class support. + +Class: ktensor +- Fixed SUBSREF to properly handle lists of indices. +- Only call ARRANGE from NORMALIZE if there are multiple + components. Force ARRANGE to produce dense matrices. Fixes bug + reported by Jason Mattax on 3/1/2012. +- Fixed comments for NORMALIZE, SUBSREF. +- Added ISSYMMETRIC and SYMMETRIZE functions. +- Constructor can now take SYMKTENSOR as an input. + +Class: sptensor +- Fixed bug in SUBSASGN discovered by Sebastien Bratieres pertaining + to empty tensors. + +Class: tensor +- Fixed bug in ISSYMMETRIC with respect to groups. + +Acknowledgments: +- The function @symtensor/private/multinomial.m is from Mukhtar Ullah + and was distributed via the MATLAB file exchange. + +============================================ +Changes from Version 2.4 (March 22, 2010) +============================================ + +Top Level +- The "algorithms" directory has been eliminated. All routines are now + at the root level, meaning that only one directory has to be added + to the path to get all of Tensor Toolbox's standard functionality. +- Added new CREATE_PROBLEM and CREATE_GUESS routines that can be used + to generate test problems and initial guesses. These were first used + at the AIM 2010 Tensor Decomposition workshop. Added TT_RANDORTHMAT, + a helper function for creating problems. 
+- Added new SSHOPM and SSHOPMC code for Shifted Symmetric Higher-Order + Power Method for computing tensor eigenpairs. +- Added new CP_APR method for Poisson Tensor Factorization via + alternating Poisson regression, along with helper function + tt_loglikelihood. +- Added TENEYE to create "identity tensor". +- Helper functions for CP_OPT and CP_WOPT (like cp_fg) now have "tt_" + prepended to their names. They are not listed in the contents files. +- Adding ability to import and export text versions of matrices and + tensors via IMPORT_DATA/EXPORT_DATA functions. +- Making calling sequence to TENZEROS, TENRAND, and TENONES + consistent. Now all three will take either a size array or a list, + i.e., tenones([5 4 3]) or tenones(5,4,3) produce the same + results. Eliminated two-argument version of tenzeros, i.e., a call + to tenzeros(M,N) should be changed to tenzeros(N*ones(1,M)). +- Added additional comments in CP_ALS. +- Made output of CP_WOPT consistent with CP_OPT, i.e., now includes + output of optimization method. +- Fixed bug for empty tensor in TENONES. +- Fixed TT_IND2SUB, TT_SUB2IND to handle empty inputs. + +Documentation +- Added documentation in the help browser for cp_opt, cp_wopt, cp_als, + and sshopm. + +Class: tensor +- Added SYMMETRIZE function to symmetrize a tensor and ISSYMMETRIC + function to check if a tensor is symmetric. +- Adding new TTSV function to compute a tensor times the same vector + in every mode. Intended for symmetric tensors and doesn't allow user + to specify exactly which modes are skipped. +- Fixed "empty tensor" bugs in TENSOR (constructor), PERMUTE, COLLAPSE. +- Fixed "1D tensor" bug in TENMAT. +- Fixed bug with no results in FIND. +- More error checking in MTTKRP. + +CLASS: sptensor +- Added DIVIDE function for elementwise division. + +Class: ktensor +- Added new SCORE function to compute "factor match score" for + two ktensor's. Includes "greedy" option when +- Added new REDISTRIBUTE function to redistribute the weights from + lambda into a specified mode. +- Fixed bug in NORM, which sometimes returned a negative value due to + small errors in the calculation. Now it returns max(0,val). +- Added ISEQUAL function that checks for elementwise equality on + individual components. +- Lots of new options for the NORMALIZE function. + +Class: ttensor +- Added ISEQUAL function that checks for elementwise equality on + individual components. + +============================================ +Changes from Version 2.3 (July 8, 2009) +============================================ +General +- tenzeros(m,n): Now has the ability to create an mth-order tensor of + size n in every mode. tenzeros(siz) still works as usual. +- tt_subcheck now uses isfinite rather than ~isnan and ~isinf based on + error report from user. + +Algorithms +- Added new cp_opt and cp_wopt functions (and related utility + functions) for computing CP and weighted CP via optimization. + Requires that the user also install the Poblano Toolbox for + Matlab. This is freely available at + http://software.sandia.gov/trac/poblano. +- Changed the way that cp_als and tucker_als handle input arguments so + that they can now be parameter-value pairs. Should be backwards + compatible with old calling sequence. +- cp_als: Reverted the way that Unew is calculated from Unew = Unew * + pinv(Y) to Unew (Y \ Unew')'; which is from TTB2.2 and seems to give + better performance. + +Class: sptensor +- permute - Added check for empty tensor based on user error report. 
+- spmatrix - Added check for empty tensor based on user error report. +- sptensor - Replaced "~" with "junk" so it will work with Matlab 7.8 + (older version). Allowed sptensor to take an sptensor3 object + (though this class is not released yet) as input and convert it. + +Class: ktensor +- Revamped "arrange" so that it can also just accept a permutation and + rearrange the components. +- Adding a new "extract" function to select and extract a subset of the + components (rank-one factors) of a ktensor. +- Adding a new function "ncomponents" to return the number of + components. +- Added a "normalize" function that normalizes the columns of the factor + matrices to length 1 and absorbs the weights into lambda (without + rearranging the factors). +- Added new function "tocell" to convert a ktensor to a cell array. + +Class: sptenmat +- In function "double", added check for empty tensor based on error + report from user. + +============================================ +Changes from Version 2.2 (January 10, 2007) +============================================ +General: +- Added Memory Efficient Tucker (MET) package by Tamara Kolda and + Jimeng Sun. Type 'help tucker_me' after installation for more + information. +- Fixed bug in tenzeros command so that it now returns an empty tensor + when the initial size is emtpy. +- Fixed bug in tt_assignment_type so that it works with a sparse + tensor that is initially completely empty. +- Added comments to tt_sub2ind and tt_ind2sub. +- Removed errant ^M's at the end of every line of tt_subscheck. + +Algorithms: +- Changed parafac_als to cp_als (old one can still be called but is + deprecated). +- Added an option to cp_als to only print the information every n + iterations where n is a user-defined parameter. Also fixed bug in + the case of R=1. +- Added new cp_nmu function for computing a nonnegative tensor + factorization based upon Lee & Seung's NMF multiplicative update. +- Made calculation of residual in Tucker more efficient. + +Class: tensor +- Adds reshape command. +- Fixed find function so that it always returns a column vector. + (Bug# 3969) +- Fixed tenfun documentation. (Bug# 3339) + +Class: sptensor +- Adds reshape and spmatrix commands. +- Fixed bug in constructor so that it checks for subscripts out of + range and other input problems. (Bug# 3925) +- Fixed bug is subsasgn so that it works for a certain way of + inserting complex values. (Bug #3868) +- Fixed bug in disp function for sptensor that caused it not to accept + the user input to display all nonzeros for large tensors. (Bug #4009) +- Fixed bug in collapse so that it handles empty sptensor's correctly. +- Fixed bug in rdivide so that it will work correctly when either + argument is an empty tensor. +- Fixed bug in squeeze so that it now works correctly for sptensor's + with no nonzero elements. (Bug #3002) +- Fixed bug in subsasgn for a sptensor so that it works even when the + initial tensor is completely empty. +- Fixed bug in ttt so that it works even when one of the sptensors has + zero nonzeros. (Bug #3017) +- Fixed bug in elemfun that didn't remove those nonzeros that had + become zero (e.g., log 1 = 0). (Bug# 3235) + +Class: sptenmat +- Fixed bug in sptenmat so that it works when it is passed an sptensor + that doesn't have any entries. + +============================================ +Changes from Version 2.1 (December 1, 2006) +============================================ +General: +- Added INSTALL.txt with installation instructions. 
+- Updated copyright date from 2006 to 2007 throughout. + +Classes: tensor and sptensor +- Added transpose and ctranspose functions that throw an error + (transpose is not supported for tensors, but previously would do + nothing as if it *had* performed the transpose). +- Added ldivide, rdivide, lmdivide, and rmdivide, though they all work + only with scalars. + +Class: tensor +- Added isequal. +- Made find slightly more efficient in the case where the + corresponding values are not also returned. +- Fixed bug in assigning elements to 1-dimensional tensors. + +Class: sptensor +- Cleaner display with disp or display functions. +- Added checks against invalid sizes and subscripts for tensor + construction and subscripted reference and assignment. +- Fixed bug where the index 58 was confused with the character ':' + in subscripted reference and assignment. +- Made results of logical operators consistent with how sparse + matrices work, i.e., produces a dense tensor iff the equivalent + command on a sparse matrix would do the same. +- Plus and minus now work with a scalar or dense tensor, and the + result in those cases is dense. +- Added ability to do .* with a scalar. +- Made it so that isequal now works with dense tensors and will return + true if the two tensors are equivalent. +- Fixed bugs in double and squeeze on an all-zero sparse tensor. + +============================================ +Changes from Version 2.0 (September 6, 2006) +============================================ +All +- innerprod: Added checks that sizes match +- Improved subscripted assignment for tensor and sptensor. Now + supports assignment to a scalar (i.e., assign every element to that + scalar) and growth in both the size and number of dimensions. + +Class: tensor +- Added new function: nnz +- tenfun (and most relational operations): Fixed major bug is + dense-sparse comparisons. + +Class: sptensor +- Added new functions: not, and, or, xor, eq, ne, le, lt, ge, gt, isequal +- sptensor: Fixed bug where a 1D tensor was not correctly converted to + a sparse tensor. Also, added ability to accept an MDA as an input + and to accept a list of logical values as well as numerics. +- subsref: Fixed bug on subscripted reference to an empty tensor. +- ttt: Major overhaul that fixes a number of bugs and improves + efficiency dramatically. +- nvecs: Improved efficiency by converting to MATLAB sparse matrix + and calling eigs on that rather than calling eigs with the aatx + function. +- disp/display: Fixed bug that caused tensors with a *single* element + to display incorrectly. +- full: Fixed bug that caused it to fail if called on a completely + empty tensor. + +Class: ttensor +- innerprod/norm/nvecs: Improved efficiency. +- ttm: Removed errant debug print statements. + +Class: ktensor +- datadisp.m: Minor changes to formatting. + +Other +- License.txt: Removed an errant "7.3" that was in the text. +- Fixed top-level contents file and added version information so that + it will show up from MATLAB's ver command. 
+ +========================================= +Changes from Version 1.0 (April 13, 2006) +========================================= + +Classes +- Added support for sparse tensors (sptensor and sptenmat) +- Renamed tensor_as_matrix to tenmat +- Renamed tucker_tensor to ttensor +- Renamed cp_tensor to ktensor +- Many functions have substantially improved efficiency + +Changes to the tensor class +- Removed functions: issamesize, order, shiftdim +- Renamed functions: multiarrayop to tenfun +- New functions: collapse, contract, find, full, innerprod, mttkrp, nvecs, + scale + +Changes to the ktensor class (formerly cp_tensor) +- Removed functions: issamesize, order +- New functions: datadisp, double, end, fixsigns, innerprod, mttkrp, + nvecs, times, ttm + +Changes to the ttensor class (formerly tucker_tensor) +- Removed functions: issamesize, order +- New functions: double, end, innerprod, mttkrp, norm, nvecs, ttm, ttv + +Changes to the tenmat class (formerly tensor_as_matrix) +- New functions: end, minus, norm, plus, uminus, uplus + +Changes to examples, algorithms, and documentation +- The examples directory no longer exists. +- Instead, documentation has been incorporated directly into the + MATLAB help navigator. +- Also, a new algorithms directory has been added with two ALS methods + for CANDECOMP/PARAFAC and Tucker. + +$Id: RELEASE_NOTES.txt,v 1.16 2010/03/22 16:08:13 tgkolda Exp $ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_als.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_als.m new file mode 100644 index 0000000..86a8786 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_als.m @@ -0,0 +1,216 @@ +function [P,Uinit,output] = cp_als(X,R,varargin) +%CP_ALS Compute a CP decomposition of any type of tensor. +% +% M = CP_ALS(X,R) computes an estimate of the best rank-R +% CP model of a tensor X using an alternating least-squares +% algorithm. The input X can be a tensor, sptensor, ktensor, or +% ttensor. The result M is a ktensor. +% +% M = CP_ALS(X,R,'param',value,...) specifies optional parameters and +% values. Valid parameters and their default values are: +% 'tol' - Tolerance on difference in fit {1.0e-4} +% 'maxiters' - Maximum number of iterations {50} +% 'dimorder' - Order to loop through dimensions {1:ndims(A)} +% 'init' - Initial guess [{'random'}|'nvecs'|cell array] +% 'printitn' - Print fit every n iterations; 0 for no printing {1} +% 'fixsigns' - Call fixsigns at end of iterations {true} +% +% [M,U0] = CP_ALS(...) also returns the initial guess. +% +% [M,U0,out] = CP_ALS(...) also returns additional output that contains +% the input parameters. +% +% Note: The "fit" is defined as 1 - norm(X-full(M))/norm(X) and is +% loosely the proportion of the data described by the CP model, i.e., a +% fit of 1 is perfect. +% +% NOTE: Updated in various minor ways per work of Phan Anh Huy. See Anh +% Huy Phan, Petr Tichavský, Andrzej Cichocki, On Fast Computation of +% Gradients for CANDECOMP/PARAFAC Algorithms, arXiv:1204.1586, 2012. +% +% Examples: +% X = sptenrand([5 4 3], 10); +% M = cp_als(X,2); +% M = cp_als(X,2,'dimorder',[3 2 1]); +% M = cp_als(X,2,'dimorder',[3 2 1],'init','nvecs'); +% U0 = {rand(5,2),rand(4,2),[]}; %<-- Initial guess for factors of M +% [M,U0,out] = cp_als(X,2,'dimorder',[3 2 1],'init',U0); +% M = cp_als(X,2,out.params); %<-- Same params as previous run +% +% Documentation page for CP-ALS +% +% See also KTENSOR, TENSOR, SPTENSOR, TTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
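+%
+% (Editor's illustration, not part of the original file: a typical call using
+% the options documented above, with an explicit fit check. The data, rank,
+% and option values are arbitrary.)
+%
+% X = sptenrand([20 15 10], 100);
+% [M,U0,out] = cp_als(X, 3, 'tol', 1e-6, 'maxiters', 200, 'printitn', 10);
+% fit = 1 - norm(full(X) - full(M)) / norm(X); %<-- Same "fit" as defined above
+% M2 = cp_als(X, 3, out.params); %<-- Rerun with identical parameters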
+ + + +%% Extract number of dimensions and norm of X. +N = ndims(X); +normX = norm(X); + +%% Set algorithm parameters from input or by using defaults +params = inputParser; +params.addParameter('tol',1e-4,@isscalar); +params.addParameter('maxiters',50,@(x) isscalar(x) & x > 0); +params.addParameter('dimorder',1:N,@(x) isequal(sort(x),1:N)); +params.addParameter('init', 'random', @(x) (iscell(x) || ismember(x,{'random','nvecs'}))); +params.addParameter('printitn',1,@isscalar); +params.addParameter('fixsigns',true,@islogical); +params.parse(varargin{:}); + +%% Copy from params object +fitchangetol = params.Results.tol; +maxiters = params.Results.maxiters; +dimorder = params.Results.dimorder; +init = params.Results.init; +printitn = params.Results.printitn; + +%% Error checking + +%% Set up and error checking on initial guess for U. +if iscell(init) + Uinit = init; + if numel(Uinit) ~= N + error('OPTS.init does not have %d cells',N); + end + for n = dimorder(2:end) + if ~isequal(size(Uinit{n}),[size(X,n) R]) + error('OPTS.init{%d} is the wrong size',n); + end + end +else + % Observe that we don't need to calculate an initial guess for the + % first index in dimorder because that will be solved for in the first + % inner iteration. + if strcmp(init,'random') + Uinit = cell(N,1); + for n = dimorder(2:end) + Uinit{n} = rand(size(X,n),R); + end + elseif strcmp(init,'nvecs') || strcmp(init,'eigs') + Uinit = cell(N,1); + for n = dimorder(2:end) + Uinit{n} = nvecs(X,n,R); + end + else + error('The selected initialization method is not supported'); + end +end + +%% Set up for iterations - initializing U and the fit. +U = Uinit; +fit = 0; + +% Store the last MTTKRP result to accelerate fitness computation. +U_mttkrp = zeros(size(X, dimorder(end)), R); + +if printitn>0 + fprintf('\nCP_ALS:\n'); +end + +%% Main Loop: Iterate until convergence + +if (isa(X,'sptensor') || isa(X,'tensor')) && (exist('cpals_core','file') == 3) + + %fprintf('Using C++ code\n'); + [lambda,U] = cpals_core(X, Uinit, fitchangetol, maxiters, dimorder); + P = ktensor(lambda,U); + +else + + UtU = zeros(R,R,N); + for n = 1:N + if ~isempty(U{n}) + UtU(:,:,n) = U{n}'*U{n}; + end + end + + for iter = 1:maxiters + + fitold = fit; + + % Iterate over all N modes of the tensor + for n = dimorder(1:end) + + % Calculate Unew = X_(n) * khatrirao(all U except n, 'r'). + Unew = mttkrp(X,U,n); + % Save the last MTTKRP result for fitness check. + if n == dimorder(end) + U_mttkrp = Unew; + end + + % Compute the matrix of coefficients for linear system + Y = prod(UtU(:,:,[1:n-1 n+1:N]),3); + Unew = Unew / Y; + if issparse(Unew) + Unew = full(Unew); % for the case R=1 + end + + % Normalize each vector to prevent singularities in coefmatrix + if iter == 1 + lambda = sqrt(sum(Unew.^2,1))'; %2-norm + else + lambda = max( max(abs(Unew),[],1), 1 )'; %max-norm + end + + Unew = bsxfun(@rdivide, Unew, lambda'); + + U{n} = Unew; + UtU(:,:,n) = U{n}'*U{n}; + end + + P = ktensor(lambda,U); + + % This is equivalent to innerprod(X,P). 
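+        % (Editor's note: for the ktensor P = [lambda; U{1},...,U{N}],
+        %  innerprod(X,P) = sum_r lambda(r) * <X, U{1}(:,r) o ... o U{N}(:,r)>
+        %                 = sum_r lambda(r) * sum_i U{n}(i,r) * [mttkrp(X,U,n)](i,r)
+        %  for any mode n, so reusing the stored MTTKRP result from mode
+        %  dimorder(end) gives the inner product without forming full(P).)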
+ iprod = sum(sum(P.U{dimorder(end)} .* U_mttkrp) .* lambda'); + if normX == 0 + fit = norm(P)^2 - 2 * iprod; + else + normresidual = sqrt( normX^2 + norm(P)^2 - 2 * iprod ); + fit = 1 - (normresidual / normX); %fraction explained by model + end + fitchange = abs(fitold - fit); + + % Check for convergence + if (iter > 1) && (fitchange < fitchangetol) + flag = 0; + else + flag = 1; + end + + if (mod(iter,printitn)==0) || ((printitn>0) && (flag==0)) + fprintf(' Iter %2d: f = %e f-delta = %7.1e\n', iter, fit, fitchange); + end + + % Check for convergence + if (flag == 0) + break; + end + end +end + + +%% Clean up final result +% Arrange the final tensor so that the columns are normalized. +P = arrange(P); +% Fix the signs +if params.Results.fixsigns + P = fixsigns(P); +end + +if printitn>0 + if normX == 0 + fit = norm(P)^2 - 2 * innerprod(X,P); + else + normresidual = sqrt( normX^2 + norm(P)^2 - 2 * innerprod(X,P) ); + fit = 1 - (normresidual / normX); %fraction explained by model + end + fprintf(' Final f = %e \n', fit); +end + +output = struct; +output.params = params.Results; +output.iters = iter; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_apr.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_apr.m new file mode 100644 index 0000000..0001abb --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_apr.m @@ -0,0 +1,1689 @@ +function [M, Minit, output] = cp_apr(X, R, varargin) +%CP_APR Compute nonnegative CP with alternating Poisson regression. +% +% M = CP_APR(X, R) computes an estimate of the best rank-R CP model of a +% nonnegative tensor X using an alternating Poisson regression. This is +% most appropriate for sparse count data (i.e., nonnegative integer +% values) because it uses Kullback-Liebler divergence. The input X can +% be a tensor or sptensor. The result M is a ktensor. Input data must be +% nonnegative, and the computed ktensor factors are all nonnegative. +% +% Different algorithm variants are available (selected by the 'alg' +% parameter): +% 'pqnr' - row subproblems by projected quasi-Newton (default) +% 'pdnr' - row subproblems by projected damped Hessian +% 'mu' - multiplicative update (default in version 2.5) +% +% M = CP_APR(X, R, 'param', value, ...) specifies optional parameters and +% values. Some parameters work in all situations, others apply only for +% a particular choice of algorithm. 
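+%
+% Example (an editor's sketch, not from the original help; the data, rank,
+% and option values are arbitrary) showing how to select a variant explicitly:
+%
+% X = sptenrand([30 25 20], 200); %<-- Nonnegative sparse test data
+% M = cp_apr(X, 5, 'alg', 'pqnr', 'stoptol', 1e-3, 'printitn', 10);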
+% +% Valid parameters and their default values are: +% 'alg' - Algorithm ['mu'|'pdnr'|'pqnr'] {'pqnr'} +% 'stoptol' - Tolerance on the overall KKT violation {1.0e-4} +% 'stoptime' - Maximum number of seconds to run {1e6} +% 'maxiters' - Maximum number of iterations {1000} +% 'init' - Initial guess [{'random'}|ktensor] +% 'maxinneriters' - Maximum inner iterations per outer iteration {10} +% 'epsDivZero' - Safeguard against divide by zero {1.0e-10} +% 'printitn' - Print every n outer iterations; 0 for none {1} +% 'printinneritn' - Print every n inner iterations {0} +% +% Additional input parameters for algorithm 'mu': +% 'kappa' - Offset to fix complementary slackness {100} +% 'kappatol' - Tolerance on complementary slackness {1.0e-10} +% +% Additional input parameters for algorithm 'pdnr': +% 'epsActive' - Bertsekas tolerance for active set {1.0e-8} +% 'mu0' - Initial damping parameter {1.0e-5} +% 'precompinds' - Precompute sparse tensor indices {true} +% 'inexact' - Compute inexact Newton steps {true} +% +% Additional input parameters for algorithm 'pqnr': +% 'epsActive' - Bertsekas tolerance for active set {1.0e-8} +% 'lbfgsMem' - Number vector pairs to store for L-BFGS {3} +% 'precompinds' - Precompute sparse tensor indices {true} +% +% [M,M0] = CP_APR(...) also returns the initial guess. +% +% [M,M0,out] = CP_APR(...) also returns additional output: +% out.kktViolations - maximum KKT violation per iteration +% out.nInnerIters - number of inner iterations per outer iteration +% out.obj - final negative log-likelihood objective +% out.ttlTime - time algorithm took to converge or reach max +% out.times - cumulative time through each outer iteration +% If algorithm is 'mu': +% out.nViolations - number of factor matrices needing complementary +% slackness adjustment per iteration +% If algorithm is 'pdnr' or 'pqnr': +% out.nZeros - number of zero factor entries per iteration +% +% REFERENCES: +% * E. C. Chi and T. G. Kolda. On Tensors, Sparsity, and Nonnegative +% Factorizations, SIAM J. Matrix Analysis, 33(4):1272-1299, Dec. 2012, +% http://dx.doi.org/10.1137/110859063 +% * S. Hansen, T. Plantenga and T. G. Kolda, Newton-Based Optimization +% for Kullback-Leibler Nonnegative Tensor Factorizations, +% Optimization Methods and Software, 2015, +% http://dx.doi.org/10.1080/10556788.2015.1009977 +% +% See also CP_ALS, KTENSOR, TENSOR, SPTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Set the algorithm choice and initial guess from input or defaults. +params = inputParser; +params.addParameter('alg', 'pqnr', @(x) (ismember(x,{'mu','pdnr','pqnr'})) ); +params.addParameter('init','random', @(x) (isa(x,'ktensor') || ismember(x,{'random'})) ); +params.KeepUnmatched = true; +params.parse(varargin{:}); + +alg = params.Results.alg; +Minit = params.Results.init; + +% Extract the number of modes in tensor X. +N = ndims(X); + +if (R <= 0) + error('Number of components requested must be positive'); +end + +%% Check that the data is nonnegative. +tmp = find(X < 0.0); +if (size(tmp,1) > 0) + error('Data tensor must be nonnegative for Poisson-based factorization'); +end + +%% Set up an initial guess for the factor matrices. +if isa(Minit,'ktensor') + % User provided an initial ktensor; validate it. 
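+    % (Editor's note: a caller can construct such a guess as, e.g.,
+    %    M0 = ktensor({rand(size(X,1),R), rand(size(X,2),R), rand(size(X,3),R)});
+    %    M  = cp_apr(X, R, 'init', M0);
+    %  for a three-way X. The checks below enforce matching order, number of
+    %  components, mode sizes, and nonnegativity.)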
+ + if (ndims(Minit) ~= N) + error('Initial guess does not have the right number of modes'); + end + if (ncomponents(Minit) ~= R) + error('Initial guess does not have the right number of components'); + end + + for n = 1:N + if (size(Minit,n) ~= size(X,n)) + error('Mode %d of the initial guess is the wrong size',n); + end + if (min(min(Minit.U{n})) < 0.0) + error('Initial guess has negative element in mode %d',n); + end + end + if (min(Minit.lambda) < 0.0) + error('Initial guess has a negative ktensor weight'); + end + +elseif strcmp(Minit,'random') + % Choose random values for each element in the range (0,1). + F = cell(N,1); + for n = 1:N + F{n} = rand(size(X,n),R); + end + Minit = ktensor(F); +end + + +%% Call a solver based on the choice of algorithm parameter, passing +% all the other input parameters. +if strcmp(alg,'mu') + [M, output] = tt_cp_apr_mu (X, R, Minit, params.Unmatched); + output.params.alg = 'mu'; + +elseif strcmp(alg,'pdnr') + [M, output] = tt_cp_apr_pdnr (X, R, Minit, params.Unmatched); + output.params.alg = 'pdnr'; + +elseif strcmp(alg,'pqnr') + [M, output] = tt_cp_apr_pqnr (X, R, Minit, params.Unmatched); + output.params.alg = 'pqnr'; +end + +end + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Main algorithm PQNR +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [M, out] = tt_cp_apr_pqnr(X, R, Minit, varargin) +%TT_CP_APR_PQNR Compute nonnegative CP with alternating Poisson regression. +% +% tt_cp_apr_pqnr(X, R, ...) computes an estimate of the best rank-R +% CP model of a tensor X using an alternating Poisson regression. +% The algorithm solves "row subproblems" in each alternating subproblem, +% using a quasi-Newton Hessian approximation. +% The function is typically called by cp_apr. +% +% The model is solved by nonlinear optimization, and the code literally +% minimizes the negative of log-likelihood. However, printouts to the +% console reverse the sign to show maximization of log-likelihood. +% +% The function call can specify optional parameters and values. +% Valid parameters and their default values are: +% 'stoptol' - Tolerance on the overall KKT violation {1.0e-4} +% 'stoptime' - Maximum number of seconds to run {1e6} +% 'maxiters' - Maximum number of iterations {1000} +% 'maxinneriters' - Maximum inner iterations per outer iteration {10} +% 'epsDivZero' - Safeguard against divide by zero {1.0e-10} +% 'printitn' - Print every n outer iterations; 0 for no printing {1} +% 'printinneritn' - Print every n inner iterations {0} +% 'epsActive' - Bertsekas tolerance for active set {1.0e-8} +% 'lbfgsMem' - Number vector pairs to store for L-BFGS {3} +% 'precompinds' - Precompute sparse tensor indices to run faster {true} +% +% Return values are: +% M - ktensor model with R components +% out.fnEvals - number of row obj fn evaluations per outer iteration +% out.kktViolations - maximum KKT violation per iteration +% out.nInnerIters - number of inner iterations per outer iteration +% out.nZeros - number of factor elements equal to zero per iteration +% out.obj - final log-likelihood objective +% (minimization objective is actually -1 times this) +% out.ttlTime - time algorithm took to converge or reach max +% out.times - cumulative time through each outer iteration +% +% REFERENCE: Samantha Hansen, Todd Plantenga, Tamara G. Kolda. +% Newton-Based Optimization for Nonnegative Tensor Factorizations, +% arXiv:1304.4964 [math.NA], April 2013, +% URL: http://arxiv.org/abs/1304.4964. Submitted for publication. 
+% +% See also CP_APR, KTENSOR, TENSOR, SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Set algorithm parameters from input or by using defaults. +params = inputParser; +params.addParamValue('epsActive', 1e-8, @isscalar); +params.addParamValue('epsDivZero',1e-10,@isscalar); +params.addParamValue('lbfgsMem',3,@isscalar); +params.addParamValue('maxinneriters',10,@isscalar); +params.addParamValue('maxiters',1000,@(x) isscalar(x) & x > 0); +params.addParamValue('precompinds',true,@(x) isa(x,'logical')); +params.addParamValue('printinneritn',0,@isscalar); +params.addParamValue('printitn',1,@isscalar); +params.addParamValue('stoptime',1e6,@isscalar); +params.addParamValue('stoptol',1e-4,@isscalar); +params.parse(varargin{:}); + +%% Copy from params object. +epsActSet = params.Results.epsActive; +epsDivZero = params.Results.epsDivZero; +nSizeLBFGS = params.Results.lbfgsMem; +maxInnerIters = params.Results.maxinneriters; +maxOuterIters = params.Results.maxiters; +precomputeSparseIndices = params.Results.precompinds; +printInnerItn = params.Results.printinneritn; +printOuterItn = params.Results.printitn; +stoptime = params.Results.stoptime; +stoptol = params.Results.stoptol; + +out = []; + +% Extract the number of modes in tensor X. +N = ndims(X); + +% If the initial guess has any rows of all zero elements, then modify +% so the row subproblem is not taking log(0). Values will be restored to +% zero later if the unfolded X for the row has no nonzeros. +for n = 1:N + rowsum = sum(Minit{n},2); + tmpIx = find(rowsum == 0); + if (isempty(tmpIx) == false) + Minit{n}(tmpIx,1) = 1.0e-8; + end +end + +% Start with the initial guess, normalized using the vector L1 norm. +M = normalize(Minit,[],1); + +% Sparse tensor flag affects how Pi and Phi are computed. +if isa(X,'sptensor') + isSparse = true; +else + isSparse = false; +end + +% Initialize output arrays. +fnEvals = zeros(maxOuterIters,1); +fnVals = zeros(maxOuterIters,1); +kktViolations = -ones(maxOuterIters,1); +nInnerIters = zeros(maxOuterIters,1); +nzeros = zeros(maxOuterIters,1); +times = zeros(maxOuterIters,1); + +if (printOuterItn > 0) + fprintf('\nCP_PQNR (alternating Poisson regression using quasi-Newton)\n'); +end +dispLineWarn = (printInnerItn > 0); + +% Start the wall clock timer. +tic; + + +if (isSparse && precomputeSparseIndices) + % Precompute sparse index sets for all the row subproblems. + % Takes more memory but can cut execution time significantly in some cases. + if (printOuterItn > 0) + fprintf(' Precomputing sparse index sets...'); + end + sparseIx = cell(N); + for n = 1:N + num_rows = size(M{n},1); + sparseIx{n} = cell(num_rows,1); + for jj = 1:num_rows + sparseIx{n}{jj} = find(X.subs(:,n) == jj); + end + end + if (printOuterItn > 0) + fprintf('done\n'); + end +end + + +%% Main Loop: Iterate until convergence or a max threshold is reached. +for iter = 1:maxOuterIters + + isConverged = true; + kktModeViolations = zeros(N,1); + countInnerIters = zeros(1,N); + + % Alternate thru each factor matrix, A_1, A_2, ... , A_N. 
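+    % (Editor's summary of the loop below: for each mode n it (i) shifts the
+    %  weights lambda into factor n via redistribute, (ii) forms Pi, the
+    %  Khatri-Rao product of all the other factor matrices (restricted to the
+    %  rows needed for the nonzeros when X is sparse), (iii) solves each row
+    %  of factor n as an independent nonnegative row subproblem using
+    %  projected L-BFGS steps with a projected line search, and (iv) shifts
+    %  the weight back into lambda via normalize.)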
+ for n = 1:N + + % Shift the weight from lambda to mode n. + M = redistribute(M,n); + + % Calculate Khatri-Rhao product of all matrices but the n-th. + if (isSparse == false) + % Data is not a sparse tensor. + Pi = tt_calcpi_prowsubprob(X, isSparse, M, R, n, N, []); + X_mat = double(tenmat(X,n)); + end + + num_rows = size(M{n},1); + isRowNOTconverged = zeros(1,num_rows); + + % Loop over the row subproblems in mode n. + for jj = 1:num_rows + + % Get data values for row jj of matricized mode n. + if (isSparse) + % Data is a sparse tensor. + if (precomputeSparseIndices == false) + sparse_indices = find(X.subs(:,n) == jj); + else + sparse_indices = sparseIx{n}{jj}; + end + if (isempty(sparse_indices)) + % The row jj of matricized tensor X in mode n is empty. + M{n}(jj,:) = 0; + continue + end + x_row = X.vals(sparse_indices); + + % Calculate just the columns of Pi needed for this row. + Pi = tt_calcpi_prowsubprob(X, isSparse, M, ... + R, n, N, sparse_indices); + else + x_row = X_mat(jj,:); + end + + % Get current values of the row subproblem variables. + m_row = M{n}(jj,:); + + % Initialize L-BFGS storage for the row subproblem. + delm = zeros(R, nSizeLBFGS); + delg = zeros(R, nSizeLBFGS); + rho = zeros(nSizeLBFGS, 1); + lbfgsPos = 1; + m_rowOLD = []; + gradOLD = []; + + % Iteratively solve the row subproblem with projected qNewton steps. + for i = 1:maxInnerIters + % Calculate the gradient. + [gradM, phi_row] = calc_grad(isSparse, Pi, epsDivZero, ... + x_row, m_row); + + if (i == 1) + % Original cp_aprPQN_row code (and plb_row) does a gradient + % step to prime the L-BFGS approximation. However, it means + % a row subproblem that already converged wastes time + % doing a gradient step before checking KKT conditions. + % TODO: fix in a future release. + m_rowOLD = m_row; + gradOLD = gradM; + [m_row, f, f_unit, f_new, num_evals] ... + = tt_linesearch_prowsubprob(-gradM', gradM', ... + m_rowOLD, ... + 1, 1/2, 10, 1.0e-4, ... + isSparse, x_row, Pi, ... + phi_row, dispLineWarn); + fnEvals(iter) = fnEvals(iter) + num_evals; + [gradM, phi_row] = calc_grad(isSparse, Pi, epsDivZero, ... + x_row, m_row); + end + + % Compute the row subproblem kkt_violation. + % Experiments in the original paper used this: + %kkt_violation = norm(abs(min(m_row,gradM')),2); + % Now we use | KKT |_inf: + kkt_violation = max(abs(min(m_row,gradM'))); + + % Report largest row subproblem initial violation. + if ((i == 1) && (kkt_violation > kktModeViolations(n))) + kktModeViolations(n) = kkt_violation; + end + + if (mod(i, printInnerItn) == 0) + fprintf(' Mode = %1d, Row = %d, InnerIt = %d', ... + n, jj, i); + if (i == 1) + fprintf(', RowKKT = %.2e\n', kkt_violation); + else + fprintf(', RowKKT = %.2e, RowObj = %.4e\n', ... + kkt_violation, -f_new); + end + end + + % Check for row subproblem convergence. + if (kkt_violation < stoptol) + break; + else + % Not converged, so m_row will be modified. + isRowNOTconverged(jj) = 1; + end + + % Update the L-BFGS approximation. + tmp_delm = m_row - m_rowOLD; + tmp_delg = gradM - gradOLD; + tmp_rho = 1 / (tmp_delm * tmp_delg); + if ((tmp_rho > 0.0) && (isinf(tmp_rho) == false)) + delm(:,lbfgsPos) = tmp_delm; + delg(:,lbfgsPos) = tmp_delg; + rho(lbfgsPos) = tmp_rho; + else + % Rho is required to be positive; if not, then skip + % the L-BFGS update pair. The recommended safeguard for + % full BFGS is Powell damping, but not clear how to damp + % in 2-loop L-BFGS. + if (dispLineWarn) + fprintf('WARNING: skipping L-BFGS update, rho would be 1 / %.2e\n', ... 
+ (tmp_delm * tmp_delg)); + end + % Roll back lbfgsPos since it will increment later. + if (lbfgsPos == 1) + if (rho(nSizeLBFGS) > 0) + lbfgsPos = nSizeLBFGS; + else + % Fatal error, should not happen. + fprintf('ERROR: L-BFGS first iterate is bad\n'); + return; + end + else + lbfgsPos = lbfgsPos - 1; + end + end + + % Calculate the search direction. + search_dir = getSearchDirPqnr(m_row, gradM, epsActSet, ... + delm, delg, rho, lbfgsPos, ... + i, dispLineWarn); + lbfgsPos = mod(lbfgsPos, nSizeLBFGS) + 1; + + m_rowOLD = m_row; + gradOLD = gradM; + + % Perform a projected linesearch and update variables. + % Start from a unit step length, decrease by 1/2, stop with + % sufficient decrease of 1.0e-4 or at most 10 steps. + [m_row, f, f_unit, f_new, num_evals] ... + = tt_linesearch_prowsubprob(search_dir', gradOLD', m_rowOLD, ... + 1, 1/2, 10, 1.0e-4, ... + isSparse, x_row, Pi, ... + phi_row, dispLineWarn); + fnEvals(iter) = fnEvals(iter) + num_evals; + end + + M{n}(jj,:) = m_row; + countInnerIters(n) = countInnerIters(n) + i; + + end + + % Test if all row subproblems have converged, which means that + % no variables in this mode were changed. + if (sum(isRowNOTconverged) ~= 0) + isConverged = false; + end + + % Shift weight from mode n back to lambda. + M = normalize(M,[],1,n); + + % Total number of inner iterations for a given outer iteration, + % totalled across all modes and all row subproblems in each mode. + nInnerIters(iter) = nInnerIters(iter) + countInnerIters(n); + end + + % Save output items for the outer iteration. + num_zero = 0; + for n = 1:N + num_zero = num_zero + nnz(find(M{n} == 0.0)); + end + nzeros(iter) = num_zero; + kktViolations(iter) = max(kktModeViolations); + + % Print outer iteration status. + if (mod(iter,printOuterItn) == 0) + fnVals(iter) = -tt_loglikelihood(X,M); + fprintf('%4d. Ttl Inner Its: %d, KKT viol = %.2e, obj = %.8e, nz: %d\n', ... + iter, nInnerIters(iter), kktViolations(iter), fnVals(iter), ... + num_zero); + end + + times(iter) = toc; + + % Check for convergence + if (isConverged) + break; + end + if (times(iter) > stoptime) + fprintf('Exiting because time limit exceeded\n'); + break; + end + +end + +t_stop = toc; + +%% Clean up final result and set output items. +M = normalize(M,'sort',1); +loglike = tt_loglikelihood(X,M); + +if (printOuterItn > 0) + % For legacy reasons, compute "fit", the fraction explained by the model. + % Fit is in the range [0,1], with 1 being the best fit. + normX = norm(X); + normresidual = sqrt( normX^2 + norm(M)^2 - 2 * innerprod(X,M) ); + fit = 1 - (normresidual / normX); + + fprintf('===========================================\n'); + fprintf(' Final log-likelihood = %e \n', loglike); + fprintf(' Final least squares fit = %e \n', fit); + fprintf(' Final KKT violation = %7.7e\n', kktViolations(iter)); + fprintf(' Total inner iterations = %d\n', sum(nInnerIters)); + fprintf(' Total execution time = %.2f secs\n', t_stop); +end + +out = struct; +out.params = params.Results; +out.obj = loglike; +out.kktViolations = kktViolations(1:iter); +out.fnVals = fnVals(1:iter); +out.fnEvals = fnEvals(1:iter); +out.nInnerIters = nInnerIters(1:iter); +out.nZeros = nzeros(1:iter); +out.times = times(1:iter); +out.ttlTime = t_stop; + +end + +%---------------------------------------------------------------------- + +function [grad_row, phi_row] = calc_grad(isSparse, Pi, eps_div_zero, x_row, m_row) +%function grad_row = calc_grad(isSparse, Pi, eps_div_zero, x_row, m_row) +% Compute the gradient for a PQNR row subproblem. 
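+% (Editor's note: the row subproblem minimized here is
+%     f(m) = sum_j [ (m*Pi')_j - x_j * log( (m*Pi')_j ) ],
+%  i.e., the negative Poisson log-likelihood up to a constant. Because the
+%  other factor matrices are L1-normalized, each column of Pi sums to one,
+%  so the gradient simplifies to (1 - Phi)' with
+%  Phi = (x ./ max(m*Pi', eps_div_zero)) * Pi, which is what is computed below.)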
+% +% isSparse - true if x_row is sparse, false if dense +% Pi - matrix +% eps_div_zero - safeguard value to prevent division by zero +% x_row - row vector of data values for the row subproblem +% m_row - vector of variables for the row subproblem +% +% Returns the gradient vector for a row subproblem. + + if (isSparse) + v = m_row * Pi'; + w = x_row' ./ max(v, eps_div_zero); + phi_row = w * Pi; + + else + v = m_row * Pi'; + w = x_row ./ max(v, eps_div_zero); + phi_row = w * Pi; + end + + grad_row = (ones(size(phi_row)) - phi_row)'; +end + +%---------------------------------------------------------------------- + +function [d] = getSearchDirPqnr (m_row, grad, epsActSet, ... + delta_m, delta_g, rho, lbfgs_pos, ... + iters, disp_warn) +% Compute the search direction by projecting with L-BFGS. +% +% m_row - current variable values +% grad - gradient at m_row +% epsActSet - Bertsekas tolerance for active set determination +% delta_m - L-BFGS array of vector variable deltas +% delta_g - L-BFGS array of gradient deltas +% lbfgs_pos - pointer into L-BFGS arrays +% +% Returns +% d - search direction based on current L-BFGS and grad +% +% Adapted from MATLAB code of Dongmin Kim and Suvrit Sra written in 2008. +% Modified extensively to solve row subproblems and use a better linesearch; +% see the reference at the top of this file for details. + + lbfgsSize = size(delta_m,2); + + % Determine active and free variables. + % If epsActSet is zero, then the following works: + % fixedVars = find((m_row == 0) & (grad' > 0)); + % For the general case this works but is less clear and assumes m_row > 0: + % fixedVars = find((grad' > 0) & (m_row <= min(epsActSet,grad'))); + projGradStep = (m_row - grad') .* (m_row - grad' > 0); + wk = norm(m_row - projGradStep); + fixedVars = find((grad' > 0) & (m_row <= min(epsActSet,wk))); + + d = -grad; + d(fixedVars) = 0; + + if ((delta_m(:,lbfgs_pos)' * delta_g(:,lbfgs_pos)) == 0.0) + % Cannot proceed with this L-BFGS data; most likely the iteration + % has converged, so this is rarely seen. + if (disp_warn) + fprintf('WARNING: L-BFGS update is orthogonal, using gradient\n'); + end + return; + end + + alpha = ones(lbfgsSize,1); + k = lbfgs_pos; + + % Perform an L-BFGS two-loop recursion to compute the search direction. + + for i = 1 : min(iters, lbfgsSize) + alpha(k) = rho(k) * delta_m(:, k)' * d; + d = d - alpha(k) * delta_g(:, k); + k = lbfgsSize - mod(1 - k, lbfgsSize); + end + + coef = 1 / rho(lbfgs_pos) / (delta_g(:, lbfgs_pos)' * delta_g(:, lbfgs_pos)); + d = coef * d; + + for i = 1 : min(iters, lbfgsSize) + k = mod(k, lbfgsSize) + 1; + b = rho(k) * delta_g(:, k)' * d; + d = d + (alpha(k) - b) * delta_m(:, k); + end + + d(fixedVars) = 0; + +end + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Main algorithm PDNR +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [M, out] = tt_cp_apr_pdnr(X, R, Minit, varargin) +%TT_CP_APR_PDNR Compute nonnegative CP with alternating Poisson regression. +% +% tt_cp_apr_pdnr(X, R, ...) computes an estimate of the best rank-R +% CP model of a tensor X using an alternating Poisson regression. +% The algorithm solves "row subproblems" in each alternating subproblem, +% using a Hessian of size R^2. +% The function is typically called by cp_apr. +% +% The model is solved by nonlinear optimization, and the code literally +% minimizes the negative of log-likelihood. However, printouts to the +% console reverse the sign to show maximization of log-likelihood. 
+% +% The function call can specify optional parameters and values. +% Valid parameters and their default values are: +% 'stoptol' - Tolerance on the overall KKT violation {1.0e-4} +% 'stoptime' - Maximum number of seconds to run {1e6} +% 'maxiters' - Maximum number of iterations {1000} +% 'maxinneriters' - Maximum inner iterations per outer iteration {10} +% 'epsDivZero' - Safeguard against divide by zero {1.0e-10} +% 'printitn' - Print every n outer iterations; 0 for no printing {1} +% 'printinneritn' - Print every n inner iterations {0} +% 'epsActive' - Bertsekas tolerance for active set {1.0e-8} +% 'mu0' - Initial damping parameter {1.0e-5} +% 'precompinds' - Precompute sparse tensor indices to run faster {true} +% 'inexact' - Compute inexact Newton steps {true} +% +% Return values are: +% M - ktensor model with R components +% out.fnEvals - number of row obj fn evaluations per outer iteration +% out.kktViolations - maximum KKT violation per iteration +% out.nInnerIters - number of inner iterations per outer iteration +% out.nZeros - number of factor elements equal to zero per iteration +% out.obj - final log-likelihood objective +% (minimization objective is actually -1 times this) +% out.ttlTime - time algorithm took to converge or reach max +% out.times - cumulative time through each outer iteration +% +% REFERENCE: Samantha Hansen, Todd Plantenga, Tamara G. Kolda. +% Newton-Based Optimization for Nonnegative Tensor Factorizations, +% arXiv:1304.4964 [math.NA], April 2013, +% URL: http://arxiv.org/abs/1304.4964. Submitted for publication. +% +% See also CP_APR, KTENSOR, TENSOR, SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Set algorithm parameters from input or by using defaults. +params = inputParser; +params.addParamValue('epsActive', 1e-8, @isscalar); +params.addParamValue('epsDivZero',1e-10,@isscalar); +params.addParamValue('maxinneriters',10,@isscalar); +params.addParamValue('maxiters',1000,@(x) isscalar(x) & x > 0); +params.addParamValue('precompinds',true,@(x) isa(x,'logical')); +params.addParamValue('inexact',true,@(x) isa(x,'logical')); +params.addParamValue('mu0',1e-5,@isscalar); +params.addParamValue('printinneritn',0,@isscalar); +params.addParamValue('printitn',1,@isscalar); +params.addParamValue('stoptime',1e6,@isscalar); +params.addParamValue('stoptol',1e-4,@isscalar); +params.parse(varargin{:}); + +%% Copy from params object. +epsActSet = params.Results.epsActive; +epsDivZero = params.Results.epsDivZero; +maxInnerIters = params.Results.maxinneriters; +maxOuterIters = params.Results.maxiters; +mu0 = params.Results.mu0; +precomputeSparseIndices = params.Results.precompinds; +inexactNewton = params.Results.inexact; +printInnerItn = params.Results.printinneritn; +printOuterItn = params.Results.printitn; +stoptime = params.Results.stoptime; +stoptol = params.Results.stoptol; + + +% Extract the number of modes in tensor X. +N = ndims(X); + +% If the initial guess has any rows of all zero elements, then modify +% so the row subproblem is not taking log(0). 
Values will be restored to +% zero later if the unfolded X for the row has no nonzeros. +for n = 1:N + rowsum = sum(Minit{n},2); + tmpIx = find(rowsum == 0); + if (isempty(tmpIx) == false) + Minit{n}(tmpIx,1) = 1.0e-8; + end +end + +% Start with the initial guess, normalized using the vector L1 norm. +M = normalize(Minit,[],1); + +% Sparse tensor flag affects how Pi and Phi are computed. +if isa(X,'sptensor') + isSparse = true; +else + isSparse = false; +end + +% Initialize output arrays. +fnVals = zeros(maxOuterIters,1); +fnEvals = zeros(maxOuterIters,1); +kktViolations = -ones(maxOuterIters,1); +nInnerIters = zeros(maxOuterIters,1); +nzeros = zeros(maxOuterIters,1); +times = zeros(maxOuterIters,1); + +if (printOuterItn > 0) + fprintf('\nCP_PDNR (alternating Poisson regression using damped Newton)\n'); +end +dispLineWarn = (printInnerItn > 0); + +% Start the wall clock timer. +tic; + + +if (isSparse && precomputeSparseIndices) + % Precompute sparse index sets for all the row subproblems. + % Takes more memory but can cut execution time significantly in some cases. + if (printOuterItn > 0) + fprintf(' Precomputing sparse index sets...'); + end + sparseIx = cell(N); + for n = 1:N + num_rows = size(M{n},1); + sparseIx{n} = cell(num_rows,1); + for jj = 1:num_rows + sparseIx{n}{jj} = find(X.subs(:,n) == jj); + end + end + if (printOuterItn > 0) + fprintf('done\n'); + end +end + +e_vec = ones(1,R); + +rowsubprobStopTol = stoptol; + +%% Main Loop: Iterate until convergence or a max threshold is reached. +for iter = 1:maxOuterIters + + isConverged = true; + kktModeViolations = zeros(N,1); + countInnerIters = zeros(1,N); + + % Alternate thru each factor matrix, A_1, A_2, ... , A_N. + for n = 1:N + + % Shift the weight from lambda to mode n. + M = redistribute(M,n); + + % Calculate Khatri-Rhao product of all matrices but the n-th. + if (isSparse == false) + % Data is not a sparse tensor. + Pi = tt_calcpi_prowsubprob(X, isSparse, M, R, n, N, []); + X_mat = double(tenmat(X,n)); + end + + num_rows = size(M{n},1); + isRowNOTconverged = zeros(1,num_rows); + + % Loop over the row subproblems in mode n. + for jj = 1:num_rows + % Initialize the damped Hessian parameter for the row subproblem. + mu = mu0; + + % Get data values for row jj of matricized mode n. + if (isSparse) + % Data is a sparse tensor. + if (precomputeSparseIndices == false) + sparse_indices = find(X.subs(:,n) == jj); + else + sparse_indices = sparseIx{n}{jj}; + end + if (isempty(sparse_indices)) + % The row jj of matricized tensor X in mode n is empty. + M{n}(jj,:) = 0; + continue + end + x_row = X.vals(sparse_indices); + + % Calculate just the columns of Pi needed for this row. + Pi = tt_calcpi_prowsubprob(X, isSparse, M, ... + R, n, N, sparse_indices); + else + x_row = X_mat(jj,:); + end + + % Get current values of the row subproblem variables. + m_row = M{n}(jj,:); + + % Iteratively solve the row subproblem with projected Newton steps. + innerIterMaximum = maxInnerIters; + if (inexactNewton && (iter == 1)) + innerIterMaximum = 2; + end + for i = 1:innerIterMaximum + % Calculate the gradient. + [phi_row, ups_row] ... + = calc_partials(isSparse, Pi, epsDivZero, x_row, m_row); + gradM = (e_vec - phi_row)'; + + % Compute the row subproblem kkt_violation. + % Experiments in the original paper used this: + %kkt_violation = norm(abs(min(m_row,gradM')),2); + % Now we use | KKT |_inf: + kkt_violation = max(abs(min(m_row,gradM'))); + + % Report largest row subproblem initial violation. 
+ if ((i == 1) && (kkt_violation > kktModeViolations(n))) + kktModeViolations(n) = kkt_violation; + end + + if (mod(i, printInnerItn) == 0) + fprintf(' Mode = %1d, Row = %d, InnerIt = %d', ... + n, jj, i); + if (i == 1) + fprintf(', RowKKT = %.2e\n', kkt_violation); + else + fprintf(', RowKKT = %.2e, RowObj = %.4e\n', ... + kkt_violation, -f_new); + end + end + + % Check for row subproblem convergence. + if (kkt_violation < rowsubprobStopTol) + break; + else + % Not converged, so m_row will be modified. + isRowNOTconverged(jj) = 1; + end + + % Calculate the search direction. + [search_dir, predicted_red] ... + = getSearchDirPdnr(Pi, ups_row, R, gradM, m_row, mu, epsActSet); + + % Perform a projected linesearch and update variables. + % Start from a unit step length, decrease by 1/2, stop with + % sufficient decrease of 1.0e-4 or at most 10 steps. + [m_rowNEW, f_old, f_unit, f_new, num_evals] ... + = tt_linesearch_prowsubprob(search_dir', gradM', m_row, ... + 1, 1/2, 10, 1.0e-4, ... + isSparse, x_row, Pi, ... + phi_row, dispLineWarn); + fnEvals(iter) = fnEvals(iter) + num_evals; + m_row = m_rowNEW; + + % Update damping parameter mu based on the unit step length, + % which is returned in f_unit. + actual_red = f_old - f_unit; + rho = actual_red / (-predicted_red); + if (predicted_red == 0) + mu = 10 * mu; + elseif (rho < 1/4) + mu = (7/2) * mu; + elseif (rho > 3/4) + mu = (2/7) * mu; + end + end + + M{n}(jj,:) = m_row; + countInnerIters(n) = countInnerIters(n) + i; + + end + + % Test if all row subproblems have converged, which means that + % no variables in this mode were changed. + if (sum(isRowNOTconverged) ~= 0) + isConverged = false; + end + + % Shift weight from mode n back to lambda. + M = normalize(M,[],1,n); + + % Total number of inner iterations for a given outer iteration, + % totalled across all modes and all row subproblems in each mode. + nInnerIters(iter) = nInnerIters(iter) + countInnerIters(n); + end + + % Save output items for the outer iteration. + num_zero = 0; + for n = 1:N + num_zero = num_zero + nnz(find(M{n} == 0.0)); + end + nzeros(iter) = num_zero; + kktViolations(iter) = max(kktModeViolations); + if (inexactNewton) + rowsubprobStopTol = max(stoptol, kktViolations(iter) / 100.0); + end + + % Print outer iteration status. + if (mod(iter,printOuterItn) == 0) + fnVals(iter) = -tt_loglikelihood(X,M); + fprintf('%4d. Ttl Inner Its: %d, KKT viol = %.2e, obj = %.8e, nz: %d\n', ... + iter, nInnerIters(iter), kktViolations(iter), fnVals(iter), ... + num_zero); + end + + times(iter) = toc; + + % Check for convergence + if (isConverged && (inexactNewton == false)) + break; + end + if (isConverged && (inexactNewton == true) && (rowsubprobStopTol <= stoptol)) + break; + end + if (times(iter) > stoptime) + fprintf('Exiting because time limit exceeded\n'); + break; + end + +end + +t_stop = toc; + +%% Clean up final result and set output items. +M = normalize(M,'sort',1); +loglike = tt_loglikelihood(X,M); + +if (printOuterItn > 0) + % For legacy reasons, compute "fit", the fraction explained by the model. + % Fit is in the range [0,1], with 1 being the best fit. 
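+    % (Editor's note: the residual norm below relies on the identity
+    % ||X-M||^2 = ||X||^2 + ||M||^2 - 2*<X,M>, so the dense difference
+    % tensor never has to be formed explicitly.)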
+ normX = norm(X); + normresidual = sqrt( normX^2 + norm(M)^2 - 2 * innerprod(X,M) ); + fit = 1 - (normresidual / normX); + + fprintf('===========================================\n'); + fprintf(' Final log-likelihood = %e \n', loglike); + fprintf(' Final least squares fit = %e \n', fit); + fprintf(' Final KKT violation = %7.7e\n', kktViolations(iter)); + fprintf(' Total inner iterations = %d\n', sum(nInnerIters)); + fprintf(' Total execution time = %.2f secs\n', t_stop); +end + +out = struct; +out.params = params.Results; +out.obj = loglike; +out.kktViolations = kktViolations(1:iter); +out.fnEvals = fnEvals(1:iter); +out.fnVals = fnVals(1:iter); +out.nInnerIters = nInnerIters(1:iter); +out.nZeros = nzeros(1:iter); +out.times = times(1:iter); +out.ttlTime = t_stop; + +end + +%---------------------------------------------------------------------- + +function [phi_row, ups_row] ... + = calc_partials(isSparse, Pi, eps_div_zero, x_row, m_row) +% Compute derivative quantities for a PDNR row subproblem. +% +% isSparse - true if x_row is sparse, false if dense +% Pi - matrix +% eps_div_zero - safeguard value to prevent division by zero +% x_row - row vector of data values for the row subproblem +% m_row - vector of variables for the row subproblem +% +% Returns two vectors for a row subproblem: +% phi_row - gradient of row subproblem, except for a constant +% ups_row - intermediate quantity (upsilon) used for second derivatives + + if (isSparse) + v = m_row * Pi'; + w = x_row' ./ max(v, eps_div_zero); + phi_row = w * Pi; + u = v .^ 2; + ups_row = x_row' ./ max(u, eps_div_zero); + + else + v = m_row * Pi'; + w = x_row ./ max(v, eps_div_zero); + phi_row = w * Pi; + u = v .^ 2; + ups_row = x_row ./ max(u, eps_div_zero); + end + +end + + +%---------------------------------------------------------------------- + +function H = getHessian(upsilon, Pi, free_indices) +% Return the Hessian for one PDNR row subproblem of M{n}, for just the rows and +% columns corresponding to the free variables. + + num_free = length(free_indices); + H = zeros(num_free,num_free); + for i = 1:num_free + for j = i:num_free + c = free_indices(i); + d = free_indices(j); + val = sum(upsilon' .* Pi(:,c) .* Pi(:,d)); + H(i,j) = val; + H(j,i) = val; + end + end + +end + +%---------------------------------------------------------------------- + +function [search_dir, pred_red] ... + = getSearchDirPdnr (Pi, ups_row, R, gradM, m_row, mu, epsActSet) +% Compute the search direction for PDNR using a two-metric projection +% with damped Hessian. +% +% Pi - matrix +% ups_row - intermediate quantity (upsilon) used for second derivatives +% R - number of variables for the row subproblem +% gradM - gradient vector for the row subproblem +% m_row - vector of variables for the row subproblem +% mu - damping parameter +% epsActSet - Bertsekas tolerance for active set determination +% +% Returns: +% search_dir - search direction vector +% pred_red - predicted reduction in quadratic model + + search_dir = zeros(R,1); + projGradStep = (m_row - gradM') .* (m_row - gradM' > 0); + wk = norm(m_row - projGradStep); + + % Determine active and free variables. + num_free = 0; + free_indices_tmp = zeros(R,1); + for r = 1:R + if ((m_row(r) <= min(epsActSet,wk)) && (gradM(r) > 0) ) + % Variable is not free (belongs to set A or G). + if (m_row(r) ~= 0) + % Variable moves according to the gradient (set G). + search_dir(r) = -gradM(r); + end + else + % Variable is free (set F). 
+ num_free = num_free + 1; + free_indices_tmp(num_free) = r; + end + end + free_indices = free_indices_tmp(1:num_free); + + % Compute the Hessian for free variables. + Hessian_free = getHessian(ups_row, Pi, free_indices); + grad_free = -gradM(free_indices); + + % Compute the damped Newton search direction over free variables. + search_dir(free_indices) ... + = linsolve(Hessian_free + (mu * eye(num_free)), grad_free); + + % If the Hessian is too ill-conditioned, use gradient descent. + [~, msgid] = lastwarn('MATLAB:noWarning'); + if (strcmp(msgid,'MATLAB:nearlySingularMatrix')) + fprintf('WARNING: damped Hessian is nearly singular\n'); + search_dir = -gradM; + end + + % Calculate expected reduction in the quadratic model of the objective. + q = search_dir(free_indices)' ... + * (Hessian_free + (mu * eye(num_free))) ... + * search_dir(free_indices); + pred_red = (search_dir(free_indices)' * gradM(free_indices)) + (0.5 * q); + if (pred_red > 0) + fprintf('ERROR: expected decrease is positive\n'); + search_dir = -gradM; + end + +end + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Main algorithm MU +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [M, output] = tt_cp_apr_mu(X, R, Minit, varargin) +%TT_CP_APR_MU Compute nonnegative CP with alternating Poisson regression. +% +% tt_cp_apr_mu(X, R, ...) computes an estimate of the best rank-R +% CP model of a tensor X using an alternating Poisson regression. +% The algorithm solves each alternating subproblem using multiplicative +% updates with adjustments for values near zero. +% The function is typically called by cp_apr. +% +% The function call can specify optional parameters and values. +% Valid parameters and their default values are: +% 'stoptol' - Tolerance on the overall KKT violation {1.0e-4} +% 'stoptime' - Maximum number of seconds to run {1e6} +% 'maxiters' - Maximum number of iterations {1000} +% 'maxinneriters' - Maximum inner iterations per outer iteration {10} +% 'epsDivZero' - Safeguard against divide by zero {1.0e-10} +% 'printitn' - Print every n outer iterations; 0 for no printing {1} +% 'printinneritn' - Print every n inner iterations {0} +% 'kappatol' - Tolerance on complementary slackness {1.0e-10} +% 'kappa' - Offset to fix complementary slackness {100} +% +% Return values are: +% M - ktensor model with R components +% out.kktViolations - maximum KKT violation per iteration +% out.nInnerIters - number of inner iterations per outer iteration +% out.nViolations - number of factor matrices needing complementary +% slackness adjustment per iteration +% out.obj - final log-likelihood objective +% out.ttlTime - time algorithm took to converge or reach max +% out.times - cumulative time through each outer iteration +% +% REFERENCE: E. C. Chi and T. G. Kolda. On Tensors, Sparsity, and +% Nonnegative Factorizations, arXiv:1112.2414 [math.NA], December 2011, +% URL: http://arxiv.org/abs/1112.2414. Submitted for publication. +% +% See also CP_APR, KTENSOR, TENSOR, SPTENSOR. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% Set algorithm parameters from input or by using defaults. +params = inputParser; +params.addParamValue('epsDivZero',1e-10,@isscalar); +params.addParamValue('kappa',1e-2,@isscalar); +params.addParamValue('kappatol',1e-10,@isscalar); +params.addParamValue('maxinneriters',10,@isscalar); +params.addParamValue('maxiters',1000,@(x) isscalar(x) & x > 0); +params.addParamValue('printinneritn',0,@isscalar); +params.addParamValue('printitn',1,@isscalar); +params.addParamValue('stoptime',1e6,@isscalar); +params.addParamValue('stoptol',1e-4,@isscalar); +params.parse(varargin{:}); + + +%% Extract dimensions of X and number of dimensions of X. +N = ndims(X); + +%% Copy from params object. +epsilon = params.Results.epsDivZero; +tol = params.Results.stoptol; +stoptime = params.Results.stoptime; +maxOuterIters = params.Results.maxiters; +kappa = params.Results.kappa; +kappaTol = params.Results.kappatol; +maxInnerIters = params.Results.maxinneriters; +printOuterItn = params.Results.printitn; +printInnerItn = params.Results.printinneritn; +kktViolations = -ones(maxOuterIters,1); +nInnerIters = zeros(maxOuterIters,1); +times = zeros(maxOuterIters,1); + +%% Set up and error checking on initial guess for U. +if isa(Minit,'ktensor') + if ndims(Minit) ~= N + error('Initial guess does not have the right number of dimensions'); + end + + if ncomponents(Minit) ~= R + error('Initial guess does not have the right number of components'); + end + + for n = 1:N + if size(Minit,n) ~= size(X,n) + error('Dimension %d of the initial guess is the wrong size',n); + end + end +elseif strcmp(Minit,'random') + F = cell(N,1); + for n = 1:N + F{n} = rand(size(X,n),R); + end + Minit = ktensor(F); +else + error('The selected initialization method is not supported'); +end + + +%% Set up for iterations - initializing M and Phi. +M = normalize(Minit,[],1); +Phi = cell(N,1); +kktModeViolations = zeros(N,1); + +if printOuterItn > 0 + fprintf('\nCP_APR:\n'); +end + +nViolations = zeros(maxOuterIters,1); + +% Start the wall clock timer. +tic; + +% PDN-R and PQN-R benefit from precomputing sparse indices of X for each +% mode subproblem. However, MU execution time barely changes, so the +% precompute option is not offered. + + +%% Main Loop: Iterate until convergence. +for iter = 1:maxOuterIters + + isConverged = true; + for n = 1:N + + % Make adjustments to entries of M{n} that are violating + % complementary slackness conditions. 
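+        % (Editor's note: an entry violates complementary slackness when it
+        % sits at numerical zero while its multiplier Phi{n} exceeds 1, i.e.
+        % the multiplicative update would like to increase it but cannot move
+        % it off zero; such entries are nudged upward by kappa.)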
+ if (iter > 1) + V = (Phi{n} > 1) & (M{n} < kappaTol); + if any(V(:)) + nViolations(iter) = nViolations(iter) + 1; + M{n}(V>0) = M{n}(V>0) + kappa; + end + end + + % Shift the weight from lambda to mode n + M = redistribute(M,n); + + % Calculate product of all matrices but the n-th + % (Sparse case only calculates entries corresponding to nonzeros in X) + Pi = calculatePi(X, M, R, n, N); + + % Do the multiplicative updates + for i = 1:maxInnerIters + + % Count the inner iterations + nInnerIters(iter) = nInnerIters(iter) + 1; + + % Calculate matrix for multiplicative update + Phi{n} = calculatePhi(X, M, R, n, Pi, epsilon); + + % Check for convergence + kktModeViolations(n) = max(abs(vectorizeForMu(min(M.U{n},1-Phi{n})))); + if (kktModeViolations(n) < tol) + break; + else + isConverged = false; + end + + % Do the multiplicative update + M{n} = M{n} .* Phi{n}; + + % Print status + if mod(i, printInnerItn)==0 + fprintf(' Mode = %1d, Inner Iter = %2d, KKT violation = %.6e\n', n, i, kktModeViolations(n)); + end + end + + % Shift weight from mode n back to lambda + M = normalize(M,[],1,n); + + end + + kktViolations(iter) = max(kktModeViolations); + + if (mod(iter,printOuterItn)==0) + fprintf(' Iter %4d: Inner Its = %2d KKT violation = %.6e, nViolations = %2d\n', ... + iter, nInnerIters(iter), kktViolations(iter), nViolations(iter)); + end + times(iter) = toc; + + % Check for convergence + if (isConverged) + if printOuterItn>0 + fprintf('Exiting because all subproblems reached KKT tol.\n'); + end + break; + end + if (times(iter) > stoptime) + if printOuterItn>0 + fprintf('Exiting because time limit exceeded.\n'); + end + break; + end +end +t_stop = toc; + +%% Clean up final result +M = normalize(M,'sort',1); + +obj = tt_loglikelihood(X,M); +if printOuterItn>0 + normX = norm(X); + normresidual = sqrt( normX^2 + norm(M)^2 - 2 * innerprod(X,M) ); + fit = 1 - (normresidual / normX); %fraction explained by model + fprintf('===========================================\n'); + fprintf(' Final log-likelihood = %e \n', obj); + fprintf(' Final least squares fit = %e \n', fit); + fprintf(' Final KKT violation = %7.7e\n', kktViolations(iter)); + fprintf(' Total inner iterations = %d\n', sum(nInnerIters)); + fprintf(' Total execution time = %.2f secs\n', t_stop); +end + +output = struct; +output.params = params.Results; +output.kktViolations = kktViolations(1:iter); +output.nInnerIters = nInnerIters(1:iter); +output.nViolations = nViolations(1:iter); +output.nTotalIters = sum(nInnerIters); +output.times = times(1:iter); +output.ttlTime = t_stop; +output.obj = obj; + + +end + +function Pi = calculatePi(X, M, R, n, N) + +if (isa(X,'sptensor')) + Pi = ones(nnz(X), R); + for nn = [1:n-1,n+1:N] + Pi = M{nn}(X.subs(:,nn),:) .* Pi; + end +else + U = M.U; + Pi = khatrirao(U{[1:n-1,n+1:N]},'r'); +end + +end + +function Phi = calculatePhi(X, M, R, n, Pi, epsilon) + +if (isa(X,'sptensor')) + Phi = -ones(size(X,n),R); + xsubs = X.subs(:,n); + v = sum(M.U{n}(xsubs,:).*Pi,2); + wvals = X.vals ./ max(v, epsilon); + for r = 1:R + Yr = accumarray(xsubs, wvals .* Pi(:,r), [size(X,n) 1]); + Phi(:,r) = Yr; + end +else + Xn = double(tenmat(X,n)); + V = M.U{n}*Pi'; + W = Xn ./ max(V, epsilon); + Y = W * Pi; + Phi = Y; +end + +end + +%---------------------------------------------------------------------- + +function y = vectorizeForMu(x) +y = x(:); +end + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Shared Internal Functions +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + 
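+% Editor's sketch (illustrative, not part of the upstream file): the PDNR and
+% MU drivers above are normally reached through cp_apr, but they can also be
+% called directly. Assuming a hypothetical 50x40x30 sparse tensor X built with
+% the toolbox's own sptenrand/ktensor constructors:
+%
+%   X  = sptenrand([50 40 30], 500);                 % synthetic sparse data
+%   U0 = {rand(50,5); rand(40,5); rand(30,5)};       % random rank-5 start
+%   [M1,out1] = tt_cp_apr_pdnr(X, 5, ktensor(U0), 'maxiters', 200, 'printitn', 10);
+%   [M2,out2] = tt_cp_apr_mu(X, 5, 'random', 'printitn', 10);
+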
+function Pi = tt_calcpi_prowsubprob(X, isSparse, M, R, n, N, sparse_indices) +% TT_CALCPI_PROWSUBPROB Compute Pi for a row subproblem. +% +% X - data tensor +% isSparse - true if X is sparse, false if dense +% M - current factor matrices +% R - number of columns in each factor matrix +% n - mode +% N - number of modes (equals the number of factor matrices) +% sparse_indices - indices of row subproblem nonzero elements +% +% Returns Pi matrix. +% +% Intended for use by CP_PDN and CP_PQN. +% Based on calculatePi() in CP_APR, which computes for an entire mode +% instead of a single row subproblem. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + if (isSparse) + % X is a sparse tensor. Compute Pi for the row subproblem specified + % by sparse_indices. + num_row_nnz = length(sparse_indices); + + Pi = ones(num_row_nnz, R); + for nn = [1:n-1,n+1:N] + Pi = M{nn}(X.subs(sparse_indices,nn),:) .* Pi; + end + else + % X is a dense tensor. Compute Pi for all rows in the mode. + U = M.U; + Pi = khatrirao(U{[1:n-1,n+1:N]},'r'); + end + +end + +%---------------------------------------------------------------------- + +function [m_new, f_old, f_1, f_new, num_evals] ... + = tt_linesearch_prowsubprob(d, grad, m_old, step_len, step_red, ... + max_steps, suff_decr, isSparse, x_row, Pi, ... + phi_row, disp_warn) +% TT_LINESEARCH_PROWSUBPROB Perform a line search on a row subproblem. +% +% d - search direction +% grad - gradient vector at m_old +% m_old - current variable values +% step_len - initial step length, which is the maximum possible step length +% step_red - step reduction factor (suggest 1/2) +% max_steps - maximum number of steps to try (suggest 10) +% suff_decr - sufficient decrease for convergence (suggest 1.0e-4) +% isSparse - sparsity flag for computing the objective +% x_row - row subproblem data, for computing the objective +% Pi - Pi matrix, for computing the objective +% phi_row - 1-grad, more accurate if failing over to multiplicative update +% disp_warn - true means warning messages are displayed +% +% Returns +% m_new - new (improved) variable values +% num_evals - number of times objective was evaluated +% f_old - objective value at m_old +% f_1 - objective value at m_old + step_len * d +% f_new - objective value at m_new + + minDescentTol = 1.0e-7; + smallStepTol = 1.0e-7; + + stepSize = step_len; + + % Evaluate the current objective value. + f_old = -1 * tt_loglikelihood_row(isSparse, x_row, m_old, Pi); + num_evals = 1; + count = 1; + + while (count <= max_steps) + % Compute a new step and project it onto the positive orthant. + m_new = m_old + (stepSize .* d); + m_new = m_new .* (m_new > 0); + + % Check that it is a descent direction. + gDotd = sum(grad .* (m_new - m_old)); + if (gDotd > 0) || (sum(m_new) < minDescentTol) + % Don't evaluate the objective if not a descent direction + % or if all of the elements of m_new are close to zero. + f_new = Inf; + if (count == 1) + f_1 = f_new; + end + + stepSize = stepSize * step_red; + count = count + 1; + else + % Evaluate objective function at new iterate. 
+ f_new = -1 * tt_loglikelihood_row(isSparse, x_row, m_new, Pi); + num_evals = num_evals + 1; + if (count == 1) + f_1 = f_new; + end + + % Check for sufficient decrease. + if (f_new <= f_old + suff_decr * gDotd) + break; + else + stepSize = stepSize * step_red; + count = count + 1; + end + end + end + + % Check if the line search failed. + if (isinf(f_1) == 1) + % Unit step failed; return a value that yields ared = 0. + f_1 = f_old; + end + if ( ((count >= max_steps) && (f_new > f_old)) ... + || (sum(m_new) < smallStepTol) ) + + % Fall back on a multiplicative update step (scaled steepest descent). + % Experiments indicate it works better than a unit step in the direction + % of steepest descent, which would be the following: + % m_new = m_old - (step_len * grad); % steepest descent + % A simple update formula follows, but suffers from round-off error + % when phi_row is tiny: + % m_new = m_old - (m_old .* grad); + % Use this for best accuracy: + m_new = m_old .* phi_row; % multiplicative update + + % Project to the constraints and reevaluate the subproblem objective. + m_new = m_new .* (m_new > 0); + f_new = -1 * tt_loglikelihood_row(isSparse, x_row, m_new, Pi); + num_evals = num_evals + 1; + + % Let the caller know the search direction made no progress. + f_1 = f_old; + + if (disp_warn) + fprintf('WARNING: line search failed, using multiplicative update step\n'); + end + end + +end + +%---------------------------------------------------------------------- + +function f = tt_loglikelihood_row(isSparse, x, m, Pi) +%TT_LOGLIKELIHOOD_ROW Compute log-likelihood of one row subproblem. +% +% The row subproblem for a given mode includes one row of matricized tensor +% data (x) and one row of the model (m) in the same matricized mode. +% Then +% (dense case) +% m: R-length vector +% x: J-length vector +% Pi: R x J matrix +% (sparse case) +% m: R-length vector +% x: p-length vector, where p = nnz in row of matricized data tensor +% Pi: R x p matrix +% F = - (sum_r m_r - sum_j x_j * log (m * Pi_j) +% where Pi_j denotes the j^th column of Pi +% NOTE: Rows of Pi' must sum to one +% +% isSparse - true if x is sparse, false if dense +% x - vector of data values +% m - vector of model values +% Pi - matrix +% +% Returns the log-likelihood probability f. +% +% Intended for use by CP_PDN and CP_PQN. +% Similar to tt_loglikelihood() in CP_APR, which computes log likelihood +% for the entire tensor instead of a single row subproblem. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + + term1 = -sum(m); + + if (isSparse) + term2 = sum(x' .* log(m * Pi')); + else + b_pi = m * Pi'; + term2 = 0; + for i = 1:length(x) + if (x(i) == 0) + % Define zero times log(anything) to be zero. + else + term2 = term2 + x(i) .* log(b_pi(i)); + end + end + end + + f = term1 + term2; + +end + +%---------------------------------------------------------------------- + +function f = tt_loglikelihood(X,M) +%TT_LOGLIKELIHOOD Compute log-likelihood of data X with model M. 
+%
+% F = TT_LOGLIKELIHOOD(X,M) computes the log-likelihood of model M given
+% data X, where M is a ktensor and X is a tensor or sptensor.
+% Specifically, F = sum_i (x_i * log(m_i) - m_i), where i is a multiindex
+% across all tensor dimensions.
+%
+% See also cp_apr, tensor, sptensor, ktensor.
+%
+%MATLAB Tensor Toolbox.
+%Copyright 2015, Sandia Corporation.
+
+% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.
+% http://www.sandia.gov/~tgkolda/TensorToolbox.
+% Copyright (2015) Sandia Corporation. Under the terms of Contract
+% DE-AC04-94AL85000, there is a non-exclusive license for use of this
+% work by or on behalf of the U.S. Government. Export of this data may
+% require a license from the United States Government.
+% The full license terms can be found in the file LICENSE.txt
+
+N = ndims(X);
+
+if ~isa(M, 'ktensor')
+    error('M must be a ktensor');
+end
+
+M = normalize(M,1,1);
+
+if isa(X, 'sptensor')
+    xsubs = X.subs;
+    A = M.U{1}(xsubs(:,1),:);
+    for n = 2:N
+        A = A .* M.U{n}(xsubs(:,n),:);
+    end
+    f = sum(X.vals .* log(sum(A,2))) - sum(sum(M.U{1}));
+else
+%{
+% Old code is probably faster, but returns NaN if X and M are both zero
+% for some element.
+    f = sum(sum(double(tenmat(X,1)) .* log(double(tenmat(M,1))))) - sum(sum(M.U{1}));
+%}
+    % The check for x==0 is also in tt_loglikelihood_row.
+    dX = double(tenmat(X,1));
+    dM = double(tenmat(M,1));
+    f = 0;
+    for i = 1:size(dX,1)
+        for j = 1:size(dX,2)
+            if (dX(i,j) == 0.0)
+                % Define zero times log(anything) to be zero.
+            else
+                f = f + dX(i,j) * log(dM(i,j));
+            end
+        end
+    end
+    f = f - sum(sum(M.U{1}));
+
+end
+
+end
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_arls.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_arls.m
new file mode 100644
index 0000000..e0bac65
--- /dev/null
+++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_arls.m
@@ -0,0 +1,402 @@
+function [P,Uinit,output] = cp_arls(X,R,varargin)
+%CP_ARLS CP decomposition of dense tensor via randomized least squares.
+%
+% M = CP_ARLS(X,R) computes an estimate of the best rank-R
+% CP model of a dense tensor X using a randomized alternating
+% least-squares algorithm. The input X must be a (dense) tensor. The
+% result M is a ktensor.
+%
+% *Important Note:* The fit computed by CP_ARLS is an approximate
+% fit, so the stopping conditions are necessarily more conservative. The
+% approximation is based on sampling entries from the full tensor and
+% estimating the overall fit based on their individual errors.
+%
+% M = CP_ARLS(X,R,'mix',false) skips the 'mixing', which is an expensive
+% preprocessing step. In many cases this step is not necessary, and
+% skipping it reduces initialization time and space. It is suggested to
+% try this out.
+%
+% M = CP_ARLS(X,R,'param',value,...) specifies optional parameters and
+% values.
Valid parameters and their default values are: +% o 'mix' - Include FJLT transformations {true} +% o 'epoch' - Number of iterations between convergence checks {50} +% o 'maxepochs' - Maximum number of epochs {1000} +% o 'newitol' - Quit after this many epochs with no improvement {5} +% o 'tol' - Tolerance for improvement, i.e., fit - maxfit > tol {0} +% o 'fitthresh' - Terminate when fit > fitthresh {1.000} +% o 'printitn' - Print fit every n epochs; 0 for no printing {10} +% o 'init' - Initial guess ['random'|'nvecs'|cell array] {random} +% o 'nsamplsq' - Number of least-squares row samples {10Rlog2(R)} +% o 'nsampfit' - Number of entry samples for approximate fit {2^14} +% o 'dimorder' - Order to loop through dimensions {1:ndims(A)} +% +% [M,U0] = CP_ARLS(...) also returns the initial guess. +% +% [M,U0,out] = CP_ARLS(...) also returns additional output that +% contains the input parameters and other information. +% +% Examples: +% info = create_problem('Size',[100 100 100],'Num_Factors',2); +% M = cp_arls(info.Data,2); +% +% REFERENCE: C. Battaglino, G. Ballard, T. G. Kolda. A Practical +% Randomized CP Tensor Decomposition, to appear in SIAM J. Matrix +% Analysis and Applications, 2017. http://arxiv.org/abs/1701.06600 +% +% Documentation page for CP-ARLS +% +% See also CP_ALS, KTENSOR, TENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +%% Extract some sizes, etc. 
+N = ndims(X); +sz = size(X); +num_elements = prod(sz); + +%% Parse parameters +params = inputParser; +params.addParameter('init', 'random', @(x) (iscell(x) || ismember(x,{'random'}))); +params.addParameter('dimorder', 1:N, @(x) isequal(sort(x),1:N)); +params.addParameter('printitn', 10, @isscalar); +params.addParameter('mix', true, @islogical); +params.addParameter('nsamplsq', ceil(10*R*log2(R))); +params.addParameter('maxepochs', 1000); +params.addParameter('nsampfit', 2^14); +params.addParameter('tol', 0, @isscalar); +params.addParameter('fitthresh', 1, @(x) isscalar(x) & x > 0 & x <= 1); +params.addParameter('epoch', 50) +params.addParameter('newitol', 5); +params.addParameter('truefit', false, @islogical); +params.parse(varargin{:}); + +% Copy from params object +fitchangetol = params.Results.tol; +maxepochs = params.Results.maxepochs; +dimorder = params.Results.dimorder; +init = params.Results.init; +printitn = params.Results.printitn; +fitthresh = params.Results.fitthresh; % cprand will terminate if this fit is reached (default 1) +do_fft = params.Results.mix; +newitol = params.Results.newitol; +nsamplsq = params.Results.nsamplsq; +nsampfit = params.Results.nsampfit; +epochsize = params.Results.epoch; +truefit = params.Results.truefit; + +%% Set up initial guess for U (factor matrices) +if iscell(init) + Uinit = init; + if numel(Uinit) ~= N + error('OPTS.init does not have %d cells',N); + end + for n = dimorder(2:end) + if ~isequal(size(Uinit{n}),[size(X,n) R]) + error('OPTS.init{%d} is the wrong size',n); + end + end +else + % Observe that we don't need to calculate an initial guess for the + % first index in dimorder because that will be solved for in the first + % inner iteration. + if strcmp(init,'random') + Uinit = cell(N,1); + for n = dimorder(2:end) + Uinit{n} = rand(sz(n),R); + end + else + error('The selected initialization method is not supported'); + end +end + +%% Set up for iterations - initializing U and the fit. +U = Uinit; +U_mixed = Uinit; +diag_flips = []; + +if printitn>0 + if(do_fft) + fprintf('\nCP-ARLS (with mixing): \n'); + else + fprintf('\nCP_ARLS (without mixing): \n'); + end +end + +%% Sample input tensor for stopping criterion +nsampfitles = min(num_elements,nsampfit); +Xfit_subs = sample_all_modes(nsampfitles, sz); +Xfit_vals = X(Xfit_subs); +Xvalsqr_mean = mean((Xfit_vals).^2); +normX = sqrt(Xvalsqr_mean * num_elements); % Approximate! + +%% Mixing tensor (if needed) +if (do_fft) % --- with mixing --- + % Compute random diagonal D_n for each factor + diag_flips = cell(N,1); + for n = 1:N + diag_flips{n} = (rand(sz(n),1)<0.5)*2-1; + end + + % Extract dense data array for use with fft and bsx commands + X_mixed = X.data; + % Mixing is equivalent to a series of TTMs with D_n, F_n + % However, we can use bsxfun and fft to avoid matricizing. + for n = N:-1:1 + % Reshape the diagonal flips into a 1*...*sz(n)*...*1 tensor + % This lets us use bsxfun along the nth dimension. 
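+        % (Editor's note: the sign flips D_n together with the mode-n FFT
+        % realize the FJLT-style mixing mentioned in the help text; the aim is
+        % to spread the tensor's mass so that the uniformly sampled fibers
+        % used in the least-squares solves are representative.)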
+ bsxdims = ones(1,N); + bsxdims(n) = sz(n); + % Note that the next line arranges the flips array to work with the + % tensor in its default memory layout + flips = reshape(diag_flips{n},bsxdims); + % fft(...,[],n) operates fiber-wise on dimension n + X_mixed = fft(bsxfun(@times, X_mixed, flips),[],n); + end + X_mixed = tensor(X_mixed); +else % --- without mixing --- + X_mixed = X; % no mixing +end + +%% Mixing factors (if needed) +if (do_fft) + % Mix factor matrices: U{i} = F{i}*D{i}*U{i} + for i = 2:N + U_mixed{i} = fft(bsxfun(@times,U{i},diag_flips{i})); + end +end + +%% Main Loop: Iterate until convergence +maxfit = 0; +newi = 0; % number of epochs without improvement + +% ALS Loop +for epoch = 1:maxepochs + + % Do a bunch of iterations within each epoch + for eiters = 1:epochsize + + % Iterate over all N modes of the tensor + for n = dimorder(1:end) + + mixinfo.dofft = do_fft; + mixinfo.signflips = diag_flips; + Unew = dense_sample_mttkrp(X_mixed,U_mixed,n,nsamplsq,mixinfo); + + if issparse(Unew) + Unew = full(Unew); % for the case R=1 + end + + % Normalize each vector to prevent singularities in coefmatrix + if epoch == 1 + lambda = sqrt(sum(abs(Unew).^2,1))'; %2-norm + else + lambda = max( max(abs(Unew),[],1), 1 )'; %max-norm + end + + Unew = bsxfun(@rdivide, Unew, lambda'); + U_mixed{n} = Unew; + if (do_fft) + U{n} = real(bsxfun(@times, ifft(Unew), diag_flips{n})); + else + U{n} = Unew; + end + end + end + + % After each epoch, check convergence conditions + P = ktensor(lambda, U); + Pfit_vals = sample_ktensor(P, Xfit_subs); + elem_mean = mean((Xfit_vals - Pfit_vals).^2); + normDiff = sqrt(elem_mean * num_elements); % Approximate! + fit = 1 - normDiff / normX; + + if fit > maxfit + fitchangetol + newi = 0; + maxfit = fit; + Psave = P; % Keep the best one seen so far! + else + newi = newi + 1; + end + + if (fit > fitthresh) || (newi >= newitol) + flag = 0; + else + flag = 1; + end + + if (mod(epoch,printitn)==0) || ((printitn>0) && (flag==0)) + fprintf(' Iter %2dx%d: f~ = %e newi = %d\n', epoch, epochsize, fit, newi); + end + + % Check for convergence + if (flag == 0) + break; + end +end +%% Clean up final result +% Arrange the final tensor so that the columns are normalized. 
+P = Psave; +P = arrange(P); +P = fixsigns(P); % Fix the signs + +if truefit + normresidual = sqrt( norm(X)^2 + norm(P)^2 - 2 * innerprod(X,P) ); + fit = 1 - (normresidual / normX);%fraction explained by model + Pfit_vals = sample_ktensor(P, Xfit_subs); + Xfit_mean = mean((Xfit_vals - Pfit_vals).^2); + testfit = 1 - sqrt(Xfit_mean*num_elements)/normX; + if printitn > 0 + fprintf(' Final fit = %e Final estimated fit = %e \n', fit, testfit); + end +else + fit = NaN; +end + +output = struct; +output.params = params.Results; +output.iters = epoch; +output.fit = fit; +end + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%% Sub-functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +% MTTKRP That performs sampling after transforming X and the KR-product with an FFT +% And then solves using normal equations +function [V, Xsamp, Zsamp] = dense_sample_mttkrp(X,U,n,nsamplsq,mixinfo) + +N = ndims(X); + +if (N < 2) + error('MTTKRP is invalid for tensors with fewer than 2 dimensions'); +end + +if (length(U) ~= N) + error('Cell array is the wrong length'); +end + +if n == 1 + R = size(U{2},2); +else + R = size(U{1},2); +end + +for i = 1:N + if i == n, continue; end + if (size(U{i},1) ~= size(X,i)) || (size(U{i},2) ~= R) + error('Entry %d of cell array is wrong size', i); + end +end + +dims = size(X); + +% Compute uniform samples for tensor and factor matrices +[tensor_idx, factor_idx] = sample_mode_n(nsamplsq, dims, n); + +% Reshape the sampled tensor +Xsamp = reshape(X(tensor_idx), dims(n), []); + +% Perform a sampled KRP +Zsamp = skr(U{[1:n-1,n+1:N]}, factor_idx); + +% alternative +V = Xsamp / Zsamp.'; +if (mixinfo.dofft) + V = real(bsxfun(@times,ifft(V),mixinfo.signflips{n})); + V = fft(bsxfun(@times,V,mixinfo.signflips{n})); +end + +return; +end + + +% Sample Khatri-Rao Product of a cell array of factors +% Without forming the full KR Product +function P = skr(varargin) +if iscell(varargin{1}) % Input is a single cell array + A = varargin{1}; +else % Input is a sequence of matrices + A = varargin(1:end-1); +end + +numfactors = length(A); +matorder = numfactors:-1:1; +idxs = varargin{end}; + +%% Error check on matrices and compute number of rows in result +ndimsA = cellfun(@ndims, A); +if(~all(ndimsA == 2)) + error('Each argument must be a matrix'); +end + +ncols = cellfun(@(x) size(x, 2), A); +if(~all(ncols == ncols(1))) + error('All matrices must have the same number of columns.'); +end + +P = A{matorder(1)}(idxs(:,matorder(1)),:); +for i = matorder(2:end) + %P = P .*A{i}(idxs(:,i),:); + P = bsxfun(@times, P, A{i}(idxs(:,i),:)); +end +end + + +% Random sample fibers in mode n from tensor X +% Generate the corresponding indices for the factor matrices as a tuple +function [tensor_idx, factor_idx] = sample_mode_n(nsamplsq, dims, n) +D = length(dims); +tensor_idx = zeros(nsamplsq, D); % Tuples that index fibers in original tensor + +tensor_idx(:,n) = ones(nsamplsq, 1); +for i = [1:n-1,n+1:D] + % Uniformly sample w.r. 
in each dimension besides n + tensor_idx(:,i) = randi(dims(i), nsamplsq, 1); +end + +% Save indices to sample from factor matrices +factor_idx = tensor_idx(:,[1:n-1,n+1:D]); + +% Expand tensor_idx so that every fiber element is included +%tensor_idx = repelem(tensor_idx,dims(n),1); % not portable +tensor_idx = kron(tensor_idx,ones(dims(n),1)); % portable +tensor_idx(:,n) = repmat((1:dims(n))',nsamplsq,1); +tensor_idx = tt_sub2ind(dims, tensor_idx); +end + + +% Random sample fibers in mode n from tensor X +% Generate the corresponding indices for the factor matrices as a tuple +function [subs, idxs] = sample_all_modes(nsamplsq, dims) +D = length(dims); +subs = zeros(nsamplsq, D); % Tuples that index fibers in original tensor + +for i = 1:D + % Uniformly sample w.r. in each dimension + subs(:,i) = randi(dims(i), nsamplsq, 1); +end + +% subs can be used to sample from factor matrices as well as the tensor +subs = unique(subs,'rows'); %todo: do this more efficiently +idxs = tt_sub2ind(dims, subs); +end + + +% Random sample fibers in mode n from tensor X +% Generate the corresponding indices for the factor matrices as a tuple +function [data] = sample_ktensor(P, subs) +data = skr(P.u, subs) * P.lambda; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_nmu.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_nmu.m new file mode 100644 index 0000000..9c2ef4a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_nmu.m @@ -0,0 +1,162 @@ +function [P,Uinit] = cp_nmu(X,R,opts) +%CP_NMU Compute nonnegative CP with multiplicative updates. +% +% P = CP_NMU(X,R) computes an estimate of the best rank-R PARAFAC +% model of a tensor X with nonnegative constraints on the factors. +% This version uses the Lee & Seung multiplicative updates from +% their NMF algorithm. The input X can be a tensor, sptensor, +% ktensor, or ttensor. The result P is a ktensor. +% +% P = CP_NMU(X,R,OPTS) specify options: +% OPTS.tol: Tolerance on difference in fit {1.0e-4} +% OPTS.maxiters: Maximum number of iterations {50} +% OPTS.dimorder: Order to loop through dimensions {1:ndims(A)} +% OPTS.init: Initial guess [{'random'}|'nvecs'|cell array] +% OPTS.printitn: Print fit every n iterations {1} +% +% [P,U0] = CP_NMU(...) also returns the initial guess. +% +% Examples: +% X = sptenrand([5 4 3], 10); +% P = cp_nmu(X,2); +% P = cp_nmu(X,2,struct('dimorder',[3 2 1])); +% P = cp_nmu(X,2,struct('dimorder',[3 2 1],'init','nvecs')); +% U0 = {rand(5,2),rand(4,2),[]}; %<-- Initial guess for factors of P +% P = cp_nmu(X,2,struct('dimorder',[3 2 1],'init',{U0})); +% +% See also KTENSOR, TENSOR, SPTENSOR, TTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Fill in optional variable +if ~exist('opts','var') + opts = struct; +end + +%% Extract number of dimensions and norm of X. +N = ndims(X); +normX = norm(X); + +%% Set algorithm parameters from input or by using defaults +fitchangetol = setparam(opts,'tol',1e-4); +maxiters = setparam(opts,'maxiters',500); +dimorder = setparam(opts,'dimorder',1:N); +init = setparam(opts,'init','random'); +printitn = setparam(opts,'printitn',1); +epsilon = 1e-12; % Small number to protect against round-off error + +%% Error checking +% Error checking on maxiters +if maxiters < 0 + error('OPTS.maxiters must be positive'); +end + +% Error checking on dimorder +if ~isequal(1:N,sort(dimorder)) + error('OPTS.dimorder must include all elements from 1 to ndims(X)'); +end + +%% Set up and error checking on initial guess for U. 
+if iscell(init) + Uinit = init; + if numel(Uinit) ~= N + error('OPTS.init does not have %d cells',N); + end + for n = dimorder(1:end); + if ~isequal(size(Uinit{n}),[size(X,n) R]) + error('OPTS.init{%d} is the wrong size',n); + end + end +else + if strcmp(init,'random') + Uinit = cell(N,1); + for n = dimorder(1:end) + Uinit{n} = rand(size(X,n),R) + 0.1; + end + elseif strcmp(init,'nvecs') || strcmp(init,'eigs') + Uinit = cell(N,1); + for n = dimorder(1:end) + k = min(R,size(X,n)-2); + fprintf(' Computing %d leading e-vectors for factor %d.\n',k,n); + Uinit{n} = abs(nvecs(X,n,k)); + if (k < R) + Uinit{n} = [Uinit{n} rand(size(X,n),R-k)]; + end + end + else + error('The selected initialization method is not supported'); + end +end + +%% Set up for iterations - initializing U and the fit. +U = Uinit; +fit = 0; + +if printitn>0 + fprintf('\nNonnegative PARAFAC:\n'); +end + +%% Main Loop: Iterate until convergence +for iter = 1:maxiters + + fitold = fit; + + % Iterate over all N modes of the tensor + for n = dimorder(1:end) + + % Compute the matrix of coefficients for linear system + Y = ones(R,R); + for i = [1:n-1,n+1:N] + Y = Y .* (U{i}'*U{i}); + end + Y = U{n} * Y; + + % Initialize matrix of unknowns + Unew = U{n}; + + % Calculate Unew = X_(n) * khatrirao(all U except n, 'r'). + tmp = mttkrp(X,U,n) + epsilon; + + % Update unknowns + Unew = Unew .* tmp; + Unew = Unew ./ (Y + epsilon); + + U{n} = Unew; + end + + P = ktensor(U); + normresidual = sqrt( normX^2 + norm(P)^2 - 2 * innerprod(X,P) ); + fit = 1 - (normresidual / normX); %fraction explained by model + fitchange = abs(fitold - fit); + + if mod(iter,printitn)==0 + fprintf(' Iter %2d: fit = %e fitdelta = %7.1e\n', iter, fit, fitchange); + end + + % Check for convergence + if (iter > 1) && (fitchange < fitchangetol) + break; + end + +end + +%% Clean up final result +% Arrange the final tensor so that the columns are normalized. +P = arrange(P); + +if printitn>0 + normresidual = sqrt( normX^2 + norm(P)^2 - 2 * innerprod(X,P) ); + fit = 1 - (normresidual / normX); %fraction explained by model + fprintf(' Final fit = %e \n', fit); +end + +return; + +%% +function x = setparam(opts,name,default) +if isfield(opts,name); + x = opts.(name); +else + x = default; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_opt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_opt.m new file mode 100644 index 0000000..3893f15 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_opt.m @@ -0,0 +1,184 @@ +function [P, P0, output] = cp_opt(Z,R,varargin) +%CP_OPT Fits a CP model to a tensor via optimization. +% +% K = CP_OPT(X,R) fits an R-component CANDECOMP/PARAFAC (CP) model +% to the tensor X. The result K is a ktensor. The function being +% optimized is F(K) = 1/2 || X - K ||^2. +% +% K = CP_OPT(X,R,'param',value,...) specifies additional +% parameters for the method. Specifically... +% +% 'init' - Initialization for factor matrices (default: 'randn'). This +% can be a cell array with the initial matrices, a ktensor, or one of the +% following strings: +% 'randn' Randomly generated via randn function +% 'rand' Randomly generated via rand function +% 'zeros' All zeros +% 'nvecs' Selected as leading left singular vectors of X(n) +% +% 'opt_options' - Optimization method options, passed as a structure. +% Type 'help lbfgsb' to see the options. (Note that the 'opts.x0' option +% is overwritten using the choice for 'init', above.) 
+% +% 'lower'/'upper' - Lower/upper bounds, passed in as a scalar (if they +% are all the same), vector, cell array, or ktensor (lambda values +% ignored). +% +% [K, U0] = CP_OPT(...) also returns the initial guess. +% +% [K, U0, OUT] = CP_OPT(...) also returns a structure with the +% optimization exit flag, the final relative fit, and the full +% output from the optimization method. The fit is defined as +% +% FIT = 100 * (1 - ( F(K) / F(0) )). +% +% REFERENCE: E. Acar, D. M. Dunlavy and T. G. Kolda, A Scalable +% Optimization Approach for Fitting Canonical Tensor Decompositions, +% J. Chemometrics 25(2):67-86, February 2011 (doi:10.1002/cem.1335) +% +% Documentation page for CP-OPT +% +% See also TENSOR, SPTENSOR, KTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Error checking +% if ~isa(Z,'tensor') && ~isa(Z,'sptensor') +% error('Z must be a tensor or a sptensor'); +% end +% +if (nargin < 2) + error('Error: invalid input arguments'); +end + +%% Set parameters +params = inputParser; +params.addParameter('opt', 'lbfgsb', @(x) ismember(x,{'ncg','tn','lbfgs','lbfgsb'})); +params.addParameter('init', 'randn', @(x) (iscell(x) || isa(x, 'ktensor') || ismember(x,{'random','rand','randn','nvecs','zeros'}))); +params.addParameter('lower',-Inf); +params.addParameter('upper',Inf); +params.addParameter('opt_options', '', @isstruct); +params.parse(varargin{:}); + +init = params.Results.init; +opt = params.Results.opt; +options = params.Results.opt_options; +lower = params.Results.lower; +upper = params.Results.upper; + +use_lbfgsb = strcmp(opt,'lbfgsb'); + +%% Initialization +sz = size(Z); +N = length(sz); + +if iscell(init) + P0 = init; +elseif isa(init,'ktensor') + P0 = tocell(init); +else + P0 = cell(N,1); + if strcmpi(init,'nvecs') + for n=1:N + P0{n} = nvecs(Z,n,R); + end + else + for n=1:N + P0{n} = matrandnorm(feval(init,sz(n),R)); + end + end +end + +%% Set up lower and upper (L-BFGS-B only) + +if ~use_lbfgsb && ( any(isfinite(lower)) || any(isfinite(upper)) ) + error('Cannot use lower and upper bounds without L-BFGS-B'); +end + +if use_lbfgsb + lower = convert_bound(lower,sz,R); + upper = convert_bound(upper,sz,R); +end + + +%% Set up optimization algorithm + +if use_lbfgsb % L-BFGS-B + if ~exist('lbfgsb','file') + error(['CP_OPT requires L-BFGS-B function. This can be downloaded'... + 'at https://github.com/stephenbeckr/L-BFGS-B-C']); + end +else % POBLANO + switch (params.Results.opt) + case 'ncg' + fhandle = @ncg; + case 'tn' + fhandle = @tn; + case 'lbfgs' + fhandle = @lbfgs; + end + + if ~exist('poblano_params','file') + error(['CP_OPT requires Poblano Toolbox for Matlab. This can be ' ... 
+ 'downloaded at http://software.sandia.gov/trac/poblano.']); + end +end + +%% Set up optimization algorithm options +if isempty(options) + if use_lbfgsb + options.maxIts = 10000; + options.maxTotalIts = 50000; + options.printEvery = 10; + else + options = feval(fhandle, 'defaults'); + end +end + + + + +%% Fit CP using CPOPT +normsqr = norm(Z)^2; +if use_lbfgsb + opts = options; + opts.x0 = tt_fac_to_vec(P0); + [xx,ff,out] = lbfgsb(@(x)tt_cp_fun(x,Z,normsqr), lower, upper, opts); + P = ktensor(tt_cp_vec_to_fac(xx, Z)); + output.ExitMsg = out.lbfgs_message1; + output.Fit = 100 * (1 - ff /(0.5 * normsqr)); + output.OptOut = out; +else % POBLANO + out = feval(fhandle, @(x)tt_cp_fun(x,Z,normsqr), tt_fac_to_vec(P0), options); + P = ktensor(tt_cp_vec_to_fac(out.X, Z)); + output.ExitFlag = out.ExitFlag; + output.Fit = 100 * (1 - out.F /(0.5 * normsqr)); + output.OptOut = out; +end + + +%% Clean up final result +% Arrange the final tensor so that the columns are normalized. +P = arrange(P); +% Fix the signs +P = fixsigns(P); + + +function newbound = convert_bound(bound,sz,R) + +len = sum(sz)*R; + +if isscalar(bound) + newbound = bound * ones(len,1); +elseif isa(bound,'ktensor') + newbound = tovec(bound, false); +elseif iscell(bound) + newbound = tt_fac_to_vec(bound); +end + +if ~isequal(size(newbound), [len 1]) + error('Bound is the wrong size'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_sym.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_sym.m new file mode 100644 index 0000000..1e19d3b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_sym.m @@ -0,0 +1,256 @@ +function [Model,Info] = cp_sym(A,P,varargin) +%CP_SYM Fit a symmetric P model to the symmetric input tensor. +% +% MODEL = CP_SYM(X,R) fits an R-component symmetric CP model to the +% tensor X. The result MODEL is a symmetric Kruskal tensor (symktensor). +% The function being optimized is defined by the parameters listed +% below. +% +% MODEL = CP_SYM(X,R,'param',value) accepts additional arguments as +% described below. +% +% Several optimization methods can be used, depending on what toolboxes +% and codes are available. The MATLAB optimization toolbox provides +% FMINUNC and FMINCON. The POBLANO package provides limited-memory BFGS +% (LBFGS), Nonlinear CG (NCG), and Truncated Newton (TN). Finally, we +% provide limited support for SNOPT, which uses a sequential quadratic +% programming algorithm. We recommend 'lbfgs' from the POBLANO package as +% the best choice for unconstrained and 'SNOPT' as the best choice for +% constrained. +% +% o 'alg' - Optimiation algorithm. Choices: {'fminun','fmincon', +% 'lbfgs','ncg','tn','snopt'}. Default: 'lbfgs'. +% o 'alg_options' - Options that are passed to the optimization +% algorithm. +% +% Parameters that define the objective function (more details in +% symktensor/setup_fg)... +% +% o 'unique' - Give each unique index equal weight. Default: True. +% o 'fast' - Use fast version if unique is false. Default: True. +% o 'l2weight' - Penalty on column norms of X = 1. Default: 0. +% o 'l1weight' - Weight to encourage sparsity in LAMBDA. Default: 0. +% o 'l1param' - Parameter to encourage sparsity in LAMBDA. Default: 10. +% o 'nonneg' - Require: X >= 0, LAMBDA >= 0. Default: False. +% o 'nolambda' - Remove LAMBDA from the optimization. Default: False. +% +% Additionally, it is possible to specify an initial guess. +% +% o 'init' - Specify initial guess. Default: [] (none). +% +% [MODEL,INFO] = CP_SYM(X,R,...) 
returns additional information about the +% optimization: +% +% o model0 - Initial guess for model. +% o data - Produced by fg_setup using X and the initial guess. +% o setuptime - Time for setup. +% o optout - Information returned by the optimization method. +% o optalg - Optimization algorithm. (See 'alg' above.) +% o optopt - Optimization parameters. (See 'alg_options' above.) +% o runtime - Time for running optimization method. +% +% See also SYMKTENSOR, TENSOR/ISSYMMETRIC, SYMKTENSOR/FG, +% SYMKTENSOR/FG_SETUP. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +%% Check inputs +if nargin < 2 + error('Requires at least two arguments') +end +if ~isa(A,'tensor') && ~isa(A,'symtensor') + error('First input must be a tensor or symtensor'); +end +if ~issymmetric(A) + error('Input tensor must be symmetric'); +end +if ~isscalar(P) || (P < 1) || (round(P) ~= P) + error('Second input must be a scalar positive integer value'); +end + +%% Parse optional parameters +params = inputParser; +params.addParameter('alg','lbfgs',@(x) ismember(x,{'fminunc','fmincon','lbfgs','ncg','tn','snopt'})); +params.addParameter('alg_options',[]); +params.addParameter('init',[]); +params.KeepUnmatched = true; % <-- Unmatched params passed to fg_setup +params.parse(varargin{:}); + +alg = params.Results.alg; +options = params.Results.alg_options; +Model0 = params.Results.init; + +%% Setup +if isempty(Model0) + Model0 = symktensor(P,A); %random initialization. With rand, i.e. U(0,1) +end + +starttime = tic; +data = fg_setup(Model0,A,params.Unmatched); +x0 = tovec(Model0,data.fgopts.nolambda); +fhandle = @(x) fg_wrapper(x,data); +setuptime = toc(starttime); + +Info.model0 = Model0; +Info.data = data; +Info.setuptime = setuptime; + +%% Optimization +if isequal(alg(1:4),'fmin') % MATLAB Optimization Toolbox + + if isempty(options) + options = optimset('GradObj','on',... + 'MaxFunEvals',50000,... + 'MaxIter',10000,... + 'TolFun',1e-8,... + 'Display','iter'); + end + + if data.fgopts.nonneg + lb = zeros(size(x0)); + ub = Inf * ones(size(x0)); + starttime = tic; + [x,fval,flag,out] = fmincon(fhandle,x0,[],[],[],[],lb,ub,[],options); + else + starttime = tic; + [x,fval,flag,out] = fminunc(fhandle,x0,options); + end + + runtime = toc(starttime); + Model = symktensor(x,A,data.fgopts.nolambda); + out.fval = fval; + out.flag = flag; + Info.optout = out; + Info.optalg = alg; + Info.optopt = options; + Info.runtime = runtime; + +elseif ismember(alg,{'lbfgs','ncg','tn'}) % POBLANO + + if (data.fgopts.nonneg) + error('Nonnegative optimization requires a different method'); + end + optfh = eval(sprintf('@%s',alg)); + if isempty(options) + options = optfh('defaults'); + options.DisplayIters = 10; + options.Display = 'iter'; + options.StopTol = 1e-7; + options.RelFuncTol = 1e-8; + options.MaxIters = 10000; + options.MaxFuncEvals = 50000; + end + starttime = tic; + out = optfh(fhandle, x0, options); + runtime = toc(starttime); + Model = symktensor(out.X,A,data.fgopts.nolambda); + Info.optout = out; + Info.optalg = alg; + Info.optopt = options; + Info.runtime = runtime; + +elseif ismember(alg,{'snopt'}) % SNOPT + + % NOTE that the MATLAB interface for SNOPT requires that we create (and + % delete) some temporary files. Specifically... + % + % o snoptspecs.txt + % o snoptoutput.txt + % o snoptwrapper.m + % + % Also, the way the options works are different for SNOPT than the + % other methods. The user can define an options struct with the + % parameter names for SNOPT, replacing spaces with underscores. 
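+  % For example (editor's illustration): options.Major_Iteration_limit = 5000
+  % is handed to SNOPT as the setting 'Major Iteration limit' by the
+  % strrep/snseti loop below.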
+ + if isempty(options) + options.Major_Iteration_limit = 10000; + options.New_superbasics_limit = 999; + options.Superbasics_limit = 999; + options.Major_optimality_tolerance = 1e-8; + end + + specfile = which('snoptspecs.txt'); + wd_specfile = isempty(specfile); + if wd_specfile + fid = fopen('snoptspecs.txt','w'); + fprintf(fid,'Begin snmain2\n'); + fprintf(fid,' Derivative option 1\n'); + fprintf(fid,' Major iterations 200\n'); + fprintf(fid,' Major Print level 000001\n'); + fprintf(fid,'* (JFLXBT)\n'); + fprintf(fid,' Minor print level 1\n'); + fprintf(fid,' Solution No\n'); + fprintf(fid,'End snmain2\n'); + fclose(fid); + specfile = which('snoptspecs.txt'); + else + warning('Using existing ''snoptspecs.txt'' file.'); + end + if isempty(specfile) + error('SNOPT specification file not found'); + end + if exist('snoptoutput.txt','file') + warning('Overwriting ''snoptoutput.txt'' file.'); + end + snprint('snoptoutput.txt'); + snspec ( specfile ); + names = fieldnames(options); + for i = 1:length(names) + n = names{i}; + estr = sprintf('snseti (''%s'', %g);', strrep(n,'_',' '), options.(n)); + eval(estr); + end + snset ('Minimize'); + wd_wrapper = ~exist('snoptwrapper.m','file'); + if wd_wrapper + fid = fopen('snoptwrapper.m','w'); + fprintf(fid,'function [F,G] = snoptwrapper(x)\n'); + fprintf(fid,'global snoptwrapperfun\n'); + fprintf(fid,'[F,G] = snoptwrapperfun(x);\n'); + fclose(fid); + else + warning('Using existing ''snoptwrapper.m'' file.'); + end + + global snoptwrapperfun + snoptwrapperfun = @(x) fg_wrapper(x,data); + + lb = -Inf * ones(size(x0)); + ub = Inf * ones(size(x0)); + if data.fgopts.nonneg + lb = zeros(size(x0)); + end + + starttime = tic; + [x,fval,flag] = snopt(x0,lb,ub,0,Inf,'snoptwrapper'); + snprint off; % Closes the file and empties the print buffer + runtime = toc(starttime); + + %clear global fhandle + Model = symktensor(x,A,data.fgopts.nolambda); + out.fval = fval; + out.flag = flag; + Info.optout = out; + Info.optoptions = options; + Info.optalg = alg; + Info.runtime = runtime; + + delete('snoptoutput.txt'); + if wd_specfile + delete('snoptspecs.txt'); + end + if wd_wrapper + delete('snoptwrapper.m'); + end +else + + error('Invalid optimization method'); + +end + + +%% Extract solution and Info about the run + +function [f,g] = fg_wrapper(x,data) +model = symktensor(x,data.M,data.P,data.fgopts.nolambda); +[f,g] = fg(model,data); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/cp_wopt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_wopt.m new file mode 100644 index 0000000..0278265 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/cp_wopt.m @@ -0,0 +1,471 @@ +function [P, P0, output] = cp_wopt(Z,W,R,varargin) +%CP_WOPT Fits a weighted CP model to a tensor via optimization. +% +% K = CP_WOPT(X,W,R) fits an R-component weighted CANDECOMP/PARAFAC +% (CP) model to the tensor X, where W is an indicator for missing +% data (0 = missing, 1 = present). The result K is a ktensor. The +% function being optimized is F(K) = 1/2 || W .* (X - K) ||^2. +% +% K = CP_WOPT(X,W,R,'param', value,...) specifies additional +% parameters for the method. Specifically... +% +% 'skip_zeroing' - Skip the *expensive* step where all the missing +% entries in X are set to zero. Only set this to true if the entries were +% already zeroed out. There is no way to disable the printing for the +% time for this unless you set this to true --- this is to avoid +% accidentally including this in timing results. 
+% +% 'init' - Initialization for factor matrices (default: 'randn'). This +% can be a cell array with the initial matrices, a ktensor, or one of the +% following strings: +% 'randn' Randomly generated via randn function +% 'rand' Randomly generated via rand function +% 'zeros' All zeros +% 'nvecs' Selected as leading left singular vectors of X(n) +% +% 'opt' - Optimization method, defaults to 'lbfgsb' which is +% bound-constrained L-BFGS-B. See the full documentation for other +% options. +% +% 'opt_options' - Optimization method options, passed as a structure. +% Type 'help lbfgsb' to see the options. (Note that the 'opts.x0' option +% is overwritten using the choice for 'init', above.) +% +% 'lower'/'upper' - Lower/upper bounds, passed in as a scalar (if they +% are all the same), vector, cell array, or ktensor (lambda values +% ignored). +% +% 'fun' - Specifies the type of implementation (default: 'auto') +% 'auto' Dense implementation +% 'sparse' Sparse implementation +% 'sparse_lowmem' Memory efficient sparse implementation +% +% 'verbosity' - Set to zero to disable all printing. +% +% [K, U0] = CP_WOPT(...) also returns the initial guess. +% +% [K, U0, OUT] = CP_WOPT(...) also returns a structure with the +% optimization exit flag, the final relative fit, and the full +% output from the optimization method. +% +% REFERENCE: E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup, Scalable +% Tensor Factorizations for Incomplete Data, Chemometrics and Intelligent +% Laboratory Systems 106(1):41-56, March 2011 +% (doi:10.1016/j.chemolab.2010.08.004) +% +% Documentation page for CP-WOPT +% +% See also CP_OPT. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + + +%% Set parameters +params = inputParser; +params.addParameter('opt','lbfgsb', @(x) ismember(x,{'lbfgsb','ncg','tn','lbfgs'})); +params.addParameter('init', 'randn', @(x) (iscell(x) || isa(x, 'ktensor') || ismember(x,{'random','rand','randn','nvecs','zeros'}))); +params.addParameter('lower',-Inf); +params.addParameter('upper',Inf); +params.addParameter('opt_options', '', @isstruct); +params.addParameter('skip_zeroing', false, @islogical); +params.addParameter('fun','auto', @(x) ismember(x,{'auto','default','sparse','sparse_lowmem'})); +params.addParameter('verbosity',10); +params.parse(varargin{:}); + +init = params.Results.init; +opt = params.Results.opt; +options = params.Results.opt_options; +lower = params.Results.lower; +upper = params.Results.upper; +funtype = params.Results.fun; +do_zeroing = ~params.Results.skip_zeroing; +verbosity = params.Results.verbosity; +do_print = verbosity > 0; + +use_lbfgsb = strcmp(opt,'lbfgsb'); + +if do_print + fprintf('Running CP-WOPT...\n'); +end + +%% Zeroing +if do_zeroing + tic; + Z = Z.*W; + ztime = toc; + fprintf('Time for zeroing out masked entries of data tensor is %.2e seconds.\n', ztime); + fprintf('(If zeroing is done in preprocessing, set ''skip_zeroing'' to true.)\n'); +end + +%% Initialization +sz = size(Z); +N = length(sz); + +if iscell(init) + P0 = init; +elseif isa(init,'ktensor') + P0 = tocell(init); +else + P0 = cell(N,1); + if strcmpi(init,'nvecs') + for n=1:N + P0{n} = nvecs(Z,n,R); + end + else + for n=1:N + P0{n} = matrandnorm(feval(init,sz(n),R)); + end + end +end + +%% Set up lower and upper (L-BFGS-B only) + +if ~use_lbfgsb && ( any(isfinite(lower)) || any(isfinite(upper)) ) + error('Cannot use lower and upper bounds without L-BFGS-B'); +end + +if use_lbfgsb + lower = convert_bound(lower,sz,R); + upper = convert_bound(upper,sz,R); +end + +%% Set up 
optimization algorithm + +if use_lbfgsb % L-BFGS-B + if ~exist('lbfgsb','file') + error(['CP_OPT requires L-BFGS-B function. This can be downloaded'... + 'at https://github.com/stephenbeckr/L-BFGS-B-C']); + end +else % POBLANO + switch (params.Results.opt) + case 'ncg' + opthandle = @ncg; + case 'tn' + opthandle = @tn; + case 'lbfgs' + opthandle = @lbfgs; + end + + if ~exist('poblano_params','file') + error(['CP_OPT requires Poblano Toolbox for Matlab. This can be ' ... + 'downloaded at http://software.sandia.gov/trac/poblano.']); + end +end + + +%% Set up optimization algorithm options +if isempty(options) + if use_lbfgsb + options.maxIts = 10000; + options.maxTotalIts = 50000; + if do_print + options.printEvery = verbosity; + else + options.printEvery = 0; + end + else + options = feval(fhandle, 'defaults'); + end +end + +%% Set up function handle +normZsqr = norm(Z)^2; + +if (isequal(funtype,'auto') && isa(Z,'tensor')) || isequal(funtype,'default') + funhandle = @(x) tt_cp_wfun(Z,W,x,normZsqr); +else + if ~isa(Z,'sptensor') || ~isa(W,'sptensor') + warning('Converting dense tensor to sparse'); + Z = sptensor(Z); + W = sptensor(W); + end + Zvals = tt_cp_wfg_sparse_setup(Z,W); + fflag = ~isequal(funtype,'sparse_lowmem'); + funhandle = @(x) tt_cp_wfun(Zvals,W,x,normZsqr,fflag); +end + + + +%% Fit CP using CP_WOPT by ignoring missing entries + +if use_lbfgsb + opts = options; + opts.x0 = tt_fac_to_vec(P0); + [xx,ff,out] = lbfgsb(funhandle, lower, upper, opts); + P = ktensor(tt_cp_vec_to_fac(xx, Z)); + output.ExitMsg = out.lbfgs_message1; + output.f = ff; + %output.Fit = 100 * (1 - ff /(0.5 * normZsqr)); + output.OptOut = out; +else + out = feval(opthandle, funhandle, tt_fac_to_vec(P0), options); + P = ktensor(tt_cp_vec_to_fac(out.X,Z)); + output.ExitFlag = out.ExitFlag; + output.FuncEvals = out.FuncEvals; + output.f = out.F; + output.G = tt_cp_vec_to_fac(out.G,W); + output.OptOut = out; +end + +%% Clean up final result +% Arrange the final tensor so that the columns are normalized. +P = arrange(P); +% Fix the signs +P = fixsigns(P); + +function [f,G] = tt_cp_wfg(Z,W,A,normZsqr) +%TT_CP_WFG Function and gradient of CP with missing data. +% +% [F,G] = TT_CP_WFG(Z,W,A) computes the function and gradient values of +% the function 0.5 * || W .* (Z - ktensor(A)) ||^2. The input A is a +% cell array containing the factor matrices. The input W is a (dense +% or sparse) tensor containing zeros wherever data is missing. The +% input Z is a (dense or sparse) tensor that is assumed to have +% zeros wherever there is missing data. The output is the function F +% and a cell array G containing the partial derivatives with respect +% to the factor matrices. +% +% [F,G] = TT_CP_WFG(Z,W,A,NORMZSQR) also passes in the pre-computed +% norm of Z, which makes the computations faster. +% +% See also TT_CP_WFUN, TT_CP_WFG_SPARSE, TT_CP_WFG_SPARSE_SETUP. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +%% Compute B = W.*ktensor(A) +if isa(W,'sptensor') + B = W.*ktensor(A); +else + B = W.*full(ktensor(A)); +end + +%% Compute normZ +if ~exist('normZsqr','var') + normZsqr = norm(Z)^2; +end + +% function value +f = 0.5 * normZsqr - innerprod(Z,B) + 0.5 * norm(B)^2; + +% gradient computation +N = ndims(Z); +G = cell(N,1); +T = Z - B; +for n = 1:N + G{n} = zeros(size(A{n})); + G{n} = -mttkrp(T,A,n); +end + +function [f,g] = tt_cp_wfun(Zdata,W,x,normZsqr,memflag) +%TT_CP_WFUN Computes function and gradient for weighted CP. +% +% [F,G] = TT_CP_WFUN(Z,W,x,normZsqr) calculates the function and gradient +% for the function 0.5 * || W .* (Z - ktensor(A)) ||^2 where W is an +% indicator for missing data (0 = missing, 1 = present), Z is the data +% tensor that is being fit (assumed that missing entries have already +% been set to zero), A is a cell array of factor matrices that is created +% from the vector x, and normZsqr in the norm of Z squared. +% +% [F,G] = TT_CP_WFUN(Zvals,W,x,normZsqr) is a special version that takes +% just the nonzeros in Z as calculated by the helper function +% CP_WFG_SPARSE_SETUP. +% +% [F,G] = TT_CP_WFUN(....,false) uses a more memory efficient version for +% the sparse code. +% +% See also TT_CP_WFG, TT_CP_WFG_SPARSE, TT_CP_WFG_SPARSE_SETUP +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Convert x to factor matrices (i.e., a cell array). +% Normally we would pass in the data tensor, but we may have a data +% tensor or a data array if we are doing the sparse +% calculation. Therefore, we exploit the fact that W is the same +% size as Z and pass it into the function. +A = tt_cp_vec_to_fac(x,W); + +%% Compute the function and gradient +if isa(Zdata,'tensor') || isa(Zdata,'sptensor') + if ~exist('normZsqr','var') + normZsqr = norm(Zdata)^2; + end + [f,G] = tt_cp_wfg(Zdata,W,A,normZsqr); +else + if ~exist('normZsqr','var') + normZsqr = sum(Zdata.^2); + end + if ~exist('memflag','var') + memflag = true; + end + [f,G] = tt_cp_wfg_sparse(Zdata,W,A,normZsqr,memflag); +end + +%% Convert gradient to a vector +g = tt_fac_to_vec(G); + + +function Zvals = tt_cp_wfg_sparse_setup(Z,W) +%CP_WFG_SPARSE_SETUP Creates a special array. +% +% ZVALS = CP_WFG_SPARSE_SETUP(Z,W) creates an array ZVALS that +% contains the values of Z corresponding to the indices specified +% by W.subs. +% +% See also CP_WFG_SPARSE. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. 
+% The full license terms can be found in the file LICENSE.txt + + +Zsubs = Z.subs; +Wsubs = W.subs; +Zvals = zeros(size(W.vals)); +[junk,loc] = ismember(Zsubs,Wsubs,'rows'); +Zvals(loc) = Z.vals; + +function [f,G] = tt_cp_wfg_sparse(Zvals,W,A,normZsqr,memflag) +%TT_CP_WFG_SPARSE Computes weighted CP function and gradient. +% +% [F,G] = TT_CP_WFG_SPARSE(ZVALS,W,A) computes the function and +% gradient with respect to A of || W .* (Z - ktensor(A)) ||^2 where +% Z = W .* X. The variable ZVALS contains the values of the tensor Z +% at the locations specified by W.subs. (ZVALS can be computed using +% a provided preprocessing function.) The variable A is a cell array +% of component matrices. The tensor W is a sparse tensor that has +% ones in entries where we know the values. +% +% [F,G] = TT_CP_WFG_SPARSE(ZVALS,W,A,NORMZSQR) also passes in the +% pre-computed norm of Z, which makes the computations faster. +% +% [F,G] = TT_CP_WFG_SPARSE(ZVALS,A,W,NORMZSQR,false) uses less memory +% but more time and is appropriate for very large sparse tensors. +% +% See also TT_CP_WFG_SPARSE_SETUP, CP_WFG, CP_WFUN. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + + +%% Set-up +N = ndims(W); +R = size(A{1},2); +sz = cellfun(@(x)size(x,1),A); +Wsubs = W.subs; +Wvals = W.vals; +Nvals = length(Wvals); + +if ~exist('memflag','var') + memflag = true; +end + +%% Compute B = W.*ktensor(A) +Bvals = zeros(Nvals,1); +for r = 1:R + newvals = Wvals; + for n = 1:N + bigArn = A{n}(Wsubs(:,n),r); + newvals = newvals .* bigArn; + end + Bvals = Bvals + newvals; +end + +%% Compute normZ +if ~exist('normZsqr','var') + normZsqr = sum(Zvals.^2); +end + +%% function value: f = 0.5 * normZsqr - innerprod(Z,B) + 0.5 * norm(B)^2 +f = 0.5 * normZsqr - Zvals'*Bvals + 0.5 * sum(Bvals.^2); + +%% gradient computation +Tvals = Zvals - Bvals; + +G = cell(N,1); +for n = 1:N + G{n} = zeros(size(A{n})); +end + +for r = 1:R + if (memflag) + bigAr = cell(N,1); + for n = 1:N + bigAr{n} = A{n}(Wsubs(:,n),r); + end + for SkipN = 1:N + newvals = Tvals; + for n = [1:SkipN-1,SkipN+1:N] + newvals = newvals .* bigAr{n}; + end + G{SkipN}(:,r) = accumarray(Wsubs(:,SkipN),newvals,[sz(SkipN) 1]); + end + else + for SkipN = 1:N + newvals = Tvals; + for n = [1:SkipN-1,SkipN+1:N] + bigArn = A{n}(Wsubs(:,n),r); + newvals = newvals .* bigArn; + end + G{SkipN}(:,r) = accumarray(Wsubs(:,SkipN),newvals,[sz(SkipN) 1]); + end + end + +end + +for n = 1:N + G{n} = -G{n}; +end + +function newbound = convert_bound(bound,sz,R) + +len = sum(sz)*R; + +if isscalar(bound) + newbound = bound * ones(len,1); +elseif isa(bound,'ktensor') + newbound = tovec(bound, false); +elseif iscell(bound) + newbound = tt_fac_to_vec(bound); +end + +if ~isequal(size(newbound), [len 1]) + error('Bound is the wrong size'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/create_guess.m b/ext/YetAnotherFEcode/external/tensor_toolbox/create_guess.m new file mode 100644 index 0000000..44d9efc --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/create_guess.m @@ -0,0 +1,152 @@ +function U = 
create_guess(varargin) +%CREATE_GUESS Creates initial guess for CP or Tucker fitting. +% +% U = CREATE_GUESS('Param',value,...) creates an initial guess at the +% factor matrices for a CP or Tucker decomposition. The factors can be +% generated randomly, random orthogonal, etc. If the tensor is provided, +% it can be alternatively generated via the HO-SVD. +% +% --- Parameters --- +% +% 'Factor_Generator' - Method to be used to generate the factor matrices. +% Options: +% - 'rand' (uniform on [0,1]) +% - 'randn' (standard normal distribution) +% - 'orthogonal' +% - 'stochastic' (uniform on [0,1] with column sums rescaled to 1) +% - 'nvecs' (HOSVD solution) +% - 'pertubation' of the true solution +% Alternatively, pass in a function that accepts two arguments (the size +% of the matrix) and generates the desired factor. Default: 'rand' +% +% 'Size' - Size of the tensor. Required to be specified unless 'Data' or +% 'Soln' is given. Default: [] +% +% 'Num_Factors' - Number of factors (can be either a single value for CP +% or a vector for Tucker). Required to be specified unless 'Soln' is +% given. Default: [] +% +% 'Data' - The actual tensor to be fit. Required if 'nvecs' is the +% selected Factor Generator. The 'Size' parameter is ignored if this +% is specified. Default: [] +% +% 'Soln' - The actual solution to the problem. Required if 'pertubation' +% is the selected Factor Generator. The 'Size' and 'Num_Factors' +% parameters are ignored if this is specified. Default: [] +% +% 'Pertubation' - Size of the pertubation is the 'pertubation' option is +% selected under 'Factor_Generator'. The pertubation is of the form U+p*N +% where U is the original factor matrix, N is a noise matrix with entries +% selected for a standard normal distribution, and p is the pertubation +% parameter times ||U||/||N||. Default: 0.10 +% +% 'Skip' - Specifies mode to skip in initial guess generation (this is +% useful for ALS). Default: 0 (no skipping) +% +% 'State' - State of the random number generator. This can be used +% to reproduce results. +% +% Documentation page for creating test problems +% +% See also CREATE_PROBLEM. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Random set-up +defaultStream = RandStream.getGlobalStream; + +%% Parse inputs +p = inputParser; +p.addParamValue('Factor_Generator', 'rand', @(x) isa(x,'function_handle') || ... + ismember(lower(x),{'rand','randn','orthogonal','stochastic','nvecs','pertubation'})); +p.addParamValue('Size', [], @(x) isempty(x) || all(x)); +p.addParamValue('Num_Factors', [], @(x) isempty(x) || all(x)); +p.addParamValue('Soln', [], @(x) isempty(x) || isa(x,'ktensor') || isa(x,'ttensor')); +p.addParamValue('Data', [], @(x) isempty(x) || isa(x,'tensor') || isa(x,'sptensor')); +p.addParamValue('Pertubation', 0.10, @(x) x >= 0 & x < 1); +p.addParamValue('Skip', 0); +p.addParamValue('State', defaultStream.State, @(x) true); +p.parse(varargin{:}); +params = p.Results; + +%% Initialize random number generator with specified state. 
+defaultStream.State = params.State; + +%% Determine problem size +if ~isempty(params.Soln) + sz = size(params.Soln); +elseif ~isempty(params.Data) + sz = size(params.Data); +else + sz = params.Size; +end +if isempty(sz) + error('Size must be specified'); +end +nd = length(sz); +modes = setdiff(1:nd,params.Skip); + +%% Determine number of factors +if ~isempty(params.Soln) + nf = zeros(nd,1); + for n = 1:nd + nf(n) = size(params.Soln.U{n},2); + end +else + nf = params.Num_Factors; + if length(nf) == 1 + nf = nf * ones(nd,1); + end +end + +%% Create factor matrices +U = cell(nd,1); +if isa(params.Factor_Generator,'function_handle') + for n = modes + U{n} = params.Factor_Generator(sz(n), nf(n)); + end + return; +end + +switch(params.Factor_Generator) + case 'rand' + for n = modes + U{n} = rand(sz(n), nf(n)); + end + case 'randn' + for n = modes + U{n} = randn(sz(n), nf(n)); + end + case 'orthogonal' + for n = modes + X = matrandorth(sz(n)); + U{n} = X(:,1:nf(n)); + end + case 'stochastic' + for n = modes + X = rand(sz(n), nf(n)); + S = sum(X,1); + U{n} = X * diag(1./S); + end + case 'nvecs' + if isempty(params.Data) + error('Data required for nvecs initialization'); + end + for n = modes + U{n} = nvecs(params.Data,n,nf(n)); + end + case 'pertubation' + if isempty(params.Soln) + error('Soln required for pertubation initialization'); + end + for n = modes + X = params.Soln{n}; + N = rand(size(X)); + p = params.Pertubation * norm(X,'fro') / norm(N,'fro'); + U{n} = X + p * N; + end +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem.m b/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem.m new file mode 100644 index 0000000..d54db7e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem.m @@ -0,0 +1,458 @@ +function [info,params] = create_problem(varargin) +%CREATE_PROBLEM Create test problems for tensor factorizations. +% +% INFO = CREATE_PROBLEM('Param',value,...) creates a tensor factorization +% test problem. It generates a solution corresponding to a ktensor or a +% ttensor, and then it generates an example data tensor which has that +% underlying factorization. The data tensor can be optionally corrupted +% with noise, generated via a special procedure to produce a sparse +% tensor, or even have missing data. +% +% [INFO,PARAMS] = CREATE_PROBLEM(...) also returns the parameters that +% were used to generate the problem. An identical problem can be +% generated via INFO_COPY = CREATE_PROBLEM(PARAMS). +% +% --- General --- +% +% 'State' - State of the random number generator. This can be used +% to reproduce results. +% +% --- Solution Parameters --- +% +% The desired solution will be returned in the "Soln" field of INFO. It +% will be either a ktensor or ttensor, depending on the user option +% 'Type', described below. +% +% 'Soln' - A CP or Tucker tensor that is the desired solution. Renders +% all other solution parameters obsolete. Default: [] +% +% 'Type' - CP or Tucker. Default: 'CP' +% +% 'Size' - Size of the tensor. Default: [100 100 100] +% +% 'Num_Factors' - Number of factors. Default: 2 (or [2 2 2] for Tucker) +% +% 'Symmetric' - List of modes that should be symmetric, i.e., have +% identical factor matrices. This cell array of lists of modes if +% different subsets should be symmetric, i.e., {[1 2],[3 4]} says that +% modes 1 and 2 and 3 and 4 are symmetric. +% +% 'Factor_Generator' - Method to be used to generate the factor matrices. 
+% Options are 'rand' (uniform on [0,1]), 'randn' (standard normal +% distribution), 'orthogonal', or 'stochastic' (uniform on [0,1] +% with column sums rescaled to 1). Alternatively, pass in a function that +% accepts two arguments (the size of the matrix) and generates the +% desired factor. Default: 'randn' +% +% 'Lambda_Generator' - Method used to genate the lambda vector for CP +% solutions. The choices are the same as for the 'Factor_Generator'. +% Default: 'rand' +% +% 'Core_Generator' - Method used to generate the core tensor for Tucker +% solutions. The choices are 'rand' and 'randn' (as described above). +% Alternatively, pass in a function that accepts a vector-valued size and +% generates a tensor of the specified size. Default: 'randn' +% +% --- Missing Data Parameters --- +% +% The missing data pattern will be returned in the "Pattern" data +% field of INFO. If there is no missing data, then this will just be an +% empty array. Otherwise, it will be a tensor that is zero whereever data +% is missing and one elsewhere. +% +% 'M' - The proportion of missing data *or* a tensor or sptensor that +% contains the missing data pattern as described above. Default: 0 +% +% 'Sparse_M' - Generate sparse rather than dense missing data pattern +% tensor. Only useful for large tensors that don't easily fit in memory +% and when M > 80%. Default: false. +% +% --- Data Parameters --- +% +% The data to be factorized will be returned in the "Data" field of INFO. +% It will have zeros for any entries that are missing (though not all +% zeros necessarily correspond to missing data). +% +% 'Sparse_Generation' - Generate a sparse tensor via a special procedure +% that works only for ktensor's (CP) that can be scaled so that the +% column factors and lambda are stochastic. Note that this geneartion +% procedure will modify lambda vector in the solution so that it is +% appropriately scaled for the number of inserted nonzeros. A value of +% zero means no sparse generation, and any positive value is the number +% of nonzeros to be inserted. Any value in the range (0,1) will be +% interpreted as a percentage. The procedure is incompatible with missing +% data. Default: 0 (no sparse generation). +% +% 'Noise' - Amount of Gaussian noise to add. Let N be a "noise" +% tensor with entries drawn from the standard norm distribution, and +% let Y be the noise-free tensor, i.e. Y = full(K). Then Z = Y + eta +% * norm(Y,'fro') / norm(N,'fro') * N is the noisy version of the +% tensor where eta is the percentage of noise to add. If the data tensor +% is sparse (either due to sparse generation or sparsity due to missing +% data), then noise is only generated at the nonzero entries. +% Default: 0.10 +% +% Examples: +% % Create a 100 x 100 x 100 problem with 5 factors (each entry from the +% % standard normal distribution) and 10% noise with diagonal lambda +% % values of all ones. +% info = create_problem('Lambda_Generator', @ones); +% +% % Same as above except that the we use a special function to generate +% % factor matrices with a constant congruence of 0.9. +% info = create_problem('Factor_Generator', @(m,n) matrandcong(m,n,.9), ... +% 'Lambda_Generator', @ones); +% +% Documentation page for creating test problems +% +% See also MATRANDCONG, MATRANDORTH, MATRANDNORM, CREATE_GUESS. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
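% Example (editor's sketch, not part of the original help; the sizes and
% parameter values are illustrative only): generate a rank-4 CP problem with
% 5% noise and 30% missing data, then unpack the pieces described above.
%
%   info = create_problem('Size', [50 40 30], 'Num_Factors', 4, ...
%                         'Noise', 0.05, 'M', 0.30);
%   S = info.Soln;     % ktensor holding the true factors
%   W = info.Pattern;  % indicator tensor: 1 = observed, 0 = missing
%   X = info.Data;     % noisy data tensor, zero at the missing entries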
+ +%% Random set-up +defaultStream = RandStream.getGlobalStream; + +%% Parse inputs +p = inputParser; +p.addParamValue('State', defaultStream.State, @(x) true); +p.addParamValue('Soln', [], @(x) isempty(x) || isa(x,'ktensor') || isa(x,'ttensor')); +p.addParamValue('Type', 'CP', @(x) ismember(lower(x),{'cp','tucker'})); +p.addParamValue('Size', [100 100 100], @all); +p.addParamValue('Num_Factors', 2, @all); +p.addParamValue('Factor_Generator', 'randn', @is_valid_matrix_generator); +p.addParamValue('Lambda_Generator', 'rand', @is_valid_matrix_generator); +p.addParamValue('Core_Generator', 'randn', @is_valid_tensor_generator); +p.addParamValue('M', 0, @(x) is_missing_data(x) || (x == 0)); +p.addParamValue('Sparse_M', false, @islogical); +p.addParamValue('Sparse_Generation', 0, @(x) x >= 0); +p.addParamValue('Symmetric', []); +p.addParamValue('Noise', 0.10, @(x) x >= 0); +% p.addParamValue('Rtest', 0, @(x) isscalar(x) & x >= 0); +% p.addParamValue('Init_Type', 'random', @(x) ismember(x,{'random','nvecs'})); +p.parse(varargin{:}); +params = p.Results; + +%% Initialize random number generator with specified state. +defaultStream.State = params.State; + +%% Error checking +if is_missing_data(params.M) && (params.Sparse_Generation > 0) + error('Cannot combine missing data and sparse generation'); +end + +if strcmpi(params.Type, 'tucker') && (params.Sparse_Generation > 0) + error('Sparse generation only supported for CP'); +end + +%% Check for incompatible parameters +if ~isempty(params.Symmetric) + if is_missing_data(params.M) + error('Not set up to generate a symmetric problem with missing data'); + end + if (params.Sparse_Generation ~= 0) + error('Not set up to generate a sparse symmetric problem'); + end +end + +%% Create return data structure +info = struct; +info.Soln = generate_solution(params); + +if is_missing_data(params.M) + info.Pattern = generate_missing_pattern(size(info.Soln), params); + info.Data = generate_data(info.Soln, info.Pattern, params); +elseif (params.Sparse_Generation) + [info.Data, info.Soln] = generate_data_sparse(info.Soln, params); +else + info.Data = generate_data(info.Soln, [], params); +end + +function D = generate_data(S, W, params) +%GENERATE_DATA Generate CP or Tucker data from a given solution. + +sz = size(S); +if isempty(W) + Rdm = tensor(randn([sz 1 1]), sz); + Z = full(S); + % Use symmetric noise for a symmetric problem + if ~isempty(params.Symmetric) + Rdm = symmetrize(Rdm, params.Symmetric); %Robert to discuss with Tammy + end +else + if isa(W,'sptensor') + Rdm = sptensor(W.subs,randn(nnz(W),1),W.size); + Z = W.*S; + else + Rdm = W.*tensor(randn([sz 1 1]), sz); + Z = W.*full(S); + end +end + +D = Z + params.Noise * norm(Z) * Rdm / norm(Rdm); + +% Make sure the final result is *absolutely* symmetric +if ~isempty(params.Symmetric) + D = symmetrize(D, params.Symmetric); +end + +function output = prosample(nsamples, prob) +%PROSAMPLE Proportional sampling + +% Create bins +bins = min([0 cumsum(prob')],1); +bins(end) = 1; + +% Create indices +[~, output] = histc(rand(nsamples,1),bins); + + +function [Z,S] = generate_data_sparse(S,params) +%GENERATE_DATA_SPARSE Generate sparse CP data from a given solution. 
+ +% Error check on S +if any(S.lambda < 0) + error('All lambda values must be nonnegative'); +end +if any(cellfun(@(x) any(x(:)<0), S.U)) + error('All factor matrices must be nonnegative'); +end +if ~strcmpi(params.Type,'CP') + error('Only works for CP'); +end +if ~isempty(params.Symmetric) + warning('Symmetry constraints have been ignored'); +end + +% Convert S to a probability tensor +P = normalize(S,[],1); +eta = sum(P.lambda); +P.lambda = P.lambda / eta; + +% Determine how many samples per component +nedges = params.Sparse_Generation; +if nedges < 1 + nedges = round(nedges * prod(size(P))); +end +nd = ndims(P); +nc = size(P.U{1},2); +csample = prosample(nedges, P.lambda); +csums = accumarray(csample,1,[nc 1]); + +% Determine subscripts for each randomly sampled entry +sz = size(S); +subs = cell(nc,1); +for c = 1:nc + nsample = csums(c); + if nsample == 0 + continue; + end + subs{c} = zeros(nsample,nd); + for d = 1:nd + PU = P.U{d}; + subs{c}(:,d) = prosample(nsample, PU(:,c)); + end +end + +% Assemble final tensor. Note that duplicates are summed. +allsubs = cell2mat(subs); +Z = sptensor(allsubs,1,sz); + +% Rescale S so that it is proportional to the number of edges inserted +S = P; +S.lambda = nedges * S.lambda; + +function W = generate_missing_pattern(sz, params) +%GENERATE_MISSING_PATTERN Generate a tensor pattern of missing data. + +M = params.M; +S = params.Sparse_M; +if isa(M, 'tensor') || isa(M, 'sptensor') + W = M; + return; +end +if M == 0 + W = []; + return; +end +if (M < 0.8) && S + warning('Setting sparse to false because there are less than 80% missing elements'); + S = false; +end +W = tt_create_missing_data_pattern(sz, M, S); + +function S = generate_solution(params) +%GENERATE_SOLUTION Generate factor matrices and other data for CP or Tucker + +if ~isempty(params.Soln) + S = params.Soln; + return; +end + +% Get size of final tensor +sz = params.Size; +nd = length(sz); + +% Get size of factors +nfactors = params.Num_Factors; +if numel(nfactors) == 1 + nfactors = nfactors * ones(size(sz)); +end +if any(size(nfactors) ~= size(sz)) + error('''Num_Factors'' should either be a single value or the same dimensions as ''Size''.'); +end + +% Create factor matrices +fgfh = get_generator(params.Factor_Generator); +U = cell(nd,1); +for n = 1:nd + U{n} = fgfh(sz(n), nfactors(n)); +end + +if ~isempty(params.Symmetric) + if ~iscell(params.Symmetric) + params.Symmetric = {params.Symmetric}; + end + for i = 1:length(params.Symmetric) + grp = params.Symmetric{i}; + for j = 2:length(grp) + U{grp(j)} = U{grp(1)}; + end + end +end + +% Create final ktensor or ttensor +switch lower(params.Type) + case {'cp'} + lgfh = get_generator(params.Lambda_Generator); + lambda = lgfh(nfactors(1),1); + S = ktensor(lambda,U); + case {'tucker'} + cgfh = get_generator(params.Core_Generator); + core = tensor(cgfh(nfactors)); + S = ttensor(core,U); + otherwise + error('Invalid choice for ''Type'''); +end + +function b = is_valid_matrix_generator(x) +b = isa(x,'function_handle') || ... 
+ ismember(lower(x),{'rand','randn','orthogonal','stochastic'}); + +function b = is_valid_tensor_generator(x) +b = isa(x,'function_handle') || ismember(lower(x),{'rand','randn'}); + +function fh = get_generator(x) +if isa(x,'function_handle') + fh = x; + return; +end +switch lower(x) + case {'randn'} + fh = @randn; + case {'rand'} + fh = @rand; + case {'orthogonal'} + fh = @rand_orth_mat; + case {'stochastic'} + fh = @rand_column_stochastic; + otherwise + error('Invalid choice for generator'); +end + +function Y = rand_column_stochastic(M,N) +X = rand(M,N); +S = sum(X,1); +Y = X * diag(1./S); + +function Y = rand_orth_mat(M,N) +X = matrandorth(M); +Y = X(:,1:N); + +function tf = is_missing_data(x) +tf = isa(x,'tensor') || isa(x,'sptensor') || (isscalar(x) && (x > 0) && (x < 1)); + + +function W = tt_create_missing_data_pattern(sz,M,isSparse) +%TEST_CREATE_RME Creates a randomly missing element (RME) indicator tensor. +% +% W = TEST_CREATE_RME(SZ,M) creates an indicator (binary) tensor W of the +% specified size with 0's indicating missing data and 1's indicating +% valid data. The percentage of zeros is given by M. Will only return a +% tensor that has at least one entry per N-1 dimensional slice. +% +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others. +% http://www.sandia.gov/~tgkolda/TensorToolbox. +% Copyright (2015) Sandia Corporation. Under the terms of Contract +% DE-AC04-94AL85000, there is a non-exclusive license for use of this +% work by or on behalf of the U.S. Government. Export of this data may +% require a license from the United States Government. +% The full license terms can be found in the file LICENSE.txt + +% Code by Evrim Acar and Tammy Kolda, 2009. + +%% Set up isSparse variable +if ~exist('isSparse','var') + isSparse = false; +end + +%% Initialize +% Number of dimensions +N = length(sz); + +% Total number of entries in tensor of given sz +P = prod(sz); + +% Total number of entries that should be set to one +Q = ceil((1-M)*P); + +%% Create the tensor +% Keep iterating until the tensor is created or we give up. +for iter = 1:20 + % Create the indicator tensor W + if isSparse + % start with 50% more than Q random subs + % TODO: work out the expected value of a*Q to guarantee Q unique entries + subs = unique(ceil(rand(ceil(1.5*Q),size(sz,2))*diag(sz)),'rows'); + % check if there are too many unique subs + if size(subs,1) > Q + % unique orders the subs and would bias toward first subs + % with lower values, so we sample to cut back + idx = randperm(size(subs,1)); + subs = subs(idx(1:Q),:); + elseif size(subs,1) < Q + warning('Only generated %d of %d desired subscripts', size(subs,1), Q); + end + W = sptensor(subs,1,sz); + else + % Compute the linear indices of the missing entries. Note that + % the indices must be a column array for the linear indexing + % into W to work. 
+ idx = randperm(P); + idx = idx(1:Q)'; + W = tenzeros(sz); + W(idx) = 1; + end + + % Check if W has any empty slices + isokay = zeros(N,1); + for n = 1:N + isokay(n) = all(double(collapse(W,-n))); + end + + % Quit if we're okay + if all(isokay) + break; + end + +end + +if ~all(isokay) + error('After %d iterations, cannot produce a tensor with %f%% missing data without an empty slice.', iter, M*100); +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem_binary.m b/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem_binary.m new file mode 100644 index 0000000..3cd148c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/create_problem_binary.m @@ -0,0 +1,202 @@ +function [X,Mtrue,info] = create_problem_binary(sz,r,varargin) +%CREATE_PROBLEM_BINARY Creates random low-rank 0/1 tensor. +% +% [X,M,INFO] = CREATE_PROBLEM_BINARY(SZ,R,'param','value') creates an +% sptensor X of size SZ from the low-rank ktensor M of rank R that +% corresponds to the *odds* of a 1 in each position. The parameters that +% control this are as follows: +% +% 'state' - State of random number generator, for reproducing results. +% 'loprob' - Probability of 'noise' one. Default: 0.01. +% 'hiprob' - Probability of 'structural' one. Default: 0.90. +% 'density' - Density of structural entries. Default: 1/r. +% 'verbosity' - Output: 0: None, 1: Minimal (default), 2: Detailed. +% 'spgen' - Avoid explicitly forming low-rank tensor. Default: False. +% +% REFERENCES: +% * T. G. Kolda, D. Hong, J. Duersch. Stochastic Gradients for +% Large-Scale Tensor Decomposition, 2019. +% +% See also: GCP_OPT, CREATE_PROBLEM. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with + +%% Random set-up +defaultStream = RandStream.getGlobalStream; + +%% Set algorithm parameters from input or by using defaults +params = inputParser; +params.addParameter('state', defaultStream.State); +params.addParameter('loprob', 0.01, @(x) isscalar(x) && x > 0 && x < 0.1); +params.addParameter('hiprob', 0.9, @(x) isscalar(x) && x > 0 && x < 1); +params.addParameter('density', []); +params.addParameter('verbosity', 1); +params.addParameter('spgen',false); +params.addParameter('Mtrue',[]); +params.parse(varargin{:}); +info.params = params.Results; + +%% Initialize random number generator with specified state +defaultStream.State = params.Results.state; + +%% Extract parameters +loprob = params.Results.loprob; +hiprob = params.Results.hiprob; +density = params.Results.density; +verbosity = params.Results.verbosity; +spgen = params.Results.spgen; +Mtrue = params.Results.Mtrue; + +%% Setup +if verbosity > 0 + fprintf('Creating random problem instance\n'); +end + +%% Set up for creating factor matrices + +% Density specifies the density of high values in the first r-1 columns of +% the factor matrices +if isempty(density) + density = 1/r; +end + +% Extract the order of the tensor +d = length(sz); + +% Convert the high and low probabilities to the dth root of the +% corresponding odds. +loval = nthroot(loprob/(1-loprob),d); +hival = nthroot(hiprob/(1-hiprob),d); + + +%% Populate factor matrices +% The first (r-1) columns of each factor matrix is sparse per the +% specified denstiy. The nonzero values are normal distributed around the +% hival odds ration with a standard deviation of 0.5. +% The last column of each factor matrices is dense but low-valued set to +% the constant loval, corresponding to the general noisyness of binary +% observations. 
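% Editor's note (worked sketch, not part of the original code): because
% loval and hival above are d-th roots of the corresponding odds, a
% structural entry where every mode contributes hival recovers the
% requested probability. For d = 3 and hiprob = 0.90:
%
%   hival = nthroot(0.90/0.10, 3);   % per-mode root of the odds (odds = 9)
%   odds  = hival^3;                 % = 9
%   prob  = odds / (1 + odds);       % = 0.90, as requested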
+ +if isempty(Mtrue) + A = cell(d,1); + for k = 1:d + if r > 1 + A1v = random('Normal', hival, 0.5, [sz(k),r-1]); + A1p = rand(sz(k),r-1) < density; + A1 = max(A1v .* A1p, 0); + else + A1 = []; + end + A2 = loval * ones(sz(k),1); + A{k} = [A1,A2]; + end + Mtrue = ktensor(A); % Correct solution +else + A = Mtrue.u; + if verbosity > 0 + fprintf('Using user-specified choice for Mtrue\n'); + end +end +%% + +if spgen + + % --- Create all-zero sparse tensor --- + X = sptensor(sz); + + % --- Compute big entries of X, which are expected to be few --- + + if verbosity > 1 + fprintf('Generating high probability entries...\n'); + end + + % Find possible high values correspond to each component + subs = []; + for j = 1:r-1 + + % Identify nonzeros in each mode + modeidx = cell(d,1); + for k = 1:d + tmp = A{k}(:,j); + modeidx{k} = find(tmp > 0); + end + + % Count nnzs in each factor + cnts = cellfun(@length, modeidx); + + % Compute total number of entries from this factor + fcnt = prod(cnts); + + if fcnt > 0 + % Create the subscripts of those entries + csubs = tt_ind2sub(cnts',(1:fcnt)'); + fsubs = zeros(fcnt,d); + for k = 1:d + fsubs(:,k) = modeidx{k}(csubs(:,k)); + end + subs = [subs; fsubs]; + end + + end + + subs = unique(subs,'rows'); + nhigh_max = size(subs,1); + if verbosity > 1 + fprintf('\tmax # high entries = %d\n',nhigh_max); + end + + if nhigh_max > 0 + % Compute the probablities at those entries + Mvals = Mtrue(subs); + Pvals = Mvals ./ (1 + Mvals); + Xvals = random('Binomial',1,Pvals); + tf = (Xvals == 1); + % Remove the subscripts that don't correspond to ones + hisubs = subs(tf,:); + X(hisubs) = 1; + nhigh = sum(Xvals); + else + hisubs = []; + nhigh = 0; + end + + if verbosity > 1 + fprintf('\t# high entries = %d\n',nhigh); + end + + % --- Compute the 'noise' from the rest of the entries --- + + if verbosity > 1 + fprintf('Generating low probability (aka noise) entries...\n'); + end + + % Number of remaining entries + nloprob = prod(sz) - nhigh_max; + % Randomly compute how many will be 1's, using binomial, + % which we estimate using Poisson since nloprob is large and loprob is + % small. + nlow = random('Poisson', nloprob * loprob, 1); + if verbosity > 1 + fprintf('\t# low entries = %d\n',nlow); + end + if nlow > 0 + % Choose that many indicies + losubs = tt_sample_zeros(X, tt_sub2ind(sz,hisubs), nlow, 1.1, false); + X(losubs) = 1; + end + if verbosity > 1 + fprintf('\tFinished\n'); + end + + info.nlow = nlow; + info.nhigh_max = nhigh_max; + info.nhigh = nhigh; + + +else + Mtruef = full(Mtrue); + P = Mtruef ./ (1 + Mtruef); + X = sptensor(random('Binomial',1,double(P))); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/collapse_scale_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/collapse_scale_doc.m new file mode 100644 index 0000000..990817f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/collapse_scale_doc.m @@ -0,0 +1,60 @@ +%% Collapsing and scaling tensors +% The tensor and sptensor classes support that notion of collapsing and +% scaling dimensions. + +%% Examples of collapsing a tensor +X = tenrand([4 3 2]) %<-- Generate some data. +%% +Y = collapse(X,[2 3]) %<-- Sum of entries in each mode-1 slice. +%% +Y = collapse(X,-1) %<-- Same as above. +%% +Z = collapse(X,2) %<-- Sum of entries in each row fiber. +%% +collapse(X,1:3) %<-- Sum of all entries. +%% Alternate accumulation functions for tensor +Y = collapse(X,[1 2],@max) %<-- Max entry in each mode-3 slice. +%% +Z = collapse(X,-3,@mean) %<-- Average entry in each mode-3 slice. 
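%% Combining collapse and scale (editor's sketch)
% The two operations compose naturally: |collapse| computes per-slice
% statistics and |scale| pushes them back onto the tensor. The example below
% is an editorial addition (not from the original doc) that normalizes each
% mode-3 slice of a dense tensor so that its entries sum to one.
X = tenrand([4 3 2]);        %<-- Generate some data.
w = double(collapse(X,-3));  %<-- Sum of entries in each mode-3 slice.
Y = scale(X, 1./w, 3);       %<-- Divide each mode-3 slice by its sum.
collapse(Y,-3)               %<-- Should be all ones.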
+%% Examples of collapsing a sptensor +X = sptenrand([4 3 2],6) %<-- Generate some data. +%% +Y = collapse(X,[2 3]) %<-- Sum of entries in each mode-1 slice. +%% +Y = collapse(X,-1) %<-- Same as above. +%% +Z = collapse(X,2) %<-- Sum of entries in each row fiber. +%% +collapse(X,1:3) %<-- Sum of all entries. +%% Alternate accumulation functions for sptensor +Y = collapse(X,[1 2],@min) %<-- Min *nonzero* entry in each mode-3 slice. +%% +Z = collapse(X,-3,@mean) %<-- Average *nonzero* entry in each mode-3 slice. +%% Scaling a tensor in different modes +X = tenones([3,4,5]); %<-- Generate data +S = 10 * [1:5]'; Y = scale(X,S,3) %<-- Scale in mode-3 +%% +S = tensor(10 * [1:5]',5); Y = scale(X,S,3) %<-- First argument is a tensor. +%% +S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) %<-- Scale in two modes. +%% +S = tensor(1:12,[3 4]); Y = scale(X,S,-3) %<-- Same as above. +%% +S = tensor(1:60,[3 4 5]); Y = scale(X,S,1:3) %<-- Scale in every mode. +%% +Y = S .* X %<-- Same as above. + +%% Scaling a sptensor in different modes +X = ones(sptenrand([3 4 5], 10)) %<-- Generate data. +%% +S = 10 * [1:5]'; Y = scale(X,S,3) %<-- Scale in one mode. +%% +S = tensor(10 * [1:5]',5); Y = scale(X,S,3) %<-- Same as above. +%% +S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) %<-- Scale in two modes. +%% +S = tensor(1:12,[3 4]); Y = scale(X,S,-3) %<-- Same as above. +%% +Z = scale(X,Y,1:3) %<-- Scale by a sparse tensor. +%% +X .* Y %<-- Same as above. \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_als_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_als_doc.m new file mode 100644 index 0000000..b1e21f2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_als_doc.m @@ -0,0 +1,118 @@ +%% Alternating least squares for CANDECOMP/PARAFAC (CP) Decomposition +% The function |cp_als| computes an estimate of the best rank-R +% CP model of a tensor X using the well-known alternating least-squares +% algorithm (see, e.g., Kolda and Bader, SIAM Review, 2009, for more +% information). The input X can be almost any type of tensor inclusing a +% |tensor|, |sptensor|, |ktensor|, or |ttensor|. The output CP model is a +% |ktensor|. + +%% Load some data +% We use the well-known _amino acids data set_ from Andersson and Bro. +% It contains fluorescence measurements of 5 samples containing 3 amino +% acids: Tryptophan, Tyrosine, and Phenylalanine.Each amino acid +% corresponds to a rank-one component. The tensor is of size 5 x 51 x 201 +% from 5 samples, 51 excitations, and 201 emissions. +% Further details can be found here: +% . +% Please cite the following paper for this data: +% Rasmus Bro, PARAFAC: Tutorial and applications, Chemometrics and +% Intelligent Laboratory Systems, 1997, 38, 149-171. +% This dataset can be found in the |doc| directory. +load aminoacids +%% Basic call to the method, specifying the data tensor and its rank +% This uses a _random_ initial guess. At each iteration, it reports the 'fit' +% which is defined as |1-(norm(X-M)/norm(X))| and is loosely the proportion +% of the data described by the CP model, i.e., a fit of 1 is perfect. +rng(3) %<- Setting random seed for reproducibility of this script +M1 = cp_als(X,3); %<- Call the method +%% +% We typically can achieve a final fit of f = 0.97. The method stops when +% the change in the fit becomes less than the specified +% tolerance, which defaults to 1-e4. + +%% Visualize the results +% Use the |ktensor/viz| function to visualize the results. 
+vizopts = {'PlotCommands',{'bar','line','line'},... + 'ModeTitles',{'Concentration','Emission','Excitation'},... + 'BottomSpace',0.10,'HorzSpace',0.04,'Normalize',0}; +info1 = viz(M1,'Figure',1,vizopts{:}); + +%% Run again with a different initial guess, output the initial guess. +% This time we have two outputs. The first output is the solution as a +% ktensor. The second output is a cell array containing the initial guess. +% Since the first mode is not needed, it is omitted from the cell array. +[M2bad,U2] = cp_als(X,3); + +%% Increase the maximium number of iterations +% Note that the previous run kicked out at only 50 iterations before +% reaching the specified convegence tolerance. Let's increate the maximum +% number of iterations and try again, using the same initial guess. +M2 = cp_als(X,3,'maxiters',100,'init',U2); + +%% +% This solution looks more or less the same as the previous one. +info2 = viz(M2,'Figure',2,vizopts{:}); + +%% Compare the two solutions +% Use the |ktensor/score| function to compare the two solutions. A score of +% 1 indicates a perfect match. These are not exactly the same, but they are +% pretty close. +score(M1,M2) + +%% Rerun with same initial guess +% Using the same initial guess (and all other parameters) gives the exact +% same solution. +M2alt = cp_als(X,3,'maxiters',100,'init',U2); +score(M2, M2alt) %<- Score of 1 indicates the same solution + +%% Changing the output frequency +% Using the |'printitn'| option to change the output frequency. +M2alt2 = cp_als(X,3,'maxiters',100,'init',U2,'printitn',10); + +%% Suppress all output +% Set |'printitn'| to zero to suppress all output. +M2alt3 = cp_als(X,3,'maxiters',100,'init',U2,'printitn',0); % <-No output + +%% Use HOSVD initial guess +% Use the |'nvecs'| option to use the leading mode-n singular vectors as +% the initial guess. +M3 = cp_als(X,3,'init','nvecs','printitn',10); + +%% +% Compare to the first solution using score, and see they are nearly the +% same because the score is close to 1. +score(M1,M3) + +%% Change the order of the dimensions in CP +[M4,~,info] = cp_als(X,3,'dimorder',[2 3 1],'init','nvecs','printitn',10); +score(M1,M4) + +%% +% In the last example, we also collected the third output argument which +% has some extra information in it. The field |info.iters| has the total +% number of iterations. The field |info.params| has the information used to +% run the method. Unless the initialization method is 'random', passing the +% parameters back to the method will yield the exact same results. +M4alt = cp_als(X,3,info.params); +score(M4,M4alt) + +%% Change the tolerance +% It's also possible to loosen or tighten the tolerance on the change in +% the fit. You may need to increase the number of iterations for it to +% converge. +M5 = cp_als(X,3,'init','nvecs','tol',1e-6,'maxiters',1000,'printitn',10); + +%% Control sign ambiguity of factor matrices +% The default behavior of |cp_als| is to make a call to |fixsigns| to fix +% the sign ambiguity of the factor matrices. You can turn off this behavior +% by passing the |'fixsigns'| parameter value of |false| when calling |cp_als|. +X = ktensor([1;1], {[1, 1; 1, -10],[1, 1; 1, -10]}); +M = cp_als(X, 2, 'printitn', 0, 'init', X.U) % <-default behavior, fixsigns called +M = cp_als(X, 2, 'printitn', 0, 'init', X.U, 'fixsigns', false) % <-fixsigns not called + +%% Recommendations +% * Run multiple times with different guesses and select the solution with +% the best fit. 
+% * Try different ranks and choose the solution that is the best descriptor +% for your data based on the combination of the fit and the interpretaton +% of the factors, e.g., by visualizing the results. \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_apr_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_apr_doc.m new file mode 100644 index 0000000..10854c8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_apr_doc.m @@ -0,0 +1,41 @@ +%% Alternating Poisson Regression for fitting CP to sparse count data + +%% Set up a sample problem +% We follow the general procedure outlined by E. C. Chi and T. G. Kolda, +% On Tensors, Sparsity, and Nonnegative Factorizations, arXiv:1112.2414 +% [math.NA], December 2011 (http://arxiv.org/abs/1112.2414). + +% Pick the size and rank +sz = [100 80 60]; +R = 5; + +% Generate factor matrices with a few large entries in each column; this +% will be the basis of our soln. +A = cell(3,1); +for n = 1:length(sz) + A{n} = rand(sz(n), R); + for r = 1:R + p = randperm(sz(n)); + nbig = round( (1/R)*sz(n) ); + A{n}(p(1:nbig),r) = 100 * A{n}(p(1:nbig),r); + end +end +lambda = rand(R,1); +S = ktensor(lambda, A); +S = normalize(S,'sort',1); + +% Create sparse test problem based on provided solution. +nz = prod(sz) * .05; +info = create_problem('Soln', S, 'Sparse_Generation', nz); + +% Extract data and solution +X = info.Data; +M_true = info.Soln; + +%% Call CP-APR + +% Compute a solution +M = cp_apr(X, R, 'printitn', 10); + +% Score the solution +factor_match_score = score(M, M_true, 'greedy', true) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_arls_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_arls_doc.m new file mode 100644 index 0000000..6f4465c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_arls_doc.m @@ -0,0 +1,150 @@ +%% Alternating randomized least squares for CP Decomposition +% The function |cp_arls| computes an estimate of the best rank-R CP model +% of a tensor X using alternating _randomized_ least-squares algorithm. +% The input X must be a (dense) |tensor|. The output CP model is a +% |ktensor|. The CP-ARLS method is described in the following reference: +% +% * C. Battaglino, G. Ballard, T. G. Kolda. A Practical Randomized CP +% Tensor Decomposition, to appear in SIAM J. Matrix Analysis and +% Applications, 2017. Preprint: +% . +%% Set up a sample problem +% We set up an especially difficult and somewhat large sample problem that +% has high collinearity (0.9) and 1% noise. This is an example where the +% randomized method will generally outperform the standard method. +sz = [200 300 400]; +R = 5; +ns = 0.01; +coll = 0.9; + +info = create_problem('Size', sz, 'Num_Factors', R, 'Noise', ns, ... + 'Factor_Generator', @(m,n) matrandcong(m,n,coll), ... + 'Lambda_Generator', @ones); + +% Extract data and solution +X = info.Data; +M_true = info.Soln; + +%% Running the CP-ARLS method +% Running the method is essentially the same as using CP-ALS, feed the data +% matrix and the desired rank. Note that the iteration is of the form NxN +% which is the number of epochs x the number of iterations per epoch. The +% default number of iterations per epoch is 50. At the end of each epoch, +% we check the convergence criteria. Because this is a randomized method, +% we do not achieve strict decrease in the objective function. 
Instead, we +% look at the number of epochs without improvement (newi) and exit when +% this crosses the predefined tolerance (`newitol`), which defaults to 5. +% It is important to note that the fit values that are reported are +% approximate, so this is why it is denoted by `f~` rather than just `f`. + +tic +[M1, ~, out1] = cp_arls(X,R); +time1 = toc; +scr1 = score(M1,M_true); +fprintf('\n*** Results for CP-ARLS (with mixing) ***\n'); +fprintf('Time (secs): %.3f\n', time1) +fprintf('Score (max=1): %.3f\n', scr1); + +%% Speed things up by skipping the initial mixing +% The default behavior is to mix the data in each mode using an FFT and +% diagonal random +/-1 matrix. This may add substantial preprocessing time, +% though it helps to ensure that the method converges. Oftentimes, such as +% with randomly-generated data, the mixing is not necessary. + +tic +[M2, ~, out2] = cp_arls(X,R,'mix',false); +time2 = toc; +scr2 = score(M2,M_true); + +fprintf('\n*** Results for CP-ARLS (no mix) ***\n'); +fprintf('Time (secs): %.3f\n', time2) +fprintf('Score (max=1): %.3f\n', scr2); + +%% Comparing with CP-ALS +% CP-ALS may be somewhat faster, especially since this is a relatively +% small problem, but it usually will not achieve as good of an answer in +% terms of the score. + +tic; +[M3, ~, out3] = cp_als(X,R,'maxiters',500,'printitn',10); +time3 = toc; +scr3 = score(M3,M_true); +fprintf('\n*** Results for CP-ALS ***\n'); +fprintf('Time (secs): %.3f\n', time3) +fprintf('Score (max=1): %.3f\n', scr3); + +%% How well does the approximate fit do? +% It is possible to check the accuracy of the fit computation by having the +% code compute the true fit and the final solution, enabled by the +% `truefit` option. +[M4,~,out4] = cp_arls(X,R,'truefit',true); + +%% Varying epoch size +% It is possible to vary that number of iterations per epoch. Fewer +% iterations means that more time is spent checking for convergence and it +% may also be harder to detect as an single iteration can have some +% fluctuation and we are actually looking for the overall trend. In +% contrast, too many iterations means that the method won't realize when it +% has converged and may spend too much time computing. + +%% +tic +M = cp_arls(X,R,'epoch',1,'newitol',20); +toc +fprintf('Score: %.4f\n',score(M,M_true)); + +%% +tic +M = cp_arls(X,R,'epoch',200,'newitol',3,'printitn',2); +toc +fprintf('Score: %.4f\n',score(M,M_true)); + +%% Set up another sample problem +% We set up another problem with 10% noise, but no collinearity. +sz = [200 300 400]; +R = 5; +ns = 0.10; + +info = create_problem('Size', sz, 'Num_Factors', R, 'Noise', ns, ... + 'Factor_Generator', @rand,'Lambda_Generator', @ones); + +% Extract data and solution +X = info.Data; +M_true = info.Soln; + +%% Terminating once a desired fit is achieved +% If we know the noise level is 10%, we would expect a fit of 0.90 at best. +% So, we can set a threshold that is close to that and terminate as soon as +% we achieve that accuracy. Since detecting convergence is hard for a +% randomized method, this can lead to speed ups. However, if the fit is not +% high enough, the accuracy may suffer consequently. +M = cp_arls(X,R,'newitol',20,'fitthresh',0.895,'truefit',true); +fprintf('Score: %.4f\n',score(M,M_true)); + +%% Changing the number of function evaluation samples +% The function evaluation is approximate and based on sampling the number +% of entries specified by `nsampfit`. If this is too small, the samples +% will not be accurate enough. 
If this is too large, the computation will +% take too long. The default is $2^14$, which should generally be +% sufficient. It may sometimes be possible to use smaller values. The same +% sampled entries are used for every convergence check --- we do not +% resample to check other entries. +M = cp_arls(X,R,'truefit',true,'nsampfit',100); +fprintf('Score: %.4f\n',score(M,M_true)); + +%% Change the number of sampled rows in least squares solve +% The default number of sampled rows for the least squares solves is +% `ceil(10*R*log2(R))`. This seemed to work well in most tests, but this can +% be varied higher or lower. For R=5, this means we sample 117 rows per +% solve. The rows are different for every least squares problem. Let's see +% what happens if we reduce this to 10. + +M = cp_arls(X,R,'truefit',true,'nsamplsq',10); +fprintf('Score: %.4f\n',score(M,M_true)); + +%% +% What if we use 25? +M = cp_arls(X,R,'truefit',true,'nsamplsq',25); +fprintf('Score: %.4f\n',score(M,M_true)); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_doc.m new file mode 100644 index 0000000..5cb0c55 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_doc.m @@ -0,0 +1,125 @@ +%% All-at-once optimization for CP tensor decomposition +% We explain how to use |cp_opt| function which implements the *CP-OPT* +% method that fits the CP model using _direct_ or _all-at-once_ +% optimization. This is in contrast to the |cp_als| function which +% implements the *CP-ALS* that fits the CP model using _alternating_ +% optimization. The CP-OPT method is described in the +% following reference: +% +% * E. Acar, D. M. Dunlavy and T. G. Kolda, *A Scalable +% Optimization Approach for Fitting Canonical Tensor Decompositions*, +% _J. Chemometrics_ 25(2):67-86, February 2011 +% + + +%% Third-party optimization software +% The |cp_opt| method uses third-party optimization software to do the +% optimization. You can use either +% +% * +% (preferred), or +% * . +% +% The remainder of these instructions assume L-BFGS-B is being used. See +% for instructions on using |cp_opt| with +% Poblano. + +%% Check that the software is installed. +% Be sure that lbfgsb is in your path. +help lbfgsb + +%% Create an example problem. +% Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise. +R = 5; +info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10); +X = info.Data; +M_true = info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, 'Factor_Generator', 'nvecs'); + +%% Call the |cp_opt| method +% Here is an example call to the cp_opt method. By default, each iteration +% prints the least squares fit function value (being minimized) and the +% norm of the gradient. + +[M,M0,output] = cp_opt(X, R, 'init', M_init); + +%% Check the output +% It's important to check the output of the optimization method. In +% particular, it's worthwhile to check the exit message. +% The message |CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH| means that +% it has converged because the function value stopped improving. +exitmsg = output.ExitMsg + +%% +% The fit is the percentage of the data that is explained by the model. +% Because we have noise, we do not expect the fit to be perfect. +fit = output.Fit + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. 
The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) + +%% Overfitting example +% Re-using the same example as before, consider the case where we don't +% know R in advance. We might guess too high. Here we show a case where we +% guess R+1 factors rather than R. + +% Generate initial guess of the corret size +M_plus_init = create_guess('Data', X, 'Num_Factors', R+1, ... + 'Factor_Generator', 'nvecs'); + +%% + +% Run the algorithm +[M_plus,~,output] = cp_opt(X, R+1, 'init', M_plus_init); +exitmsg = output.ExitMsg +fit = output.Fit + +%% + +% Check the answer (1 is perfect) +scr = score(M_plus, M_true) + +%% Nonnegative factorization +% We can employ lower bounds to get a nonnegative factorization. + +%% Create an example problem. +% Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise. We +% select nonnegative factor matrices and lambdas. The +% create_problem doesn't really know how to add noise without going +% negative, so we _hack_ it to make the observed tensor be nonzero. +R = 5; +info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10,... + 'Factor_Generator', 'rand', 'Lambda_Generator', 'rand'); +X = info.Data .* (info.Data > 0); % Force it to be nonnegative +M_true = info.Soln; + +%% Generate initial guess of the corret size +M_init = create_guess('Data', X, 'Num_Factors', R, ... + 'Factor_Generator', 'rand'); +%% Call the |cp_opt| method +% Here we specify a lower bound of zero with the last two arguments. +[M,M0,output] = cp_opt(X, R, 'init', M_init,'lower',0); + +%% Check the output +exitmsg = output.ExitMsg + +%% +% The fit is the percentage of the data that is explained by the model. +% Because we have noise, we do not expect the fit to be perfect. +fit = output.Fit + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_poblano_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_poblano_doc.m new file mode 100644 index 0000000..1fb08c6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_opt_poblano_doc.m @@ -0,0 +1,98 @@ +%% All-at-once optimization for CP tensor decomposition (with Poblano) +% We explain how to use |cp_opt| with the Poblano toolbox. The default is +% to use L-BFGS-B (not Poblabo), which is described . + + +%% Poblano Optimization Toolbox +% Check that you have Poblano 1.1 installed. The output of your 'ver' +% command should look something like the following. +ver + +%% Create an example problem. +% Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise. +R = 5; +info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10); +X = info.Data; +M_true = info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, ... + 'Factor_Generator', 'nvecs'); + + +%% Set up the optimization parameters +% It's genearlly a good idea to consider the parameters of the optimization +% method. The default options may be either too stringent or not stringent +% enough. The most important options to consider are detailed here. 
+ +% Get the defaults +ncg_opts = ncg('defaults'); +% Tighten the stop tolerance (norm of gradient). This is often too large. +ncg_opts.StopTol = 1.0e-6; +% Tighten relative change in function value tolearnce. This is often too large. +ncg_opts.RelFuncTol = 1.0e-20; +% Increase the number of iterations. +ncg_opts.MaxIters = 10^4; +% Only display every 10th iteration +ncg_opts.DisplayIters = 10; +% Display the final set of options +ncg_opts + +%% Call the |cp_opt| method +% Here is an example call to the cp_opt method. By default, each iteration +% prints the least squares fit function value (being minimized) and the +% norm of the gradient. The meaning of any line search warnings +% can be checked via . +[M,~,output] = cp_opt(X, R, 'init', M_init, ... + 'opt', 'ncg', 'opt_options', ncg_opts); + +%% Check the output +% It's important to check the output of the optimization method. In +% particular, it's worthwhile to check the exit flag. +% A zero (0) indicates successful termination with the gradient smaller +% than the specified StopTol, and a three (3) indicates a successful +% termination where the change in function value is less than RelFuncTol. +% The meaning of any other flags can be checked via +% . +exitflag = output.ExitFlag + +%% +% The fit is the percentage of the data that is explained by the model. +% Because we have noise, we do not expect the fit to be perfect. +fit = output.Fit + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) + +%% Overfitting example +% Consider the case where we don't know R in advance. We might guess too +% high. Here we show a case where we guess R+1 factors rather than R. + +% Generate initial guess of the corret size +M_plus_init = create_guess('Data', X, 'Num_Factors', R+1, ... + 'Factor_Generator', 'nvecs'); + +%% + +% Loosen the stop tolerance (norm of gradient). +ncg_opts.StopTol = 1.0e-2; + +%% + +% Run the algorithm +[M_plus,~,output] = cp_opt(X, R+1, 'init', M_plus_init, ... + 'opt', 'ncg', 'opt_options', ncg_opts); +exitflag = output.ExitFlag +fit = output.Fit + + +%% + +% Check the answer (1 is perfect) +scr = score(M_plus, M_true) + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_doc.m new file mode 100644 index 0000000..3ebf466 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_doc.m @@ -0,0 +1,90 @@ +%% Weighted optimization for CP tensor decomposition with incomplete data +% We explain how to use the CP Weighted Optimization (CP-WOPR) method +% implements in |cp_wopt|. The method is described in the following article: +% +% * E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup, +% *Scalable Tensor Factorizations for Incomplete Data*, +% _Chemometrics and Intelligent Laboratory Systems_ 106(1):41-56, March 2011 +% (doi:10.1016/j.chemolab.2010.08.004) + +%% Third-party optimization software +% The |cp_wopt| method uses third-party optimization software to do the +% optimization. You can use either +% +% * +% (preferred), or +% * . +% +% The remainder of these instructions assume L-BFGS-B is being used. See +% for instructions on using |cp_wopt| with +% Poblano. 
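+%%
+% As a quick sanity check (a minimal sketch; the function name |lbfgsb| is
+% assumed to match whichever L-BFGS-B wrapper you installed), confirm that the
+% solver is visible on the MATLAB path before calling |cp_wopt|:
+if ~exist('lbfgsb','file')
+    warning('lbfgsb was not found on the path; add its folder with addpath.');
+end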
+ +%% Important Information +% +% It is critical to zero out the values in the missing entries of the data +% tensor. This can be done by calling |cp_wopt(X.*P,P,...)|. This is a +% frequent source of errors in using this method. + +%% Create an example problem with missing data. +% Here we have 25% missing data and 10% noise. +R = 2; +info = create_problem('Size', [15 10 5], 'Num_Factors', R, ... + 'M', 0.25, 'Noise', 0.10); +X = info.Data; +P = info.Pattern; +M_true= info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, ... + 'Factor_Generator', 'nvecs'); + + + + +%% Call the |cp_wopt| method +% Here is an example call to the cp_opt method. By default, each iteration +% prints the least squares fit function value (being minimized) and the +% norm of the gradient. +[M,~,output] = cp_wopt(X, P, R, 'init', M_init); + +%% Check the output +% It's important to check the output of the optimization method. In +% particular, it's worthwhile to check the exit message for any problems. +% The message |CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH| means that +% it has converged because the function value stopped improving. +exitmsg = output.ExitMsg + + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) + +%% Create a SPARSE example problem with missing data. +% Here we have 95% missing data and 10% noise. +R = 2; +info = create_problem('Size', [150 100 50], 'Num_Factors', R, ... + 'M', 0.95, 'Sparse_M', true, 'Noise', 0.10); +X = info.Data; +P = info.Pattern; +M_true= info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, ... + 'Factor_Generator', 'nvecs'); + + +%% Call the |cp_wopt| method +[M,~,output] = cp_wopt(X, P, R, 'init', M_init); + +%% Check the output +exitmsg = output.ExitMsg + + +%% Evaluate the output +scr = score(M,M_true) + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_poblano_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_poblano_doc.m new file mode 100644 index 0000000..d71c26a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/cp_wopt_poblano_doc.m @@ -0,0 +1,133 @@ +%% Weighted optimization for CP tensor decomposition with incomplete data +% We explain how to use |cp_wopt| with the POBLANO toolbox. The method is +% described in the following article: +% +% E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup, +% Scalable Tensor Factorizations for Incomplete Data, +% Chemometrics and Intelligent Laboratory Systems 106(1):41-56, March 2011 +% (doi:10.1016/j.chemolab.2010.08.004) + +%% Important Information +% +% It is critical to zero out the values in the missing entries of the data +% tensor. This can be done by calling |cp_wopt(X.*P,P,...)|. This is a +% frequent source of errors in using this method. + +%% Create an example problem with missing data. +% Here we have 25% missing data and 10% noise. +R = 2; +info = create_problem('Size', [15 10 5], 'Num_Factors', R, ... + 'M', 0.25, 'Noise', 0.10); +X = info.Data; +P = info.Pattern; +M_true= info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, ... 
+ 'Factor_Generator', 'nvecs'); + + +%% Set up the optimization parameters +% It's genearlly a good idea to consider the parameters of the optimization +% method. The default options may be either too stringent or not stringent +% enough. The most important options to consider are detailed here. + +% Get the defaults +ncg_opts = ncg('defaults'); +% Tighten the stop tolerance (norm of gradient). This is often too large. +ncg_opts.StopTol = 1.0e-6; +% Tighten relative change in function value tolearnce. This is often too large. +ncg_opts.RelFuncTol = 1.0e-20; +% Increase the number of iterations. +ncg_opts.MaxIters = 10^4; +% Only display every 10th iteration +ncg_opts.DisplayIters = 10; +% Display the final set of options +ncg_opts + +%% Call the |cp_wopt| method +% Here is an example call to the cp_opt method. By default, each iteration +% prints the least squares fit function value (being minimized) and the +% norm of the gradient. The meaning of any line search warnings +% can be checked via . +[M,~,output] = cp_wopt(X, P, R, 'init', M_init, ... + 'opt', 'ncg', 'opt_options', ncg_opts); + +%% Check the output +% It's important to check the output of the optimization method. In +% particular, it's worthwhile to check the exit flag. +% A zero (0) indicates successful termination with the gradient smaller +% than the specified StopTol, and a three (3) indicates a successful +% termination where the change in function value is less than RelFuncTol. +% The meaning of any other flags can be checked via +% . +exitflag = output.ExitFlag + + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) + +%% Create a SPARSE example problem with missing data. +% Here we have 95% missing data and 10% noise. +R = 2; +info = create_problem('Size', [150 100 50], 'Num_Factors', R, ... + 'M', 0.95, 'Sparse_M', true, 'Noise', 0.10); +X = info.Data; +P = info.Pattern; +M_true= info.Soln; + +%% Create initial guess using 'nvecs' +M_init = create_guess('Data', X, 'Num_Factors', R, ... + 'Factor_Generator', 'nvecs'); + + +%% Set up the optimization parameters +% It's genearlly a good idea to consider the parameters of the optimization +% method. The default options may be either too stringent or not stringent +% enough. The most important options to consider are detailed here. + +% Get the defaults +ncg_opts = ncg('defaults'); +% Tighten the stop tolerance (norm of gradient). This is often too large. +ncg_opts.StopTol = 1.0e-6; +% Tighten relative change in function value tolearnce. This is often too large. +ncg_opts.RelFuncTol = 1.0e-20; +% Increase the number of iterations. +ncg_opts.MaxIters = 10^4; +% Only display every 10th iteration +ncg_opts.DisplayIters = 10; +% Display the final set of options +ncg_opts + +%% Call the |cp_wopt| method +% Here is an example call to the cp_opt method. By default, each iteration +% prints the least squares fit function value (being minimized) and the +% norm of the gradient. The meaning of any line search warnings +% can be checked via . +[M,~,output] = cp_wopt(X, P, R, 'init', M_init, ... + 'opt', 'ncg', 'opt_options', ncg_opts); + +%% Check the output +% It's important to check the output of the optimization method. In +% particular, it's worthwhile to check the exit flag. 
+% A zero (0) indicates successful termination with the gradient smaller +% than the specified StopTol, and a three (3) indicates a successful +% termination where the change in function value is less than RelFuncTol. +% The meaning of any other flags can be checked via +% . +exitflag = output.ExitFlag + + +%% Evaluate the output +% We can "score" the similarity of the model computed by CP and compare +% that with the truth. The |score| function on ktensor's gives a score in +% [0,1] with 1 indicating a perfect match. Because we have noise, we do +% not expect the fit to be perfect. See for more details. +scr = score(M,M_true) + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_amino_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_amino_doc.m new file mode 100644 index 0000000..7dae8ca --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_amino_doc.m @@ -0,0 +1,112 @@ +%% GCP-OPT Examples with Amino Acids Dataset +% +% For more details, see . + +%% Setup +% We use the well known amino acids dataset for some tests. This data has +% some negative values, but the factorization itself should be nonnegative. + +% Load the data +load(fullfile(getfield(what('tensor_toolbox'),'path'),'doc','aminoacids.mat')) + +clear M fit + +vizopts = {'PlotCommands',{@bar,@(x,y) plot(x,y,'r'),@(x,y) plot(x,y,'g')},... + 'BottomSpace',0.1, 'HorzSpace', 0.04, 'Normalize', @(x) normalize(x,'sort',2)}; + +%% CP-ALS +% Just a reminder of what CP-ALS does. + +cnt = 1; + +tic, M{cnt} = cp_als(X,3,'printitn',10); toc + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with Gaussian +% We can instead call the GCP with the Gaussian function. + +cnt = 2; +M{cnt} = gcp_opt(X,3,'type','Gaussian','printitn',10); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with Gaussian and Missing Data +% What is some data is missing? +cnt = 3; + +% Proportion of missing data +p = 0.35; + +% Create a mask with the missing entries set to 0 and everything else 1 +W = tensor(double(rand(size(X))>p)); + +% Fit the model, using the 'mask' option +M{cnt} = gcp_opt(X.*W,3,'type','Gaussian','mask',W,'printitn',10); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with ADAM +% We can also use stochastic gradient, though it's pretty slow for such a +% small tensor. +cnt = 4; + +% Specify 'opt' = 'adam' +M{cnt} = gcp_opt(X,3,'type','Gaussian','opt','adam','printitn',1,'fsamp',5000,'gsamp',500); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with Gamma (terrible!) +% We can try Gamma, but it's not really the right distribution and produces +% a terrible result. +cnt = 5; + +Y = tensor(X(:) .* (X(:) > 0), size(X)); +M{cnt} = gcp_opt(Y,3,'type','Gamma','printitn',25); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with Huber + Lower Bound +% Huber works well. By default, Huber has no lower bound. To add one, we +% have to pass in the func/grad/lower information explicitly. We can use +% |gcp_fg_setup| to get the func/grad parameters. 
+cnt = 6; + +% Call helper function tt_gcp_fg_setup to get the function and gradient handles +[fh,gh] = tt_gcp_fg_setup('Huber (0.25)'); + +% Pass the func/grad/lower explicitly. +M{cnt} = gcp_opt(X,3,'func',fh,'grad',gh,'lower',0,'printitn',25); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); + +viz(M{cnt},'Figure',cnt,vizopts{:}); + +%% GCP with Beta +% This is also pretty bad, which gives an idea of the struggle of choosing +% the wrong distribution. It can work a little bit, but it's clearly the +% wrong objective. +cnt = 7; + +M{cnt} = gcp_opt(X,3,'type','beta (0.75)','printitn',25); + +fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X); +fprintf('Fit: %g\n', fit(cnt)); +viz(M{cnt},'Figure',cnt,vizopts{:}); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_doc.m new file mode 100644 index 0000000..a8deef2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_doc.m @@ -0,0 +1,329 @@ +%% Generalized CP (GCP) Tensor Decomposition +% This document outlines usage and examples for the generalized CP (GCP) +% tensor decomposition implmented in |gcp_opt|. +% GCP allows alternate objective functions besides sum of squared errors, +% which is the standard for CP. +% The code support both dense and sparse input tensors, but the sparse +% input tensors require randomized optimization methods. For some examples, +% see also . +% +% GCP is described in greater detail in the manuscripts: +% +% * D. Hong, T. G. Kolda, J. A. Duersch. +% _Generalized Canonical Polyadic Tensor Decomposition_. +% , +% 2018. To appear in SIAM Review, 2019. +% * T. G. Kolda, D. Hong, J. Duersch. _Stochastic Gradients for +% Large-Scale Tensor Decomposition_, 2019. + +%% Basic Usage +% The idea of GCP is to use alternative objective functions. As such, the +% most important thing to specify is the objective function. +% +% The command |M = gcp_opt(X,R,'type',type)| +% computes an estimate of the best rank-|R| +% generalized CP (GCP) decomposition of the tensor |X| for the specified +% generalized loss function specified by |type|. The input |X| can be a +% tensor or sparse tensor. The result |M| is a Kruskal tensor. +% Some options for the objective function are: +% +% * |'binary'| - Bernoulli distribution for binary data +% * |'count'| - Poisson distribution for count data (see also ) +% * |'normal'| - Gaussian distribution (see also and ) +% * |'huber (0.25)'| - Similar to Gaussian but robust to outliers +% * |'rayleigh'| - Rayleigh distribution for nonnegative data +% * |'gamma'| - Gamma distribution for nonnegative data +% +% See +% for a complete list of options. + +%% Manually specifying the loss function +% Rather than specifying a type, the user has the option to explicitly +% specify the objection function, gradient, and lower bounds using the +% following options: +% +% * |'func'| - Objective function handle, e.g., |@(x,m) (m-x).^2| +% * |'grad'| - Gradient function handle, e.g., |@(x,m) 2.*(m-x)| +% * |'lower'| - Lower bound, e.g., 0 or |-Inf| +% +% Note that the function must be able to work on vectors of x and m values. + +%% Choice of Optimzation Method +% The default optimization method is L-BFGS-B (bound-constrained +% limited-memory BFGS). To use this, install the third-party software: +% +% * +% +% The L-BFGS-B software can only be used for dense tensors. +% The other choice is to use a stochastic optimization method, either +% stochastic gradient descent (SGD) or ADAM. 
This can be used for +% dense or sparse tensors. +% +% The command |M = gcp_opt(X,R,...,'opt',opt)| specifies the optimization +% method where |opt| is one of the following strings: +% +% * |'lbfgsb'| - Bound-constrained limited-memory BFGS +% * |'sgd'| - Stochastic gradient descent (SGD) +% * |'adam'| - Momentum-based SGD method +% +% Each methods has parameters, which are described below. + +%% Specifying Missing or Incomplete Data Using the Mask Option +% If some entries of the tensor are unknown, the method can mask off that +% data during the fitting process. To do so, specify a *mask* tensor |W| +% that is the same size as the data tensor |X|. +% The mask tensor should be 1 if the entry in |X| is known and 0 otherwise. +% The call is |M = gcp_opt(X,R,'type',type','mask',W)|. + +%% Other Options +% A few common options are as follows: +% +% * |'maxiters'| - Maximum number of outer iterations {1000} +% * |'init'| - Initialization for factor matrices {|'rand'|} +% * |'printitn'| - Print every n iterations; 0 for no printing {1} +% * |'state'| - Random state, to re-create the same outcome {[]} + +%% Specifying L-BFGS-B Parameters +% In addition to the options above, there are two options used to modify +% the L-BFGS-B behavior. +% +% * |'factr'| - Tolerance on the change on the objective value. Defaults to +% 1e7, which is multiplied by machine epsilon. +% * |'pgtol'| - Projected gradient tolerance, defaults to 1e-5. +% +% It can sometimes be useful to increase or decrease |pgtol| depending on +% the objective function and size of the tensor. + + +%% Specifying SGD and ADAM Parameters +% There are a number of parameters that can be adjusted for SGD and ADAM. +% +% *Stochastic Gradient.* There are three different sampling methods for +% computing the stochastic gradient: +% +% * _Uniform_ - Entries are selected uniformly at random. Default for dense +% tensors. +% * _Stratified_ - Zeros and nonzeros are sampled separately, which is +% recommended for sparse tensors. Default for sparse tensors. +% * _Semi-Stratified_ - Modification to stratified sampling that avoids +% rejection sampling for better efficiency at the cost of potentially +% higher variance. +% +% The options corresponding to these are as follows. +% +% * |'sampler'| - Type of sampling to use for stochastic gradient. Defaults +% to |'uniform'| for dense and |'stratified'| for sparse. The third options +% is |'semi-stratified'|. +% * |'gsamp'| - Number of samples for stochastic gradient. This should +% generally be O(sum(sz)*r). For the stratified or semi-stratified sampler, +% this can be two numbers. The first +% is the number of nonzero samples and the second is the number of zero +% samples. If only one number is specified, then this is used as the number +% for both nonzeros and zeros, and the total number of samples is 2x what is +% specified. +% +% *Estimating the Function.* We also use sampling to estimate the function +% value. +% +% * |'fsampler'| - This can be |'uniform'| (default for dense) or +% |'stratified'| (default for sparse) or a custom function handle. +% The custom function handleis primarily useful +% in reusing the same sampled elements across different tests. For +% instance, we might create such a sampler by calling the hidden sampling +% function and saving its outputs: +% +% [xsubs, xvals, wghts] = tt_sample_uniform(X, 10000); +% fsampler = @() deal(xsubs, xvals, wghts);' +% +% * |'fsamp'| - Number of samples to estimate function. 
+% This should generally be somewhat large since we want this sample to +% generate a reliable estimate of the true function value. +% +% The |'stratified'| sampler has an extra option: +% * |'oversample'| - Factor to oversample when implicitly sampling zeros in +% the sparse case. Defaults to 1.1. Only adjust for very small tensors. +% +% There are some other options that are needed for SGD, the learning rate +% and a decrease schedule. Our schedule is very simple - we decrease the +% rate if there is no improvement in the approximate function value after +% an epoch. After a specified number of decreases (|'maxfails'|), we quit. +% +% * |'rate'| - Initial learning rate. Defaults to 1e-3. +% * |'decay'| - How much to decrease the learning rate once progress +% stagnates, i.e., no decrease in objective function between epochs. +% Default to 0.1. +% * |'maxfails'| - How many times to decrease the learning rate. Can be +% zero. Defaults to 1. +% * |'epciters'| - Iterations per epoch. Defaults to 1000. +% * |'festtol'| - Quit if the function estimate goes below this level. Defaults to |-Inf|. +% +% There are some options that are specific to ADAM and generally needn't +% change: +% +% * |'beta1'| - Default to 0.9 +% * |'beta2'| - Defaults to 0.999 +% * |'epsilon'| - Defaults to 1e-8 +% + +%% Example on Gaussian distributed +% We set up the example with known low-rank structure. Here |nc| is the +% rank and |sz| is the size. +clear +rng(4) +nc = 2; +sz = [50 60 70]; +info = create_problem('Size',sz,'Num_Factors',nc); +X = info.Data; +M_true = info.Soln; +whos +%% +% Run GCP-OPT +tic, [M1,M0,out] = gcp_opt(X,nc,'type','normal','printitn',10); toc +fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X)); +fprintf('Score: %f\n',score(M1,M_true)); + +%% +% Compare to CP-ALS, which should usually be faster +tic, M2 = cp_als(X,nc,'init',tocell(M0),'printitn',1); toc +fprintf('Objective function: %e (for comparison to f(x) in GCP-OPT)\n', norm(X-full(M2))^2/prod(size(X))); +fprintf('Score: %f\n',score(M2,M_true)); + +%% +% Now let's try is with the ADAM functionality +tic, [M3,~,out] = gcp_opt(X,nc,'type','normal','opt','adam','init',M0,'printitn',1); toc +fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X)); +fprintf('Score: %f\n',score(M3,M_true)); + +%% Create an example Rayleigh tensor model and data instance. +% Consider a tensor that is Rayleigh-distribued. This means its entries are +% all nonnegative. First, we generate such a tensor with low-rank +% structure. +clear +rng(65) +nc = 3; +sz = [50 60 70]; +nd = length(sz); + +% Create factor matrices that correspond to smooth sinusidal factors +U=cell(1,nd); +for k=1:nd + V = 1.1 + cos(bsxfun(@times, 2*pi/sz(k)*(0:sz(k)-1)', 1:nc)); + U{k} = V(:,randperm(nc)); +end +M_true = normalize(ktensor(U)); +X = tenfun(@raylrnd, full(M_true)); +%% +% Visualize the true solution +viz(M_true, 'Figure', 1) + +%% +% Run GCP-OPT +tic, [M1,~,out] = gcp_opt(X,nc,'type','rayleigh','printitn',10); toc +fprintf('Score: %f\n',score(M1,M_true)); + +%% +% Visualize the solution from GCP-OPT +viz(M1, 'Figure', 2) + +%% +% Now let's try is with the scarce functionality - this leaves out all but +% 10% of the data! +tic, [M2,~,out] = gcp_opt(X,nc,'type','rayleigh','opt','adam'); toc +fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X)); +fprintf('Score: %f\n',score(M2,M_true)); + +%% +% Visualize the solution with scarce +viz(M2, 'Figure', 3) + +%% Boolean tensor. 
+% The model will predict the odds of observing a 1. Recall that the odds +% related to the probability as follows. If $p$ is the probability adn $r$ +% is the odds, then $r = p / (1-p)$. Higher odds indicates a higher +% probability of observing a one. +clear +rng(7639) +nc = 3; % Number of components +sz = [50 60 70]; % Tensor size +nd = length(sz); % Number of dimensions + +%% +% We assume that the underlying model tensor has factor matrices with only +% a few "large" entries in each column. The small entries should correspond +% to a low but nonzero entry of observing a 1, while the largest entries, +% if multiplied together, should correspond to a very high likelihood of +% observing a 1. +probrange = [0.01 0.99]; % Absolute min and max of probabilities +oddsrange = probrange ./ (1 - probrange); +smallval = nthroot(min(oddsrange)/nc,nd); +largeval = nthroot(max(oddsrange)/nc,nd); + +A = cell(nd,1); +for k = 1:nd + A{k} = smallval * ones(sz(k), nc); + nbig = 5; + for j = 1:nc + p = randperm(sz(k)); + A{k}(p(1:nbig),j) = largeval; + end +end +M_true = ktensor(A); + +%% +% Convert K-tensor to an observed tensor +% Get the model values, which correspond to odds of observing a 1 +Mfull = full(M_true); +% Convert odds to probabilities +Mprobs = Mfull ./ (1 + Mfull); +% Flip a coin for each entry, with the probability of observing a one +% dictated by Mprobs +Xfull = 1.0*(tensor(@rand, sz) < Mprobs); +% Convert to sparse tensor, real-valued 0/1 tensor since it was constructed +% to be sparse +X = sptensor(Xfull); +fprintf('Proportion of nonzeros in X is %.2f%%\n', 100*nnz(X) / prod(sz)); + +%% +% Just for fun, let's visualize the distribution of the probabilities in +% the model tensor. +histogram(Mprobs(:)) + +%% +% Call GCP_OPT on the full tensor +[M1,~,out] = gcp_opt(Xfull, nc, 'type', 'binary','printitn',25); +fprintf('Final score: %f\n', score(M1,M_true)); + +%% +% GCP-OPT as sparse tensor + +[M2,~,out] = gcp_opt(X, nc, 'type', 'binary'); +fprintf('Final score: %f\n', score(M2,M_true)); + + +%% Create and test a Poisson count tensor. +nc = 3; +sz = [80 90 100]; +nd = length(sz); +paramRange = [0.5 60]; +factorRange = paramRange.^(1/nd); +minFactorRatio = 95/100; +lambdaDamping = 0.8; +rng(21); +info = create_problem('Size', sz, ... + 'Num_Factors', nc, ... + 'Factor_Generator', @(m,n)factorRange(1)+(rand(m,n)>minFactorRatio)*(factorRange(2)-factorRange(1)), ... + 'Lambda_Generator', @(m,n)ones(m,1)*(lambdaDamping.^(0:n-1)'), ... + 'Sparse_Generation', 0.2); + +M_true = normalize(arrange(info.Soln)); +X = info.Data; +viz(M_true, 'Figure',3); + +%% Loss function for Poisson negative log likelihood with identity link. + +% Call GCP_OPT on sparse tensor +[M1,M0,out] = gcp_opt(X, nc, 'type', 'count','printitn',25); +fprintf('Final score: %f\n', score(M1,M_true)); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_fg_options_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_fg_options_doc.m new file mode 100644 index 0000000..fdd92f5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/gcp_opt_fg_options_doc.m @@ -0,0 +1,152 @@ +%% Function Types for GCP +% The GCP capability of the Tensor Toolbox allows the user to specify a fit +% function. There are a number of ''standard'' choices that we provide via +% the helper function |tt_gcp_fg_setup| function. These choices are +% presented in detail below. Motivations and details for these choices can +% be found in: +% +% * D. Hong, T. G. Kolda, J. A. Duersch. 
+% _Generalized Canonical Polyadic Tensor Decomposition_. +% , +% 2018. To appear in SIAM Review, 2019. +% +% These choices can be passed directly to gcp_opt via the 'type' option. To +% test the options, call the hidden function: +% +% |[f,g,lowerbnd] = tt_gcp_fg_setup(type)| +% +% We discuss the choices for the type below. +%% Gaussian (real-valued data) +% This is indicated by specifying the type as either |'normal'| or +% |'gaussian'|. This choice correspond to standard CP, which is implemented +% in |cp_als| and |cp_opt|. It is useful for continuous real-valued data +% tensors. This choice specifies +% +% $$f(x,m) = (x-m)^2, \quad g(x,m) = 2(m-x), \quad \ell=-\infty$$ +% + +[f,g,lowerbnd] = tt_gcp_fg_setup('normal') + +%% Poisson (count data) +% This is indicated by specifying the type as either |'count'| or +% |'poisson'|. This choice is useful for count data tensors, i.e., +% tensors that have only entries in {0,1,2,...}. This choice corresponds to +% Poisson CP, which is implemente din |cp_apr|. This choice specifies +% +% $$f(x,m) = m - x \log(m + 10^{-10}), +% \quad g(x,m) = 1 - \frac{x}{m+10^{-10}}, +% \quad \ell=0$$ +% +% The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors. + +[f,g,lowerbnd] = tt_gcp_fg_setup('count') + +%% Poisson with Log Link (count data) +% This is indicated by specifying the type as |'poisson-log'|. This choice +% is useful for count data tensors, i.e., tensors that have only entries in +% {0,1,2,...}. This choice specifies +% +% $$f(x,m) = e^m - x m, +% \quad g(x,m) = e^m - x, +% \quad \ell=-\infty$$ +% + +[f,g,lowerbnd] = tt_gcp_fg_setup('poisson-log') + +%% Bernoulli with Odds Link (binary data) +% This is indicated by specifying the type as either |'binary'| or +% |'bernoulli-odds'|. This choice is useful for binary data tensors, i.e., +% tensors that have only 0 or 1 entries. This choice specifies +% +% $$f(x,m) = \log(m+1) - x \log(m + 10^{-10}), +% \quad g(x,m) = \frac{1}{m+1} - \frac{x}{m+10^{-10}}, +% \quad \ell=0$$ +% +% The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors. + +[f,g,lowerbnd] = tt_gcp_fg_setup('binary') + +%% Bernoulli with Logit Link (binary data) +% This is indicated by specifying the type as |'bernoulli-logit'|. This +% choice is useful for binary data tensors, i.e., tensors that have only 0 +% or 1 entries. This choice specifies +% +% $$f(x,m) = \log(e^m+1) - x m, +% \quad g(x,m) = \frac{e^m}{e^m+1} - x, +% \quad \ell=-\infty$$ +% + +[f,g,lowerbnd] = tt_gcp_fg_setup('bernoulli-logit') + +%% Rayleigh (real-valued data) +% This is indicated by specifying the type |'rayleigh'|. This choice is +% useful for nonnegative real-values data tensors, i.e., +% tensors that have only nonnegative. This choice specifies +% +% $$f(x,m) = 2 \log(m+10^{-10}) - \frac{\pi}{4} \frac{x}{(m + 10^{-10})^2}, +% \quad g(x,m) = \frac{1}{m+10^{-10}} - \frac{\pi}{2} \frac{x}{(m + 10^{-10})^3}, +% \quad \ell=0$$ +% +% The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors. + +[f,g,lowerbnd] = tt_gcp_fg_setup('rayleigh') + +%% Gamma (nonnegative real-valued data) +% This is indicated by specifying the type |'gamma'|. This choice is +% useful for nonnegative real-values data tensors, i.e., +% tensors that have only nonnegative. This choice specifies +% +% $$f(x,m) = \frac{x}{m+10^{-10}} + \log(m + 10^{-10}), +% \quad g(x,m) = \frac{-x}{(m+10^{-10})^2} - \frac{1}{m + 10^{-10}}, +% \quad \ell=0$$ +% +% The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors. 
+ +[f,g,lowerbnd] = tt_gcp_fg_setup('gamma') + +%% Huber (nonnegative real-valued data) +% This is indicated by specifying the type |'huber (DELTA)'|, where |DELTA| +% is $\Delta$ in the equations below. This choice is useful for +% nonnegative real-values data tensors, i.e., tensors that +% have only nonnegative. This choice specifies +% +% $$f(x,m) = \left\{ \begin{array}{ll}(x-m)^2 & \mbox{if } |x-m| \leq \Delta, \\ +% 2\Delta|x-m|-\Delta^2 & \mbox{otherwise}\end{array}\right., +% \quad +% g(x,m) = \left\{ \begin{array}{ll}-2(x-m) & \mbox{if } |x-m| \leq \Delta, \\ +% 2\Delta\mbox{sgn}(x-m) & \mbox{otherwise}\end{array}\right., +% \quad +% \ell = 0 +% $$ + +[f,g,lowerbnd] = tt_gcp_fg_setup('huber (0.25)') + +%% Negative Binomial (count data) +% This is indicated by specifying the type |'negative-binomial (r)'|, where |r| +% is $r$ in the equations below. This choice is useful for +% count data tensors. This choice specifies +% +% $$f(x,m) = (r+x) \log(1+m) - x \log(m+10^{-10}), +% \quad +% g(x,m) = \frac{(r+x)}{1+m} - \frac{x}{m+10^{-10}}, +% \quad +% \ell = 0 +% $$ + +[f,g,lowerbnd] = tt_gcp_fg_setup('negative-binomial (4)') + +%% Beta (nonnegative real-valued data) +% This is indicated by specifying the type |'beta (BETA)'|, where |BETA| +% is $\beta$ in the equations below. This choice is useful for +% nonnegative data tensors. Choices of $\beta=0$ or $\beta=1$ are not +% allowed because these correspond to 'gamma' or 'rayleigh'. +% This choice specifies +% +% $$f(x,m) = \frac{ (m+10^{-10})^\beta }{\beta} - \frac{x(m+10^{-10})^{(\beta-1)} }{\beta-1}, +% \quad +% g(x,m) = (m+10^{-10})^{(\beta-1)} - x(m+10^{-10})^{(\beta-2)}, +% \quad +% \ell = 0 +% $$ + +[f,g,lowerbnd] = tt_gcp_fg_setup('beta (0.3)') diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/helptoc_template.xml b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/helptoc_template.xml new file mode 100644 index 0000000..acfea09 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/helptoc_template.xml @@ -0,0 +1,74 @@ + + + + + + MyToolbox Toolbox + + + + + + + + + + + + Getting Started with the MyToolbox Toolbox + System Requirements + + Features + + Feature 1 + + Feature 2 + + + + + MyToolbox User Guide + Setting Up MyToolbox + + Processing Data + + Verifying MyToolbox outputs + Handling Test Failures + + + + + + Function Reference + + First Category + + function_1 + function_2 + + + + Second Category + + function_3 + function_4 + + + + Third category + + + + + Mytoolbox Examples + + + + MyToolbox Web Site (Example only: goes to mathworks.com) + + + \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/hosvd_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/hosvd_doc.m new file mode 100644 index 0000000..ebd7cf2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/hosvd_doc.m @@ -0,0 +1,184 @@ +%% Computing Tucker via the HOSVD + +%% Higher-order Singular Value Decomposition (HOSVD) and Sequentially-truncased HOSVD (ST-HOSVD) +% The HOSVD computes a Tucker decomposition of a tensor via a simple +% process. For each mode k, it computes the r_k leading left singular +% values of the matrix unfolding and stores those as factor matrix U_k. +% Then it computes a |ttm| of the original tensor and all the factor matrices to +% yield the core of size r_1 x r_2 x ... x r_d. The core and factor +% matrices are used to form the |ttensor|. 
+% The values of r_k that lead to a good approximation can be computed +% automatically to yield a specified error tolerance; this is recommended +% and the default in our code. +% The ST-HOSVD is an improvement on the HOSVD that does a TTM in _each_ mode +% before moving on to the next mode. This has the advantage of shrinking +% the tensor at each step and reducing subsequent computations. ST-HOSVD is the +% default in the |hosvd| code. +% +% +% * L. R. Tucker, Some mathematical notes on three-mode factor analysis, +% _Psychometrika_ 31:279-311, 1966, doi:10.1007/BF02289464 +% * L. D. Lathauwer, B. D. Moor and J. Vandewalle, A multilinear singular +% value decomposition, _SIAM J. Matrix Analysis and Applications_ +% 21(4):1253-1278, 2000, doi:10.1137/S0895479896305696 +% * N. Vannieuwenhoven, R. Vandebril and K. Meerbergen, A New Truncation +% Strategy for the Higher-Order Singular Value Decomposition, _SIAM J. +% Scientific Computing_ 34(2):A1027-A1052, 2012, +% doi:10.1137/110836067 +% + +%% Simple example of usage + +% Create random 50 x 40 x 30 tensor with 5 x 4 x 3 core +info = create_problem('Type','Tucker','Num_Factors',[5 4 3],'Size',[50 40 30],'Noise',0.01); +X = info.Data; + +% Compute HOSVD with desired relative error = 0.1 +T = hosvd(X,0.1); + +% Check size of core +coresize = size(T.core) + +% Check relative error +relerr = norm(X-full(T))/norm(X) + +%% Generate a core with different accuracies for different sizes +% We will create a core tensor that has is nearly block diagonal. The +% blocks are expontentially decreasing in norm, with the idea that we can +% pick off one block at a time as we increate the prescribed accuracy of +% the HOSVD. To do this, we use |tenrandblk|. + +% Block sizes (need not be cubic). Number of rows is the number +% of levels and number of columns is the order of the tensor. +bsz = [3 2 1; 2 2 2; 2 3 4]; + +% Squared norm of each block. Must be length L and sum to <= 1 +bsn = [.9 .09 .009]'; + +% Create core tensor with given block structure and norm 1 +G = tenrandblk(bsz,bsn,true); + +%% +fprintf('Size of G: %s\n', tt_size2str(size(G))); + +%% Generate data tensor with core described above +% We take the core G and embed into into a larger tensor X by using +% orthogonal transformations. The true rank of this tensor is equal to the +% size of G. + +% Size of X +xsz = [20 20 20]; + +% Create orthogonal matrices +U = cell(3,1); +for k = 1:3 + V = matrandorth(xsz(k)); + U{k} = V(:,1:size(G,k)); +end + +% Create X +X = full(ttensor(G,U)); + +% The norm should be unchanged +fprintf('||X||=%f\n',norm(X)); + +%% Compute (full) HOSVD +% We compute the ST-HOSVD using the |hosvd| method. We specify the +% tolerance to close to machine precision. Ideally, it finds a core that is +% the same size as G. + +fprintf('ST-HOSVD...\n'); +T = hosvd(X,2*sqrt(eps)); + +%% Compute low-rank HOSVD approximation +% The norm squared of the first two blocks of G is 0.99, so specifying an +% error of 1e-2 should yield a core of size 4 x 4 x 3. However, the +% conservative nature of the algorithm means that it may pick something +% larger. We can compensate by specifying a larger tolerance. + +% Using 1e-2 exactly is potentially too conservative... +fprintf('Result with tol = sqrt(1e-2):\n'); +T = hosvd(X, sqrt(1e-2),'verbosity',1); + +% But a small multiple (i.e., |ndims(X)|) usually works... 
+fprintf('Result with tol = sqrt(3e-2):\n'); +T = hosvd(X, sqrt(3e-2),'verbosity',1); + +%% +% Similarly, lhe norm squared of the first block of G is 0.9, so specifying +% an error of 1e-1 should result in a core of size 3 x 2 x 1. + +% Using 1e-1 exactly is potentially too conservative... +fprintf('Result with tol = sqrt(1e-1):\n'); +T = hosvd(X, sqrt(1e-1),'verbosity',1); + +% But a small multiple (i.e., |ndims(X)|) usually works... +fprintf('Result with tol = sqrt(3e-1):\n'); +T = hosvd(X, sqrt(3e-1),'verbosity',1); + +%% Verbosity - Getting more or less information. +% Setting the verbosity to zero suppresses all output. +% Cranking up the verbosity gives some insight into the decision-making +% process... + +% Example 1 +T = hosvd(X, sqrt(3e-1),'verbosity',10); + +%% +% Example 2 +T = hosvd(X, sqrt(3*eps),'verbosity',10); + +%% Specify the ranks +% If you know the rank you want, you can specify it. But there's no +% guarantee that it will satisfy the specified tolerance. In such cases, +% the method will throw a warning. + +% Rank is okay +T = hosvd(X,sqrt(3e-1),'ranks',bsz(1,:)); + +% Rank is too small for the specified error +T = hosvd(X,sqrt(3e-1),'ranks',[1 1 1]); + +% But you can set the error to the tensor norm to make the warning go away +T = hosvd(X,norm(X),'ranks',[1 1 1]); + +%% Specify the mode order +% It's also possible to specify the order of the modes. The default is +% 1:ndims(X). +T = hosvd(X,sqrt(3e-1),'dimorder',ndims(X):-1:1); + +%% Generate bigger data tensor with core described above +% Uses the same procedure as before, but now the size is bigger. + +% Size of Y +ysz = [100 100 100]; + +% Create orthogonal matrices +U = cell(3,1); +for k = 1:3 + V = matrandorth(ysz(k)); + U{k} = V(:,1:size(G,k)); +end + +% Create Y +Y = full(ttensor(G,U)); + +%% ST-HOSVD compared to HOSVD +% The answers are essentially the same for the sequentially-truncated HOSVD +% and the HOSVD... + +fprintf('ST-HOSVD...\n'); +T = hosvd(Y,2*sqrt(eps)); +fprintf('HOSVD...\n'); +T = hosvd(Y,2*sqrt(eps),'sequential',false); + +%% +% But ST-HOSVD may be slightly faster than HOSVD for larger tensors. + +fprintf('Time for 10 runs of ST-HOSVD:\n'); +tic, for i =1:10, T = hosvd(Y,2*sqrt(eps),'verbosity',0); end; toc + +fprintf('Time for 10 runs of HOSVD:\n'); +tic, for i =1:10, T = hosvd(Y,2*sqrt(eps),'verbosity',0,'sequential',false); end; toc + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/bibtex.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/bibtex.html new file mode 100644 index 0000000..201fc41 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/bibtex.html @@ -0,0 +1,171 @@ + + + + + BibTex Entries: Tensor Toolbox for MATLAB + + + + + +
+

BibTex Entries: Tensor Toolbox for MATLAB

+
+ +

@misc{TTB_Software,
+  author = {Brett W. Bader and Tamara G. Kolda and others},
+  title = {MATLAB Tensor Toolbox Version 3.1},
+  howpublished = {Available online},
+  month = jun,
+  year = {2019},
+  url = {https://www.tensortoolbox.org}
+}

+ +
+@article{TTB_Dense,
+  author = {Brett W. Bader and Tamara G. Kolda},
+  title = {Algorithm 862: {MATLAB} tensor classes for fast algorithm prototyping},
+  journal = {ACM Transactions on Mathematical Software},
+  month = dec,
+  year = {2006},
+  volume = {32},
+  number = {4},
+  pages = {635-653},
+  doi = {10.1145/1186785.1186794}
+}
+
+ + +

@article{TTB_Sparse,
+  author = {Brett W. Bader and Tamara G. Kolda},
+  title = {Efficient {MATLAB} computations with sparse and factored tensors},
+  journal = {SIAM Journal on Scientific Computing},
+  month = dec,
+  year = {2007},
+  volume = {30},
+  number = {1},
+  pages = {205-231},
+  doi = {10.1137/060676489}
+}
+

+ +

@article{TTB_CPOPT,
+  author = {Evrim Acar and Daniel M. Dunlavy and Tamara G. Kolda},
+  title = {A Scalable Optimization Approach for Fitting Canonical Tensor Decompositions},
+  journal = {Journal of Chemometrics},
+  month = feb,
+  year = {2011},
+  volume = {25},
+  number = {2},
+  pages = {67-86},
+  doi = {10.1002/cem.1335}
+}
+

+ +

@article{TTB_CPWOPT,
+  author = {Evrim Acar and Daniel M. Dunlavy and Tamara G. Kolda and Morten M{\o}rup},
+  title = {Scalable Tensor Factorizations for Incomplete Data},
+  journal = {Chemometrics and Intelligent Laboratory Systems},
+  month = mar,
+  year = {2011},
+  volume = {106},
+  number = {1},
+  pages = {41-56},
+  doi = {10.1016/j.chemolab.2010.08.004}
+}
+

+ +

@article{TTB_SSHOPM,
+  author = {Tamara G. Kolda and Jackson R. Mayo},
+  title = {Shifted Power Method for Computing Tensor Eigenpairs},
+  journal = {SIAM Journal on Matrix Analysis and Applications},
+  month = oct,
+  year = {2011},
+  volume = {32},
+  number = {4},
+  pages = {1095-1124},
+  doi = {10.1137/100801482}
+}
+

+ +

@Article{TTB_EIGGEAP,
+  title = {An Adaptive Shifted Power Method for Computing Generalized Tensor Eigenpairs},
+  author = {Tamara G. Kolda and Jackson R. Mayo},
+  doi = {10.1137/140951758},
+  journal = {SIAM Journal on Matrix Analysis and Applications},
+  number = {4},
+  volume = {35},
+  year = {2014},
+  month = dec,
+  pages = {1563-1581},
+}
+

+ +

@Article{TTB_CPAPR,
+  title = {On Tensors, Sparsity, and Nonnegative Factorizations},
+  author = {Eric C. Chi and Tamara G. Kolda},
+  doi = {10.1137/110859063},
+  journal = {SIAM Journal on Matrix Analysis and Applications},
+  number = {4},
+  volume = {33},
+  year = {2012},
+  month = dec,
+  pages = {1272-1299},
+}
+
+@Article{TTB_CPAPRB,
+  author = {Samantha Hansen and Todd Plantenga and Tamara G. Kolda}, 
+  title = {Newton-Based Optimization for {Kullback-Leibler} Nonnegative Tensor Factorizations}, 
+  journal = {Optimization Methods and Software}, 
+  volume = {30}, 
+  number = {5}, 
+  pages = {1002-1029},
+  month = {April}, 
+  year = {2015},
+  doi = {10.1080/10556788.2015.1009977},
+} 
+

+ +

@article{TTB_CPSYM,  
+  author = {Tamara G. Kolda}, 
+  title = {Numerical Optimization for Symmetric Tensor Decomposition}, 
+  journal = {Mathematical Programming B}, 
+  volume = {151}, 
+  number = {1}, 
+  pages = {225-248}, 
+  month = apr, 
+  year = {2015},
+  doi = {10.1007/s10107-015-0895-0},
+}
+

+ +

@misc{TTB_CPRALS,  
+  author = {Casey Battaglino and Grey Ballard and Tamara G. Kolda}, 
+  title = {A Practical Randomized {CP} Tensor Decomposition},
+  howpublished = {arXiv:1701.06600},  
+  month = jan, 
+  year = {2017},
+  eprint = {1701.06600},
+  eprintclass = {cs.NA},
+}
+

+ +

@inproceedings{TTB_MET,
+  author = {Tamara G. Kolda and Jimeng Sun},
+  title = {Scalable Tensor Decompositions for Multi-aspect Data Mining},
+  booktitle = {ICDM 2008: Proceedings of the 8th IEEE International Conference on Data Mining},
+  month = dec,
+  year = {2008},
+  pages = {363-372},
+  doi = {10.1109/ICDM.2008.89}
+}
+

+ +
+

www.tensortoolbox.org

+ +
+ + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/collapse_scale_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/collapse_scale_doc.html new file mode 100644 index 0000000..814bc9e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/collapse_scale_doc.html @@ -0,0 +1,451 @@ + + + + + Collapsing and scaling tensors

Collapsing and scaling tensors

The tensor and sptensor classes support the notion of collapsing and scaling dimensions.

Contents

Examples of collapsing a tensor

X = tenrand([4 3 2]) %<-- Generate some data.
+
X is a tensor of size 4 x 3 x 2
+	X(:,:,1) = 
+	    0.9473    0.6743    0.6155
+	    0.8133    0.9271    0.0034
+	    0.9238    0.3438    0.9820
+	    0.1990    0.5945    0.8995
+	X(:,:,2) = 
+	    0.6928    0.2999    0.0974
+	    0.4397    0.8560    0.3974
+	    0.7010    0.1121    0.3333
+	    0.6097    0.2916    0.9442
+
Y = collapse(X,[2 3]) %<-- Sum of entries in each mode-1 slice.
+
Y is a tensor of size 4
+	Y(:) = 
+	    3.3272
+	    3.4369
+	    3.3961
+	    3.5385
+
Y = collapse(X,-1) %<-- Same as above.
+
Y is a tensor of size 4
+	Y(:) = 
+	    3.3272
+	    3.4369
+	    3.3961
+	    3.5385
+
Z = collapse(X,2) %<-- Sum of entries in each row fiber.
+
Z is a tensor of size 4 x 2
+	Z(:,:) = 
+	    2.2371    1.0901
+	    1.7438    1.6931
+	    2.2497    1.1464
+	    1.6930    1.8455
+
collapse(X,1:3) %<-- Sum of all entries.
+
+ans =
+
+   13.6987
+
+

Alternate accumulation functions for tensor

Y = collapse(X,[1 2],@max) %<-- Max entry in each mode-3 slice.
+
Y is a tensor of size 2
+	Y(:) = 
+	    0.9820
+	    0.9442
+
Z = collapse(X,-3,@mean) %<-- Average entry in each mode-3 slice.
+
Z is a tensor of size 2
+	Z(:) = 
+	    0.6603
+	    0.4813
+

Examples of collapsing a sptensor

X = sptenrand([4 3 2],6) %<-- Generate some data.
+
X is a sparse tensor of size 4 x 3 x 2 with 6 nonzeros
+	(1,1,2)    0.7221
+	(1,3,1)    0.9685
+	(2,1,1)    0.1557
+	(4,1,1)    0.1630
+	(4,2,1)    0.3134
+	(4,2,2)    0.0294
+
Y = collapse(X,[2 3]) %<-- Sum of entries in each mode-1 slice.
+
+Y =
+
+    1.6906
+    0.1557
+         0
+    0.5057
+
+
Y = collapse(X,-1) %<-- Same as above.
+
+Y =
+
+    1.6906
+    0.1557
+         0
+    0.5057
+
+
Z = collapse(X,2) %<-- Sum of entries in each row fiber.
+
Z is a sparse tensor of size 4 x 2 with 5 nonzeros
+	(1,1)    0.9685
+	(1,2)    0.7221
+	(2,1)    0.1557
+	(4,1)    0.4764
+	(4,2)    0.0294
+
collapse(X,1:3) %<-- Sum of all entries.
+
+ans =
+
+    2.3520
+
+

Alternate accumulation functions for sptensor

Y = collapse(X,[1 2],@min) %<-- Min *nonzero* entry in each mode-3 slice.
+
+Y =
+
+    0.1557
+    0.0294
+
+
Z = collapse(X,-3,@mean) %<-- Average *nonzero* entry in each mode-3 slice.
+
+Z =
+
+    0.4001
+    0.3758
+
+

Scaling a tensor in different modes

X = tenones([3,4,5]); %<-- Generate data
+S = 10 * [1:5]'; Y = scale(X,S,3) %<-- Scale in mode-3
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	    10    10    10    10
+	    10    10    10    10
+	    10    10    10    10
+	Y(:,:,2) = 
+	    20    20    20    20
+	    20    20    20    20
+	    20    20    20    20
+	Y(:,:,3) = 
+	    30    30    30    30
+	    30    30    30    30
+	    30    30    30    30
+	Y(:,:,4) = 
+	    40    40    40    40
+	    40    40    40    40
+	    40    40    40    40
+	Y(:,:,5) = 
+	    50    50    50    50
+	    50    50    50    50
+	    50    50    50    50
+
S = tensor(10 * [1:5]',5); Y = scale(X,S,3) %<-- First argument is a tensor.
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	    10    10    10    10
+	    10    10    10    10
+	    10    10    10    10
+	Y(:,:,2) = 
+	    20    20    20    20
+	    20    20    20    20
+	    20    20    20    20
+	Y(:,:,3) = 
+	    30    30    30    30
+	    30    30    30    30
+	    30    30    30    30
+	Y(:,:,4) = 
+	    40    40    40    40
+	    40    40    40    40
+	    40    40    40    40
+	Y(:,:,5) = 
+	    50    50    50    50
+	    50    50    50    50
+	    50    50    50    50
+
S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) %<-- Scale in two modes.
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,2) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,3) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,4) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,5) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+
S = tensor(1:12,[3 4]); Y = scale(X,S,-3) %<-- Same as above.
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,2) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,3) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,4) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,5) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+
S = tensor(1:60,[3 4 5]); Y = scale(X,S,1:3) %<-- Scale in every mode.
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,2) = 
+	    13    16    19    22
+	    14    17    20    23
+	    15    18    21    24
+	Y(:,:,3) = 
+	    25    28    31    34
+	    26    29    32    35
+	    27    30    33    36
+	Y(:,:,4) = 
+	    37    40    43    46
+	    38    41    44    47
+	    39    42    45    48
+	Y(:,:,5) = 
+	    49    52    55    58
+	    50    53    56    59
+	    51    54    57    60
+
Y = S .* X %<-- Same as above.
+
Y is a tensor of size 3 x 4 x 5
+	Y(:,:,1) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	Y(:,:,2) = 
+	    13    16    19    22
+	    14    17    20    23
+	    15    18    21    24
+	Y(:,:,3) = 
+	    25    28    31    34
+	    26    29    32    35
+	    27    30    33    36
+	Y(:,:,4) = 
+	    37    40    43    46
+	    38    41    44    47
+	    39    42    45    48
+	Y(:,:,5) = 
+	    49    52    55    58
+	    50    53    56    59
+	    51    54    57    60
+

Scaling a sptensor in different modes

X = ones(sptenrand([3 4 5], 10)) %<-- Generate data.
+
X is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)     1
+	(1,1,4)     1
+	(1,2,4)     1
+	(1,3,5)     1
+	(1,4,4)     1
+	(2,1,4)     1
+	(2,3,5)     1
+	(3,1,2)     1
+	(3,4,3)     1
+	(3,4,5)     1
+
S = 10 * [1:5]'; Y = scale(X,S,3) %<-- Scale in one mode.
+
Y is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)    10
+	(1,1,4)    40
+	(1,2,4)    40
+	(1,3,5)    50
+	(1,4,4)    40
+	(2,1,4)    40
+	(2,3,5)    50
+	(3,1,2)    20
+	(3,4,3)    30
+	(3,4,5)    50
+
S = tensor(10 * [1:5]',5); Y = scale(X,S,3) %<-- Same as above.
+
Y is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)    10
+	(1,1,4)    40
+	(1,2,4)    40
+	(1,3,5)    50
+	(1,4,4)    40
+	(2,1,4)    40
+	(2,3,5)    50
+	(3,1,2)    20
+	(3,4,3)    30
+	(3,4,5)    50
+
S = tensor(1:12,[3 4]); Y = scale(X,S,[1 2]) %<-- Scale in two modes.
+
Y is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)     1
+	(1,1,4)     1
+	(1,2,4)     4
+	(1,3,5)     7
+	(1,4,4)    10
+	(2,1,4)     2
+	(2,3,5)     8
+	(3,1,2)     3
+	(3,4,3)    12
+	(3,4,5)    12
+
S = tensor(1:12,[3 4]); Y = scale(X,S,-3) %<-- Same as above.
+
Y is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)     1
+	(1,1,4)     1
+	(1,2,4)     4
+	(1,3,5)     7
+	(1,4,4)    10
+	(2,1,4)     2
+	(2,3,5)     8
+	(3,1,2)     3
+	(3,4,3)    12
+	(3,4,5)    12
+
Z = scale(X,Y,1:3) %<-- Scale by a sparse tensor.
+
Z is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)     1
+	(1,1,4)     1
+	(1,2,4)     4
+	(1,3,5)     7
+	(1,4,4)    10
+	(2,1,4)     2
+	(2,3,5)     8
+	(3,1,2)     3
+	(3,4,3)    12
+	(3,4,5)    12
+
X .* Y %<-- Same as above.
+
ans is a sparse tensor of size 3 x 4 x 5 with 10 nonzeros
+	(1,1,1)     1
+	(1,1,4)     1
+	(1,2,4)     4
+	(1,3,5)     7
+	(1,4,4)    10
+	(2,1,4)     2
+	(2,3,5)     8
+	(3,1,2)     3
+	(3,4,3)    12
+	(3,4,5)    12
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/converting.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/converting.html new file mode 100644 index 0000000..4b61a83 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/converting.html @@ -0,0 +1,35 @@ + + + + + Converting Tensors and Matrices + + + + + +
+

Converting Tensors and Matrices

The Tensor Toolbox provides the following classes to support conversion of tensors to/from matrices:

  • tenmat - Tensor as a matrix, with extra information so that it can be converted back into a tensor.
  • sptenmat - Store an sptensor as a sparse matrix in coordinate format, with extra information so that it can be converted back into an sptensor.

+ + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp.html new file mode 100644 index 0000000..8254caf --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp.html @@ -0,0 +1,45 @@ + + + + + CP Decompositions + + + + + +
+

CP Decompositions

+ +

The Tensor Toolbox provides the following CP-like decompositions:

  • cp_als - Alternating least squares (ALS) method, the gold standard
  • cp_arls - Alternating randomized least squares (ALS) method, randomizing the least squares solves
  • cp_opt - Direct optimization (OPT) method
  • cp_wopt - Weighted direct optimization (WOPT) method for handling missing data
  • cp_apr - Alternating Poisson regression (APR) using KL-divergence fitting function for Poisson tensor decomposition
  • cp_sym - Direct optimization for symmetric CP decomposition (to be documented)
  • cp_rals - Randomized ALS method that uses matrix sketching
  • gcp_opt - Generalized CP with alternative loss functions

+ + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_als_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_als_doc.html new file mode 100644 index 0000000..d343997 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_als_doc.html @@ -0,0 +1,546 @@ + + + + + Alternating least squares for CANDECOMP/PARAFAC (CP) Decomposition

Alternating least squares for CANDECOMP/PARAFAC (CP) Decomposition

The function cp_als computes an estimate of the best rank-R CP model of a tensor X using the well-known alternating least-squares algorithm (see, e.g., Kolda and Bader, SIAM Review, 2009, for more information). The input X can be almost any type of tensor, including a tensor, sptensor, ktensor, or ttensor. The output CP model is a ktensor.

Contents

Load some data

We use the well-known amino acids data set from Andersson and Bro. It contains fluorescence measurements of 5 samples containing 3 amino acids: Tryptophan, Tyrosine, and Phenylalanine. Each amino acid corresponds to a rank-one component. The tensor is of size 5 x 51 x 201 from 5 samples, 51 excitations, and 201 emissions. Further details can be found here: http://www.models.life.ku.dk/Amino_Acid_fluo. Please cite the following paper for this data: Rasmus Bro, PARAFAC: Tutorial and applications, Chemometrics and Intelligent Laboratory Systems, 1997, 38, 149-171. This dataset can be found in the doc directory.

load aminoacids
+

Basic call to the method, specifying the data tensor and its rank

This uses a random initial guess. At each iteration, it reports the 'fit' which is defined as 1-(norm(X-M)/norm(X)) and is loosely the proportion of the data described by the CP model, i.e., a fit of 1 is perfect.

rng(3) %<- Setting random seed for reproducibility of this script
+M1 = cp_als(X,3); %<- Call the method
+
+CP_ALS:
+ Iter  1: f = 4.431681e-01 f-delta = 4.4e-01
+ Iter  2: f = 6.811163e-01 f-delta = 2.4e-01
+ Iter  3: f = 8.302664e-01 f-delta = 1.5e-01
+ Iter  4: f = 9.053451e-01 f-delta = 7.5e-02
+ Iter  5: f = 9.335960e-01 f-delta = 2.8e-02
+ Iter  6: f = 9.410321e-01 f-delta = 7.4e-03
+ Iter  7: f = 9.446529e-01 f-delta = 3.6e-03
+ Iter  8: f = 9.474319e-01 f-delta = 2.8e-03
+ Iter  9: f = 9.499205e-01 f-delta = 2.5e-03
+ Iter 10: f = 9.522513e-01 f-delta = 2.3e-03
+ Iter 11: f = 9.544558e-01 f-delta = 2.2e-03
+ Iter 12: f = 9.565373e-01 f-delta = 2.1e-03
+ Iter 13: f = 9.584909e-01 f-delta = 2.0e-03
+ Iter 14: f = 9.603111e-01 f-delta = 1.8e-03
+ Iter 15: f = 9.619935e-01 f-delta = 1.7e-03
+ Iter 16: f = 9.635362e-01 f-delta = 1.5e-03
+ Iter 17: f = 9.649398e-01 f-delta = 1.4e-03
+ Iter 18: f = 9.662071e-01 f-delta = 1.3e-03
+ Iter 19: f = 9.673433e-01 f-delta = 1.1e-03
+ Iter 20: f = 9.683552e-01 f-delta = 1.0e-03
+ Iter 21: f = 9.692506e-01 f-delta = 9.0e-04
+ Iter 22: f = 9.700386e-01 f-delta = 7.9e-04
+ Iter 23: f = 9.707283e-01 f-delta = 6.9e-04
+ Iter 24: f = 9.713293e-01 f-delta = 6.0e-04
+ Iter 25: f = 9.718507e-01 f-delta = 5.2e-04
+ Iter 26: f = 9.723014e-01 f-delta = 4.5e-04
+ Iter 27: f = 9.726898e-01 f-delta = 3.9e-04
+ Iter 28: f = 9.730234e-01 f-delta = 3.3e-04
+ Iter 29: f = 9.733095e-01 f-delta = 2.9e-04
+ Iter 30: f = 9.735541e-01 f-delta = 2.4e-04
+ Iter 31: f = 9.737631e-01 f-delta = 2.1e-04
+ Iter 32: f = 9.739412e-01 f-delta = 1.8e-04
+ Iter 33: f = 9.740930e-01 f-delta = 1.5e-04
+ Iter 34: f = 9.742221e-01 f-delta = 1.3e-04
+ Iter 35: f = 9.743319e-01 f-delta = 1.1e-04
+ Iter 36: f = 9.744252e-01 f-delta = 9.3e-05
+ Final f = 9.744252e-01 
+

We typically can achieve a final fit of f = 0.97. The method stops when the change in the fit becomes less than the specified tolerance, which defaults to 1e-4.
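
As a quick check, the reported fit can be recomputed directly from its definition. The snippet below is a small sketch that is not part of the original script; it assumes the data tensor X and the model M1 from above, and uses full to expand the ktensor into a dense tensor.

% Sketch: recompute the final fit, fit = 1 - norm(X - full(M1))/norm(X)
+Xhat = full(M1);                          % dense reconstruction of the CP model
+fit_check = 1 - norm(X - Xhat)/norm(X)    % should be close to the reported 0.97
+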

Visualize the results

Use the ktensor/viz function to visualize the results.

vizopts = {'PlotCommands',{'bar','line','line'},...
+    'ModeTitles',{'Concentration','Emission','Excitation'},...
+    'BottomSpace',0.10,'HorzSpace',0.04,'Normalize',0};
+info1 = viz(M1,'Figure',1,vizopts{:});
+

Run again with a different initial guess, output the initial guess.

This time we have two outputs. The first output is the solution as a ktensor. The second output is a cell array containing the initial guess. Since the first mode is not needed, it is omitted from the cell array.

[M2bad,U2] = cp_als(X,3);
+
+CP_ALS:
+ Iter  1: f = 5.512552e-01 f-delta = 5.5e-01
+ Iter  2: f = 6.377027e-01 f-delta = 8.6e-02
+ Iter  3: f = 6.438691e-01 f-delta = 6.2e-03
+ Iter  4: f = 6.728753e-01 f-delta = 2.9e-02
+ Iter  5: f = 7.088155e-01 f-delta = 3.6e-02
+ Iter  6: f = 7.144994e-01 f-delta = 5.7e-03
+ Iter  7: f = 7.188603e-01 f-delta = 4.4e-03
+ Iter  8: f = 7.245002e-01 f-delta = 5.6e-03
+ Iter  9: f = 7.313569e-01 f-delta = 6.9e-03
+ Iter 10: f = 7.398597e-01 f-delta = 8.5e-03
+ Iter 11: f = 7.514761e-01 f-delta = 1.2e-02
+ Iter 12: f = 7.690794e-01 f-delta = 1.8e-02
+ Iter 13: f = 7.939227e-01 f-delta = 2.5e-02
+ Iter 14: f = 8.166727e-01 f-delta = 2.3e-02
+ Iter 15: f = 8.299350e-01 f-delta = 1.3e-02
+ Iter 16: f = 8.374720e-01 f-delta = 7.5e-03
+ Iter 17: f = 8.422694e-01 f-delta = 4.8e-03
+ Iter 18: f = 8.458255e-01 f-delta = 3.6e-03
+ Iter 19: f = 8.488925e-01 f-delta = 3.1e-03
+ Iter 20: f = 8.518580e-01 f-delta = 3.0e-03
+ Iter 21: f = 8.549459e-01 f-delta = 3.1e-03
+ Iter 22: f = 8.583117e-01 f-delta = 3.4e-03
+ Iter 23: f = 8.620862e-01 f-delta = 3.8e-03
+ Iter 24: f = 8.663939e-01 f-delta = 4.3e-03
+ Iter 25: f = 8.713540e-01 f-delta = 5.0e-03
+ Iter 26: f = 8.770652e-01 f-delta = 5.7e-03
+ Iter 27: f = 8.835659e-01 f-delta = 6.5e-03
+ Iter 28: f = 8.907687e-01 f-delta = 7.2e-03
+ Iter 29: f = 8.983839e-01 f-delta = 7.6e-03
+ Iter 30: f = 9.058962e-01 f-delta = 7.5e-03
+ Iter 31: f = 9.126883e-01 f-delta = 6.8e-03
+ Iter 32: f = 9.183110e-01 f-delta = 5.6e-03
+ Iter 33: f = 9.226911e-01 f-delta = 4.4e-03
+ Iter 34: f = 9.260765e-01 f-delta = 3.4e-03
+ Iter 35: f = 9.288145e-01 f-delta = 2.7e-03
+ Iter 36: f = 9.311877e-01 f-delta = 2.4e-03
+ Iter 37: f = 9.333745e-01 f-delta = 2.2e-03
+ Iter 38: f = 9.354731e-01 f-delta = 2.1e-03
+ Iter 39: f = 9.375329e-01 f-delta = 2.1e-03
+ Iter 40: f = 9.395775e-01 f-delta = 2.0e-03
+ Iter 41: f = 9.416167e-01 f-delta = 2.0e-03
+ Iter 42: f = 9.436525e-01 f-delta = 2.0e-03
+ Iter 43: f = 9.456825e-01 f-delta = 2.0e-03
+ Iter 44: f = 9.477010e-01 f-delta = 2.0e-03
+ Iter 45: f = 9.496999e-01 f-delta = 2.0e-03
+ Iter 46: f = 9.516689e-01 f-delta = 2.0e-03
+ Iter 47: f = 9.535964e-01 f-delta = 1.9e-03
+ Iter 48: f = 9.554701e-01 f-delta = 1.9e-03
+ Iter 49: f = 9.572772e-01 f-delta = 1.8e-03
+ Iter 50: f = 9.590055e-01 f-delta = 1.7e-03
+ Final f = 9.590055e-01 
+

Increase the maximum number of iterations

Note that the previous run stopped after only 50 iterations, before reaching the specified convergence tolerance. Let's increase the maximum number of iterations and try again, using the same initial guess.

M2 = cp_als(X,3,'maxiters',100,'init',U2);
+
+CP_ALS:
+ Iter  1: f = 5.512552e-01 f-delta = 5.5e-01
+ Iter  2: f = 6.377027e-01 f-delta = 8.6e-02
+ Iter  3: f = 6.438691e-01 f-delta = 6.2e-03
+ Iter  4: f = 6.728753e-01 f-delta = 2.9e-02
+ Iter  5: f = 7.088155e-01 f-delta = 3.6e-02
+ Iter  6: f = 7.144994e-01 f-delta = 5.7e-03
+ Iter  7: f = 7.188603e-01 f-delta = 4.4e-03
+ Iter  8: f = 7.245002e-01 f-delta = 5.6e-03
+ Iter  9: f = 7.313569e-01 f-delta = 6.9e-03
+ Iter 10: f = 7.398597e-01 f-delta = 8.5e-03
+ Iter 11: f = 7.514761e-01 f-delta = 1.2e-02
+ Iter 12: f = 7.690794e-01 f-delta = 1.8e-02
+ Iter 13: f = 7.939227e-01 f-delta = 2.5e-02
+ Iter 14: f = 8.166727e-01 f-delta = 2.3e-02
+ Iter 15: f = 8.299350e-01 f-delta = 1.3e-02
+ Iter 16: f = 8.374720e-01 f-delta = 7.5e-03
+ Iter 17: f = 8.422694e-01 f-delta = 4.8e-03
+ Iter 18: f = 8.458255e-01 f-delta = 3.6e-03
+ Iter 19: f = 8.488925e-01 f-delta = 3.1e-03
+ Iter 20: f = 8.518580e-01 f-delta = 3.0e-03
+ Iter 21: f = 8.549459e-01 f-delta = 3.1e-03
+ Iter 22: f = 8.583117e-01 f-delta = 3.4e-03
+ Iter 23: f = 8.620862e-01 f-delta = 3.8e-03
+ Iter 24: f = 8.663939e-01 f-delta = 4.3e-03
+ Iter 25: f = 8.713540e-01 f-delta = 5.0e-03
+ Iter 26: f = 8.770652e-01 f-delta = 5.7e-03
+ Iter 27: f = 8.835659e-01 f-delta = 6.5e-03
+ Iter 28: f = 8.907687e-01 f-delta = 7.2e-03
+ Iter 29: f = 8.983839e-01 f-delta = 7.6e-03
+ Iter 30: f = 9.058962e-01 f-delta = 7.5e-03
+ Iter 31: f = 9.126883e-01 f-delta = 6.8e-03
+ Iter 32: f = 9.183110e-01 f-delta = 5.6e-03
+ Iter 33: f = 9.226911e-01 f-delta = 4.4e-03
+ Iter 34: f = 9.260765e-01 f-delta = 3.4e-03
+ Iter 35: f = 9.288145e-01 f-delta = 2.7e-03
+ Iter 36: f = 9.311877e-01 f-delta = 2.4e-03
+ Iter 37: f = 9.333745e-01 f-delta = 2.2e-03
+ Iter 38: f = 9.354731e-01 f-delta = 2.1e-03
+ Iter 39: f = 9.375329e-01 f-delta = 2.1e-03
+ Iter 40: f = 9.395775e-01 f-delta = 2.0e-03
+ Iter 41: f = 9.416167e-01 f-delta = 2.0e-03
+ Iter 42: f = 9.436525e-01 f-delta = 2.0e-03
+ Iter 43: f = 9.456825e-01 f-delta = 2.0e-03
+ Iter 44: f = 9.477010e-01 f-delta = 2.0e-03
+ Iter 45: f = 9.496999e-01 f-delta = 2.0e-03
+ Iter 46: f = 9.516689e-01 f-delta = 2.0e-03
+ Iter 47: f = 9.535964e-01 f-delta = 1.9e-03
+ Iter 48: f = 9.554701e-01 f-delta = 1.9e-03
+ Iter 49: f = 9.572772e-01 f-delta = 1.8e-03
+ Iter 50: f = 9.590055e-01 f-delta = 1.7e-03
+ Iter 51: f = 9.606440e-01 f-delta = 1.6e-03
+ Iter 52: f = 9.621834e-01 f-delta = 1.5e-03
+ Iter 53: f = 9.636167e-01 f-delta = 1.4e-03
+ Iter 54: f = 9.649394e-01 f-delta = 1.3e-03
+ Iter 55: f = 9.661493e-01 f-delta = 1.2e-03
+ Iter 56: f = 9.672469e-01 f-delta = 1.1e-03
+ Iter 57: f = 9.682348e-01 f-delta = 9.9e-04
+ Iter 58: f = 9.691174e-01 f-delta = 8.8e-04
+ Iter 59: f = 9.699007e-01 f-delta = 7.8e-04
+ Iter 60: f = 9.705915e-01 f-delta = 6.9e-04
+ Iter 61: f = 9.711975e-01 f-delta = 6.1e-04
+ Iter 62: f = 9.717263e-01 f-delta = 5.3e-04
+ Iter 63: f = 9.721858e-01 f-delta = 4.6e-04
+ Iter 64: f = 9.725836e-01 f-delta = 4.0e-04
+ Iter 65: f = 9.729269e-01 f-delta = 3.4e-04
+ Iter 66: f = 9.732222e-01 f-delta = 3.0e-04
+ Iter 67: f = 9.734758e-01 f-delta = 2.5e-04
+ Iter 68: f = 9.736930e-01 f-delta = 2.2e-04
+ Iter 69: f = 9.738788e-01 f-delta = 1.9e-04
+ Iter 70: f = 9.740375e-01 f-delta = 1.6e-04
+ Iter 71: f = 9.741730e-01 f-delta = 1.4e-04
+ Iter 72: f = 9.742885e-01 f-delta = 1.2e-04
+ Iter 73: f = 9.743869e-01 f-delta = 9.8e-05
+ Final f = 9.743869e-01 
+

This solution looks more or less the same as the previous one.

info2 = viz(M2,'Figure',2,vizopts{:});
+

Compare the two solutions

Use the ktensor/score function to compare the two solutions. A score of 1 indicates a perfect match. These are not exactly the same, but they are pretty close.

score(M1,M2)
+
+ans =
+
+    0.9981
+
+

Rerun with same initial guess

Using the same initial guess (and all other parameters) gives the exact same solution.

M2alt = cp_als(X,3,'maxiters',100,'init',U2);
+score(M2, M2alt) %<- Score of 1 indicates the same solution
+
+CP_ALS:
+ Iter  1: f = 5.512552e-01 f-delta = 5.5e-01
+ Iter  2: f = 6.377027e-01 f-delta = 8.6e-02
+ Iter  3: f = 6.438691e-01 f-delta = 6.2e-03
+ Iter  4: f = 6.728753e-01 f-delta = 2.9e-02
+ Iter  5: f = 7.088155e-01 f-delta = 3.6e-02
+ Iter  6: f = 7.144994e-01 f-delta = 5.7e-03
+ Iter  7: f = 7.188603e-01 f-delta = 4.4e-03
+ Iter  8: f = 7.245002e-01 f-delta = 5.6e-03
+ Iter  9: f = 7.313569e-01 f-delta = 6.9e-03
+ Iter 10: f = 7.398597e-01 f-delta = 8.5e-03
+ Iter 11: f = 7.514761e-01 f-delta = 1.2e-02
+ Iter 12: f = 7.690794e-01 f-delta = 1.8e-02
+ Iter 13: f = 7.939227e-01 f-delta = 2.5e-02
+ Iter 14: f = 8.166727e-01 f-delta = 2.3e-02
+ Iter 15: f = 8.299350e-01 f-delta = 1.3e-02
+ Iter 16: f = 8.374720e-01 f-delta = 7.5e-03
+ Iter 17: f = 8.422694e-01 f-delta = 4.8e-03
+ Iter 18: f = 8.458255e-01 f-delta = 3.6e-03
+ Iter 19: f = 8.488925e-01 f-delta = 3.1e-03
+ Iter 20: f = 8.518580e-01 f-delta = 3.0e-03
+ Iter 21: f = 8.549459e-01 f-delta = 3.1e-03
+ Iter 22: f = 8.583117e-01 f-delta = 3.4e-03
+ Iter 23: f = 8.620862e-01 f-delta = 3.8e-03
+ Iter 24: f = 8.663939e-01 f-delta = 4.3e-03
+ Iter 25: f = 8.713540e-01 f-delta = 5.0e-03
+ Iter 26: f = 8.770652e-01 f-delta = 5.7e-03
+ Iter 27: f = 8.835659e-01 f-delta = 6.5e-03
+ Iter 28: f = 8.907687e-01 f-delta = 7.2e-03
+ Iter 29: f = 8.983839e-01 f-delta = 7.6e-03
+ Iter 30: f = 9.058962e-01 f-delta = 7.5e-03
+ Iter 31: f = 9.126883e-01 f-delta = 6.8e-03
+ Iter 32: f = 9.183110e-01 f-delta = 5.6e-03
+ Iter 33: f = 9.226911e-01 f-delta = 4.4e-03
+ Iter 34: f = 9.260765e-01 f-delta = 3.4e-03
+ Iter 35: f = 9.288145e-01 f-delta = 2.7e-03
+ Iter 36: f = 9.311877e-01 f-delta = 2.4e-03
+ Iter 37: f = 9.333745e-01 f-delta = 2.2e-03
+ Iter 38: f = 9.354731e-01 f-delta = 2.1e-03
+ Iter 39: f = 9.375329e-01 f-delta = 2.1e-03
+ Iter 40: f = 9.395775e-01 f-delta = 2.0e-03
+ Iter 41: f = 9.416167e-01 f-delta = 2.0e-03
+ Iter 42: f = 9.436525e-01 f-delta = 2.0e-03
+ Iter 43: f = 9.456825e-01 f-delta = 2.0e-03
+ Iter 44: f = 9.477010e-01 f-delta = 2.0e-03
+ Iter 45: f = 9.496999e-01 f-delta = 2.0e-03
+ Iter 46: f = 9.516689e-01 f-delta = 2.0e-03
+ Iter 47: f = 9.535964e-01 f-delta = 1.9e-03
+ Iter 48: f = 9.554701e-01 f-delta = 1.9e-03
+ Iter 49: f = 9.572772e-01 f-delta = 1.8e-03
+ Iter 50: f = 9.590055e-01 f-delta = 1.7e-03
+ Iter 51: f = 9.606440e-01 f-delta = 1.6e-03
+ Iter 52: f = 9.621834e-01 f-delta = 1.5e-03
+ Iter 53: f = 9.636167e-01 f-delta = 1.4e-03
+ Iter 54: f = 9.649394e-01 f-delta = 1.3e-03
+ Iter 55: f = 9.661493e-01 f-delta = 1.2e-03
+ Iter 56: f = 9.672469e-01 f-delta = 1.1e-03
+ Iter 57: f = 9.682348e-01 f-delta = 9.9e-04
+ Iter 58: f = 9.691174e-01 f-delta = 8.8e-04
+ Iter 59: f = 9.699007e-01 f-delta = 7.8e-04
+ Iter 60: f = 9.705915e-01 f-delta = 6.9e-04
+ Iter 61: f = 9.711975e-01 f-delta = 6.1e-04
+ Iter 62: f = 9.717263e-01 f-delta = 5.3e-04
+ Iter 63: f = 9.721858e-01 f-delta = 4.6e-04
+ Iter 64: f = 9.725836e-01 f-delta = 4.0e-04
+ Iter 65: f = 9.729269e-01 f-delta = 3.4e-04
+ Iter 66: f = 9.732222e-01 f-delta = 3.0e-04
+ Iter 67: f = 9.734758e-01 f-delta = 2.5e-04
+ Iter 68: f = 9.736930e-01 f-delta = 2.2e-04
+ Iter 69: f = 9.738788e-01 f-delta = 1.9e-04
+ Iter 70: f = 9.740375e-01 f-delta = 1.6e-04
+ Iter 71: f = 9.741730e-01 f-delta = 1.4e-04
+ Iter 72: f = 9.742885e-01 f-delta = 1.2e-04
+ Iter 73: f = 9.743869e-01 f-delta = 9.8e-05
+ Final f = 9.743869e-01 
+
+ans =
+
+     1
+
+

Changing the output frequency

Use the 'printitn' option to change the output frequency.

M2alt2 = cp_als(X,3,'maxiters',100,'init',U2,'printitn',10);
+
+CP_ALS:
+ Iter 10: f = 7.398597e-01 f-delta = 8.5e-03
+ Iter 20: f = 8.518580e-01 f-delta = 3.0e-03
+ Iter 30: f = 9.058962e-01 f-delta = 7.5e-03
+ Iter 40: f = 9.395775e-01 f-delta = 2.0e-03
+ Iter 50: f = 9.590055e-01 f-delta = 1.7e-03
+ Iter 60: f = 9.705915e-01 f-delta = 6.9e-04
+ Iter 70: f = 9.740375e-01 f-delta = 1.6e-04
+ Iter 73: f = 9.743869e-01 f-delta = 9.8e-05
+ Final f = 9.743869e-01 
+

Suppress all output

Set 'printitn' to zero to suppress all output.

M2alt3 = cp_als(X,3,'maxiters',100,'init',U2,'printitn',0); % <-No output
+

Use HOSVD initial guess

Use the 'nvecs' option to use the leading mode-n singular vectors as the initial guess.

M3 = cp_als(X,3,'init','nvecs','printitn',10);
+
+CP_ALS:
+ Iter 10: f = 9.334888e-01 f-delta = 3.5e-03
+ Iter 20: f = 9.604549e-01 f-delta = 1.9e-03
+ Iter 30: f = 9.712518e-01 f-delta = 5.8e-04
+ Iter 40: f = 9.741285e-01 f-delta = 1.4e-04
+ Iter 43: f = 9.744312e-01 f-delta = 8.6e-05
+ Final f = 9.744312e-01 
+

Compare to the first solution using score, and see that they are nearly the same because the score is close to 1.

score(M1,M3)
+
+ans =
+
+    0.9847
+
+

Change the order of the dimensions in CP

[M4,~,info] = cp_als(X,3,'dimorder',[2 3 1],'init','nvecs','printitn',10);
+score(M1,M4)
+
+CP_ALS:
+ Iter 10: f = 9.449957e-01 f-delta = 3.1e-03
+ Iter 20: f = 9.657394e-01 f-delta = 1.3e-03
+ Iter 30: f = 9.727566e-01 f-delta = 3.5e-04
+ Iter 39: f = 9.743928e-01 f-delta = 9.2e-05
+ Final f = 9.743928e-01 
+
+ans =
+
+    0.9844
+
+

In the last example, we also collected the third output argument which has some extra information in it. The field info.iters has the total number of iterations. The field info.params has the information used to run the method. Unless the initialization method is 'random', passing the parameters back to the method will yield the exact same results.

M4alt = cp_als(X,3,info.params);
+score(M4,M4alt)
+
+CP_ALS:
+ Iter 10: f = 9.449957e-01 f-delta = 3.1e-03
+ Iter 20: f = 9.657394e-01 f-delta = 1.3e-03
+ Iter 30: f = 9.727566e-01 f-delta = 3.5e-04
+ Iter 39: f = 9.743928e-01 f-delta = 9.2e-05
+ Final f = 9.743928e-01 
+
+ans =
+
+    1.0000
+
+

Change the tolerance

It's also possible to loosen or tighten the tolerance on the change in the fit. You may need to increase the number of iterations for it to converge.

M5 = cp_als(X,3,'init','nvecs','tol',1e-6,'maxiters',1000,'printitn',10);
+
+CP_ALS:
+ Iter 10: f = 9.334888e-01 f-delta = 3.5e-03
+ Iter 20: f = 9.604549e-01 f-delta = 1.9e-03
+ Iter 30: f = 9.712518e-01 f-delta = 5.8e-04
+ Iter 40: f = 9.741285e-01 f-delta = 1.4e-04
+ Iter 50: f = 9.747733e-01 f-delta = 2.9e-05
+ Iter 60: f = 9.749128e-01 f-delta = 6.4e-06
+ Iter 70: f = 9.749430e-01 f-delta = 1.4e-06
+ Iter 73: f = 9.749461e-01 f-delta = 8.8e-07
+ Final f = 9.749461e-01 
+

Control sign ambiguity of factor matrices

The default behavior of cp_als is to make a call to fixsigns to fix the sign ambiguity of the factor matrices. You can turn off this behavior by passing the 'fixsigns' parameter value of false when calling cp_als.

X = ktensor([1;1], {[1, 1; 1, -10],[1, 1; 1, -10]});
+M = cp_als(X, 2, 'printitn', 0, 'init', X.U) % <-default behavior, fixsigns called
+M = cp_als(X, 2, 'printitn', 0, 'init', X.U, 'fixsigns', false) % <-fixsigns not called
+
M is a ktensor of size 2 x 2
+	M.lambda = 
+		  101.0000    2.0000
+	M.U{1} = 
+		   -0.0995    0.7071
+		    0.9950    0.7071
+	M.U{2} = 
+		   -0.0995    0.7071
+		    0.9950    0.7071
+M is a ktensor of size 2 x 2
+	M.lambda = 
+		  101.0000    2.0000
+	M.U{1} = 
+		    0.0995    0.7071
+		   -0.9950    0.7071
+	M.U{2} = 
+		    0.0995    0.7071
+		   -0.9950    0.7071
+

Recommendations

  • Run multiple times with different guesses and select the solution with the best fit (see the sketch after this list).
  • Try different ranks and choose the solution that is the best descriptor for your data based on the combination of the fit and the interpretation of the factors, e.g., by visualizing the results.
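
As a rough illustration of the first recommendation, here is a minimal sketch that is not part of the original documentation: it restarts cp_als from several random initial guesses and keeps the model with the best fit, computed from the definition fit = 1 - norm(X - full(M))/norm(X). It assumes X is the amino acids data tensor loaded at the top of this script (reload it if X has been overwritten by the fixsigns example above).

nruns = 10; bestfit = -Inf;
+for r = 1:nruns
+    Mtry = cp_als(X, 3, 'printitn', 0);         % random initialization by default
+    fittry = 1 - norm(X - full(Mtry))/norm(X);  % fraction of the data explained
+    if fittry > bestfit
+        bestfit = fittry; Mbest = Mtry;
+    end
+end
+fprintf('Best fit over %d runs: %.4f\n', nruns, bestfit);
+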
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_apr_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_apr_doc.html new file mode 100644 index 0000000..f934ac6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_apr_doc.html @@ -0,0 +1,161 @@ + + + + + Alternating Poisson Regression for fitting CP to sparse count data

Alternating Poisson Regression for fitting CP to sparse count data

Contents

Set up a sample problem

We follow the general procedure outlined by E. C. Chi and T. G. Kolda, On Tensors, Sparsity, and Nonnegative Factorizations, arXiv:1112.2414 [math.NA], December 2011 (http://arxiv.org/abs/1112.2414).

% Pick the size and rank
+sz = [100 80 60];
+R = 5;
+
+% Generate factor matrices with a few large entries in each column; this
+% will be the basis of our soln.
+A = cell(3,1);
+for n = 1:length(sz)
+    A{n} = rand(sz(n), R);
+    for r = 1:R
+        p = randperm(sz(n));
+        nbig = round( (1/R)*sz(n) );
+        A{n}(p(1:nbig),r) = 100 * A{n}(p(1:nbig),r);
+    end
+end
+lambda = rand(R,1);
+S = ktensor(lambda, A);
+S = normalize(S,'sort',1);
+
+% Create sparse test problem based on provided solution.
+nz = prod(sz) * .05;
+info = create_problem('Soln', S, 'Sparse_Generation', nz);
+
+% Extract data and solution
+X = info.Data;
+M_true = info.Soln;
+

Call CP-APR

% Compute a solution
+M = cp_apr(X, R, 'printitn', 10);
+
+% Score the solution
+factor_match_score = score(M, M_true, 'greedy', true)
+
+CP_PQNR (alternating Poisson regression using quasi-Newton)
+  Precomputing sparse index sets...done
+  10. Ttl Inner Its: 648, KKT viol = 2.32e-02, obj = -8.28190514e+03, nz: 301
+  20. Ttl Inner Its: 240, KKT viol = 9.81e-05, obj = -8.28188887e+03, nz: 302
+===========================================
+ Final log-likelihood = -8.281889e+03 
+ Final least squares fit = 5.784574e-01 
+ Final KKT violation = 9.8085093e-05
+ Total inner iterations = 20238
+ Total execution time = 5.85 secs
+
+factor_match_score =
+
+    0.9609
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_arls_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_arls_doc.html new file mode 100644 index 0000000..44c50f5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_arls_doc.html @@ -0,0 +1,381 @@ + + + + + Alternating randomized least squares for CP Decomposition

Alternating randomized least squares for CP Decomposition

The function cp_arls computes an estimate of the best rank-R CP model of a tensor X using an alternating randomized least-squares algorithm. The input X must be a (dense) tensor. The output CP model is a ktensor. The CP-ARLS method is described in the following reference:

  • C. Battaglino, G. Ballard, T. G. Kolda. A Practical Randomized CP Tensor Decomposition, to appear in SIAM J. Matrix Analysis and Applications, 2017. Preprint: (arXiv:1701.06600).

Contents

Set up a sample problem

We set up an especially difficult and somewhat large sample problem that has high collinearity (0.9) and 1% noise. This is an example where the randomized method will generally outperform the standard method.

sz = [200 300 400];
+R = 5;
+ns = 0.01;
+coll = 0.9;
+
+info = create_problem('Size', sz, 'Num_Factors', R, 'Noise', ns, ...
+    'Factor_Generator', @(m,n) matrandcong(m,n,coll), ...
+    'Lambda_Generator', @ones);
+
+% Extract data and solution
+X = info.Data;
+M_true = info.Soln;
+

Running the CP-ARLS method

Running the method is essentially the same as using CP-ALS: feed it the data tensor and the desired rank. Note that the iteration counter is reported as (number of epochs) x (number of iterations per epoch). The default number of iterations per epoch is 50. At the end of each epoch, we check the convergence criteria. Because this is a randomized method, we do not achieve a strict decrease in the objective function. Instead, we track the number of epochs without improvement (newi) and exit when this reaches the predefined tolerance (`newitol`), which defaults to 5. The reported fit values are approximate, which is why they are denoted by `f~` rather than just `f`.

tic
+[M1, ~, out1] = cp_arls(X,R);
+time1 = toc;
+scr1 = score(M1,M_true);
+fprintf('\n*** Results for CP-ARLS (with mixing) ***\n');
+fprintf('Time (secs): %.3f\n', time1)
+fprintf('Score (max=1): %.3f\n', scr1);
+
+CP-ARLS (with mixing): 
+ Iter 10x50: f~ = 9.895866e-01 newi = 0
+ Iter 20x50: f~ = 9.895783e-01 newi = 4
+ Iter 21x50: f~ = 9.895769e-01 newi = 5
+
+*** Results for CP-ARLS (with mixing) ***
+Time (secs): 8.923
+Score (max=1): 0.989
+

Speed things up by skipping the initial mixing

The default behavior is to mix the data in each mode using an FFT and diagonal random +/-1 matrix. This may add substantial preprocessing time, though it helps to ensure that the method converges. Oftentimes, such as with randomly-generated data, the mixing is not necessary.

tic
+[M2, ~, out2] = cp_arls(X,R,'mix',false);
+time2 = toc;
+scr2 = score(M2,M_true);
+
+fprintf('\n*** Results for CP-ARLS (no mix) ***\n');
+fprintf('Time (secs): %.3f\n', time2)
+fprintf('Score (max=1): %.3f\n', scr2);
+
+CP_ARLS (without mixing): 
+ Iter 10x50: f~ = 9.889934e-01 newi = 1
+ Iter 20x50: f~ = 9.891646e-01 newi = 0
+ Iter 30x50: f~ = 9.890769e-01 newi = 5
+
+*** Results for CP-ARLS (no mix) ***
+Time (secs): 8.029
+Score (max=1): 0.976
+

Comparing with CP-ALS

CP-ALS may be somewhat faster, especially since this is a relatively small problem, but it usually will not achieve as good of an answer in terms of the score.

tic;
+[M3, ~, out3] = cp_als(X,R,'maxiters',500,'printitn',10);
+time3 = toc;
+scr3 = score(M3,M_true);
+fprintf('\n*** Results for CP-ALS ***\n');
+fprintf('Time (secs): %.3f\n', time3)
+fprintf('Score (max=1): %.3f\n', scr3);
+
+CP_ALS:
+ Iter 10: f = 9.726647e-01 f-delta = 4.8e-04
+ Iter 20: f = 9.756937e-01 f-delta = 2.6e-04
+ Iter 30: f = 9.868227e-01 f-delta = 2.7e-03
+ Iter 33: f = 9.884157e-01 f-delta = 5.8e-05
+ Final f = 9.884157e-01 
+
+*** Results for CP-ALS ***
+Time (secs): 3.505
+Score (max=1): 0.709
+

How well does the approximate fit do?

It is possible to check the accuracy of the fit computation by having the code compute the true fit of the final solution, enabled by the `truefit` option.

[M4,~,out4] = cp_arls(X,R,'truefit',true);
+
+CP-ARLS (with mixing): 
+ Iter 10x50: f~ = 9.897749e-01 newi = 2
+ Iter 17x50: f~ = 9.898412e-01 newi = 5
+ Final fit = 9.898393e-01 Final estimated fit = 9.898489e-01 
+

Varying epoch size

It is possible to vary the number of iterations per epoch. Fewer iterations means that more time is spent checking for convergence, and convergence may also be harder to detect, since a single iteration can fluctuate while we are actually looking for the overall trend. In contrast, too many iterations means that the method won't realize when it has converged and may spend too much time computing.

tic
+M = cp_arls(X,R,'epoch',1,'newitol',20);
+toc
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter 10x1: f~ = 9.701522e-01 newi = 1
+ Iter 20x1: f~ = 9.725415e-01 newi = 5
+ Iter 30x1: f~ = 9.738450e-01 newi = 0
+ Iter 40x1: f~ = 9.745092e-01 newi = 0
+ Iter 50x1: f~ = 9.770124e-01 newi = 0
+ Iter 60x1: f~ = 9.877161e-01 newi = 0
+ Iter 70x1: f~ = 9.881069e-01 newi = 0
+ Iter 80x1: f~ = 9.882726e-01 newi = 2
+ Iter 90x1: f~ = 9.885071e-01 newi = 1
+ Iter 100x1: f~ = 9.886444e-01 newi = 0
+ Iter 110x1: f~ = 9.886701e-01 newi = 8
+ Iter 120x1: f~ = 9.888270e-01 newi = 1
+ Iter 130x1: f~ = 9.888974e-01 newi = 1
+ Iter 140x1: f~ = 9.891171e-01 newi = 0
+ Iter 150x1: f~ = 9.891283e-01 newi = 0
+ Iter 160x1: f~ = 9.890805e-01 newi = 1
+ Iter 170x1: f~ = 9.892718e-01 newi = 0
+ Iter 180x1: f~ = 9.891860e-01 newi = 8
+ Iter 190x1: f~ = 9.893004e-01 newi = 6
+ Iter 200x1: f~ = 9.894006e-01 newi = 0
+ Iter 210x1: f~ = 9.893971e-01 newi = 1
+ Iter 220x1: f~ = 9.894173e-01 newi = 0
+ Iter 230x1: f~ = 9.894356e-01 newi = 2
+ Iter 240x1: f~ = 9.894381e-01 newi = 3
+ Iter 250x1: f~ = 9.894686e-01 newi = 6
+ Iter 260x1: f~ = 9.894537e-01 newi = 4
+ Iter 270x1: f~ = 9.894538e-01 newi = 1
+ Iter 280x1: f~ = 9.895148e-01 newi = 1
+ Iter 290x1: f~ = 9.895310e-01 newi = 7
+ Iter 300x1: f~ = 9.895601e-01 newi = 17
+ Iter 303x1: f~ = 9.895373e-01 newi = 20
+Elapsed time is 3.350615 seconds.
+Score: 0.9171
+
tic
+M = cp_arls(X,R,'epoch',200,'newitol',3,'printitn',2);
+toc
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter  2x200: f~ = 9.896707e-01 newi = 0
+ Iter  4x200: f~ = 9.896960e-01 newi = 1
+ Iter  6x200: f~ = 9.896960e-01 newi = 3
+Elapsed time is 10.381222 seconds.
+Score: 0.9720
+

Set up another sample problem

We set up another problem with 10% noise, but no collinearity.

sz = [200 300 400];
+R = 5;
+ns = 0.10;
+
+info = create_problem('Size', sz, 'Num_Factors', R, 'Noise', ns, ...
+    'Factor_Generator', @rand,'Lambda_Generator', @ones);
+
+% Extract data and solution
+X = info.Data;
+M_true = info.Soln;
+

Terminating once a desired fit is achieved

If we know the noise level is 10%, we would expect a fit of 0.90 at best. So, we can set a threshold that is close to that and terminate as soon as we achieve that accuracy. Since detecting convergence is hard for a randomized method, this can lead to speedups. However, if the threshold is not high enough, the accuracy of the solution may suffer.

M = cp_arls(X,R,'newitol',20,'fitthresh',0.895,'truefit',true);
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter  1x50: f~ = 8.966351e-01 newi = 0
+ Final fit = 8.972839e-01 Final estimated fit = 8.966351e-01 
+Score: 0.9566
+

Changing the number of function evaluation samples

The function evaluation is approximate and based on sampling the number of entries specified by `nsampfit`. If this is too small, the samples will not be accurate enough. If this is too large, the computation will take too long. The default is 2^14, which should generally be sufficient. It may sometimes be possible to use smaller values. The same sampled entries are used for every convergence check --- we do not resample to check other entries.

M = cp_arls(X,R,'truefit',true,'nsampfit',100);
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter  7x50: f~ = 8.890117e-01 newi = 5
+ Final fit = 8.935068e-01 Final estimated fit = 8.948104e-01 
+Score: 0.9809
+

Change the number of sampled rows in least squares solve

The default number of sampled rows for the least squares solves is `ceil(10*R*log2(R))`. This seemed to work well in most tests, but this can be varied higher or lower. For R=5, this means we sample 117 rows per solve. The rows are different for every least squares problem. Let's see what happens if we reduce this to 10.
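
For reference, the quoted default can be reproduced directly; this one-liner is just a sketch of the arithmetic, not a toolbox call.

ceil(10*5*log2(5))   % default nsamplsq for R = 5, which evaluates to 117
+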

M = cp_arls(X,R,'truefit',true,'nsamplsq',10);
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter 10x50: f~ = 7.180423e-01 newi = 4
+ Iter 11x50: f~ = 7.033075e-01 newi = 5
+ Final fit = 7.553195e-01 Final estimated fit = 7.554767e-01 
+Score: 0.1939
+

What if we use 25?

M = cp_arls(X,R,'truefit',true,'nsamplsq',25);
+fprintf('Score: %.4f\n',score(M,M_true));
+
+CP-ARLS (with mixing): 
+ Iter  8x50: f~ = 8.812275e-01 newi = 5
+ Final fit = 8.816266e-01 Final estimated fit = 8.813898e-01 
+Score: 0.9236
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_doc.html new file mode 100644 index 0000000..b0357af --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_doc.html @@ -0,0 +1,413 @@ + + + + + All-at-once optimization for CP tensor decomposition

All-at-once optimization for CP tensor decomposition

We explain how to use the cp_opt function, which implements the CP-OPT method that fits the CP model using direct or all-at-once optimization. This is in contrast to the cp_als function, which implements CP-ALS and fits the CP model using alternating optimization. The CP-OPT method is described in the following reference:

  • E. Acar, D. M. Dunlavy and T. G. Kolda, A Scalable Optimization Approach for Fitting Canonical Tensor Decompositions, J. Chemometrics 25(2):67-86, February 2011 (doi:10.1002/cem.1335)

Contents

Third-party optimization software

The cp_opt method uses third-party optimization software to do the optimization. You can use either L-BFGS-B or the Poblano Toolbox.

The remainder of these instructions assume L-BFGS-B is being used. See here for instructions on using cp_opt with Poblano.

Check that the software is installed.

Be sure that lbfgsb is in your path.

help lbfgsb
+
  x = lbfgsb( fcn, l, u )
+    uses the lbfgsb v.3.0 library (fortran files must be installed;
+        see compile_mex.m ) which is the L-BFGS-B algorithm.
+    The algorithm is similar to the L-BFGS quasi-Newton algorithm,
+    but also handles bound constraints via an active-set type iteration.
+    This version is based on the modified C code L-BFGS-B-C, and so has 
+    a slightly different calling syntax than previous versions.
+ 
+   The minimization problem that it solves is:
+        min_x  f(x)     subject to   l <= x <= u
+ 
+  'fcn' is a function handle that accepts an input, 'x',
+    and returns two outputs, 'f' (function value), and 'g' (function gradient).
+ 
+  'l' and 'u' are column-vectors of constraints. Set their values to Inf
+    if you want to ignore them. (You can set some values to Inf, but keep
+    others enforced).
+ 
+  The full format of the function is:
+  [x,f,info] = lbfgsb( fcn, l, u, opts )
+    where the output 'f' has the value of the function f at the final iterate
+    and 'info' is a structure with useful information
+        (self-explanatory, except for info.err. The first column of info.err
+         is the history of the function values f, and the second column
+         is the history of norm( gradient, Inf ).  )
+ 
+    The 'opts' structure allows you to pass further options.
+    Possible field name values:
+ 
+        opts.x0     The starting value (default: all zeros)
+        opts.m      Number of limited-memory vectors to use in the algorithm
+                        Try 3 <= m <= 20. (default: 5 )
+        opts.factr  Tolerance setting (see this source code for more info)
+                        (default: 1e7 ). This is later multiplied by machine epsilon
+        opts.pgtol  Another tolerance setting, relating to norm(gradient,Inf)
+                        (default: 1e-5)
+        opts.maxIts         How many iterations to allow (default: 100)
+        opts.maxTotalIts    How many iterations to allow, including linesearch iterations
+                        (default: 5000)
+        opts.printEvery     How often to display information (default: 1)
+        opts.errFcn         A function handle (or cell array of several function handles)
+                        that computes whatever you want. The output will be printed
+                        to the screen every 'printEvery' iterations. (default: [] )
+                        Results saved in columns 3 and higher of info.err variable
+ 
+  Stephen Becker, srbecker@alumni.caltech.edu
+  Feb 14, 2012
+  Updated Feb 21 2015, Stephen Becker, stephen.becker@colorado.edu
+
+

Create an example problem.

Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise.

R = 5;
+info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10);
+X = info.Data;
+M_true = info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, 'Factor_Generator', 'nvecs');
+

Call the cp_opt method

Here is an example call to the cp_opt method. By default, each iteration prints the least squares fit function value (being minimized) and the norm of the gradient.

[M,M0,output] = cp_opt(X, R, 'init', M_init);
+
Iter    10, f(x) = 1.239376e+03, ||grad||_infty = 9.52e+01
+Iter    20, f(x) = 1.204254e+03, ||grad||_infty = 6.93e-01
+Iter    30, f(x) = 1.033479e+03, ||grad||_infty = 9.20e+01
+Iter    40, f(x) = 5.932041e+02, ||grad||_infty = 8.66e+00
+Iter    50, f(x) = 5.891379e+02, ||grad||_infty = 2.58e-01
+Iter    60, f(x) = 5.891340e+02, ||grad||_infty = 7.49e-03
+Iter    61, f(x) = 5.891340e+02, ||grad||_infty = 5.58e-03
+

Check the output

It's important to check the output of the optimization method. In particular, it's worthwhile to check the exit message.

exitmsg = output.ExitMsg
+
+exitmsg =
+
+    'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.'
+
+

The fit is the percentage of the data that is explained by the model. Because we have noise, we do not expect the fit to be perfect.

fit = output.Fit
+
+fit =
+
+   99.0198
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9983
+
+

Overfitting example

Re-using the same example as before, consider the case where we don't know R in advance. We might guess too high. Here we show a case where we guess R+1 factors rather than R.

% Generate initial guess of the correct size
+M_plus_init = create_guess('Data', X, 'Num_Factors', R+1, ...
+    'Factor_Generator', 'nvecs');
+
% Run the algorithm
+[M_plus,~,output] = cp_opt(X, R+1, 'init', M_plus_init);
+exitmsg = output.ExitMsg
+fit = output.Fit
+
Iter    10, f(x) = 1.239651e+03, ||grad||_infty = 9.54e+01
+Iter    20, f(x) = 1.204230e+03, ||grad||_infty = 1.30e+00
+Iter    30, f(x) = 1.202606e+03, ||grad||_infty = 1.08e+01
+Iter    40, f(x) = 7.488881e+02, ||grad||_infty = 1.42e+02
+Iter    50, f(x) = 5.919198e+02, ||grad||_infty = 4.48e+00
+Iter    60, f(x) = 5.889069e+02, ||grad||_infty = 1.93e+00
+Iter    70, f(x) = 5.881156e+02, ||grad||_infty = 8.79e-01
+Iter    80, f(x) = 5.877669e+02, ||grad||_infty = 9.43e-01
+Iter    90, f(x) = 5.874421e+02, ||grad||_infty = 2.62e+00
+Iter   100, f(x) = 5.870874e+02, ||grad||_infty = 2.98e-01
+Iter   110, f(x) = 5.869636e+02, ||grad||_infty = 2.99e+00
+Iter   120, f(x) = 5.868945e+02, ||grad||_infty = 4.64e-01
+Iter   130, f(x) = 5.868383e+02, ||grad||_infty = 4.58e-01
+Iter   140, f(x) = 5.867528e+02, ||grad||_infty = 3.39e-01
+Iter   150, f(x) = 5.867107e+02, ||grad||_infty = 7.51e-01
+Iter   160, f(x) = 5.866676e+02, ||grad||_infty = 2.29e-01
+Iter   170, f(x) = 5.866355e+02, ||grad||_infty = 2.30e-01
+Iter   180, f(x) = 5.866268e+02, ||grad||_infty = 1.77e-01
+Iter   190, f(x) = 5.866142e+02, ||grad||_infty = 1.19e-01
+Iter   200, f(x) = 5.865911e+02, ||grad||_infty = 2.12e-01
+Iter   210, f(x) = 5.865839e+02, ||grad||_infty = 1.83e-01
+Iter   220, f(x) = 5.865713e+02, ||grad||_infty = 3.90e-01
+Iter   230, f(x) = 5.865528e+02, ||grad||_infty = 8.10e-01
+Iter   240, f(x) = 5.865372e+02, ||grad||_infty = 1.05e-01
+Iter   250, f(x) = 5.865300e+02, ||grad||_infty = 2.82e-01
+Iter   260, f(x) = 5.865229e+02, ||grad||_infty = 1.66e-01
+Iter   270, f(x) = 5.865135e+02, ||grad||_infty = 1.06e-01
+Iter   280, f(x) = 5.865011e+02, ||grad||_infty = 8.57e-02
+Iter   290, f(x) = 5.864903e+02, ||grad||_infty = 3.23e-01
+Iter   300, f(x) = 5.864816e+02, ||grad||_infty = 1.39e-01
+Iter   310, f(x) = 5.864728e+02, ||grad||_infty = 1.16e-01
+Iter   320, f(x) = 5.864642e+02, ||grad||_infty = 1.15e-01
+Iter   330, f(x) = 5.864525e+02, ||grad||_infty = 3.76e-01
+Iter   340, f(x) = 5.864428e+02, ||grad||_infty = 2.33e-01
+Iter   350, f(x) = 5.864069e+02, ||grad||_infty = 1.92e-01
+Iter   360, f(x) = 5.863788e+02, ||grad||_infty = 4.34e-01
+Iter   370, f(x) = 5.863443e+02, ||grad||_infty = 1.84e-01
+Iter   380, f(x) = 5.863030e+02, ||grad||_infty = 1.94e-01
+Iter   390, f(x) = 5.862826e+02, ||grad||_infty = 2.97e-01
+Iter   400, f(x) = 5.862708e+02, ||grad||_infty = 1.20e-01
+Iter   410, f(x) = 5.862519e+02, ||grad||_infty = 4.23e-01
+Iter   420, f(x) = 5.862373e+02, ||grad||_infty = 1.72e-01
+Iter   430, f(x) = 5.862314e+02, ||grad||_infty = 4.63e-01
+Iter   440, f(x) = 5.862213e+02, ||grad||_infty = 9.90e-02
+Iter   450, f(x) = 5.862163e+02, ||grad||_infty = 6.01e-01
+Iter   460, f(x) = 5.862098e+02, ||grad||_infty = 1.04e-01
+Iter   470, f(x) = 5.862055e+02, ||grad||_infty = 7.14e-02
+Iter   480, f(x) = 5.862028e+02, ||grad||_infty = 1.27e-01
+Iter   490, f(x) = 5.862019e+02, ||grad||_infty = 8.58e-02
+Iter   500, f(x) = 5.861960e+02, ||grad||_infty = 1.39e-01
+Iter   510, f(x) = 5.861923e+02, ||grad||_infty = 1.04e-01
+Iter   520, f(x) = 5.861899e+02, ||grad||_infty = 5.11e-02
+Iter   530, f(x) = 5.861883e+02, ||grad||_infty = 7.26e-02
+Iter   540, f(x) = 5.861877e+02, ||grad||_infty = 2.21e-02
+Iter   550, f(x) = 5.861871e+02, ||grad||_infty = 4.50e-02
+Iter   560, f(x) = 5.861869e+02, ||grad||_infty = 6.50e-02
+Iter   570, f(x) = 5.861868e+02, ||grad||_infty = 2.86e-02
+Iter   580, f(x) = 5.861860e+02, ||grad||_infty = 2.24e-02
+Iter   590, f(x) = 5.861859e+02, ||grad||_infty = 2.18e-02
+Iter   600, f(x) = 5.861857e+02, ||grad||_infty = 1.93e-02
+Iter   610, f(x) = 5.861856e+02, ||grad||_infty = 3.24e-02
+Iter   620, f(x) = 5.861854e+02, ||grad||_infty = 1.61e-02
+Iter   630, f(x) = 5.861853e+02, ||grad||_infty = 2.29e-02
+Iter   640, f(x) = 5.861852e+02, ||grad||_infty = 8.46e-03
+Iter   650, f(x) = 5.861851e+02, ||grad||_infty = 9.60e-03
+Iter   660, f(x) = 5.861851e+02, ||grad||_infty = 2.05e-02
+Iter   664, f(x) = 5.861851e+02, ||grad||_infty = 3.68e-02
+
+exitmsg =
+
+    'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.'
+
+
+fit =
+
+   99.0247
+
+
% Check the answer (1 is perfect)
+scr = score(M_plus, M_true)
+
+scr =
+
+    0.9983
+
+

Nonnegative factorization

We can employ lower bounds to get a nonnegative factorization.

Create an example problem.

Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise. We select nonnegative factor matrices and lambdas. The create_problem function doesn't really know how to add noise without going negative, so we hack it to make the observed tensor nonnegative.

R = 5;
+info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10,...
+    'Factor_Generator', 'rand', 'Lambda_Generator', 'rand');
+X = info.Data .* (info.Data > 0); % Force it to be nonnegative
+M_true = info.Soln;
+

Generate initial guess of the correct size

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'rand');
+

Call the cp_opt method

Here we specify a lower bound of zero with the last two arguments.

[M,M0,output] = cp_opt(X, R, 'init', M_init,'lower',0);
+
Iter    10, f(x) = 1.373055e+02, ||grad||_infty = 1.52e+01
+Iter    20, f(x) = 5.094910e+01, ||grad||_infty = 4.21e+00
+Iter    30, f(x) = 4.495606e+01, ||grad||_infty = 2.25e+00
+Iter    40, f(x) = 4.154133e+01, ||grad||_infty = 1.92e+00
+Iter    50, f(x) = 4.067009e+01, ||grad||_infty = 2.14e+00
+Iter    60, f(x) = 3.996698e+01, ||grad||_infty = 1.80e+00
+Iter    70, f(x) = 3.964063e+01, ||grad||_infty = 1.45e+00
+Iter    80, f(x) = 3.941584e+01, ||grad||_infty = 5.82e-01
+Iter    90, f(x) = 3.935186e+01, ||grad||_infty = 2.06e-01
+Iter   100, f(x) = 3.933107e+01, ||grad||_infty = 1.76e-01
+Iter   110, f(x) = 3.932266e+01, ||grad||_infty = 1.61e-01
+Iter   120, f(x) = 3.931633e+01, ||grad||_infty = 2.04e-01
+Iter   130, f(x) = 3.931059e+01, ||grad||_infty = 2.72e-01
+Iter   140, f(x) = 3.930927e+01, ||grad||_infty = 2.68e-01
+Iter   150, f(x) = 3.930873e+01, ||grad||_infty = 2.33e-01
+Iter   160, f(x) = 3.930837e+01, ||grad||_infty = 1.91e-01
+Iter   170, f(x) = 3.930823e+01, ||grad||_infty = 1.91e-01
+Iter   180, f(x) = 3.930818e+01, ||grad||_infty = 1.87e-01
+Iter   190, f(x) = 3.930817e+01, ||grad||_infty = 1.84e-01
+Iter   200, f(x) = 3.930816e+01, ||grad||_infty = 1.81e-01
+Iter   202, f(x) = 3.930816e+01, ||grad||_infty = 1.81e-01
+

Check the output

exitmsg = output.ExitMsg
+
+exitmsg =
+
+    'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.'
+
+

The fit is the percentage of the data that is explained by the model. Because we have noise, we do not expect the fit to be perfect.

fit = output.Fit
+
+fit =
+
+   99.0267
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9854
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_poblano_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_poblano_doc.html new file mode 100644 index 0000000..8552222 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_opt_poblano_doc.html @@ -0,0 +1,296 @@ + + + + + All-at-once optimization for CP tensor decomposition (with Poblano)

All-at-once optimization for CP tensor decomposition (with Poblano)

We explain how to use cp_opt with the Poblano toolbox. The default is to use L-BFGS-B (not Poblano), which is described here.

Contents

Poblano Optimization Toolbox

Check that you have Poblano 1.1 installed. The output of your 'ver' command should look something like the following.

ver
+
----------------------------------------------------------------------------------------------------
+MATLAB Version: 9.2.0.556344 (R2017a)
+MATLAB License Number: 192525
+Operating System: Microsoft Windows 10 Enterprise Version 10.0 (Build 14393)
+Java Version: Java 1.7.0_60-b19 with Oracle Corporation Java HotSpot(TM) 64-Bit Server VM mixed mode
+----------------------------------------------------------------------------------------------------
+MATLAB                                                Version 9.2         (R2017a)
+Parallel Computing Toolbox                            Version 6.10        (R2017a)
+Poblano Toolbox (Sandia National Labs)                Version 1.1                 
+Statistics and Machine Learning Toolbox               Version 11.1        (R2017a)
+Tensor Toolbox (Sandia National Labs)                 Version 3.0-dev             
+

Create an example problem.

Create an example 50 x 40 x 30 tensor with rank 5 and add 10% noise.

R = 5;
+info = create_problem('Size', [50 40 30], 'Num_Factors', R, 'Noise', 0.10);
+X = info.Data;
+M_true = info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'nvecs');
+

Set up the optimization parameters

It's generally a good idea to consider the parameters of the optimization method. The default options may be either too stringent or not stringent enough. The most important options to consider are detailed here.

% Get the defaults
+ncg_opts = ncg('defaults');
+% Tighten the stop tolerance (norm of gradient). This is often too large.
+ncg_opts.StopTol = 1.0e-6;
+% Tighten relative change in function value tolerance. This is often too large.
+ncg_opts.RelFuncTol = 1.0e-20;
+% Increase the number of iterations.
+ncg_opts.MaxIters = 10^4;
+% Only display every 10th iteration
+ncg_opts.DisplayIters = 10;
+% Display the final set of options
+ncg_opts
+
+ncg_opts = 
+
+  struct with fields:
+
+                   Display: 'iter'
+              DisplayIters: 10
+           LineSearch_ftol: 1.0000e-04
+           LineSearch_gtol: 0.0100
+    LineSearch_initialstep: 1
+         LineSearch_maxfev: 20
+         LineSearch_method: 'more-thuente'
+         LineSearch_stpmax: 1.0000e+15
+         LineSearch_stpmin: 1.0000e-15
+           LineSearch_xtol: 1.0000e-15
+              MaxFuncEvals: 10000
+                  MaxIters: 10000
+                RelFuncTol: 1.0000e-20
+              RestartIters: 20
+                 RestartNW: 0
+              RestartNWTol: 0.1000
+                   StopTol: 1.0000e-06
+                 TraceFunc: 0
+            TraceFuncEvals: 0
+                 TraceGrad: 0
+             TraceGradNorm: 0
+              TraceRelFunc: 0
+                    TraceX: 0
+                    Update: 'PR'
+
+

Call the cp_opt method

Here is an example call to the cp_opt method. By default, each iteration prints the least squares fit function value (being minimized) and the norm of the gradient. The meaning of any line search warnings can be checked via doc cvsrch.

[M,~,output] = cp_opt(X, R, 'init', M_init, ...
+    'opt', 'ncg', 'opt_options', ncg_opts);
+
 Iter  FuncEvals       F(X)          ||G(X)||/N        
+------ --------- ---------------- ----------------
+     0         1   28323.90709757       0.49199185
+    10        73     318.63828960       0.34043521
+    20       126     276.86091334       0.02452650
+    30       164     276.07707790       0.00756378
+    40       196     275.99604005       0.00081675
+    50       216     275.99505143       0.00011444
+    60       236     275.99503708       0.00003744
+    70       256     275.99503597       0.00001239
+    80       276     275.99503582       0.00000121
+    81       278     275.99503582       0.00000072
+

Check the output

It's important to check the output of the optimization method. In particular, it's worthwhile to check the exit flag. A zero (0) indicates successful termination with the gradient smaller than the specified StopTol, and a three (3) indicates a successful termination where the change in function value is less than RelFuncTol. The meaning of any other flags can be checked via doc poblano_params.
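
The following is a small sketch, not part of the original example, of acting on those flag values programmatically; it assumes the output structure returned by cp_opt above.

% Sketch: flags 0 (gradient below StopTol) and 3 (function change below
+% RelFuncTol) indicate success; warn on anything else.
+if ~ismember(output.ExitFlag, [0 3])
+    warning('NCG stopped without converging (ExitFlag = %d); see doc poblano_params.', ...
+        output.ExitFlag);
+end
+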

exitflag = output.ExitFlag
+
+exitflag =
+
+     0
+
+

The fit is the percentage of the data that is explained by the model. Because we have noise, we do not expect the fit to be perfect.

fit = output.Fit
+
+fit =
+
+   99.0205
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9960
+
+

Overfitting example

Consider the case where we don't know R in advance. We might guess too high. Here we show a case where we guess R+1 factors rather than R.

% Generate initial guess of the correct size
+M_plus_init = create_guess('Data', X, 'Num_Factors', R+1, ...
+    'Factor_Generator', 'nvecs');
+
% Loosen the stop tolerance (norm of gradient).
+ncg_opts.StopTol = 1.0e-2;
+
% Run the algorithm
+[M_plus,~,output] = cp_opt(X, R+1, 'init', M_plus_init, ...
+    'opt', 'ncg', 'opt_options', ncg_opts);
+exitflag = output.ExitFlag
+fit = output.Fit
+
 Iter  FuncEvals       F(X)          ||G(X)||/N        
+------ --------- ---------------- ----------------
+     0         1   28324.22221703       0.41000218
+    10        73     318.70871296       0.28371386
+    20       126     276.85068977       0.02067054
+    28       158     276.04553324       0.00988816
+
+exitflag =
+
+     0
+
+
+fit =
+
+   99.0203
+
+
% Check the answer (1 is perfect)
+scr = score(M_plus, M_true)
+
+scr =
+
+    0.9926
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_doc.html new file mode 100644 index 0000000..9bf01ff --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_doc.html @@ -0,0 +1,218 @@ + + + + + Weighted optimization for CP tensor decomposition with incomplete data

Weighted optimization for CP tensor decomposition with incomplete data

We explain how to use the CP Weighted Optimization (CP-WOPT) method implemented in cp_wopt. The method is described in the following article:

  • E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup, Scalable Tensor Factorizations for Incomplete Data, Chemometrics and Intelligent Laboratory Systems 106(1):41-56, March 2011 (doi:10.1016/j.chemolab.2010.08.004)

Contents

Third-party optimization software

The cp_wopt method uses third-party optimization software to do the optimization. You can use either L-BFGS-B or the Poblano Toolbox.

The remainder of these instructions assume L-BFGS-B is being used. See here for instructions on using cp_wopt with Poblano.

Important Information

It is critical to zero out the values in the missing entries of the data tensor. This can be done by calling cp_wopt(X.*P,P,...). This is a frequent source of errors in using this method.
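
As a sketch of that advice (using the variables X, P, R, and M_init constructed below, and the 'skip_zeroing' option mentioned in the solver output further down), the zeroing can also be done once in preprocessing:

Xz = X .* P;   % zero out the missing entries using the observation pattern P
+M = cp_wopt(Xz, P, R, 'init', M_init, 'skip_zeroing', true);
+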

Create an example problem with missing data.

Here we have 25% missing data and 10% noise.

R = 2;
+info = create_problem('Size', [15 10 5], 'Num_Factors', R, ...
+    'M', 0.25, 'Noise', 0.10);
+X = info.Data;
+P = info.Pattern;
+M_true= info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'nvecs');
+

Call the cp_wopt method

Here is an example call to the cp_wopt method. By default, each iteration prints the least squares fit function value (being minimized) and the norm of the gradient.

[M,~,output] = cp_wopt(X, P, R, 'init', M_init);
+
Running CP-WOPT...
+Time for zeroing out masked entries of data tensor is 4.39e-04 seconds.
+(If zeroing is done in preprocessing, set 'skip_zeroing' to true.)
+Iter    10, f(x) = 1.299287e+01, ||grad||_infty = 5.84e+00
+Iter    20, f(x) = 9.896947e-01, ||grad||_infty = 4.82e-02
+Iter    30, f(x) = 9.893514e-01, ||grad||_infty = 9.85e-05
+Iter    32, f(x) = 9.893514e-01, ||grad||_infty = 6.39e-05
+

Check the output

It's important to check the output of the optimization method. In particular, it's worthwhile to check the exit message for any problems. The message CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH means that it has converged because the function value stopped improving.

exitmsg = output.ExitMsg
+
+exitmsg =
+
+    'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.'
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9991
+
+

Create a SPARSE example problem with missing data.

Here we have 95% missing data and 10% noise.

R = 2;
+info = create_problem('Size', [150 100 50], 'Num_Factors', R, ...
+    'M', 0.95, 'Sparse_M', true, 'Noise', 0.10);
+X = info.Data;
+P = info.Pattern;
+M_true= info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'nvecs');
+

Call the cp_wopt method

[M,~,output] = cp_wopt(X, P, R, 'init', M_init);
+
Running CP-WOPT...
+Time for zeroing out masked entries of data tensor is 3.66e-02 seconds.
+(If zeroing is done in preprocessing, set 'skip_zeroing' to true.)
+Iter    10, f(x) = 1.895160e+02, ||grad||_infty = 2.96e+01
+Iter    20, f(x) = 1.711120e+02, ||grad||_infty = 8.23e-03
+Iter    21, f(x) = 1.711120e+02, ||grad||_infty = 1.18e-03
+

Check the output

exitmsg = output.ExitMsg
+
+exitmsg =
+
+    'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.'
+
+

Evaluate the output

scr = score(M,M_true)
+
+scr =
+
+    0.9995
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_poblano_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_poblano_doc.html new file mode 100644 index 0000000..25db0c6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/cp_wopt_poblano_doc.html @@ -0,0 +1,369 @@ + + + + + Weighted optimization for CP tensor decomposition with incomplete data

Weighted optimization for CP tensor decomposition with incomplete data

We explain how to use cp_wopt with the POBLANO toolbox. The method is described in the following article:

E. Acar, D. M. Dunlavy, T. G. Kolda and M. Mørup, Scalable Tensor Factorizations for Incomplete Data, Chemometrics and Intelligent Laboratory Systems 106(1):41-56, March 2011 (doi:10.1016/j.chemolab.2010.08.004)

Contents

Important Information

It is critical to zero out the values in the missing entries of the data tensor. This can be done by calling cp_wopt(X.*P,P,...). This is a frequent source of errors in using this method.

Create an example problem with missing data.

Here we have 25% missing data and 10% noise.

R = 2;
+info = create_problem('Size', [15 10 5], 'Num_Factors', R, ...
+    'M', 0.25, 'Noise', 0.10);
+X = info.Data;
+P = info.Pattern;
+M_true= info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'nvecs');
+

Set up the optimization parameters

It's generally a good idea to consider the parameters of the optimization method. The default options may be either too stringent or not stringent enough. The most important options to consider are detailed here.

% Get the defaults
+ncg_opts = ncg('defaults');
+% Tighten the stop tolerance (norm of gradient). This is often too large.
+ncg_opts.StopTol = 1.0e-6;
+% Tighten the relative change in function value tolerance. This is often too large.
+ncg_opts.RelFuncTol = 1.0e-20;
+% Increase the number of iterations.
+ncg_opts.MaxIters = 10^4;
+% Only display every 10th iteration
+ncg_opts.DisplayIters = 10;
+% Display the final set of options
+ncg_opts
+
+ncg_opts = 
+
+  struct with fields:
+
+                   Display: 'iter'
+              DisplayIters: 10
+           LineSearch_ftol: 1.0000e-04
+           LineSearch_gtol: 0.0100
+    LineSearch_initialstep: 1
+         LineSearch_maxfev: 20
+         LineSearch_method: 'more-thuente'
+         LineSearch_stpmax: 1.0000e+15
+         LineSearch_stpmin: 1.0000e-15
+           LineSearch_xtol: 1.0000e-15
+              MaxFuncEvals: 10000
+                  MaxIters: 10000
+                RelFuncTol: 1.0000e-20
+              RestartIters: 20
+                 RestartNW: 0
+              RestartNWTol: 0.1000
+                   StopTol: 1.0000e-06
+                 TraceFunc: 0
+            TraceFuncEvals: 0
+                 TraceGrad: 0
+             TraceGradNorm: 0
+              TraceRelFunc: 0
+                    TraceX: 0
+                    Update: 'PR'
+
+

Call the cp_wopt method

Here is an example call to the cp_wopt method. By default, each iteration prints the least squares fit function value (being minimized) and the norm of the gradient. The meaning of any line search warnings can be checked via doc cvsrch.

[M,~,output] = cp_wopt(X, P, R, 'init', M_init, ...
+    'opt', 'ncg', 'opt_options', ncg_opts);
+
Running CP-WOPT...
+Time for zeroing out masked entries of data tensor is 7.52e-04 seconds.
+(If zeroing is done in preprocessing, set 'skip_zeroing' to true.)
+ Iter  FuncEvals       F(X)          ||G(X)||/N        
+------ --------- ---------------- ----------------
+     0         1      42.12686745       0.23070139
+    10        37       3.20399740       0.01068598
+    20        74       1.86647166       0.01496155
+    30       102       1.75795458       0.00124314
+    40       122       1.75699489       0.00030707
+    50       164       1.59387469       0.03315542
+    60       202       0.62497618       0.03131283
+    70       229       0.34450247       0.00826425
+    80       254       0.31352609       0.00161307
+    90       275       0.31288321       0.00018934
+   100       295       0.31287112       0.00003616
+   110       315       0.31287061       0.00000820
+   120       335       0.31287058       0.00000100
+

Check the output

It's important to check the output of the optimization method. In particular, it's worthwhile to check the exit flag. A zero (0) indicates successful termination with the gradient smaller than the specified StopTol, and a three (3) indicates a successful termination where the change in function value is less than RelFuncTol. The meaning of any other flags can be checked via doc poblano_params.

exitflag = output.ExitFlag
+
+exitflag =
+
+     0
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9841
+
+

Create a SPARSE example problem with missing data.

Here we have 95% missing data and 10% noise.

R = 2;
+info = create_problem('Size', [150 100 50], 'Num_Factors', R, ...
+    'M', 0.95, 'Sparse_M', true, 'Noise', 0.10);
+X = info.Data;
+P = info.Pattern;
+M_true= info.Soln;
+

Create initial guess using 'nvecs'

M_init = create_guess('Data', X, 'Num_Factors', R, ...
+    'Factor_Generator', 'nvecs');
+

Set up the optimization parameters

It's generally a good idea to consider the parameters of the optimization method. The default options may be either too stringent or not stringent enough. The most important options to consider are detailed here.

% Get the defaults
+ncg_opts = ncg('defaults');
+% Tighten the stop tolerance (norm of gradient). This is often too large.
+ncg_opts.StopTol = 1.0e-6;
+% Tighten the relative change in function value tolerance. This is often too large.
+ncg_opts.RelFuncTol = 1.0e-20;
+% Increase the number of iterations.
+ncg_opts.MaxIters = 10^4;
+% Only display every 10th iteration
+ncg_opts.DisplayIters = 10;
+% Display the final set of options
+ncg_opts
+
+ncg_opts = 
+
+  struct with fields:
+
+                   Display: 'iter'
+              DisplayIters: 10
+           LineSearch_ftol: 1.0000e-04
+           LineSearch_gtol: 0.0100
+    LineSearch_initialstep: 1
+         LineSearch_maxfev: 20
+         LineSearch_method: 'more-thuente'
+         LineSearch_stpmax: 1.0000e+15
+         LineSearch_stpmin: 1.0000e-15
+           LineSearch_xtol: 1.0000e-15
+              MaxFuncEvals: 10000
+                  MaxIters: 10000
+                RelFuncTol: 1.0000e-20
+              RestartIters: 20
+                 RestartNW: 0
+              RestartNWTol: 0.1000
+                   StopTol: 1.0000e-06
+                 TraceFunc: 0
+            TraceFuncEvals: 0
+                 TraceGrad: 0
+             TraceGradNorm: 0
+              TraceRelFunc: 0
+                    TraceX: 0
+                    Update: 'PR'
+
+

Call the cp_wopt method

Here is an example call to the cp_wopt method. By default, each iteration prints the least squares fit function value (being minimized) and the norm of the gradient. The meaning of any line search warnings can be checked via doc cvsrch.

[M,~,output] = cp_wopt(X, P, R, 'init', M_init, ...
+    'opt', 'ncg', 'opt_options', ncg_opts);
+
Running CP-WOPT...
+Time for zeroing out masked entries of data tensor is 3.01e-02 seconds.
+(If zeroing is done in preprocessing, set 'skip_zeroing' to true.)
+ Iter  FuncEvals       F(X)          ||G(X)||/N        
+------ --------- ---------------- ----------------
+     0         1     561.51059983       0.01729762
+    10        51      17.54710510       0.00825676
+    20        82      16.81335392       0.00164784
+    30       112      16.63583752       0.00182422
+    40       149      16.09905940       0.00369331
+    50       186      12.61496613       0.01236469
+    60       223       5.78600003       0.00503093
+    70       251       5.42873077       0.00059175
+    80       274       5.42075260       0.00006912
+    90       294       5.42061561       0.00001694
+   100       314       5.42061124       0.00000194
+   102       318       5.42061122       0.00000084
+

Check the output

It's important to check the output of the optimization method. In particular, it's worthwhile to check the exit flag. A zero (0) indicates successful termination with the gradient smaller than the specified StopTol, and a three (3) indicates a successful termination where the change in function value is less than RelFuncTol. The meaning of any other flags can be checked via doc poblano_params.

exitflag = output.ExitFlag
+
+exitflag =
+
+     0
+
+

Evaluate the output

We can "score" the similarity of the model computed by CP and compare that with the truth. The score function on ktensor's gives a score in [0,1] with 1 indicating a perfect match. Because we have noise, we do not expect the fit to be perfect. See doc score for more details.

scr = score(M,M_true)
+
+scr =
+
+    0.9972
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/eigen.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/eigen.html new file mode 100644 index 0000000..f1dd474 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/eigen.html @@ -0,0 +1,28 @@ + + + + + Eigenproblem + + + + + +
Eigenproblem

  • sshopm - Shifted symmetric higher-order power method
  • eiggeap - Shifted power method for generalized tensor eigenproblem (to be documented)

+ + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_amino_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_amino_doc.html new file mode 100644 index 0000000..6d934e0 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_amino_doc.html @@ -0,0 +1,504 @@ + + + + + GCP-OPT Examples with Amino Acids Dataset

GCP-OPT Examples with Amino Acids Dataset

For more details, see Generalized CP Tensor Decomposition.

Contents

Setup

We use the well known amino acids dataset for some tests. This data has some negative values, but the factorization itself should be nonnegative.

% Load the data
+load(fullfile(getfield(what('tensor_toolbox'),'path'),'doc','aminoacids.mat'))
+
+clear M fit
+
+vizopts = {'PlotCommands',{@bar,@(x,y) plot(x,y,'r'),@(x,y) plot(x,y,'g')},...
+    'BottomSpace',0.1, 'HorzSpace', 0.04, 'Normalize', @(x) normalize(x,'sort',2)};
+

CP-ALS

Just a reminder of what CP-ALS does.

cnt = 1;
+
+tic, M{cnt} = cp_als(X,3,'printitn',10); toc
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
+CP_ALS:
+ Iter 10: f = 8.923994e-01 f-delta = 5.1e-02
+ Iter 20: f = 9.645445e-01 f-delta = 1.3e-03
+ Iter 30: f = 9.720363e-01 f-delta = 4.3e-04
+ Iter 40: f = 9.742512e-01 f-delta = 1.1e-04
+ Iter 41: f = 9.743468e-01 f-delta = 9.6e-05
+ Final f = 9.743468e-01 
+Elapsed time is 0.171536 seconds.
+Fit: 0.974347
+

GCP with Gaussian

We can instead call the GCP with the Gaussian function.

cnt = 2;
+M{cnt} = gcp_opt(X,3,'type','Gaussian','printitn',10);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Generalized function Type: Gaussian
+Objective function: @(x,m)(m-x).^2
+Gradient function: @(x,m)2.*(m-x)
+Lower bound of factor matrices: -Inf
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 6.131
+
+Begin Main loop
+Iter    10, f(x) = 3.271851e+08, ||grad||_infty = 2.42e+06
+Iter    20, f(x) = 5.025022e+06, ||grad||_infty = 2.52e+05
+Iter    30, f(x) = 1.497984e+06, ||grad||_infty = 5.00e+04
+Iter    40, f(x) = 1.445673e+06, ||grad||_infty = 6.42e+03
+Iter    50, f(x) = 1.445131e+06, ||grad||_infty = 2.16e+03
+Iter    60, f(x) = 1.445110e+06, ||grad||_infty = 1.63e+02
+Iter    66, f(x) = 1.445110e+06, ||grad||_infty = 1.47e+01
+End Main Loop
+
+Final objective: 1.4451e+06
+Setup time: 0.00 seconds
+Main loop time: 0.17 seconds
+Outer iterations: 66
+Total iterations: 139
+L-BFGS-B Exit message: CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.
+Fit: 0.974951
+

GCP with Gaussian and Missing Data

What if some data is missing?

cnt = 3;
+
+% Proportion of missing data
+p = 0.35;
+
+% Create a mask with the missing entries set to 0 and everything else 1
+W = tensor(double(rand(size(X))>p));
+
+% Fit the model, using the 'mask' option
+M{cnt} = gcp_opt(X.*W,3,'type','Gaussian','mask',W,'printitn',10);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Missing entries: 21488 (35%)
+Generalized function Type: Gaussian
+Objective function: @(x,m)(m-x).^2
+Gradient function: @(x,m)2.*(m-x)
+Lower bound of factor matrices: -Inf
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 6.131
+
+Begin Main loop
+Iter    10, f(x) = 6.949081e+07, ||grad||_infty = 3.55e+06
+Iter    20, f(x) = 2.586026e+06, ||grad||_infty = 1.70e+05
+Iter    30, f(x) = 9.454964e+05, ||grad||_infty = 8.12e+04
+Iter    40, f(x) = 9.283551e+05, ||grad||_infty = 4.45e+03
+Iter    50, f(x) = 9.280079e+05, ||grad||_infty = 1.57e+03
+Iter    60, f(x) = 9.280041e+05, ||grad||_infty = 4.03e+01
+Iter    65, f(x) = 9.280040e+05, ||grad||_infty = 1.03e+01
+End Main Loop
+
+Final objective: 9.2800e+05
+Setup time: 0.00 seconds
+Main loop time: 0.20 seconds
+Outer iterations: 65
+Total iterations: 138
+L-BFGS-B Exit message: CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.
+Fit: 0.974836
+

GCP with ADAM

We can also use stochastic gradient, though it's pretty slow for such a small tensor.

cnt = 4;
+
+% Specify 'opt' = 'adam'
+M{cnt} = gcp_opt(X,3,'type','Gaussian','opt','adam','printitn',1,'fsamp',5000,'gsamp',500);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
+GCP-OPT-ADAM (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Generalized function Type: Gaussian
+Objective function: @(x,m)(m-x).^2
+Gradient function: @(x,m)2.*(m-x)
+Lower bound of factor matrices: -Inf
+Optimization method: adam
+Max iterations (epochs): 1000
+Iterations per epoch: 1000
+Learning rate / decay / maxfails: 0.001 0.1 1
+Function Sampler: uniform with 5000 samples
+Gradient Sampler: uniform with 500 samples
+
+Begin Main loop
+Initial f-est: 2.370396e+09
+Epoch  1: f-est = 1.587379e+09, step = 0.001
+Epoch  2: f-est = 1.227244e+09, step = 0.001
+Epoch  3: f-est = 9.959063e+08, step = 0.001
+Epoch  4: f-est = 8.378263e+08, step = 0.001
+Epoch  5: f-est = 7.074626e+08, step = 0.001
+Epoch  6: f-est = 5.734918e+08, step = 0.001
+Epoch  7: f-est = 4.291828e+08, step = 0.001
+Epoch  8: f-est = 2.976357e+08, step = 0.001
+Epoch  9: f-est = 1.912060e+08, step = 0.001
+Epoch 10: f-est = 1.158204e+08, step = 0.001
+Epoch 11: f-est = 6.630478e+07, step = 0.001
+Epoch 12: f-est = 3.868896e+07, step = 0.001
+Epoch 13: f-est = 2.407166e+07, step = 0.001
+Epoch 14: f-est = 1.597763e+07, step = 0.001
+Epoch 15: f-est = 1.115011e+07, step = 0.001
+Epoch 16: f-est = 7.931146e+06, step = 0.001
+Epoch 17: f-est = 5.626357e+06, step = 0.001
+Epoch 18: f-est = 4.003385e+06, step = 0.001
+Epoch 19: f-est = 2.934238e+06, step = 0.001
+Epoch 20: f-est = 2.211545e+06, step = 0.001
+Epoch 21: f-est = 1.798214e+06, step = 0.001
+Epoch 22: f-est = 1.588134e+06, step = 0.001
+Epoch 23: f-est = 1.490513e+06, step = 0.001
+Epoch 24: f-est = 1.456792e+06, step = 0.001
+Epoch 25: f-est = 1.436484e+06, step = 0.001
+Epoch 26: f-est = 1.440624e+06, step = 0.001, nfails = 1 (resetting to solution from last epoch)
+Epoch 27: f-est = 1.428806e+06, step = 0.0001
+Epoch 28: f-est = 1.424006e+06, step = 0.0001
+Epoch 29: f-est = 1.420764e+06, step = 0.0001
+Epoch 30: f-est = 1.424495e+06, step = 0.0001, nfails = 2 (resetting to solution from last epoch)
+End Main Loop
+
+Final f-est: 1.4208e+06
+Setup time: 0.00 seconds
+Main loop time: 26.05 seconds
+Total iterations: 30000
+Fit: 0.974894
+

GCP with Gamma (terrible!)

We can try Gamma, but it's not really the right distribution and produces a terrible result.

cnt = 5;
+
+Y = tensor(X(:) .* (X(:) > 0), size(X));
+M{cnt} = gcp_opt(Y,3,'type','Gamma','printitn',25);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
Warning: Using 'Gamma' type but tensor X is not nonnegative 
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Generalized function Type: Gamma
+Objective function: @(x,m)x./(m+1e-10)+log(m+1e-10)
+Gradient function: @(x,m)-x./((m+1e-10).^2)+1./(m+1e-10)
+Lower bound of factor matrices: 0
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 6.131
+
+Begin Main loop
+Iter    25, f(x) = 3.043120e+05, ||grad||_infty = 1.77e+03
+Iter    50, f(x) = 3.020489e+05, ||grad||_infty = 2.15e+03
+Iter    75, f(x) = 3.004138e+05, ||grad||_infty = 3.86e+03
+Iter   100, f(x) = 2.990434e+05, ||grad||_infty = 3.64e+03
+Iter   125, f(x) = 2.982634e+05, ||grad||_infty = 2.88e+03
+Iter   150, f(x) = 2.970770e+05, ||grad||_infty = 1.85e+03
+Iter   175, f(x) = 2.958838e+05, ||grad||_infty = 3.16e+03
+Iter   200, f(x) = 2.942980e+05, ||grad||_infty = 2.12e+04
+Iter   225, f(x) = 2.932444e+05, ||grad||_infty = 2.94e+03
+Iter   250, f(x) = 2.924878e+05, ||grad||_infty = 2.45e+03
+Iter   275, f(x) = 2.918342e+05, ||grad||_infty = 3.05e+03
+Iter   300, f(x) = 2.912069e+05, ||grad||_infty = 7.50e+03
+Iter   325, f(x) = 2.906649e+05, ||grad||_infty = 3.50e+03
+Iter   350, f(x) = 2.902689e+05, ||grad||_infty = 3.76e+03
+Iter   375, f(x) = 2.899448e+05, ||grad||_infty = 3.33e+03
+Iter   400, f(x) = 2.895878e+05, ||grad||_infty = 3.82e+03
+Iter   425, f(x) = 2.892458e+05, ||grad||_infty = 3.32e+03
+Iter   450, f(x) = 2.887209e+05, ||grad||_infty = 1.10e+04
+Iter   475, f(x) = 2.879184e+05, ||grad||_infty = 3.67e+03
+Iter   500, f(x) = 2.872554e+05, ||grad||_infty = 2.57e+03
+Iter   525, f(x) = 2.869756e+05, ||grad||_infty = 2.59e+03
+Iter   550, f(x) = 2.867130e+05, ||grad||_infty = 4.64e+03
+Iter   575, f(x) = 2.865309e+05, ||grad||_infty = 2.61e+03
+Iter   600, f(x) = 2.863194e+05, ||grad||_infty = 1.33e+04
+Iter   625, f(x) = 2.860109e+05, ||grad||_infty = 2.89e+03
+Iter   650, f(x) = 2.857354e+05, ||grad||_infty = 2.62e+03
+Iter   675, f(x) = 2.852209e+05, ||grad||_infty = 3.94e+03
+Iter   700, f(x) = 2.849618e+05, ||grad||_infty = 2.73e+03
+Iter   725, f(x) = 2.847007e+05, ||grad||_infty = 2.72e+03
+Iter   750, f(x) = 2.844455e+05, ||grad||_infty = 2.68e+03
+Iter   775, f(x) = 2.842615e+05, ||grad||_infty = 2.67e+03
+Iter   800, f(x) = 2.839909e+05, ||grad||_infty = 2.81e+03
+Iter   825, f(x) = 2.838567e+05, ||grad||_infty = 2.55e+03
+Iter   850, f(x) = 2.836044e+05, ||grad||_infty = 2.62e+03
+Iter   875, f(x) = 2.834286e+05, ||grad||_infty = 2.84e+03
+Iter   900, f(x) = 2.832111e+05, ||grad||_infty = 2.52e+03
+Iter   925, f(x) = 2.828901e+05, ||grad||_infty = 2.39e+03
+Iter   950, f(x) = 2.826842e+05, ||grad||_infty = 2.94e+03
+Iter   975, f(x) = 2.824698e+05, ||grad||_infty = 4.91e+03
+Iter  1000, f(x) = 2.822129e+05, ||grad||_infty = 3.33e+03
+End Main Loop
+
+Final objective: 2.8221e+05
+Setup time: 0.01 seconds
+Main loop time: 4.41 seconds
+Outer iterations: 1000
+Total iterations: 2187
+L-BFGS-B Exit message: UNRECOGNIZED EXIT FLAG
+Fit: 0.417271
+

GCP with Huber + Lower Bound

Huber works well. By default, Huber has no lower bound. To add one, we have to pass in the func/grad/lower information explicitly. We can use tt_gcp_fg_setup to get the func/grad parameters.

cnt = 6;
+
+% Call helper function tt_gcp_fg_setup to get the function and gradient handles
+[fh,gh] = tt_gcp_fg_setup('Huber (0.25)');
+
+% Pass the func/grad/lower explicitly.
+M{cnt} = gcp_opt(X,3,'func',fh,'grad',gh,'lower',0,'printitn',25);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Generalized function Type: user-specified
+Objective function: @(x,m)(x-m).^2.*(abs(x-m)<0.25)+(0.5.*abs(x-m)-0.0625).*(abs(x-m)>=0.25)
+Gradient function: @(x,m)-2.*(x-m).*(abs(x-m)<0.25)-(0.5.*sign(x-m)).*(abs(x-m)>=0.25)
+Lower bound of factor matrices: 0
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 6.131
+
+Begin Main loop
+Iter    25, f(x) = 7.711627e+05, ||grad||_infty = 6.87e+03
+Iter    50, f(x) = 7.127088e+05, ||grad||_infty = 6.76e+03
+Iter    75, f(x) = 6.640264e+05, ||grad||_infty = 6.07e+03
+Iter   100, f(x) = 6.138988e+05, ||grad||_infty = 5.99e+03
+Iter   125, f(x) = 4.665418e+05, ||grad||_infty = 8.79e+03
+Iter   150, f(x) = 1.222041e+05, ||grad||_infty = 5.90e+03
+Iter   175, f(x) = 8.320133e+04, ||grad||_infty = 3.23e+03
+Iter   200, f(x) = 8.304100e+04, ||grad||_infty = 3.34e+03
+Iter   222, f(x) = 8.303986e+04, ||grad||_infty = 3.32e+03
+End Main Loop
+
+Final objective: 8.3040e+04
+Setup time: 0.01 seconds
+Main loop time: 0.73 seconds
+Outer iterations: 222
+Total iterations: 450
+L-BFGS-B Exit message: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL.
+Fit: 0.973482
+

GCP with Beta

This is also pretty bad, which illustrates the consequences of choosing the wrong distribution. It can work a little bit, but it's clearly the wrong objective.

cnt = 7;
+
+M{cnt} = gcp_opt(X,3,'type','beta (0.75)','printitn',25);
+
+fit(cnt) = 1 - norm(full(M{cnt})-X)/norm(X);
+fprintf('Fit: %g\n', fit(cnt));
+viz(M{cnt},'Figure',cnt,vizopts{:});
+
Warning: Using 'beta' type but tensor X is not nonnegative 
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 5 x 201 x 61 (61305 total entries)
+Generalized function Type: beta (0.75)
+Objective function: @(x,m)(1.33333).*(m+1e-10).^(0.75)-(-4).*x.*(m+1e-10).^(-0.25)
+Gradient function: @(x,m)(m+1e-10).^(-0.25)-x.*(m+1e-10).^(-1.25)
+Lower bound of factor matrices: 0
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 6.131
+
+Begin Main loop
+Iter    25, f(x) = 1.000642e+07, ||grad||_infty = 7.42e+06
+Iter    50, f(x) = 9.999832e+06, ||grad||_infty = 8.75e+06
+Iter    75, f(x) = 9.999234e+06, ||grad||_infty = 8.18e+06
+Iter   100, f(x) = 9.902039e+06, ||grad||_infty = 9.00e+15
+Iter   101, f(x) = 9.902039e+06, ||grad||_infty = 9.00e+15
+End Main Loop
+
+Final objective: 9.9020e+06
+Setup time: 0.00 seconds
+Main loop time: 3.18 seconds
+Outer iterations: 101
+Total iterations: 262
+L-BFGS-B Exit message: CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH.
+Fit: 0.462047
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_doc.html new file mode 100644 index 0000000..0890800 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_doc.html @@ -0,0 +1,801 @@ + + + + + Generalized CP (GCP) Tensor Decomposition

Generalized CP (GCP) Tensor Decomposition

This document outlines usage and examples for the generalized CP (GCP) tensor decomposition implemented in gcp_opt. GCP allows alternate objective functions besides the sum of squared errors, which is the standard for CP. The code supports both dense and sparse input tensors, but sparse input tensors require randomized optimization methods. For some examples, see also GCP-OPT Examples with Amino Acids Dataset.

GCP is described in greater detail in the manuscripts:

  • D. Hong, T. G. Kolda, J. A. Duersch. Generalized Canonical Polyadic Tensor Decomposition. arXiv:1808.07452, 2018. To appear in SIAM Review, 2019.
  • T. G. Kolda, D. Hong, J. Duersch. Stochastic Gradients for Large-Scale Tensor Decomposition, 2019.

Contents

Basic Usage

The idea of GCP is to use alternative objective functions. As such, the most important thing to specify is the objective function.

The command M = gcp_opt(X,R,'type',type) computes an estimate of the best rank-R generalized CP (GCP) decomposition of the tensor X for the generalized loss function specified by type. The input X can be a tensor or sparse tensor. The result M is a Kruskal tensor. Some options for the objective function are:

  • 'binary' - Bernoulli distribution for binary data
  • 'count' - Poisson distribution for count data (see also cp_apr)
  • 'normal' - Gaussian distribution (see also cp_als and cp_opt)
  • 'huber (0.25)' - Similar to Gaussian but robust to outliers
  • 'rayleigh' - Rayleigh distribution for nonnegative data
  • 'gamma' - Gamma distribution for nonnegative data

See Function Types for GCP for a complete list of options.
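
For example, a hypothetical sketch fitting a rank-5 model to a count-valued tensor X with the Poisson loss (the tensor X and the rank are assumptions, not part of the original example):

R = 5;                                               % number of components (rank)
M = gcp_opt(X, R, 'type', 'count', 'printitn', 10);  % Poisson loss, print every 10th iteration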

Manually specifying the loss function

Rather than specifying a type, the user has the option to explicitly specify the objective function, gradient, and lower bounds using the following options:

  • 'func' - Objective function handle, e.g., @(x,m) (m-x).^2
  • 'grad' - Gradient function handle, e.g., @(x,m) 2.*(m-x)
  • 'lower' - Lower bound, e.g., 0 or -Inf

Note that the function must be able to work on vectors of x and m values.
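
As a sketch, the Gaussian loss could equivalently be supplied by hand; the handles below match the ones printed for the 'normal' type elsewhere in this documentation and operate elementwise on vectors of x and m:

fh = @(x,m) (m-x).^2;   % objective, evaluated elementwise
gh = @(x,m) 2.*(m-x);   % gradient, evaluated elementwise
M = gcp_opt(X, R, 'func', fh, 'grad', gh, 'lower', -Inf);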

Choice of Optimization Method

The default optimization method is L-BFGS-B (bound-constrained limited-memory BFGS). To use this, the required third-party L-BFGS-B software must be installed.

The L-BFGS-B software can only be used for dense tensors. The other choice is to use a stochastic optimization method, either stochastic gradient descent (SGD) or ADAM. This can be used for dense or sparse tensors.

The command M = gcp_opt(X,R,...,'opt',opt) specifies the optimization method where opt is one of the following strings:

  • 'lbfgsb' - Bound-constrained limited-memory BFGS
  • 'sgd' - Stochastic gradient descent (SGD)
  • 'adam' - Momentum-based SGD method

Each method has parameters, which are described below.
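
For instance (a sketch, reusing the X and R assumed above), the same model can be fit with either optimizer:

M_lbfgs = gcp_opt(X, R, 'type', 'normal', 'opt', 'lbfgsb');  % dense tensors only
M_adam  = gcp_opt(X, R, 'type', 'normal', 'opt', 'adam');    % dense or sparse tensors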

Specifying Missing or Incomplete Data Using the Mask Option

If some entries of the tensor are unknown, the method can mask off that data during the fitting process. To do so, specify a mask tensor W that is the same size as the data tensor X. The mask tensor should be 1 if the entry in X is known and 0 otherwise. The call is M = gcp_opt(X,R,'type',type,'mask',W).
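
A minimal sketch, assuming roughly 20% of the entries of a dense tensor X are unknown (this mirrors the amino acids example, which also zeros out the unknown entries by passing X.*W):

W = tensor(double(rand(size(X)) > 0.2));            % 1 = known entry, 0 = missing
M = gcp_opt(X.*W, R, 'type', 'normal', 'mask', W);  % fit only to the known entries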

Other Options

A few common options are as follows (a combined example call is sketched after this list):

  • 'maxiters' - Maximum number of outer iterations {1000}
  • 'init' - Initialization for factor matrices {'rand'}
  • 'printitn' - Print every n iterations; 0 for no printing {1}
  • 'state' - Random state, to re-create the same outcome {[]}
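
A combined sketch with illustrative values:

M = gcp_opt(X, R, 'type', 'normal', ...
    'maxiters', 500, 'init', 'rand', 'printitn', 10);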

Specifying L-BFGS-B Parameters

In addition to the options above, there are two options used to modify the L-BFGS-B behavior.

  • 'factr' - Tolerance on the change in the objective value. Defaults to 1e7, which is multiplied by machine epsilon.
  • 'pgtol' - Projected gradient tolerance, defaults to 1e-5.

It can sometimes be useful to increase or decrease pgtol depending on the objective function and size of the tensor.
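
A sketch with illustrative values; the effective function-change tolerance is factr times machine epsilon, so the default 1e7 corresponds to roughly 2.2e-9:

M = gcp_opt(X, R, 'type', 'normal', 'opt', 'lbfgsb', ...
    'factr', 1e4, 'pgtol', 1e-8);   % tighter stopping criteria than the defaults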

Specifying SGD and ADAM Parameters

There are a number of parameters that can be adjusted for SGD and ADAM; a combined example call is sketched at the end of this section.

Stochastic Gradient. There are three different sampling methods for computing the stochastic gradient:

  • Uniform - Entries are selected uniformly at random. Default for dense tensors.
  • Stratified - Zeros and nonzeros are sampled separately, which is recommended for sparse tensors. Default for sparse tensors.
  • Semi-Stratified - Modification to stratified sampling that avoids rejection sampling for better efficiency at the cost of potentially higher variance.

The options corresponding to these are as follows.

  • 'sampler' - Type of sampling to use for stochastic gradient. Defaults to 'uniform' for dense and 'stratified' for sparse. The third option is 'semi-stratified'.
  • 'gsamp' - Number of samples for stochastic gradient. This should generally be O(sum(sz)*r). For the stratified or semi-stratified sampler, this can be two numbers. The first is the number of nonzero samples and the second is the number of zero samples. If only one number is specified, then this is used as the number for both nonzeros and zeros, and the total number of samples is 2x what is specified.

Estimating the Function. We also use sampling to estimate the function value.

  • 'fsampler' - This can be 'uniform' (default for dense) or 'stratified' (default for sparse) or a custom function handle. The custom function handle is primarily useful in reusing the same sampled elements across different tests. For instance, we might create such a sampler by calling the hidden sampling function and saving its outputs:
[xsubs, xvals, wghts] = tt_sample_uniform(X, 10000);
+fsampler = @() deal(xsubs, xvals, wghts);
  • 'fsamp' - Number of samples to estimate function. This should generally be somewhat large since we want this sample to generate a reliable estimate of the true function value.

The 'stratified' sampler has an extra option:

  • 'oversample' - Factor to oversample when implicitly sampling zeros in the sparse case. Defaults to 1.1. Only adjust for very small tensors.

There are some other options that are needed for SGD: the learning rate and a decrease schedule. Our schedule is very simple - we decrease the rate if there is no improvement in the approximate function value after an epoch. After a specified number of decreases ('maxfails'), we quit.

  • 'rate' - Initial learning rate. Defaults to 1e-3.
  • 'decay' - How much to decrease the learning rate once progress stagnates, i.e., no decrease in objective function between epochs. Defaults to 0.1.
  • 'maxfails' - How many times to decrease the learning rate. Can be zero. Defaults to 1.
  • 'epciters' - Iterations per epoch. Defaults to 1000.
  • 'festtol' - Quit if the function estimate goes below this level. Defaults to -Inf.

There are some options that are specific to ADAM and generally needn't change:

  • 'beta1' - Defaults to 0.9
  • 'beta2' - Defaults to 0.999
  • 'epsilon' - Defaults to 1e-8
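
A combined sketch of the stochastic options with illustrative values, assuming X is a sparse tensor (so the stratified samplers are the defaults):

M = gcp_opt(X, R, 'type', 'count', 'opt', 'adam', ...
    'sampler', 'stratified', 'gsamp', [1000 1000], ... % nonzero and zero gradient samples
    'fsamp', 10000, ...                                % samples for the function estimate
    'rate', 1e-3, 'decay', 0.1, 'maxfails', 2, ...     % learning-rate schedule
    'epciters', 1000);                                 % iterations per epoch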

Example on Gaussian-distributed data

We set up the example with known low-rank structure. Here nc is the rank and sz is the size.

clear
+rng(4)
+nc = 2;
+sz = [50 60 70];
+info = create_problem('Size',sz,'Num_Factors',nc);
+X = info.Data;
+M_true = info.Soln;
+whos
+
  Name         Size                 Bytes  Class      Attributes
+
+  M_true      50x60x70               3584  ktensor              
+  X           50x60x70            1680376  tensor               
+  info         1x1                1684312  struct               
+  nc           1x1                      8  double               
+  sz           1x3                     24  double               
+
+

Run GCP-OPT

tic, [M1,M0,out] = gcp_opt(X,nc,'type','normal','printitn',10); toc
+fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X));
+fprintf('Score: %f\n',score(M1,M_true));
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Generalized function Type: normal
+Objective function: @(x,m)(m-x).^2
+Gradient function: @(x,m)2.*(m-x)
+Lower bound of factor matrices: -Inf
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 21
+
+Begin Main loop
+Iter    10, f(x) = 9.397039e+04, ||grad||_infty = 2.88e+03
+Iter    20, f(x) = 2.137406e+04, ||grad||_infty = 3.79e+01
+Iter    30, f(x) = 2.092906e+04, ||grad||_infty = 1.64e+02
+Iter    40, f(x) = 4.836752e+03, ||grad||_infty = 1.11e+03
+Iter    47, f(x) = 1.320194e+03, ||grad||_infty = 1.73e+01
+End Main Loop
+
+Final objective: 1.3202e+03
+Setup time: 0.01 seconds
+Main loop time: 0.60 seconds
+Outer iterations: 47
+Total iterations: 117
+L-BFGS-B Exit message: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL.
+Elapsed time is 0.611522 seconds.
+Final fit: 9.004310e-01 (for comparison to f in CP-ALS)
+Score: 0.999567
+

Compare to CP-ALS, which should usually be faster

tic, M2 = cp_als(X,nc,'init',tocell(M0),'printitn',1); toc
+fprintf('Objective function: %e (for comparison to f(x) in GCP-OPT)\n', norm(X-full(M2))^2/prod(size(X)));
+fprintf('Score: %f\n',score(M2,M_true));
+
+CP_ALS:
+ Iter  1: f = 5.755636e-01 f-delta = 5.8e-01
+ Iter  2: f = 6.779797e-01 f-delta = 1.0e-01
+ Iter  3: f = 8.776194e-01 f-delta = 2.0e-01
+ Iter  4: f = 9.002202e-01 f-delta = 2.3e-02
+ Iter  5: f = 9.005574e-01 f-delta = 3.4e-04
+ Iter  6: f = 9.005586e-01 f-delta = 1.2e-06
+ Final f = 9.005586e-01 
+Elapsed time is 0.023634 seconds.
+Objective function: 6.270533e-03 (for comparison to f(x) in GCP-OPT)
+Score: 0.999536
+

Now let's try it with the ADAM functionality

tic, [M3,~,out] = gcp_opt(X,nc,'type','normal','opt','adam','init',M0,'printitn',1); toc
+fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X));
+fprintf('Score: %f\n',score(M3,M_true));
+
+GCP-OPT-ADAM (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Generalized function Type: normal
+Objective function: @(x,m)(m-x).^2
+Gradient function: @(x,m)2.*(m-x)
+Lower bound of factor matrices: -Inf
+Optimization method: adam
+Max iterations (epochs): 1000
+Iterations per epoch: 1000
+Learning rate / decay / maxfails: 0.001 0.1 1
+Function Sampler: uniform with 210000 samples
+Gradient Sampler: uniform with 2100 samples
+
+Begin Main loop
+Initial f-est: 2.660878e+05
+Epoch  1: f-est = 1.361652e+05, step = 0.001
+Epoch  2: f-est = 1.330132e+05, step = 0.001
+Epoch  3: f-est = 1.319047e+05, step = 0.001
+Epoch  4: f-est = 2.539533e+04, step = 0.001
+Epoch  5: f-est = 2.144132e+04, step = 0.001
+Epoch  6: f-est = 2.403361e+03, step = 0.001
+Epoch  7: f-est = 1.317062e+03, step = 0.001
+Epoch  8: f-est = 1.316352e+03, step = 0.001
+Epoch  9: f-est = 1.318167e+03, step = 0.001, nfails = 1 (resetting to solution from last epoch)
+Epoch 10: f-est = 1.313968e+03, step = 0.0001
+Epoch 11: f-est = 1.314306e+03, step = 0.0001, nfails = 2 (resetting to solution from last epoch)
+End Main Loop
+
+Final f-est: 1.3140e+03
+Setup time: 0.05 seconds
+Main loop time: 14.34 seconds
+Total iterations: 11000
+Elapsed time is 14.390320 seconds.
+Final fit: 9.004310e-01 (for comparison to f in CP-ALS)
+Score: 0.999683
+

Create an example Rayleigh tensor model and data instance.

Consider a tensor that is Rayleigh-distributed. This means its entries are all nonnegative. First, we generate such a tensor with low-rank structure.

clear
+rng(65)
+nc = 3;
+sz = [50 60 70];
+nd = length(sz);
+
+% Create factor matrices that correspond to smooth sinusoidal factors
+U=cell(1,nd);
+for k=1:nd
+    V = 1.1 + cos(bsxfun(@times, 2*pi/sz(k)*(0:sz(k)-1)', 1:nc));
+    U{k} = V(:,randperm(nc));
+end
+M_true = normalize(ktensor(U));
+X = tenfun(@raylrnd, full(M_true));
+

Visualize the true solution

viz(M_true, 'Figure', 1)
+
ktensor/viz: Normalizing factors and sorting components according to the 2-norm.
+
+ans = 
+
+  struct with fields:
+
+              height: 0.2933
+               width: [3×1 double]
+          GlobalAxis: [1×1 Axes]
+          FactorAxes: [3×3 Axes]
+    ModeTitleHandles: [3×1 Text]
+    CompTitleHandles: [3×1 Text]
+         PlotHandles: {3×3 cell}
+
+

Run GCP-OPT

tic, [M1,~,out] = gcp_opt(X,nc,'type','rayleigh','printitn',10); toc
+fprintf('Score: %f\n',score(M1,M_true));
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Generalized function Type: rayleigh
+Objective function: @(x,m)2*log(m+1e-10)+(pi/4)*(x./(m+1e-10)).^2
+Gradient function: @(x,m)2./(m+1e-10)-(pi/2)*x.^2./(m+1e-10).^3
+Lower bound of factor matrices: 0
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 21
+
+Begin Main loop
+Iter    10, f(x) = 9.142571e+05, ||grad||_infty = 1.41e+03
+Positive dir derivative in projection 
+Using the backtracking step
+Iter    20, f(x) = 8.450604e+05, ||grad||_infty = 1.89e+03
+Iter    30, f(x) = 7.770233e+05, ||grad||_infty = 1.41e+03
+Iter    40, f(x) = 7.632798e+05, ||grad||_infty = 1.80e+03
+Iter    50, f(x) = 7.580042e+05, ||grad||_infty = 1.10e+03
+Iter    60, f(x) = 7.573270e+05, ||grad||_infty = 2.52e+02
+Iter    70, f(x) = 7.572930e+05, ||grad||_infty = 7.99e+01
+End Main Loop
+
+Final objective: 7.5729e+05
+Setup time: 0.01 seconds
+Main loop time: 2.85 seconds
+Outer iterations: 70
+Total iterations: 165
+L-BFGS-B Exit message: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL.
+Elapsed time is 2.852499 seconds.
+Score: 0.795733
+

Visualize the solution from GCP-OPT

viz(M1, 'Figure', 2)
+
ktensor/viz: Normalizing factors and sorting components according to the 2-norm.
+
+ans = 
+
+  struct with fields:
+
+              height: 0.2933
+               width: [3×1 double]
+          GlobalAxis: [1×1 Axes]
+          FactorAxes: [3×3 Axes]
+    ModeTitleHandles: [3×1 Text]
+    CompTitleHandles: [3×1 Text]
+         PlotHandles: {3×3 cell}
+
+

Now let's try it with the scarce functionality - this leaves out all but 10% of the data!

tic, [M2,~,out] = gcp_opt(X,nc,'type','rayleigh','opt','adam'); toc
+fprintf('Final fit: %e (for comparison to f in CP-ALS)\n',1 - norm(X-full(M1))/norm(X));
+fprintf('Score: %f\n',score(M2,M_true));
+
+GCP-OPT-ADAM (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Generalized function Type: rayleigh
+Objective function: @(x,m)2*log(m+1e-10)+(pi/4)*(x./(m+1e-10)).^2
+Gradient function: @(x,m)2./(m+1e-10)-(pi/2)*x.^2./(m+1e-10).^3
+Lower bound of factor matrices: 0
+Optimization method: adam
+Max iterations (epochs): 1000
+Iterations per epoch: 1000
+Learning rate / decay / maxfails: 0.001 0.1 1
+Function Sampler: uniform with 210000 samples
+Gradient Sampler: uniform with 2100 samples
+
+Begin Main loop
+Initial f-est: 2.715221e+06
+Epoch  1: f-est = 1.017441e+06, step = 0.001
+Epoch  2: f-est = 9.204216e+05, step = 0.001
+Epoch  3: f-est = 8.791755e+05, step = 0.001
+Epoch  4: f-est = 8.496629e+05, step = 0.001
+Epoch  5: f-est = 8.276276e+05, step = 0.001
+Epoch  6: f-est = 8.053227e+05, step = 0.001
+Epoch  7: f-est = 7.839439e+05, step = 0.001
+Epoch  8: f-est = 7.710536e+05, step = 0.001
+Epoch  9: f-est = 7.653168e+05, step = 0.001
+Epoch 10: f-est = 7.619699e+05, step = 0.001
+Epoch 11: f-est = 7.600227e+05, step = 0.001
+Epoch 12: f-est = 7.590060e+05, step = 0.001
+Epoch 13: f-est = 7.585602e+05, step = 0.001
+Epoch 14: f-est = 7.583133e+05, step = 0.001
+Epoch 15: f-est = 7.582559e+05, step = 0.001
+Epoch 16: f-est = 7.582295e+05, step = 0.001
+Epoch 17: f-est = 7.582587e+05, step = 0.001, nfails = 1 (resetting to solution from last epoch)
+Epoch 18: f-est = 7.581745e+05, step = 0.0001
+Epoch 19: f-est = 7.581687e+05, step = 0.0001
+Epoch 20: f-est = 7.581605e+05, step = 0.0001
+Epoch 21: f-est = 7.581473e+05, step = 0.0001
+Epoch 22: f-est = 7.581537e+05, step = 0.0001, nfails = 2 (resetting to solution from last epoch)
+End Main Loop
+
+Final f-est: 7.5815e+05
+Setup time: 0.08 seconds
+Main loop time: 40.89 seconds
+Total iterations: 22000
+Elapsed time is 40.977851 seconds.
+Final fit: 5.380785e-01 (for comparison to f in CP-ALS)
+Score: 0.797088
+

Visualize the solution with scarce

viz(M2, 'Figure', 3)
+
ktensor/viz: Normalizing factors and sorting components according to the 2-norm.
+
+ans = 
+
+  struct with fields:
+
+              height: 0.2933
+               width: [3×1 double]
+          GlobalAxis: [1×1 Axes]
+          FactorAxes: [3×3 Axes]
+    ModeTitleHandles: [3×1 Text]
+    CompTitleHandles: [3×1 Text]
+         PlotHandles: {3×3 cell}
+
+

Boolean tensor.

The model will predict the odds of observing a 1. Recall that the odds relate to the probability as follows: if $p$ is the probability and $r$ is the odds, then $r = p / (1-p)$ (for example, $p=0.9$ gives odds $r=9$). Higher odds indicate a higher probability of observing a one.

clear
+rng(7639)
+nc = 3; % Number of components
+sz = [50 60 70]; % Tensor size
+nd = length(sz); % Number of dimensions
+

We assume that the underlying model tensor has factor matrices with only a few "large" entries in each column. The small entries should correspond to a low but nonzero probability of observing a 1, while the largest entries, if multiplied together, should correspond to a very high likelihood of observing a 1.

probrange = [0.01 0.99]; % Absolute min and max of probabilities
+oddsrange = probrange ./ (1 - probrange);
+smallval = nthroot(min(oddsrange)/nc,nd);
+largeval = nthroot(max(oddsrange)/nc,nd);
+
+A = cell(nd,1);
+for k = 1:nd
+    A{k} = smallval * ones(sz(k), nc);
+    nbig = 5;
+    for j = 1:nc
+        p = randperm(sz(k));
+        A{k}(p(1:nbig),j) = largeval;
+    end
+end
+M_true = ktensor(A);
+

Convert K-tensor to an observed tensor

Get the model values, which correspond to the odds of observing a 1.

Mfull = full(M_true);
+% Convert odds to probabilities
+Mprobs = Mfull ./ (1 + Mfull);
+% Flip a coin for each entry, with the probability of observing a one
+% dictated by Mprobs
+Xfull = 1.0*(tensor(@rand, sz) < Mprobs);
+% Convert to sparse tensor, real-valued 0/1 tensor since it was constructed
+% to be sparse
+X = sptensor(Xfull);
+fprintf('Proportion of nonzeros in X is %.2f%%\n', 100*nnz(X) / prod(sz));
+
Proportion of nonzeros in X is 8.42%
+

Just for fun, let's visualize the distribution of the probabilities in the model tensor.

histogram(Mprobs(:))
+

Call GCP_OPT on the full tensor

[M1,~,out] = gcp_opt(Xfull, nc, 'type', 'binary','printitn',25);
+fprintf('Final score: %f\n', score(M1,M_true));
+
+GCP-OPT-LBFGSB (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Generalized function Type: binary
+Objective function: @(x,m)log(m+1)-x.*log(m+1e-10)
+Gradient function: @(x,m)1./(m+1)-x./(m+1e-10)
+Lower bound of factor matrices: 0
+Optimization method: lbfgsb
+Max iterations: 1000
+Projected gradient tolerance: 21
+
+Begin Main loop
+Iter    25, f(x) = 4.498422e+04, ||grad||_infty = 7.66e+01
+Iter    50, f(x) = 4.323442e+04, ||grad||_infty = 1.27e+02
+Iter    62, f(x) = 4.309850e+04, ||grad||_infty = 1.95e+01
+End Main Loop
+
+Final objective: 4.3098e+04
+Setup time: 0.01 seconds
+Main loop time: 1.45 seconds
+Outer iterations: 62
+Total iterations: 140
+L-BFGS-B Exit message: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL.
+Final score: 0.739944
+

GCP-OPT as sparse tensor

[M2,~,out] = gcp_opt(X, nc, 'type', 'binary');
+fprintf('Final score: %f\n', score(M2,M_true));
+
+GCP-OPT-ADAM (Generalized CP Tensor Decomposition)
+
+Tensor size: 50 x 60 x 70 (210000 total entries)
+Sparse tensor: 17690 (8.4%) Nonzeros and 192310 (91.58%) Zeros
+Generalized function Type: binary
+Objective function: @(x,m)log(m+1)-x.*log(m+1e-10)
+Gradient function: @(x,m)1./(m+1)-x./(m+1e-10)
+Lower bound of factor matrices: 0
+Optimization method: adam
+Max iterations (epochs): 1000
+Iterations per epoch: 1000
+Learning rate / decay / maxfails: 0.001 0.1 1
+Function Sampler: stratified with 17690 nonzero and 17690 zero samples
+Gradient Sampler: stratified with 1000 nonzero and 1000 zero samples
+
+Begin Main loop
+Initial f-est: 7.428218e+04
+Epoch  1: f-est = 4.717953e+04, step = 0.001
+Epoch  2: f-est = 4.654388e+04, step = 0.001
+Epoch  3: f-est = 4.622779e+04, step = 0.001
+Epoch  4: f-est = 4.556995e+04, step = 0.001
+Epoch  5: f-est = 4.513884e+04, step = 0.001
+Epoch  6: f-est = 4.497412e+04, step = 0.001
+Epoch  7: f-est = 4.478373e+04, step = 0.001
+Epoch  8: f-est = 4.417746e+04, step = 0.001
+Epoch  9: f-est = 4.361799e+04, step = 0.001
+Epoch 10: f-est = 4.347086e+04, step = 0.001
+Epoch 11: f-est = 4.341842e+04, step = 0.001
+Epoch 12: f-est = 4.338054e+04, step = 0.001
+Epoch 13: f-est = 4.341225e+04, step = 0.001, nfails = 1 (resetting to solution from last epoch)
+Epoch 14: f-est = 4.336923e+04, step = 0.0001
+Epoch 15: f-est = 4.336528e+04, step = 0.0001
+Epoch 16: f-est = 4.335960e+04, step = 0.0001
+Epoch 17: f-est = 4.336279e+04, step = 0.0001, nfails = 2 (resetting to solution from last epoch)
+End Main Loop
+
+Final f-est: 4.3360e+04
+Setup time: 0.01 seconds
+Main loop time: 33.04 seconds
+Total iterations: 17000
+Final score: 0.836891
+

Create and test a Poisson count tensor.

nc = 3;
+sz = [80 90 100];
+nd = length(sz);
+paramRange = [0.5 60];
+factorRange = paramRange.^(1/nd);
+minFactorRatio = 95/100;
+lambdaDamping = 0.8;
+rng(21);
+info = create_problem('Size', sz, ...
+    'Num_Factors', nc, ...
+    'Factor_Generator', @(m,n)factorRange(1)+(rand(m,n)>minFactorRatio)*(factorRange(2)-factorRange(1)), ...
+    'Lambda_Generator', @(m,n)ones(m,1)*(lambdaDamping.^(0:n-1)'), ...
+    'Sparse_Generation', 0.2);
+
+M_true = normalize(arrange(info.Soln));
+X = info.Data;
+viz(M_true, 'Figure',3);
+
ktensor/viz: Normalizing factors and sorting components according to the 2-norm.
+

Loss function for Poisson negative log likelihood with identity link.

% Call GCP_OPT on sparse tensor
+[M1,M0,out] = gcp_opt(X, nc, 'type', 'count','printitn',25);
+fprintf('Final score: %f\n', score(M1,M_true));
+
+GCP-OPT-ADAM (Generalized CP Tensor Decomposition)
+
+Tensor size: 80 x 90 x 100 (720000 total entries)
+Sparse tensor: 123856 (17%) Nonzeros and 596144 (82.80%) Zeros
+Generalized function Type: count
+Objective function: @(x,m)m-x.*log(m+1e-10)
+Gradient function: @(x,m)1-x./(m+1e-10)
+Lower bound of factor matrices: 0
+Optimization method: adam
+Max iterations (epochs): 1000
+Iterations per epoch: 1000
+Learning rate / decay / maxfails: 0.001 0.1 1
+Function Sampler: stratified with 100000 nonzero and 100000 zero samples
+Gradient Sampler: stratified with 1000 nonzero and 1000 zero samples
+
+Begin Main loop
+Initial f-est: 4.721309e+05
+Epoch 14: f-est = 3.448798e+05, step = 0.001, nfails = 1 (resetting to solution from last epoch)
+Epoch 18: f-est = 3.447516e+05, step = 0.0001, nfails = 2 (resetting to solution from last epoch)
+End Main Loop
+
+Final f-est: 3.4475e+05
+Setup time: 0.13 seconds
+Main loop time: 34.05 seconds
+Total iterations: 18000
+Final score: 0.954415
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_fg_options_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_fg_options_doc.html new file mode 100644 index 0000000..bd53228 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/gcp_opt_fg_options_doc.html @@ -0,0 +1,425 @@ + + + + + Function Types for GCP

Function Types for GCP

The GCP capability of the Tensor Toolbox allows the user to specify a fit function. There are a number of "standard" choices that we provide via the helper function tt_gcp_fg_setup. These choices are presented in detail below. Motivations and details for these choices can be found in:

  • D. Hong, T. G. Kolda, J. A. Duersch. Generalized Canonical Polyadic Tensor Decomposition. arXiv:1808.07452, 2018. To appear in SIAM Review, 2019.

These choices can be passed directly to gcp_opt via the 'type' option. To test the options, call the hidden function:

[f,g,lowerbnd] = tt_gcp_fg_setup(type)

We discuss the choices for the type below.

Contents

Gaussian (real-valued data)

This is indicated by specifying the type as either 'normal' or 'gaussian'. This choice corresponds to standard CP, which is implemented in cp_als and cp_opt. It is useful for continuous real-valued data tensors. This choice specifies

$$f(x,m) = (x-m)^2, \quad g(x,m) = 2(m-x), \quad \ell=-\infty$$

[f,g,lowerbnd] = tt_gcp_fg_setup('normal')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)(m-x).^2
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)2.*(m-x)
+
+
+lowerbnd =
+
+  -Inf
+
+

Poisson (count data)

This is indicated by specifying the type as either 'count' or 'poisson'. This choice is useful for count data tensors, i.e., tensors that have only entries in {0,1,2,...}. This choice corresponds to Poisson CP, which is implemented in cp_apr. This choice specifies

$$f(x,m) = m - x \log(m + 10^{-10}),
\quad g(x,m) = 1 - \frac{x}{m+10^{-10}},
\quad \ell=0$$

The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors.

[f,g,lowerbnd] = tt_gcp_fg_setup('count')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)m-x.*log(m+1e-10)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)1-x./(m+1e-10)
+
+
+lowerbnd =
+
+     0
+
+

Poisson with Log Link (count data)

This is indicated by specifying the type as 'poisson-log'. This choice is useful for count data tensors, i.e., tensors that have only entries in {0,1,2,...}. This choice specifies

$$f(x,m) = e^m - x m,
\quad g(x,m) = e^m - x,
\quad \ell=-\infty$$

[f,g,lowerbnd] = tt_gcp_fg_setup('poisson-log')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)exp(m)-x.*m
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)exp(m)-x
+
+
+lowerbnd =
+
+  -Inf
+
+

Bernoulli with Odds Link (binary data)

This is indicated by specifying the type as either 'binary' or 'bernoulli-odds'. This choice is useful for binary data tensors, i.e., tensors that have only 0 or 1 entries. This choice specifies

$$f(x,m) = \log(m+1) - x \log(m + 10^{-10}),
\quad g(x,m) = \frac{1}{m+1} - \frac{x}{m+10^{-10}},
\quad \ell=0$$

The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors.

[f,g,lowerbnd] = tt_gcp_fg_setup('binary')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)log(m+1)-x.*log(m+1e-10)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)1./(m+1)-x./(m+1e-10)
+
+
+lowerbnd =
+
+     0
+
+

Bernoulli with Logit Link (binary data)

This is indicated by specifying the type as 'bernoulli-logit'. This choice is useful for binary data tensors, i.e., tensors that have only 0 or 1 entries. This choice specifies

$$f(x,m) = \log(e^m+1) - x m,
\quad g(x,m) = \frac{e^m}{e^m+1} - x,
\quad \ell=-\infty$$

[f,g,lowerbnd] = tt_gcp_fg_setup('bernoulli-logit')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)log(exp(m)+1)-x.*m
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)exp(m)./(exp(m)+1)-x
+
+
+lowerbnd =
+
+  -Inf
+
+

Rayleigh (real-valued data)

This is indicated by specifying the type 'rayleigh'. This choice is useful for nonnegative real-valued data tensors, i.e., tensors whose entries are all nonnegative. This choice specifies

$$f(x,m) = 2 \log(m+10^{-10}) + \frac{\pi}{4} \left(\frac{x}{m + 10^{-10}}\right)^2,
\quad g(x,m) = \frac{2}{m+10^{-10}} - \frac{\pi}{2} \frac{x^2}{(m + 10^{-10})^3},
\quad \ell=0$$

The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors.

[f,g,lowerbnd] = tt_gcp_fg_setup('rayleigh')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)2*log(m+1e-10)+(pi/4)*(x./(m+1e-10)).^2
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)2./(m+1e-10)-(pi/2)*x.^2./(m+1e-10).^3
+
+
+lowerbnd =
+
+     0
+
+

Gamma (nonnegative real-valued data)

This is indicated by specifying the type 'gamma'. This choice is useful for nonnegative real-valued data tensors, i.e., tensors whose entries are all nonnegative. This choice specifies

$$f(x,m) = \frac{x}{m+10^{-10}} + \log(m + 10^{-10}),
\quad g(x,m) = \frac{-x}{(m+10^{-10})^2} + \frac{1}{m + 10^{-10}},
\quad \ell=0$$

The quantity $10^{-10}$ is a fudge factor to avoid divide-by-zero errors.

[f,g,lowerbnd] = tt_gcp_fg_setup('gamma')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)x./(m+1e-10)+log(m+1e-10)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)-x./((m+1e-10).^2)+1./(m+1e-10)
+
+
+lowerbnd =
+
+     0
+
+

Huber (nonnegative real-valued data)

This is indicated by specifying the type 'huber (DELTA)', where DELTA is $\Delta$ in the equations below. This choice is useful for nonnegative real-valued data tensors, i.e., tensors whose entries are all nonnegative. This choice specifies

$$f(x,m) = \left\{ \begin{array}{ll}(x-m)^2 & \mbox{if } |x-m| \leq \Delta, \\
2\Delta|x-m|-\Delta^2 & \mbox{otherwise,}\end{array}\right.
\quad
g(x,m) = \left\{ \begin{array}{ll}-2(x-m) & \mbox{if } |x-m| \leq \Delta, \\
-2\Delta\,\mbox{sgn}(x-m) & \mbox{otherwise,}\end{array}\right.
\quad
\ell = -\infty
$$

[f,g,lowerbnd] = tt_gcp_fg_setup('huber (0.25)')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)(x-m).^2.*(abs(x-m)<0.25)+(0.5.*abs(x-m)-0.0625).*(abs(x-m)>=0.25)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)-2.*(x-m).*(abs(x-m)<0.25)-(0.5.*sign(x-m)).*(abs(x-m)>=0.25)
+
+
+lowerbnd =
+
+  -Inf
+
+

Negative Binomial (count data)

This is indicated by specifying the type 'negative-binomial (r)', where r is $r$ in the equations below. This choice is useful for count data tensors. This choice specifies

$$f(x,m) = (r+x) \log(1+m) - x \log(m+10^{-10}),
\quad
g(x,m) = \frac{(r+x)}{1+m} - \frac{x}{m+10^{-10}},
\quad
\ell = 0
$$

[f,g,lowerbnd] = tt_gcp_fg_setup('negative-binomial (4)')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)(4+x).*log(1+m)-x*log(m+1e-10)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)(5)./(1+m)-x./(m+1e-10)
+
+
+lowerbnd =
+
+     0
+
+

Beta (nonnegative real-valued data)

This is indicated by specifying the type 'beta (BETA)', where BETA is $\beta$ in the equations below. This choice is useful for nonnegative data tensors. Choices of $\beta=0$ or $\beta=1$ are not allowed because these correspond to 'gamma' or 'rayleigh'. This choice specifies

$$f(x,m) = \frac{ (m+10^{-10})^\beta }{\beta} - \frac{x(m+10^{-10})^{(\beta-1)} }{\beta-1},
\quad
g(x,m) = (m+10^{-10})^{(\beta-1)} - x(m+10^{-10})^{(\beta-2)},
\quad
\ell = 0
$$

[f,g,lowerbnd] = tt_gcp_fg_setup('beta (0.3)')
+
+f =
+
+  function_handle with value:
+
+    @(x,m)(3.33333).*(m+1e-10).^(0.3)-(-1.42857).*x.*(m+1e-10).^(-0.7)
+
+
+g =
+
+  function_handle with value:
+
+    @(x,m)(m+1e-10).^(-0.7)-x.*(m+1e-10).^(-1.7)
+
+
+lowerbnd =
+
+     0
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/getting_started.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/getting_started.html new file mode 100644 index 0000000..c9a2699 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/getting_started.html @@ -0,0 +1,85 @@ + + + + + Getting Started with the Tensor Toolbox + + + + + +
Getting Started

Download

The software has recently moved to GITLAB: http://gitlab.com/tensors/tensor_toolbox. Official releases can be obtained by visiting the Releases page. The latest development release can be obtained by cloning or downloading from the main GITLAB page. Version 2.6 and earlier can be obtained here.

Installation

  1. Unpack the files, if necessary
  2. Start MATLAB
  3. Within MATLAB, navigate to the tensor_toolbox directory and execute the following commands:
    1. addpath(pwd)
    2. savepath

Getting help

At any time, type help tensor_toolbox for help on classes or functions. You can also find a getting started guide via MATLAB's help system. Launch help by pressing the question mark button and look for Tensor Toolbox under supplemental software, as highlighted in the image below.

(Image: Navigating the MATLAB Help Screen)

What's new in Version 3.1?

Version 3.1 adds

  • New location on GITLAB plus open source BSD license.
  • New classes and functions for symmetric tensors: symtensor, symktensor, cp_sym, tucker_sym
  • New class for sums of different tensor types: sumtensor
  • Function to compute HOSVD and ST-HOSVD: hosvd
  • New function for CP with alternating randomized least squares (also known as CPRAND): cp_arls
  • New generalized CP, including stochastic version: gcp_opt
  • New method for creating binary problems where the nonnegative model tensor represents the odds of seeing a one: create_problem_binary
  • Added methods to create all-ones sparse tensor that has the same pattern as a given sparse tensor: sptensor/spones
  • New function to visualize CP-decomposed Kruskal tensors: ktensor/viz

We have also fixed many bugs. View the RELEASE_NOTES.txt file for details.

+ + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe new file mode 100644 index 0000000..55185b9 Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs new file mode 100644 index 0000000..fdddf25 Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.si b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.si new file mode 100644 index 0000000..dc8df49 Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/_1.si differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments.gen b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments.gen new file mode 100644 index 0000000..e9fa600 Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments.gen differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments_3 b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments_3 new file mode 100644 index 0000000..86faab3 Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helpsearch-v3/segments_3 differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helptoc.xml b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helptoc.xml new file mode 100644 index 0000000..3cb48fa --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/helptoc.xml @@ -0,0 +1,44 @@ + + + + + +Tensor Toolbox + +Tensor Types + Dense Tensors + Sparse Tensors + Symmetric Tensors + Kruskal Tensors + Tucker Tensors + Symmetric Kruskal Tensors + Sums of Structured Tensors + +Converting Tensors and Matrices + Dense Case + Sparse Case + +Working with Tensors + Multiplying Tensors + Mode-n Vectors + Collapsing and Scaling Tensors + Creating Test Problems + Identities + +CP Decompositions + Alternating Least Squares (ALS) + Alternating Randomized Least Squares (ARLS) + All-at-once Optimization (OPT) + Incomplete Data (WOPT) + Alternating Poisson Regression (APR) + Generalized CP Decomposition (GCP-OPT) + +Tucker Decompositions + Higher-order SVD (HOSVD) + Alternating Least Squares (ALS) + +Eigenproblem + Shifted Symmetric Higher-order Power Method + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/hosvd_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/hosvd_doc.html new file mode 100644 index 0000000..ee59e9b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/hosvd_doc.html @@ -0,0 +1,559 @@ + + + + + Computing Tucker via the HOSVD

Computing Tucker via the HOSVD

Contents

Higher-order Singular Value Decomposition (HOSVD) and Sequentially-truncated HOSVD (ST-HOSVD)

The HOSVD computes a Tucker decomposition of a tensor via a simple process. For each mode k, it computes the r_k leading left singular vectors of the mode-k matrix unfolding and stores them as the factor matrix U_k. It then computes a ttm of the original tensor with all of the factor matrices to yield a core of size r_1 x r_2 x ... x r_d. The core and factor matrices are used to form the ttensor. The values of r_k that lead to a good approximation can be computed automatically to meet a specified error tolerance; this is recommended and is the default in our code. The ST-HOSVD is an improvement on the HOSVD that applies the ttm in each mode before moving on to the next mode. This has the advantage of shrinking the tensor at each step and reducing subsequent computations. ST-HOSVD is the default in the hosvd code.

  • L. R. Tucker, Some mathematical notes on three-mode factor analysis, Psychometrika 31:279-311, 1966, doi:10.1007/BF02289464
  • L. D. Lathauwer, B. D. Moor and J. Vandewalle, A multilinear singular value decomposition, SIAM J. Matrix Analysis and Applications 21(4):1253-1278, 2000, doi:10.1137/S0895479896305696
  • N. Vannieuwenhoven, R. Vandebril and K. Meerbergen, A New Truncation Strategy for the Higher-Order Singular Value Decomposition, SIAM J. Scientific Computing 34(2):A1027-A1052, 2012, doi:10.1137/110836067
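
Before the usage example below, here is a minimal sketch of the fixed-rank ST-HOSVD recipe just described. This is a hand-rolled illustration, not the toolbox's adaptive hosvd implementation; the test tensor X and the target ranks r are assumed purely for illustration.

X = tenrand([50 40 30]);            % assumed test tensor
r = [5 4 3];                        % assumed target ranks
T = X;
U = cell(ndims(X),1);
for k = 1:ndims(X)
    [Uk,~,~] = svd(double(tenmat(T,k)),'econ'); % mode-k unfolding
    U{k} = Uk(:,1:r(k));            % leading left singular vectors
    T = ttm(T,U{k}',k);             % shrink the tensor before the next mode
end
T_sthosvd = ttensor(T,U);           % core plus factor matrices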

Simple example of usage

% Create random 50 x 40 x 30 tensor with 5 x 4 x 3 core
+info = create_problem('Type','Tucker','Num_Factors',[5 4 3],'Size',[50 40 30],'Noise',0.01);
+X = info.Data;
+
+% Compute HOSVD with desired relative error = 0.1
+T = hosvd(X,0.1);
+
+% Check size of core
+coresize = size(T.core)
+
+% Check relative error
+relerr = norm(X-full(T))/norm(X)
+
Computing HOSVD...
+Size of core: 5 x 4 x 3
+||X-T||/||X|| = 0.00995932 <=0.100000 (tol)
+
+
+coresize =
+
+     5     4     3
+
+
+relerr =
+
+    0.0100
+
+

Generate a core with different accuracies for different sizes

We will create a core tensor that is nearly block diagonal. The blocks are exponentially decreasing in norm, with the idea that we can pick off one block at a time as we increase the prescribed accuracy of the HOSVD. To do this, we use tenrandblk.

% Block sizes (need not be cubic). Number of rows is the number
+% of levels and number of columns is the order of the tensor.
+bsz = [3 2 1; 2 2 2; 2 3 4];
+
+% Squared norm of each block. Must be length L and sum to <= 1
+bsn = [.9 .09 .009]';
+
+% Create core tensor with given block structure and norm 1
+G = tenrandblk(bsz,bsn,true);
+
Created block of size 3 x 2 x 1 with norm (0.948683)^2=0.900000
+Created block of size 2 x 2 x 2 with norm (0.300000)^2=0.090000
+Created block of size 2 x 3 x 4 with norm (0.094868)^2=0.009000
+Created tensor of size 7 x 7 x 7 with off-block-diaognal norm (0.031623)^2=0.001000
+
fprintf('Size of G: %s\n', tt_size2str(size(G)));
+
Size of G: 7 x 7 x 7
+

Generate data tensor with core described above

We take the core G and embed it into a larger tensor X by using orthogonal transformations. The true rank of this tensor is equal to the size of G.

% Size of X
+xsz = [20 20 20];
+
+% Create orthogonal matrices
+U = cell(3,1);
+for k = 1:3
+    V = matrandorth(xsz(k));
+    U{k} = V(:,1:size(G,k));
+end
+
+% Create X
+X = full(ttensor(G,U));
+
+% The norm should be unchanged
+fprintf('||X||=%f\n',norm(X));
+
||X||=1.000000
+

Compute (full) HOSVD

We compute the ST-HOSVD using the hosvd method, specifying a tolerance close to machine precision. Ideally, it finds a core that is the same size as G.

fprintf('ST-HOSVD...\n');
+T = hosvd(X,2*sqrt(eps));
+
ST-HOSVD...
+Computing HOSVD...
+Size of core: 7 x 7 x 7
+||X-T||/||X|| = 4.50378e-15 <=0.000000 (tol)
+
+

Compute low-rank HOSVD approximation

The norm squared of the first two blocks of G is 0.99, so specifying an error of 1e-2 should yield a core of size 4 x 4 x 3, since the discarded block and the off-block entries together account for a squared norm of only about 0.01. However, the conservative nature of the algorithm means that it may pick something larger. We can compensate by specifying a larger tolerance.

% Using 1e-2 exactly is potentially too conservative...
+fprintf('Result with tol = sqrt(1e-2):\n');
+T = hosvd(X, sqrt(1e-2),'verbosity',1);
+
+% But a small multiple (i.e., |ndims(X)|) usually works...
+fprintf('Result with tol = sqrt(3e-2):\n');
+T = hosvd(X, sqrt(3e-2),'verbosity',1);
+
Result with tol = sqrt(1e-2):
+Computing HOSVD...
+Size of core: 6 x 6 x 5
+||X-T||/||X|| = 0.0577333 <=0.100000 (tol)
+
+Result with tol = sqrt(3e-2):
+Computing HOSVD...
+Size of core: 4 x 4 x 3
+||X-T||/||X|| = 0.0988936 <=0.173205 (tol)
+
+

Similarly, the norm squared of the first block of G is 0.9, so specifying an error of 1e-1 should result in a core of size 3 x 2 x 1.

% Using 1e-1 exactly is potentially too conservative...
+fprintf('Result with tol = sqrt(1e-1):\n');
+T = hosvd(X, sqrt(1e-1),'verbosity',1);
+
+% But a small multiple (i.e., |ndims(X)|) usually works...
+fprintf('Result with tol = sqrt(3e-1):\n');
+T = hosvd(X, sqrt(3e-1),'verbosity',1);
+
Result with tol = sqrt(1e-1):
+Computing HOSVD...
+Size of core: 4 x 4 x 3
+||X-T||/||X|| = 0.0988936 <=0.316228 (tol)
+
+Result with tol = sqrt(3e-1):
+Computing HOSVD...
+Size of core: 2 x 2 x 1
+||X-T||/||X|| = 0.316115 <=0.547723 (tol)
+
+

Verbosity - Getting more or less information.

Setting the verbosity to zero suppresses all output. Cranking up the verbosity gives some insight into the decision-making process...

% Example 1
+T = hosvd(X, sqrt(3e-1),'verbosity',10);
+
Computing HOSVD...
+||X||^2 = 1
+tol = 0.547723
+eigenvalue sum threshold = tol^2 ||X||^2 / d = 0.1
+Reverse cummulative sum of evals of Gram matrix:
+1: 1.0000 
+2: 0.3963 <-- Cutoff
+3: 0.0996 
+4: 0.0532 
+5: 0.0093 
+6: 0.0039 
+7: 0.0001 
+8: 0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Reverse cummulative sum of evals of Gram matrix:
+1: 0.9004 
+2: 0.2968 <-- Cutoff
+3: 0.0003 
+4: 0.0002 
+5: 0.0001 
+6: 0.0000 
+7: 0.0000 
+8: 0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Reverse cummulative sum of evals of Gram matrix:
+1: 0.9001 <-- Cutoff
+2: 0.0001 
+3: 0.0000 
+4: 0.0000 
+5: 0.0000 
+6: -0.0000 
+7: -0.0000 
+8: -0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Size of core: 2 x 2 x 1
+||X-T||/||X|| = 0.316115 <=0.547723 (tol)
+
+

Example 2

T = hosvd(X, sqrt(3*eps),'verbosity',10);
+
Computing HOSVD...
+||X||^2 = 1
+tol = 2.58096e-08
+eigenvalue sum threshold = tol^2 ||X||^2 / d = 2.22045e-16
+Reverse cummulative sum of evals of Gram matrix:
+1: 1.0000 
+2: 0.3963 
+3: 0.0996 
+4: 0.0532 
+5: 0.0093 
+6: 0.0039 
+7: 0.0001 <-- Cutoff
+8: 0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Reverse cummulative sum of evals of Gram matrix:
+1: 1.0000 
+2: 0.3963 
+3: 0.0997 
+4: 0.0531 
+5: 0.0094 
+6: 0.0042 
+7: 0.0015 <-- Cutoff
+8: -0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Reverse cummulative sum of evals of Gram matrix:
+1: 1.0000 
+2: 0.0998 
+3: 0.0533 
+4: 0.0095 
+5: 0.0056 
+6: 0.0032 
+7: 0.0016 <-- Cutoff
+8: -0.0000 
+9: -0.0000 
+10: -0.0000 
+11: -0.0000 
+12: -0.0000 
+13: -0.0000 
+14: -0.0000 
+15: -0.0000 
+16: -0.0000 
+17: -0.0000 
+18: -0.0000 
+19: -0.0000 
+20: -0.0000 
+Size of core: 7 x 7 x 7
+||X-T||/||X|| = 4.50378e-15 <=0.000000 (tol)
+
+

Specify the ranks

If you know the rank you want, you can specify it. But there's no guarantee that it will satisfy the specified tolerance. In such cases, the method will throw a warning.

% Rank is okay
+T = hosvd(X,sqrt(3e-1),'ranks',bsz(1,:));
+
+% Rank is too small for the specified error
+T = hosvd(X,sqrt(3e-1),'ranks',[1 1 1]);
+
+% But you can set the error to the tensor norm to make the warning go away
+T = hosvd(X,norm(X),'ranks',[1 1 1]);
+
Computing HOSVD...
+Size of core: 3 x 2 x 1
+||X-T||/||X|| = 0.316113 <=0.547723 (tol)
+
+Computing HOSVD...
+Size of core: 1 x 1 x 1
+Tolerance not satisfied!! ||X-T||/||X|| = 0.629625 >=0.547723 (tol)
+Warning: Specified tolerance was not achieved 
+
+Computing HOSVD...
+Size of core: 1 x 1 x 1
+||X-T||/||X|| = 0.629625 <=1.000000 (tol)
+
+

Specify the mode order

It's also possible to specify the order of the modes. The default is 1:ndims(X).

T = hosvd(X,sqrt(3e-1),'dimorder',ndims(X):-1:1);
+
Computing HOSVD...
+Size of core: 2 x 2 x 1
+||X-T||/||X|| = 0.316106 <=0.547723 (tol)
+
+

Generate bigger data tensor with core described above

Uses the same procedure as before, but now the size is bigger.

% Size of Y
+ysz = [100 100 100];
+
+% Create orthogonal matrices
+U = cell(3,1);
+for k = 1:3
+    V = matrandorth(ysz(k));
+    U{k} = V(:,1:size(G,k));
+end
+
+% Create Y
+Y = full(ttensor(G,U));
+

ST-HOSVD compared to HOSVD

The answers are essentially the same for the sequentially-truncated HOSVD and the HOSVD...

fprintf('ST-HOSVD...\n');
+T = hosvd(Y,2*sqrt(eps));
+fprintf('HOSVD...\n');
+T = hosvd(Y,2*sqrt(eps),'sequential',false);
+
ST-HOSVD...
+Computing HOSVD...
+Size of core: 7 x 7 x 7
+||X-T||/||X|| = 2.78285e-15 <=0.000000 (tol)
+
+HOSVD...
+Computing HOSVD...
+Size of core: 7 x 7 x 7
+||X-T||/||X|| = 2.27855e-15 <=0.000000 (tol)
+
+

But ST-HOSVD may be slightly faster than HOSVD for larger tensors.

fprintf('Time for 10 runs of ST-HOSVD:\n');
+tic, for i =1:10, T = hosvd(Y,2*sqrt(eps),'verbosity',0); end; toc
+
+fprintf('Time for 10 runs of HOSVD:\n');
+tic, for i =1:10, T = hosvd(Y,2*sqrt(eps),'verbosity',0,'sequential',false); end; toc
+
Time for 10 runs of ST-HOSVD:
+Elapsed time is 0.194390 seconds.
+Time for 10 runs of HOSVD:
+Elapsed time is 0.405164 seconds.
+
\ No newline at end of file
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/identities_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/identities_doc.html
new file mode 100644 index 0000000..ee2e0b3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/identities_doc.html @@ -0,0 +1,815 @@

Identities and relationships of tensors

There are many mathematical relationships, identities, and connections among tensors. These identities are presented here and show the versatility of the Tensor Toolbox. The propositions indicated below are references to the following report:

T.G. Kolda, "Multilinear operators for higher-order decompositions", Tech. Rep. SAND2006-2081, Sandia National Laboratories, April 2006, http://csmr.ca.sandia.gov/~tgkolda/pubs/index.html#SAND2006-2081.

Contents

N-mode product properties

Create some data.

Y = tenrand([4 3 2]);
+A = rand(3,4);
+B = rand(3,3);
+

Prop 3.4(a): The order of the multiplication in different modes is irrelevant.

$$(Y \times_1 A) \times_2 B = (Y \times_2 B) \times_1 A$$

X1 = ttm( ttm(Y,A,1), B, 2); %<-- Y x_1 A x_2 B
+X2 = ttm( ttm(Y,B,2), A, 1); %<-- Y x_2 B x_1 A
+norm(X1 - X2) %<-- difference is zero
+
+ans =
+
+   4.6278e-16
+
+

N-mode product and matricization

Generate some data to work with.

Y = tenrand([5 4 3]);
+A = rand(4,5); B = rand(3,4); C = rand(2,3); U = {A,B,C};
+

Prop. 3.7a: N-mode multiplication can be expressed in terms of matricized tensors.

$$X = Y \times_n U \Leftrightarrow  X_{(n)} = UY_{(n)} $$

for n = 1:ndims(Y)
+  X = ttm(Y,U,n); %<-- X = Y x_n U{n}
+  Xn = U{n} * tenmat(Y,n); %<-- Xn = U{n} * Yn
+  norm(tenmat(X,n) - Xn)  % <-- should be zero
+end
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+

Prop. 3.7b: The matricization can be done in various ways, and the results remain equivalent.

X = ttm(Y,U); %<-- X = Y x_1 A x_2 B x_3 C
+Xm1 = kron(B,A)*tenmat(Y,[1 2])*C';  %<-- Kronecker product version
+Xm2 = tenmat(X,[1 2]); %<-- Matriczed version
+norm(Xm1 - Xm2)  % <-- should be zero
+Xm1 = B * tenmat(Y,2,[3 1]) * kron(A,C)'; %<-- Kronecker product version
+Xm2 = tenmat(X,2,[3 1]); %<-- Matricized version
+norm(Xm1 - Xm2) % <-- should be zero
+Xm1 = tenmat(Y,[],[1 2 3]) * kron(kron(C,B),A)'; %<-- Vectorized via Kronecker
+Xm2 = tenmat(X,[],[1 2 3]); %<-- Vectorized via matricize
+norm(Xm1 - Xm2)
+
+ans =
+
+   1.6616e-15
+
+
+ans =
+
+   2.1756e-15
+
+
+ans =
+
+   2.8087e-15
+
+

Norm of difference between two tensors

Prop. 3.9: For tensors X and Y, we have:

$$\|X-Y\|^2 = \|X\|^2 + \|Y\|^2 - 2<X,Y> $$

X = tenrand([5 4 3]); Y = tenrand([5 4 3]);
+% The following 2 results should be equal
+norm(X-Y)
+sqrt(norm(X)^2 - 2*innerprod(X,Y) + norm(Y)^2)
+
+ans =
+
+    2.9316
+
+
+ans =
+
+    2.9316
+
+

This relationship makes it convenient to compute the norm of the difference between two tensor objects of different types. Suppose we have an sptensor and a ktensor and need the norm of their difference, e.g., to check for convergence, but converting either to a full (dense) tensor would be very expensive. Because innerprod and norm are defined for all tensor types, this formula is handy.
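
As a small illustration of this point (a hypothetical helper, not a toolbox function), the formula can be wrapped in an anonymous function so that no dense tensor is ever formed; it relies only on norm and innerprod being defined for the classes involved:

normdiff = @(X,Y) sqrt(norm(X)^2 - 2*innerprod(X,Y) + norm(Y)^2); % hypothetical helper

For the sptensor and ktensor used below, normdiff(X,Y) should agree with norm(full(X)-full(Y)) up to roundoff.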

X = sptensor(X);
+Y = ktensor({[1:5]',[1:4]',[1:3]'});
+% The following 2 results should be equal
+norm(full(X)-full(Y))
+sqrt(norm(X)^2 - 2*innerprod(X,Y) + norm(Y)^2)
+
+ans =
+
+  148.5941
+
+
+ans =
+
+  148.5941
+
+

Tucker tensor properties

The properties of the Tucker operator follow directly from the properties of n-mode multiplication.

% Initialize data
+Y = tensor(1:24,[4 3 2]);
+A1 = reshape(1:20,[5 4]);
+A2 = reshape(1:12,[4 3]);
+A3 = reshape(1:6,[3 2]);
+A = {A1,A2,A3};
+B1 = reshape(1:20,[4 5]);
+B2 = reshape(1:12,[3 4]);
+B3 = reshape(1:6,[2 3]);
+B = {B1,B2,B3};
+

Proposition 4.2a

X = ttensor(ttensor(Y,A),B)
+
X is a ttensor of size 4 x 3 x 2
+	X.core is a ttensor of size 5 x 4 x 3
+		X.core.core is a tensor of size 4 x 3 x 2
+			X.core.core(:,:,1) = 
+	     1     5     9
+	     2     6    10
+	     3     7    11
+	     4     8    12
+			X.core.core(:,:,2) = 
+	    13    17    21
+	    14    18    22
+	    15    19    23
+	    16    20    24
+		X.core.U{1} = 
+		     1     6    11    16
+		     2     7    12    17
+		     3     8    13    18
+		     4     9    14    19
+		     5    10    15    20
+		X.core.U{2} = 
+		     1     5     9
+		     2     6    10
+		     3     7    11
+		     4     8    12
+		X.core.U{3} = 
+		     1     4
+		     2     5
+		     3     6
+	X.U{1} = 
+		     1     5     9    13    17
+		     2     6    10    14    18
+		     3     7    11    15    19
+		     4     8    12    16    20
+	X.U{2} = 
+		     1     4     7    10
+		     2     5     8    11
+		     3     6     9    12
+	X.U{3} = 
+		     1     3     5
+		     2     4     6
+
AB = {B1*A1, B2*A2, B3*A3};
+Y = ttensor(Y,AB)
+
Y is a ttensor of size 4 x 3 x 2
+	Y.core is a tensor of size 4 x 3 x 2
+		Y.core(:,:,1) = 
+	     1     5     9
+	     2     6    10
+	     3     7    11
+	     4     8    12
+		Y.core(:,:,2) = 
+	    13    17    21
+	    14    18    22
+	    15    19    23
+	    16    20    24
+	Y.U{1} = 
+		         175         400         625         850
+		         190         440         690         940
+		         205         480         755        1030
+		         220         520         820        1120
+	Y.U{2} = 
+		    70   158   246
+		    80   184   288
+		    90   210   330
+	Y.U{3} = 
+		    22    49
+		    28    64
+
norm(full(X)-full(Y))  %<-- should be zero
+
+ans =
+
+     0
+
+

Proposition 4.2b

Y = tensor(1:24,[4 3 2]);
+X = ttensor(Y,A);
+Apinv = {pinv(A1),pinv(A2),pinv(A3)};
+Y2 = ttensor(full(X),Apinv);
+norm(full(Y)-full(Y2))  %<-- should be zero
+
+ans =
+
+   3.8576e-13
+
+

Proposition 4.2c

Y = tensor(1:24,[4 3 2]);
+rand('state',0);
+Q1 = orth(rand(5,4));
+Q2 = orth(rand(4,3));
+Q3 = orth(rand(3,2));
+Q = {Q1,Q2,Q3};
+X = ttensor(Y,Q)
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 4 x 3 x 2
+		X.core(:,:,1) = 
+	     1     5     9
+	     2     6    10
+	     3     7    11
+	     4     8    12
+		X.core(:,:,2) = 
+	    13    17    21
+	    14    18    22
+	    15    19    23
+	    16    20    24
+	X.U{1} = 
+		   -0.4727    0.5608    0.0275   -0.3954
+		   -0.4394   -0.4243    0.3178    0.5707
+		   -0.4659   -0.6116   -0.1037   -0.5982
+		   -0.4209    0.3259    0.5458    0.1138
+		   -0.4350    0.1587   -0.7679    0.3837
+	X.U{2} = 
+		   -0.2570    0.1257    0.8908
+		   -0.3751   -0.2111    0.2636
+		   -0.4640   -0.7988   -0.1591
+		   -0.7602    0.5492   -0.3341
+	X.U{3} = 
+		   -0.3907   -0.0625
+		   -0.8045   -0.4616
+		   -0.4473    0.8849
+
Qt = {Q1',Q2',Q3'};
+Y2 = ttensor(full(X),Qt)
+norm(full(Y)-full(Y2))  %<-- should be zero
+
Y2 is a ttensor of size 4 x 3 x 2
+	Y2.core is a tensor of size 5 x 4 x 3
+		Y2.core(:,:,1) = 
+	    1.4195   -0.0317   -1.4127   -0.3848
+	   -0.7708    0.2767    1.3323    0.4969
+	    8.6788   -0.0536   -8.3316   -2.1970
+	   -3.0735    0.1529    3.2424    0.9267
+	    2.9652    0.0889   -2.6130   -0.6316
+		Y2.core(:,:,2) = 
+	    4.6979   -0.3995   -5.3170   -1.6004
+	   -2.2186    0.8006    3.8440    1.4349
+	   28.9023   -2.1266  -31.9901   -9.4788
+	  -10.0638    1.0545   11.8230    3.6490
+	   10.0122   -0.4854  -10.5344   -3.0047
+		Y2.core(:,:,3) = 
+	   -3.4733    0.9238    5.3001    1.8807
+	    0.9310   -0.3464   -1.6360   -0.6138
+	  -21.7522    5.7319   33.0762   11.7190
+	    7.2102   -1.9498  -11.0723   -3.9398
+	   -7.8266    2.0225   11.8142    4.1724
+	Y2.U{1} = 
+		   -0.4727   -0.4394   -0.4659   -0.4209   -0.4350
+		    0.5608   -0.4243   -0.6116    0.3259    0.1587
+		    0.0275    0.3178   -0.1037    0.5458   -0.7679
+		   -0.3954    0.5707   -0.5982    0.1138    0.3837
+	Y2.U{2} = 
+		   -0.2570   -0.3751   -0.4640   -0.7602
+		    0.1257   -0.2111   -0.7988    0.5492
+		    0.8908    0.2636   -0.1591   -0.3341
+	Y2.U{3} = 
+		   -0.3907   -0.8045   -0.4473
+		   -0.0625   -0.4616    0.8849
+
+ans =
+
+   3.8190e-14
+
+

Tucker operator and matricized tensors

The Tucker operator also has various expressions in terms of matricized tensors and the Kronecker product. Proposition 4.3a

Y = tensor(1:24,[4 3 2]);
+A1 = reshape(1:20,[5 4]);
+A2 = reshape(1:12,[4 3]);
+A3 = reshape(1:6,[3 2]);
+A = {A1,A2,A3};
+X = ttensor(Y,A)
+for n = 1:ndims(Y)
+  rdims = n;
+  cdims = setdiff(1:ndims(Y),rdims);
+  Xn = A{n} * tenmat(Y,rdims,cdims) * kron(A{cdims(2)}, A{cdims(1)})';
+  norm(tenmat(full(X),rdims,cdims) - Xn)  % <-- should be zero
+end
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 4 x 3 x 2
+		X.core(:,:,1) = 
+	     1     5     9
+	     2     6    10
+	     3     7    11
+	     4     8    12
+		X.core(:,:,2) = 
+	    13    17    21
+	    14    18    22
+	    15    19    23
+	    16    20    24
+	X.U{1} = 
+		     1     6    11    16
+		     2     7    12    17
+		     3     8    13    18
+		     4     9    14    19
+		     5    10    15    20
+	X.U{2} = 
+		     1     5     9
+		     2     6    10
+		     3     7    11
+		     4     8    12
+	X.U{3} = 
+		     1     4
+		     2     5
+		     3     6
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+

Orthogonalization of Tucker factors

Proposition 4.4

Y = tensor(1:24,[4 3 2]);
+A1 = rand(5,4);
+A2 = rand(4,3);
+A3 = rand(3,2);
+A = {A1,A2,A3};
+X = ttensor(Y,A)
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 4 x 3 x 2
+		X.core(:,:,1) = 
+	     1     5     9
+	     2     6    10
+	     3     7    11
+	     4     8    12
+		X.core(:,:,2) = 
+	    13    17    21
+	    14    18    22
+	    15    19    23
+	    16    20    24
+	X.U{1} = 
+		    0.2026    0.3795    0.3046    0.5417
+		    0.6721    0.8318    0.1897    0.1509
+		    0.8381    0.5028    0.1934    0.6979
+		    0.0196    0.7095    0.6822    0.3784
+		    0.6813    0.4289    0.3028    0.8600
+	X.U{2} = 
+		    0.8537    0.8216    0.3420
+		    0.5936    0.6449    0.2897
+		    0.4966    0.8180    0.3412
+		    0.8998    0.6602    0.5341
+	X.U{3} = 
+		    0.7271    0.5681
+		    0.3093    0.3704
+		    0.8385    0.7027
+
[Q1,R1] = qr(A1);
+[Q2,R2] = qr(A2);
+[Q3,R3] = qr(A3);
+R = {R1,R2,R3};
+Z = ttensor(Y,R);
+norm(X) - norm(Z)  %<-- should be zero
+
+ans =
+
+     0
+
+

Kruskal operator properties

Proposition 5.2

A1 = reshape(1:10,[5 2]);
+A2 = reshape(1:8,[4 2]);
+A3 = reshape(1:6,[3 2]);
+K = ktensor({A1,A2,A3});
+B1 = reshape(1:20,[4 5]);
+B2 = reshape(1:12,[3 4]);
+B3 = reshape(1:6,[2 3]);
+X = ttensor(K,{B1,B2,B3})
+
+Y = ktensor({B1*A1, B2*A2, B3*A3});
+norm(full(X) - full(Y))  %<-- should be zero
+
X is a ttensor of size 4 x 3 x 2
+	X.core is a ktensor of size 5 x 4 x 3
+		X.core.lambda = [ 1  1 ]
+		X.core.U{1} = 
+		     1     6
+		     2     7
+		     3     8
+		     4     9
+		     5    10
+		X.core.U{2} = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+		X.core.U{3} = 
+		     1     4
+		     2     5
+		     3     6
+	X.U{1} = 
+		     1     5     9    13    17
+		     2     6    10    14    18
+		     3     7    11    15    19
+		     4     8    12    16    20
+	X.U{2} = 
+		     1     4     7    10
+		     2     5     8    11
+		     3     6     9    12
+	X.U{3} = 
+		     1     3     5
+		     2     4     6
+
+ans =
+
+     0
+
+

Proposition 5.3a (second part)

A1 = reshape(1:10,[5 2]);
+A2 = reshape(1:8,[4 2]);
+A3 = reshape(1:6,[3 2]);
+A = {A1,A2,A3};
+X = ktensor(A);
+rdims = 1:ndims(X);
+Z = double(tenmat(full(X), rdims, []));
+Xn = khatrirao(A{rdims},'r') * ones(length(X.lambda),1);
+norm(Z - Xn)  % <-- should be zero
+
+ans =
+
+     0
+
+
cdims = 1:ndims(X);
+Z = double(tenmat(full(X), [], cdims));
+Xn = ones(length(X.lambda),1)' * khatrirao(A{cdims},'r')';
+norm(Z - Xn)  % <-- should be zero
+
+ans =
+
+     0
+
+

Proposition 5.3b

A1 = reshape(1:10,[5 2]);
+A2 = reshape(1:8,[4 2]);
+A3 = reshape(1:6,[3 2]);
+A = {A1,A2,A3};
+X = ktensor(A);
+for n = 1:ndims(X)
+  rdims = n;
+  cdims = setdiff(1:ndims(X),rdims);
+  Xn = khatrirao(A{rdims}) * khatrirao(A{cdims},'r')';
+  Z = double(tenmat(full(X),rdims,cdims));
+  norm(Z - Xn)  % <-- should be zero
+end
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+

Proposition 5.3a (first part)

X = ktensor(A);
+for n = 1:ndims(X)
+  cdims = n;
+  rdims = setdiff(1:ndims(X),cdims);
+  Xn = khatrirao(A{rdims},'r') * khatrirao(A{cdims})';
+  Z = double(tenmat(full(X),rdims,cdims));
+  norm(Z - Xn)  % <-- should be zero
+end
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+
+ans =
+
+     0
+
+

Norm of Kruskal operator

The norm of a ktensor has a special form because it can be reduced to summing the entries of the Hadamard product of N matrices of size R x R. Proposition 5.4
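
As a sketch of the relationship the code below verifies (the standard identity for a ktensor with weight vector lambda and factor matrices A_1, ..., A_N, stated here for reference):

$$\|{\mathcal X}\|^2 = \lambda^\top \left( A_1^\top A_1 \ast A_2^\top A_2 \ast \cdots \ast A_N^\top A_N \right) \lambda$$

Here the asterisk denotes the elementwise (Hadamard) product. With unit weights, as in the example below, this reduces to summing all entries of the Hadamard product M.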

A1 = reshape(1:10,[5 2]);
+A2 = reshape(1:8,[4 2]);
+A3 = reshape(1:6,[3 2]);
+A = {A1,A2,A3};
+X = ktensor(A);
+M = ones(size(A{1},2), size(A{1},2));
+for i = 1:numel(A)
+  M = M .* (A{i}'*A{i});
+end
+norm(X) - sqrt(sum(M(:)))  %<-- should be zero
+
+ans =
+
+     0
+
+

Inner product of Kruskal operator with a tensor

The inner product of a ktensor with a tensor can be computed efficiently, as stated in Proposition 5.5.
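
One way to state the identity that the code below checks with unit weights (the circled dot denotes the Khatri-Rao product, and the reversed ordering matches MATLAB's vectorization, in which the first mode varies fastest):

$$\langle {\mathcal X}, {\mathcal K} \rangle = \mathrm{vec}({\mathcal X})^\top \left( A_N \odot \cdots \odot A_1 \right) \lambda$$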

X = tensor(1:60,[5 4 3]);
+A1 = reshape(1:10,[5 2]);
+A2 = reshape(2:9,[4 2]);
+A3 = reshape(3:8,[3 2]);
+A = {A1,A2,A3};
+K = ktensor(A);
+v = khatrirao(A,'r') * ones(size(A{1},2),1);
+% The following 2 results should be equal
+double(tenmat(X,1:ndims(X),[]))' * v
+innerprod(X,K)
+
+ans =
+
+      935340
+
+
+ans =
+
+      935340
+
+
\ No newline at end of file
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/index.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/index.html
new file mode 100644 index 0000000..5874ce1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/index.html @@ -0,0 +1,140 @@
+

Tensor Toolbox for MATLAB

+ +

Tensors (also known as multidimensional arrays or N-way arrays) are used in a variety of applications ranging from chemometrics to network analysis.

+

+ +

Functionality

+ + +

The Tensor Toolbox provides the following classes and functions for manipulating dense, sparse, and structured tensors using MATLAB's object-oriented features. This documentation is provided via the MATLAB help interface under "Supplemental Software".

+ +
    +
  • Tensor Types - The Tensor Toolbox supports multiple tensor types, including dense, sparse, and symmetric tensors, as well as specially structured tensors such as Tucker format (core tensor plus factor matrices), Kruskal format (stored as factor matrices), and sum format (sum of different types of tensors, such as a sparse tensor plus a rank-1 tensor in Kruskal format).
  • Converting Tensors and Matrices - The Tensor Toolbox includes special matrix classes to enable conversion to/from tensors.
  • Working with Tensors - Creating test problems, tensor multiplication, and more.
  • CP Decompositions - CP methods such as alternating least squares, direct optimization, and weighted optimization (for missing data). Also alternative decompositions such as Poisson Tensor Factorization via alternating Poisson regression and symmetric CP tensor factorization.
  • Tucker Decomposition - Tucker methods including the higher-order SVD (HOSVD), the sequentially-truncated HOSVD (ST-HOSVD), and the higher-order orthogonal iteration (HOOI).
  • Eigenproblems - Methods to solve the tensor eigenproblem, including the shifted symmetric higher-order power method (SSHOPM) and the adaptive shift version (GEAP).
+ +

How to Cite

+ +

Because it helps us to show the relevance of this work, if you use the Tensor Toolbox in your work in any way, please cite the software itself along with at least one publication or preprint. The help and documentation will generally suggest the appropriate reference, but the three primary references are given below. We provide BibTeX source for each suggested citation. Thanks very much for your support.

+ +
    +
  • General software: Brett W. Bader, Tamara G. Kolda and others. MATLAB Tensor Toolbox, Version [VERSION]. Available online at https://www.tensortoolbox.org, 20XX. [TTB_Software]
  • Dense tensors: B. W. Bader and T. G. Kolda. Algorithm 862: MATLAB tensor classes for fast algorithm prototyping, ACM Transactions on Mathematical Software 32(4):635-653, December 2006. DOI: 10.1145/1186785.1186794. [TTB_Dense]
  • Sparse, Kruskal, and Tucker tensors: B. W. Bader and T. G. Kolda. Efficient MATLAB computations with sparse and factored tensors, SIAM Journal on Scientific Computing 30(1):205-231, December 2007. DOI: 10.1137/060676489. [TTB_Sparse]
+ +

Consider adding the short hash for the exact version that was used. If you clone the repository, use the command git log --pretty=format:'%h' -n 1. If you download, the long hash is baked into the filename, but you need only use the first 8 characters.

+ +

How to Contribute

+ +

This is an open-source project hosted on GITLAB at http://gitlab.com/tensors/tensor_toolbox. Visit this website to submit bug reports and suggestions for improvement.

+ + +

Contact

Please email tensortoolbox@sandia.gov with any questions about the toolbox that cannot be resolved via issue reporting. Stories of its usefulness are especially welcome. We will try to respond to every email but may not always be successful due to the volume of email.
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ktensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ktensor_doc.html
new file mode 100644 index 0000000..3084cd7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ktensor_doc.html @@ -0,0 +1,783 @@

Kruskal tensors

Kruskal format is a decomposition of a tensor X as a sum of outer products of the columns of matrices. For example, we might write

$${\mathcal X} = \sum_r a_r \circ b_r \circ c_r$$

where a subscript denotes column index and a circle denotes outer product. In other words, the tensor X is built from the columns of the matrices A,B, and C. It's often helpful to explicitly specify a weight for each outer product, which we do here:

$${\mathcal X} = \sum_r \lambda_r \; a_r \circ b_r \circ c_r$$

The ktensor class stores the components of the tensor X and can perform many operations, e.g., ttm, without explicitly forming the tensor X.
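
For instance, a single entry of X can be assembled directly from the factors. The following is a minimal sketch with assumed illustration data, not toolbox code:

lambda = [2; 0.5]; A = rand(4,2); B = rand(3,2); C = rand(2,2); % assumed data
i = 1; j = 2; k = 1;                 % entry to evaluate
xijk = 0;
for r = 1:length(lambda)
    xijk = xijk + lambda(r)*A(i,r)*B(j,r)*C(k,r); % sum over the rank-1 terms
end
% xijk should match the (i,j,k) entry of full(ktensor(lambda,{A,B,C}))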

Contents

Kruskal tensor format via ktensor

Kruskal format stores a tensor as a sum of rank-1 outer products. For example, consider a tensor of the following form.

$$X = a_1 \circ b_1 \circ c_1 + a_2 \circ b_2 \circ c_2$$

This can be stored in Kruskal form as follows.

rand('state',0);
+A = rand(4,2); %<-- First column is a_1, second is a_2.
+B = rand(3,2); %<-- Likewise for B.
+C = rand(2,2); %<-- Likewise for C.
+X = ktensor({A,B,C}) %<-- Create the ktensor.
+
X is a ktensor of size 4 x 3 x 2
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	X.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	X.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+

For Kruskal format, there can be any number of matrices, but every matrix must have the same number of columns. The number of rows can vary.

Y = ktensor({rand(4,1),rand(2,1),rand(3,1)}) %<-- Another ktensor.
+
Y is a ktensor of size 4 x 2 x 3
+	Y.lambda = [ 1 ]
+	Y.U{1} = 
+		    0.4103
+		    0.8936
+		    0.0579
+		    0.3529
+	Y.U{2} = 
+		    0.8132
+		    0.0099
+	Y.U{3} = 
+		    0.1389
+		    0.2028
+		    0.1987
+

Specifying weights in a ktensor

Weights for each rank-1 tensor can be specified by passing in a column vector. For example,

$$X = \lambda_1 \; a_1 \circ b_1 \circ c_1 + \lambda_2 \; a_2 \circ b_2 \circ c_2$$

lambda = [5.0; 0.25]; %<-- Weights for each factor.
+X = ktensor(lambda,{A,B,C}) %<-- Create the ktensor.
+
X is a ktensor of size 4 x 3 x 2
+	X.lambda = [ 5        0.25 ]
+	X.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	X.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	X.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+

Creating a one-dimensional ktensor

Y = ktensor({rand(4,5)}) %<-- A one-dimensional ktensor.
+
Y is a ktensor of size 4
+	Y.lambda = [ 1  1  1  1  1 ]
+	Y.U{1} = 
+		    0.6038    0.7468    0.4186    0.6721    0.3795
+		    0.2722    0.4451    0.8462    0.8381    0.8318
+		    0.1988    0.9318    0.5252    0.0196    0.5028
+		    0.0153    0.4660    0.2026    0.6813    0.7095
+

Constituent parts of a ktensor

X.lambda %<-- Weights or multipliers.
+
+ans =
+
+    5.0000
+    0.2500
+
+
X.U %<-- Cell array of matrices.
+
+ans =
+
+  3×1 cell array
+
+    [4×2 double]
+    [3×2 double]
+    [2×2 double]
+
+

Creating a ktensor from its constituent parts

Y = ktensor(X.lambda,X.U) %<-- Recreate X.
+
Y is a ktensor of size 4 x 3 x 2
+	Y.lambda = [ 5        0.25 ]
+	Y.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	Y.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	Y.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+

Creating an empty ktensor

Z = ktensor %<-- Empty ktensor.
+
Z is a ktensor of size [empty tensor]
+	Z.lambda = [  ]
+

Use full or tensor to convert a ktensor to a tensor

full(X) %<-- Converts to a tensor.
+
ans is a tensor of size 4 x 3 x 2
+	ans(:,:,1) = 
+	    0.8529    0.5645    0.6692
+	    0.3085    0.2549    0.2569
+	    0.5239    0.3362    0.4080
+	    0.3552    0.1945    0.2668
+	ans(:,:,2) = 
+	    1.7450    1.0454    1.3370
+	    0.5235    0.3695    0.4175
+	    1.0940    0.6439    0.8348
+	    0.8131    0.4423    0.6098
+
tensor(X) %<-- Same as above.
+
ans is a tensor of size 4 x 3 x 2
+	ans(:,:,1) = 
+	    0.8529    0.5645    0.6692
+	    0.3085    0.2549    0.2569
+	    0.5239    0.3362    0.4080
+	    0.3552    0.1945    0.2668
+	ans(:,:,2) = 
+	    1.7450    1.0454    1.3370
+	    0.5235    0.3695    0.4175
+	    1.0940    0.6439    0.8348
+	    0.8131    0.4423    0.6098
+

Use double to convert a ktensor to a multidimensional array

double(X) %<-- Converts to an array.
+
+ans(:,:,1) =
+
+    0.8529    0.5645    0.6692
+    0.3085    0.2549    0.2569
+    0.5239    0.3362    0.4080
+    0.3552    0.1945    0.2668
+
+
+ans(:,:,2) =
+
+    1.7450    1.0454    1.3370
+    0.5235    0.3695    0.4175
+    1.0940    0.6439    0.8348
+    0.8131    0.4423    0.6098
+
+

Use tendiag or sptendiag to convert a ktensor to a ttensor.

A ktensor can be regarded as a ttensor with a diagonal core.

R = length(X.lambda);  %<-- Number of factors in X.
+core = tendiag(X.lambda, repmat(R,1,ndims(X))); %<-- Create a diagonal core.
+Y = ttensor(core, X.u) %<-- Assemble the ttensor.
+
Y is a ttensor of size 4 x 3 x 2
+	Y.core is a tensor of size 2 x 2 x 2
+		Y.core(:,:,1) = 
+	     5     0
+	     0     0
+		Y.core(:,:,2) = 
+	         0         0
+	         0    0.2500
+	Y.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	Y.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	Y.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+
norm(full(X)-full(Y)) %<-- They are the same.
+
+ans =
+
+   3.9252e-16
+
+
core = sptendiag(X.lambda, repmat(R,1,ndims(X))); %<-- Sparse diagonal core.
+Y = ttensor(core, X.u) %<-- Assemble the ttensor
+
Y is a ttensor of size 4 x 3 x 2
+	Y.core is a sparse tensor of size 2 x 2 x 2 with 2 nonzeros
+	(1,1,1)    5.0000
+	(2,2,2)    0.2500
+	Y.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	Y.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	Y.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+
norm(full(X)-full(Y)) %<-- They are the same.
+
+ans =
+
+   3.9252e-16
+
+

Use ndims and size for the dimensions of a ktensor

ndims(X) %<-- Number of dimensions.
+
+ans =
+
+     3
+
+
size(X) %<-- Row vector of the sizes.
+
+ans =
+
+     4     3     2
+
+
size(X,2) %<-- Size of the 2nd mode.
+
+ans =
+
+     3
+
+

Subscripted reference for a ktensor

X(1,1,1) %<-- Assemble the (1,1,1) element (requires computation).
+
+ans =
+
+    0.8529
+
+
X.lambda(2) %<-- Weight of 2nd factor.
+
+ans =
+
+    0.2500
+
+
X.U{2} %<-- Extract a matrix.
+
+ans =
+
+    0.8214    0.7919
+    0.4447    0.9218
+    0.6154    0.7382
+
+
X{2} %<-- Same as above.
+
+ans =
+
+    0.8214    0.7919
+    0.4447    0.9218
+    0.6154    0.7382
+
+

Subscripted assignment for a ktensor

X.lambda = ones(size(X.lambda)) %<-- Insert new multipliers.
+
X is a ktensor of size 4 x 3 x 2
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	X.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	X.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+
X.lambda(1) = 7 %<-- Change a single element of lambda.
+
X is a ktensor of size 4 x 3 x 2
+	X.lambda = [ 7  1 ]
+	X.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	X.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	X.U{3} = 
+		    0.1763    0.9355
+		    0.4057    0.9169
+
X{3}(1:2,1) = [1;1] %<-- Change the matrix for mode 3.
+
X is a ktensor of size 4 x 3 x 2
+	X.lambda = [ 7  1 ]
+	X.U{1} = 
+		    0.9501    0.8913
+		    0.2311    0.7621
+		    0.6068    0.4565
+		    0.4860    0.0185
+	X.U{2} = 
+		    0.8214    0.7919
+		    0.4447    0.9218
+		    0.6154    0.7382
+	X.U{3} = 
+		    1.0000    0.9355
+		    1.0000    0.9169
+

Use end for the last array index.

X(3:end,1,1)  %<-- Calculates X(3,1,1) and X(4,1,1).
+
+ans =
+
+    3.8274
+    2.8080
+
+
X(1,1,1:end-1)  %<-- Calculates X(1,1,1).
+
+ans =
+
+    6.1234
+
+
X{end}  %<-- Or use inside of curly braces. This is X{3}.
+
+ans =
+
+    1.0000    0.9355
+    1.0000    0.9169
+
+

Adding and subtracting ktensors

Adding two ktensors is the same as concatenating their factor matrices (and their weights).

X = ktensor({rand(4,2),rand(2,2),rand(3,2)}) %<-- Data.
+Y = ktensor({rand(4,2),rand(2,2),rand(3,2)}) %<-- More data.
+
X is a ktensor of size 4 x 2 x 3
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.4289    0.6822
+		    0.3046    0.3028
+		    0.1897    0.5417
+		    0.1934    0.1509
+	X.U{2} = 
+		    0.6979    0.8600
+		    0.3784    0.8537
+	X.U{3} = 
+		    0.5936    0.8216
+		    0.4966    0.6449
+		    0.8998    0.8180
+Y is a ktensor of size 4 x 2 x 3
+	Y.lambda = [ 1  1 ]
+	Y.U{1} = 
+		    0.6602    0.5341
+		    0.3420    0.7271
+		    0.2897    0.3093
+		    0.3412    0.8385
+	Y.U{2} = 
+		    0.5681    0.7027
+		    0.3704    0.5466
+	Y.U{3} = 
+		    0.4449    0.7948
+		    0.6946    0.9568
+		    0.6213    0.5226
+
Z = X + Y %<-- Concatenates the factor matrices.
+
Z is a ktensor of size 4 x 2 x 3
+	Z.lambda = [ 1  1  1  1 ]
+	Z.U{1} = 
+		    0.4289    0.6822    0.6602    0.5341
+		    0.3046    0.3028    0.3420    0.7271
+		    0.1897    0.5417    0.2897    0.3093
+		    0.1934    0.1509    0.3412    0.8385
+	Z.U{2} = 
+		    0.6979    0.8600    0.5681    0.7027
+		    0.3784    0.8537    0.3704    0.5466
+	Z.U{3} = 
+		    0.5936    0.8216    0.4449    0.7948
+		    0.4966    0.6449    0.6946    0.9568
+		    0.8998    0.8180    0.6213    0.5226
+
Z = X - Y %<-- Concatenates as with plus, but changes the weights.
+
Z is a ktensor of size 4 x 2 x 3
+	Z.lambda = [ 1  1 -1 -1 ]
+	Z.U{1} = 
+		    0.4289    0.6822    0.6602    0.5341
+		    0.3046    0.3028    0.3420    0.7271
+		    0.1897    0.5417    0.2897    0.3093
+		    0.1934    0.1509    0.3412    0.8385
+	Z.U{2} = 
+		    0.6979    0.8600    0.5681    0.7027
+		    0.3784    0.8537    0.3704    0.5466
+	Z.U{3} = 
+		    0.5936    0.8216    0.4449    0.7948
+		    0.4966    0.6449    0.6946    0.9568
+		    0.8998    0.8180    0.6213    0.5226
+
norm( full(Z) - (full(X)-full(Y)) ) %<-- Should be zero.
+
+ans =
+
+   1.7110e-16
+
+

Basic operations with a ktensor

+X %<-- Calls uplus.
+
ans is a ktensor of size 4 x 2 x 3
+	ans.lambda = [ 1  1 ]
+	ans.U{1} = 
+		    0.4289    0.6822
+		    0.3046    0.3028
+		    0.1897    0.5417
+		    0.1934    0.1509
+	ans.U{2} = 
+		    0.6979    0.8600
+		    0.3784    0.8537
+	ans.U{3} = 
+		    0.5936    0.8216
+		    0.4966    0.6449
+		    0.8998    0.8180
+
-X %<-- Calls uminus.
+
ans is a ktensor of size 4 x 2 x 3
+	ans.lambda = [ -1 -1 ]
+	ans.U{1} = 
+		    0.4289    0.6822
+		    0.3046    0.3028
+		    0.1897    0.5417
+		    0.1934    0.1509
+	ans.U{2} = 
+		    0.6979    0.8600
+		    0.3784    0.8537
+	ans.U{3} = 
+		    0.5936    0.8216
+		    0.4966    0.6449
+		    0.8998    0.8180
+
5*X %<-- Calls mtimes.
+
ans is a ktensor of size 4 x 2 x 3
+	ans.lambda = [ 5  5 ]
+	ans.U{1} = 
+		    0.4289    0.6822
+		    0.3046    0.3028
+		    0.1897    0.5417
+		    0.1934    0.1509
+	ans.U{2} = 
+		    0.6979    0.8600
+		    0.3784    0.8537
+	ans.U{3} = 
+		    0.5936    0.8216
+		    0.4966    0.6449
+		    0.8998    0.8180
+

Use permute to reorder the modes of a ktensor

permute(X,[2 3 1]) %<-- Reorders modes of X
+
ans is a ktensor of size 2 x 3 x 4
+	ans.lambda = [ 1  1 ]
+	ans.U{1} = 
+		    0.6979    0.8600
+		    0.3784    0.8537
+	ans.U{2} = 
+		    0.5936    0.8216
+		    0.4966    0.6449
+		    0.8998    0.8180
+	ans.U{3} = 
+		    0.4289    0.6822
+		    0.3046    0.3028
+		    0.1897    0.5417
+		    0.1934    0.1509
+

Use arrange to normalize the factors of a ktensor

The function arrange normalizes the columns of the factors and then arranges the rank-one pieces in decreasing order of size.
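
A minimal sketch of what the normalization amounts to (an illustration on an assumed random ktensor, not the toolbox implementation; arrange and normalize handle additional cases):

K = ktensor({rand(3,2),rand(4,2),rand(2,2)}); % assumed example
lam = K.lambda; Unew = K.U;
for n = 1:numel(Unew)
    cn = sqrt(sum(Unew{n}.^2,1));   % column norms of factor n
    Unew{n} = Unew{n}./cn;          % rescale columns to unit norm
    lam = lam.*cn(:);               % absorb the norms into the weights
end
[~,idx] = sort(lam,'descend');      % largest component first
Ksorted = ktensor(lam(idx), cellfun(@(U) U(:,idx), Unew, 'UniformOutput', false));
norm(full(Ksorted) - full(K))       % should be ~0: same tensor, new form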

X = ktensor({rand(3,2),rand(4,2),rand(2,2)})  % <-- Unit weights.
+
X is a ktensor of size 3 x 4 x 2
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.8801    0.2714
+		    0.1730    0.2523
+		    0.9797    0.8757
+	X.U{2} = 
+		    0.7373    0.1991
+		    0.1365    0.2987
+		    0.0118    0.6614
+		    0.8939    0.2844
+	X.U{3} = 
+		    0.4692    0.9883
+		    0.0648    0.5828
+
arrange(X) %<-- Normalized and rearranged.
+
ans is a ktensor of size 3 x 4 x 2
+	ans.lambda = [ 0.87781     0.73416 ]
+	ans.U{1} = 
+		    0.2855    0.6626
+		    0.2653    0.1302
+		    0.9209    0.7376
+	ans.U{2} = 
+		    0.2475    0.6319
+		    0.3713    0.1170
+		    0.8221    0.0101
+		    0.3535    0.7661
+	ans.U{3} = 
+		    0.8614    0.9906
+		    0.5079    0.1368
+

Use fixsigns for sign indeterminacies in a ktensor

The largest-magnitude entry of each factor is made positive, provided that the signs of pairs of vectors in that rank-1 component can be flipped together.

Y = X;
+Y.u{1}(:,1) = -Y.u{1}(:,1);  % switch the sign on a pair of columns
+Y.u{2}(:,1) = -Y.u{2}(:,1)
+
Y is a ktensor of size 3 x 4 x 2
+	Y.lambda = [ 1  1 ]
+	Y.U{1} = 
+		   -0.8801    0.2714
+		   -0.1730    0.2523
+		   -0.9797    0.8757
+	Y.U{2} = 
+		   -0.7373    0.1991
+		   -0.1365    0.2987
+		   -0.0118    0.6614
+		   -0.8939    0.2844
+	Y.U{3} = 
+		    0.4692    0.9883
+		    0.0648    0.5828
+
fixsigns(Y)
+
ans is a ktensor of size 3 x 4 x 2
+	ans.lambda = [ 1  1 ]
+	ans.U{1} = 
+		    0.8801    0.2714
+		    0.1730    0.2523
+		    0.9797    0.8757
+	ans.U{2} = 
+		    0.7373    0.1991
+		    0.1365    0.2987
+		    0.0118    0.6614
+		    0.8939    0.2844
+	ans.U{3} = 
+		    0.4692    0.9883
+		    0.0648    0.5828
+

Use ktensor to store the 'skinny' SVD of a matrix

A = rand(4,3) %<-- A random matrix.
+
+A =
+
+    0.4235    0.2259    0.6405
+    0.5155    0.5798    0.2091
+    0.3340    0.7604    0.3798
+    0.4329    0.5298    0.7833
+
+
[U,S,V] = svd(A,0); %<-- Compute the SVD.
+X = ktensor(diag(S),{U,V}) %<-- Store the SVD as a ktensor.
+
X is a ktensor of size 4 x 3
+	X.lambda = [ 1.7002     0.50951     0.22772 ]
+	X.U{1} = 
+		   -0.4346   -0.5816    0.3635
+		   -0.4365    0.5184    0.6947
+		   -0.5109    0.4983   -0.5366
+		   -0.5996   -0.3804   -0.3120
+	X.U{2} = 
+		   -0.4937    0.0444    0.8685
+		   -0.6220    0.6800   -0.3883
+		   -0.6078   -0.7319   -0.3080
+
double(X) %<-- Reassemble the original matrix.
+
+ans =
+
+    0.4235    0.2259    0.6405
+    0.5155    0.5798    0.2091
+    0.3340    0.7604    0.3798
+    0.4329    0.5298    0.7833
+
+

Displaying a ktensor

disp(X) %<-- Displays the vector lambda and each factor matrix.
+
ans is a ktensor of size 4 x 3
+	ans.lambda = [ 1.7002     0.50951     0.22772 ]
+	ans.U{1} = 
+		   -0.4346   -0.5816    0.3635
+		   -0.4365    0.5184    0.6947
+		   -0.5109    0.4983   -0.5366
+		   -0.5996   -0.3804   -0.3120
+	ans.U{2} = 
+		   -0.4937    0.0444    0.8685
+		   -0.6220    0.6800   -0.3883
+		   -0.6078   -0.7319   -0.3080
+

Displaying data

The datadisp function allows the user to associate labels with the modes and displays the most significant entries in each mode (i.e., those corresponding to the largest values).

X = ktensor({[0.8 0.1 1e-10]',[1e-5 2 3 1e-4]',[0.5 0.5]'}); %<-- Create tensor.
+X = arrange(X) %<-- Normalize the factors.
+
X is a ktensor of size 3 x 4 x 2
+	X.lambda = [ 2.0555 ]
+	X.U{1} = 
+		    0.9923
+		    0.1240
+		    0.0000
+	X.U{2} = 
+		    0.0000
+		    0.5547
+		    0.8321
+		    0.0000
+	X.U{3} = 
+		    0.7071
+		    0.7071
+
labelsDim1 = {'one','two','three'}; %<-- Labels for mode 1.
+labelsDim2 = {'A','B','C','D'}; %<-- Labels for mode 2.
+labelsDim3 = {'on','off'}; %<-- Labels for mode 3.
+datadisp(X,{labelsDim1,labelsDim2,labelsDim3}) %<-- Display.
+
+======== Group 1 ========
+
+Weight = 2.055480
+Score      Id   Name
+ 0.9922779     1 one
+ 0.1240347     2 two
+Score      Id   Name
+ 0.8320503     3 C
+ 0.5547002     2 B
+ 2.774e-05     4 D
+ 2.774e-06     1 A
+Score      Id   Name
+ 0.7071068     1 on
+ 0.7071068     2 off
+
\ No newline at end of file
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/multiply_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/multiply_doc.html
new file mode 100644 index 0000000..6a7aa5b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/multiply_doc.html @@ -0,0 +1,1551 @@

Multiplying tensors

Contents

Tensor times vector (ttv for tensor)

Compute a tensor times a vector (or vectors) in one (or more) modes.

rand('state',0);
+X = tenrand([5,3,4,2]); %<-- Create a dense tensor.
+A = rand(5,1); B = rand(3,1); C = rand(4,1); D = rand(2,1); %<-- Some vectors.
+
Y = ttv(X, A, 1) %<-- X times A in mode 1.
+
Y is a tensor of size 3 x 4 x 2
+	Y(:,:,1) = 
+	    1.6875    1.9480    0.9951    0.9505
+	    0.8258    0.9495    1.4104    0.6771
+	    1.4496    0.8295    1.5943    1.6259
+	Y(:,:,2) = 
+	    1.8369    1.3352    1.0743    1.4354
+	    1.0471    1.2250    1.5317    1.2519
+	    1.4225    1.2897    1.1595    0.5775
+
Y = ttv(X, {A,B,C,D}, 1) %<-- Same as above.
+
Y is a tensor of size 3 x 4 x 2
+	Y(:,:,1) = 
+	    1.6875    1.9480    0.9951    0.9505
+	    0.8258    0.9495    1.4104    0.6771
+	    1.4496    0.8295    1.5943    1.6259
+	Y(:,:,2) = 
+	    1.8369    1.3352    1.0743    1.4354
+	    1.0471    1.2250    1.5317    1.2519
+	    1.4225    1.2897    1.1595    0.5775
+
Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply produces a scalar.
+
+Y =
+
+    7.8707
+
+
Y = ttv(X, {D,C,B,A}, [4 3 2 1]) %<-- Same as above.
+
+Y =
+
+    7.8707
+
+
Y = ttv(X, {A,B,C,D}) %<-- Same as above.
+
+Y =
+
+    7.8707
+
+
Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4.
+
Y is a tensor of size 5 x 3
+	Y(:,:) = 
+	    1.0157    1.1081    1.5654
+	    1.3799    1.2137    1.0599
+	    1.5625    1.1830    1.0658
+	    1.2323    1.2410    1.4481
+	    1.4374    0.9573    0.8644
+
Y = ttv(X, {A,B,C,D}, [3 4]) %<-- Same as above.
+
Y is a tensor of size 5 x 3
+	Y(:,:) = 
+	    1.0157    1.1081    1.5654
+	    1.3799    1.2137    1.0599
+	    1.5625    1.1830    1.0658
+	    1.2323    1.2410    1.4481
+	    1.4374    0.9573    0.8644
+
Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication.
+
Y is a tensor of size 4
+	Y(:) = 
+	    4.9369
+	    4.5013
+	    4.4941
+	    3.8857
+
Y = ttv(X, {A,B,C,D}, [1 2 4]) %<-- Same as above.
+
Y is a tensor of size 4
+	Y(:) = 
+	    4.9369
+	    4.5013
+	    4.4941
+	    3.8857
+
Y = ttv(X, {A,B,D}, -3) %<-- Same as above.
+
Y is a tensor of size 4
+	Y(:) = 
+	    4.9369
+	    4.5013
+	    4.4941
+	    3.8857
+
Y = ttv(X, {A,B,C,D}, -3) %<-- Same as above.
+
Y is a tensor of size 4
+	Y(:) = 
+	    4.9369
+	    4.5013
+	    4.4941
+	    3.8857
+

Sparse tensor times vector (ttv for sptensor)

This is the same as in the dense case, except that the result may be either dense or sparse (or a scalar).

X = sptenrand([5,3,4,2],5); %<-- Create a sparse tensor.
+
Y = ttv(X, A, 1) %<-- X times A in mode 1. Result is sparse.
+
Y is a sparse tensor of size 3 x 4 x 2 with 5 nonzeros
+	(1,3,1)    0.0014
+	(2,2,1)    0.3357
+	(3,1,1)    0.5973
+	(3,1,2)    0.0005
+	(3,3,1)    0.0039
+
Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply.
+
+Y =
+
+    0.1196
+
+
Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4.
+
Y is a sparse tensor of size 5 x 3 with 5 nonzeros
+	(2,3)    0.0009
+	(3,2)    0.0612
+	(3,3)    0.0959
+	(4,1)    0.0064
+	(4,3)    0.0149
+
Y = ttv(X, {A,B,D}, -3) %<-- 3-way multiplication. Result is *dense*!
+
Y is a tensor of size 4
+	Y(:) = 
+	    0.1512
+	    0.1064
+	    0.0014
+	         0
+

Kruskal tensor times vector (ttv for ktensor)

The special structure of a ktensor allows an efficient implementation of vector multiplication. The result is a ktensor or a scalar.
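
A rough sketch of why this is cheap (an illustration with assumed data, not the toolbox implementation, which also handles multiple vectors): multiplying mode n by a vector folds U{n}'*v into the weights and drops that factor matrix, so the full tensor is never formed.

K = ktensor({rand(5,2),rand(3,2),rand(4,2)}); % assumed example ktensor
v = rand(5,1); n = 1;                         % vector and mode
newlambda = K.lambda .* (K.U{n}'*v);          % R small inner products
rest = K.U; rest(n) = [];                     % remaining factor matrices
Ksketch = ktensor(newlambda, rest);
norm(full(Ksketch) - ttv(full(K), v, n))      % should be ~0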

X = ktensor([10;1],rand(5,2),rand(3,2),rand(4,2),rand(2,2)); %<-- Ktensor.
+Y = ttv(X, A, 1) %<-- X times A in mode 1. Result is a ktensor.
+
Y is a ktensor of size 3 x 4 x 2
+	Y.lambda = [ 5.9997      1.1433 ]
+	Y.U{1} = 
+		    0.6927    0.4418
+		    0.0841    0.3533
+		    0.4544    0.1536
+	Y.U{2} = 
+		    0.6756    0.5548
+		    0.6992    0.1210
+		    0.7275    0.4508
+		    0.4784    0.7159
+	Y.U{3} = 
+		    0.8928    0.2548
+		    0.2731    0.8656
+
norm(full(Y) - ttv(full(X),A,1)) %<-- Result is the same as dense case.
+
+ans =
+
+   6.6844e-16
+
+
Y = ttv(X, {A,B,C,D}) %<-- All-mode multiply -- scalar result.
+
+Y =
+
+    4.8677
+
+
Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4.
+
Y is a ktensor of size 5 x 3
+	Y.lambda = [ 6.0729     0.78558 ]
+	Y.U{1} = 
+		    0.6124    0.5869
+		    0.6085    0.0576
+		    0.0158    0.3676
+		    0.0164    0.6315
+		    0.1901    0.7176
+	Y.U{2} = 
+		    0.6927    0.4418
+		    0.0841    0.3533
+		    0.4544    0.1536
+
Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication.
+
Y is a ktensor of size 4
+	Y.lambda = [ 3.6628     0.93892 ]
+	Y.U{1} = 
+		    0.6756    0.5548
+		    0.6992    0.1210
+		    0.7275    0.4508
+		    0.4784    0.7159
+

Tucker tensor times vector (ttv for ttensor)

The special structure of a ttensor allows an efficient implementation of vector multiplication. The result is a ttensor or a scalar.
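
A rough sketch of the idea (an illustration with assumed data, not the toolbox implementation): the vector is first contracted with the mode-n factor matrix, so only a core-sized multiplication remains.

T = ttensor(tenrand([2 2 2]),{rand(5,2),rand(3,2),rand(4,2)}); % assumed example
v = rand(5,1); n = 1;
w = T.U{n}'*v;                                % contract v with the factor matrix
rest = T.U; rest(n) = [];                     % remaining factor matrices
Tsketch = ttensor(ttv(T.core, w, n), rest);   % core-sized contraction only
norm(full(Tsketch) - ttv(full(T), v, n))      % should be ~0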

X = ttensor(tenrand([2,2,2,2]),rand(5,2),rand(3,2),rand(4,2),rand(2,2));
+Y = ttv(X, A, 1) %<-- X times A in mode 1.
+
Y is a ttensor of size 3 x 4 x 2
+	Y.core is a tensor of size 2 x 2 x 2
+		Y.core(:,:,1) = 
+	    1.3171    0.2658
+	    1.0694    0.9612
+		Y.core(:,:,2) = 
+	    1.3377    1.4308
+	    0.3816    0.7186
+	Y.U{1} = 
+		    0.8729    0.9669
+		    0.2379    0.6649
+		    0.6458    0.8704
+	Y.U{2} = 
+		    0.0099    0.8903
+		    0.1370    0.7349
+		    0.8188    0.6873
+		    0.4302    0.3461
+	Y.U{3} = 
+		    0.1660    0.1911
+		    0.1556    0.4225
+
norm(full(Y) - ttv(full(X),A, 1)) %<-- Same as dense case.
+
+ans =
+
+   4.7266e-16
+
+
Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply -- scalar result.
+
+Y =
+
+    3.8758
+
+
Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4.
+
Y is a ttensor of size 5 x 3
+	Y.core is a tensor of size 2 x 2
+		Y.core(:,:) = 
+	    0.6489    0.3358
+	    0.5348    0.3779
+	Y.U{1} = 
+		    0.3651    0.4586
+		    0.3932    0.8699
+		    0.5915    0.9342
+		    0.1197    0.2644
+		    0.0381    0.1603
+	Y.U{2} = 
+		    0.8729    0.9669
+		    0.2379    0.6649
+		    0.6458    0.8704
+
Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication.
+
Y is a ttensor of size 4
+	Y.core is a tensor of size 2
+		Y.core(:) = 
+	    2.3205
+	    2.3598
+	Y.U{1} = 
+		    0.0099    0.8903
+		    0.1370    0.7349
+		    0.8188    0.6873
+		    0.4302    0.3461
+

Tensor times matrix (ttm for tensor)

Compute a tensor times a matrix (or matrices) in one (or more) modes.

X = tensor(rand(5,3,4,2));
+A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2);
+
Y = ttm(X, A, 1);         %<-- X times A in mode-1.
+Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above.
+Y = ttm(X, A', 1, 't')    %<-- Same as above.
+
Y is a tensor of size 4 x 3 x 4 x 2
+	Y(:,:,1,1) = 
+	    1.0365    0.6095    0.7110
+	    1.9302    1.4742    2.0003
+	    1.7555    1.2961    1.7017
+	    1.8896    1.4325    1.5902
+	Y(:,:,2,1) = 
+	    0.6694    0.9350    0.8098
+	    1.4311    2.0724    1.5604
+	    1.2080    1.5796    1.4965
+	    1.2773    1.7966    1.4659
+	Y(:,:,3,1) = 
+	    1.1284    1.1872    1.2511
+	    1.8427    1.8095    1.8762
+	    1.6982    1.5964    1.5908
+	    1.8864    1.8810    1.8543
+	Y(:,:,4,1) = 
+	    0.9565    1.0452    0.8766
+	    1.7992    1.8762    1.8659
+	    1.4832    1.6716    1.9043
+	    1.6718    1.8121    1.7510
+	Y(:,:,1,2) = 
+	    1.1974    0.8965    0.8668
+	    1.5665    2.1589    1.3825
+	    1.3373    2.0494    1.1534
+	    1.5943    2.0267    1.4569
+	Y(:,:,2,2) = 
+	    1.0229    1.3605    1.0827
+	    2.3149    2.1127    1.9503
+	    2.1861    1.8910    1.5869
+	    2.0542    1.9491    1.9094
+	Y(:,:,3,2) = 
+	    0.7033    0.8874    0.5347
+	    1.4749    1.4350    1.3381
+	    1.5048    1.3274    1.2796
+	    1.2465    1.5395    1.1617
+	Y(:,:,4,2) = 
+	    1.3135    0.2809    0.9096
+	    2.4720    1.0792    1.5503
+	    2.2423    0.9677    1.1401
+	    2.3171    0.8680    1.4500
+
Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply.
+Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above.
+Y = ttm(X, {A,B,C,D});            %<-- Same as above.
+Y = ttm(X, {A',B',C',D'}, 't')    %<-- Same as above.
+
Y is a tensor of size 4 x 4 x 3 x 3
+	Y(:,:,1,1) = 
+	    2.4869    4.5774    4.3080    2.4909
+	    4.7042    8.5104    8.0518    4.6694
+	    4.1588    7.5379    7.1537    4.1590
+	    4.4802    8.1581    7.6647    4.4226
+	Y(:,:,2,1) = 
+	    2.4107    4.4549    4.1826    2.4144
+	    4.8310    8.7053    8.2015    4.7393
+	    4.2267    7.6101    7.2157    4.1903
+	    4.4979    8.1691    7.6629    4.4153
+	Y(:,:,3,1) = 
+	    1.8798    3.4093    3.2097    1.8545
+	    3.3879    6.1536    5.8167    3.3717
+	    3.0143    5.4614    5.1902    3.0207
+	    3.2654    5.9270    5.5773    3.2215
+	Y(:,:,1,2) = 
+	    1.4376    2.7014    2.5398    1.4693
+	    2.7470    5.0786    4.7737    2.7583
+	    2.4439    4.5664    4.2765    2.4655
+	    2.6095    4.8234    4.5165    2.6018
+	Y(:,:,2,2) = 
+	    1.4740    2.7639    2.5977    1.5023
+	    2.8931    5.3300    4.9868    2.8703
+	    2.5479    4.7381    4.4189    2.5385
+	    2.7060    4.9891    4.6705    2.6895
+	Y(:,:,3,2) = 
+	    1.0365    1.9368    1.8275    1.0598
+	    1.9305    3.6151    3.4161    1.9837
+	    1.7213    3.2586    3.0731    1.7830
+	    1.8406    3.4297    3.2279    1.8680
+	Y(:,:,1,3) = 
+	    1.3367    2.4889    2.3411    1.3540
+	    2.5428    4.6564    4.3894    2.5403
+	    2.2559    4.1595    3.9180    2.2671
+	    2.4183    4.4405    4.1641    2.4005
+	Y(:,:,2,3) = 
+	    1.3373    2.4918    2.3410    1.3528
+	    2.6485    4.8327    4.5351    2.6148
+	    2.3258    4.2653    4.0063    2.3123
+	    2.4723    4.5286    4.2431    2.4440
+	Y(:,:,3,3) = 
+	    0.9845    1.8150    1.7108    0.9905
+	    1.8066    3.3375    3.1542    1.8302
+	    1.6093    2.9879    2.8273    1.6426
+	    1.7309    3.1876    2.9998    1.7345
+
Y = ttm(X, {C,D}, [3 4]);    %<-- X times C in mode-3 & D in mode-4
+Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above.
+
Y is a tensor of size 5 x 3 x 3 x 3
+	Y(:,:,1,1) = 
+	    1.5822    1.3415    1.3949
+	    1.2553    1.1977    1.2159
+	    1.4244    1.6067    1.3862
+	    1.2104    0.7492    1.5089
+	    1.2932    1.3210    1.1591
+	Y(:,:,2,1) = 
+	    1.5112    1.0441    1.3712
+	    1.1439    1.2984    1.2236
+	    1.5205    1.6549    1.5110
+	    1.3913    0.8995    1.5692
+	    1.2733    1.4256    1.0567
+	Y(:,:,3,1) = 
+	    1.0920    1.0458    1.1802
+	    0.9569    0.8498    0.8816
+	    0.9992    1.0501    0.9638
+	    0.8583    0.5944    1.0157
+	    0.9676    1.0079    0.8150
+	Y(:,:,1,2) = 
+	    0.9474    0.8303    0.7452
+	    0.8904    0.7530    0.6234
+	    0.7403    0.9461    0.8350
+	    0.8144    0.4605    0.7893
+	    0.7803    0.6910    0.7280
+	Y(:,:,2,2) = 
+	    0.9542    0.7092    0.7569
+	    0.8336    0.8635    0.6079
+	    0.8297    0.9292    0.9154
+	    0.8980    0.5883    0.7834
+	    0.8064    0.7848    0.7567
+	Y(:,:,3,2) = 
+	    0.6673    0.6108    0.5736
+	    0.6973    0.4752    0.4593
+	    0.5460    0.6273    0.5737
+	    0.5921    0.3631    0.5820
+	    0.5514    0.4938    0.5138
+	Y(:,:,1,3) = 
+	    0.8673    0.7494    0.7182
+	    0.7597    0.6751    0.6126
+	    0.7228    0.8726    0.7625
+	    0.7098    0.4168    0.7683
+	    0.7120    0.6726    0.6529
+	Y(:,:,2,3) = 
+	    0.8539    0.6157    0.7186
+	    0.7037    0.7561    0.6064
+	    0.7919    0.8754    0.8338
+	    0.7962    0.5187    0.7797
+	    0.7207    0.7460    0.6432
+	Y(:,:,3,3) = 
+	    0.6056    0.5653    0.5783
+	    0.5887    0.4485    0.4479
+	    0.5208    0.5749    0.5266
+	    0.5108    0.3295    0.5433
+	    0.5160    0.4959    0.4601
+
Y = ttm(X, {A,B,D}, [1 2 4]);   %<-- 3-way multiply.
+Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above.
+Y = ttm(X, {A,B,D}, -3);        %<-- Same as above.
+Y = ttm(X, {A,B,C,D}, -3)       %<-- Same as above.
+
Y is a tensor of size 4 x 4 x 4 x 3
+	Y(:,:,1,1) = 
+	    1.0560    2.1553    2.0387    1.1911
+	    2.4278    4.3898    4.2181    2.4754
+	    2.1441    3.9274    3.7356    2.1771
+	    2.2457    4.2196    3.9658    2.2936
+	Y(:,:,2,1) = 
+	    1.3324    2.2654    2.0972    1.1901
+	    2.6489    4.6589    4.2674    2.4065
+	    2.2210    3.9024    3.6500    2.0934
+	    2.3874    4.1487    3.8458    2.1882
+	Y(:,:,3,1) = 
+	    1.5259    2.6902    2.5346    1.4625
+	    2.4233    4.3577    4.1285    2.3958
+	    2.1583    3.9709    3.7404    2.1642
+	    2.4544    4.4132    4.1332    2.3769
+	Y(:,:,4,1) = 
+	    1.2470    2.3630    2.2327    1.2974
+	    2.4531    4.5551    4.3305    2.5251
+	    2.2263    4.0016    3.8576    2.2685
+	    2.3016    4.2637    4.0530    2.3627
+	Y(:,:,1,2) = 
+	    0.6944    1.3989    1.3155    0.7645
+	    1.4979    2.6951    2.5093    1.4364
+	    1.3402    2.4234    2.2267    1.2614
+	    1.4163    2.6071    2.4098    1.3735
+	Y(:,:,2,2) = 
+	    0.8962    1.5379    1.4173    0.8018
+	    1.6626    3.0068    2.7746    1.5774
+	    1.4220    2.6151    2.4249    1.3856
+	    1.5214    2.7047    2.5241    1.4463
+	Y(:,:,3,2) = 
+	    0.8062    1.4427    1.3306    0.7556
+	    1.3527    2.4504    2.3102    1.3362
+	    1.2317    2.2839    2.1471    1.2410
+	    1.3547    2.4308    2.2533    1.2850
+	Y(:,:,4,2) = 
+	    0.6588    1.3312    1.3007    0.7781
+	    1.3637    2.6926    2.5765    1.5156
+	    1.2072    2.3770    2.2702    1.3333
+	    1.2520    2.4805    2.3848    1.4080
+	Y(:,:,1,3) = 
+	    0.6109    1.2374    1.1665    0.6794
+	    1.3537    2.4408    2.3039    1.3333
+	    1.2045    2.1900    2.0426    1.1717
+	    1.2680    2.3547    2.1921    1.2574
+	Y(:,:,2,3) = 
+	    0.7812    1.3355    1.2330    0.6984
+	    1.4915    2.6660    2.4525    1.3896
+	    1.2650    2.2829    2.1244    1.2158
+	    1.3562    2.3881    2.2224    1.2697
+	Y(:,:,3,3) = 
+	    0.7811    1.3880    1.2929    0.7398
+	    1.2777    2.3069    2.1797    1.2626
+	    1.1519    2.1284    2.0027    1.1581
+	    1.2862    2.3100    2.1515    1.2317
+	Y(:,:,4,3) = 
+	    0.6383    1.2522    1.2053    0.7119
+	    1.2905    2.4790    2.3654    1.3861
+	    1.1555    2.1838    2.0942    1.2306
+	    1.1966    2.2999    2.2001    1.2917
+

Sparse tensor times matrix (ttm for sptensor)

It is also possible to multiply an sptensor times a matrix or series of matrices. The arguments are the same as for the dense case. The result may be dense or sparse, depending on its density.

X = sptenrand([5 3 4 2],10);
+Y = ttm(X, A, 1);         %<-- X times A in mode-1.
+Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above.
+Y = ttm(X, A', 1, 't')    %<-- Same as above
+
Y is a sparse tensor of size 4 x 3 x 4 x 2 with 28 nonzeros
+	(1,1,1,1)    0.1506
+	(1,2,2,1)    0.0129
+	(1,3,2,1)    0.0893
+	(1,2,1,2)    0.4196
+	(1,1,2,2)    0.0066
+	(1,3,2,2)    0.0377
+	(1,3,3,2)    0.4310
+	(2,1,1,1)    0.7738
+	(2,2,2,1)    0.3652
+	(2,3,2,1)    0.4116
+	(2,2,1,2)    0.2315
+	(2,1,2,2)    0.1852
+	(2,3,2,2)    0.2910
+	(2,3,3,2)    0.7843
+	(3,1,1,1)    0.8920
+	(3,2,2,1)    0.2743
+	(3,3,2,1)    0.3639
+	(3,2,1,2)    0.3365
+	(3,1,2,2)    0.1391
+	(3,3,2,2)    0.2387
+	(3,3,3,2)    0.9501
+	(4,1,1,1)    0.5556
+	(4,2,2,1)    0.3154
+	(4,3,2,1)    0.1610
+	(4,2,1,2)    0.4202
+	(4,1,2,2)    0.1600
+	(4,3,2,2)    0.1799
+	(4,3,3,2)    0.7447
+
norm(full(Y) - ttm(full(X),A, 1) ) %<-- Same as dense case.
+
+ans =
+
+     0
+
+
Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply.
+Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above.
+Y = ttm(X, {A,B,C,D});            %<-- Same as above.
+Y = ttm(X, {A',B',C',D'}, 't')    %<-- Same as above.
+
Y is a tensor of size 4 x 4 x 3 x 3
+	Y(:,:,1,1) = 
+	    0.0890    0.1395    0.1470    0.0913
+	    0.2458    0.5003    0.5113    0.3157
+	    0.2497    0.5323    0.5461    0.3388
+	    0.2063    0.4095    0.3985    0.2375
+	Y(:,:,2,1) = 
+	    0.0985    0.1757    0.1675    0.0976
+	    0.3510    0.7241    0.7029    0.4190
+	    0.3310    0.7403    0.7158    0.4273
+	    0.2770    0.5858    0.5249    0.2942
+	Y(:,:,3,1) = 
+	    0.0419    0.0376    0.0617    0.0462
+	    0.1291    0.1583    0.1999    0.1358
+	    0.1288    0.1546    0.2055    0.1430
+	    0.1050    0.1303    0.1583    0.1054
+	Y(:,:,1,2) = 
+	    0.0981    0.1224    0.1323    0.0821
+	    0.1729    0.2599    0.3046    0.2013
+	    0.1919    0.2865    0.3360    0.2219
+	    0.1682    0.2465    0.2699    0.1710
+	Y(:,:,2,2) = 
+	    0.0973    0.1452    0.1299    0.0708
+	    0.1977    0.3532    0.3625    0.2229
+	    0.2021    0.3706    0.3744    0.2280
+	    0.1835    0.3276    0.3052    0.1747
+	Y(:,:,3,2) = 
+	    0.0483    0.0371    0.0666    0.0510
+	    0.1054    0.0994    0.1585    0.1176
+	    0.1157    0.1031    0.1730    0.1305
+	    0.0945    0.0895    0.1386    0.1018
+	Y(:,:,1,3) = 
+	    0.0719    0.0965    0.1034    0.0642
+	    0.1480    0.2538    0.2795    0.1794
+	    0.1587    0.2752    0.3040    0.1956
+	    0.1361    0.2251    0.2346    0.1450
+	Y(:,:,2,3) = 
+	    0.0738    0.1169    0.1071    0.0599
+	    0.1860    0.3555    0.3552    0.2152
+	    0.1835    0.3684    0.3644    0.2199
+	    0.1610    0.3092    0.2830    0.1605
+	Y(:,:,3,3) = 
+	    0.0350    0.0282    0.0491    0.0374
+	    0.0853    0.0892    0.1296    0.0932
+	    0.0905    0.0902    0.1384    0.1016
+	    0.0739    0.0773    0.1094    0.0777
+
Y = ttm(X, {C,D}, [3 4]);    %<-- X times C in mode-3 & D in mode-4
+Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above.
+
Y is a tensor of size 5 x 3 x 3 x 3
+	Y(:,:,1,1) = 
+	         0    0.1004    0.0998
+	    0.4075         0    0.1363
+	    0.0221    0.1332    0.0167
+	         0         0    0.3307
+	         0         0         0
+	Y(:,:,2,1) = 
+	         0    0.1323    0.0395
+	    0.5371         0    0.0540
+	    0.0427    0.2578    0.0322
+	         0         0    0.6257
+	         0         0         0
+	Y(:,:,3,1) = 
+	         0    0.0154    0.0943
+	    0.0626         0    0.1289
+	    0.0123    0.0742    0.0093
+	         0         0    0.1878
+	         0         0         0
+	Y(:,:,1,2) = 
+	         0    0.1342    0.1334
+	    0.1478         0    0.1823
+	    0.0295    0.0483    0.0223
+	         0         0    0.1626
+	         0         0         0
+	Y(:,:,2,2) = 
+	         0    0.1769    0.0528
+	    0.1948         0    0.0722
+	    0.0571    0.0935    0.0431
+	         0         0    0.2955
+	         0         0         0
+	Y(:,:,3,2) = 
+	         0    0.0206    0.1262
+	    0.0227         0    0.1724
+	    0.0164    0.0269    0.0124
+	         0         0    0.0954
+	         0         0         0
+	Y(:,:,1,3) = 
+	         0    0.0933    0.0927
+	    0.1737         0    0.1267
+	    0.0205    0.0568    0.0155
+	         0         0    0.1630
+	         0         0         0
+	Y(:,:,2,3) = 
+	         0    0.1229    0.0367
+	    0.2290         0    0.0502
+	    0.0397    0.1099    0.0300
+	         0         0    0.3022
+	         0         0         0
+	Y(:,:,3,3) = 
+	         0    0.0143    0.0877
+	    0.0267         0    0.1198
+	    0.0114    0.0316    0.0086
+	         0         0    0.0942
+	         0         0         0
+
Y = ttm(X, {A,B,D}, [1 2 4]);   %<-- 3-way multiply.
+Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above.
+Y = ttm(X, {A,B,D}, -3);        %<-- Same as above.
+Y = ttm(X, {A,B,C,D}, -3)       %<-- Same as above.
+
Y is a tensor of size 4 x 4 x 4 x 3
+	Y(:,:,1,1) = 
+	    0.1010    0.2463    0.1918    0.0950
+	    0.1515    0.7021    0.6066    0.3415
+	    0.1879    0.8297    0.7130    0.3989
+	    0.1573    0.5785    0.4856    0.2645
+	Y(:,:,2,1) = 
+	    0.0416    0.0313    0.0563    0.0431
+	    0.3903    0.4667    0.4983    0.3058
+	    0.3136    0.3615    0.4026    0.2533
+	    0.2647    0.3646    0.3297    0.1801
+	Y(:,:,3,1) = 
+	    0.0468    0.0249    0.0651    0.0543
+	    0.0851    0.0453    0.1184    0.0988
+	    0.1031    0.0549    0.1434    0.1196
+	    0.0808    0.0430    0.1124    0.0938
+	Y(:,:,4,1) = 
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	Y(:,:,1,2) = 
+	    0.1147    0.2091    0.1501    0.0656
+	    0.0980    0.3207    0.2645    0.1410
+	    0.1308    0.3970    0.3232    0.1696
+	    0.1353    0.3298    0.2568    0.1271
+	Y(:,:,2,2) = 
+	    0.0194    0.0152    0.0275    0.0211
+	    0.1805    0.2340    0.2663    0.1713
+	    0.1451    0.1809    0.2133    0.1397
+	    0.1221    0.1841    0.1830    0.1087
+	Y(:,:,3,2) = 
+	    0.0626    0.0333    0.0870    0.0726
+	    0.1138    0.0606    0.1584    0.1321
+	    0.1379    0.0735    0.1919    0.1600
+	    0.1081    0.0576    0.1504    0.1254
+	Y(:,:,4,2) = 
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	Y(:,:,1,3) = 
+	    0.0834    0.1668    0.1233    0.0566
+	    0.0868    0.3335    0.2816    0.1545
+	    0.1125    0.4033    0.3373    0.1830
+	    0.1074    0.3086    0.2487    0.1289
+	Y(:,:,2,3) = 
+	    0.0200    0.0153    0.0277    0.0212
+	    0.1865    0.2324    0.2566    0.1616
+	    0.1499    0.1798    0.2064    0.1327
+	    0.1263    0.1822    0.1733    0.0992
+	Y(:,:,3,3) = 
+	    0.0435    0.0232    0.0605    0.0504
+	    0.0791    0.0421    0.1100    0.0918
+	    0.0958    0.0510    0.1333    0.1112
+	    0.0751    0.0400    0.1045    0.0872
+	Y(:,:,4,3) = 
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+

The result may be dense or sparse.

X = sptenrand([5 3 4],1);
+Y = ttm(X, A, 1) %<-- Sparse result.
+
Y is a sparse tensor of size 4 x 3 x 4 with 4 nonzeros
+	(1,3,2)    0.0232
+	(2,3,2)    0.1067
+	(3,3,2)    0.0943
+	(4,3,2)    0.0417
+
X = sptenrand([5 3 4],50);
+Y = ttm(X, A, 1) %<-- Dense result.
+
Y is a tensor of size 4 x 3 x 4
+	Y(:,:,1) = 
+	    0.4159    0.5631    0.0406
+	    0.9765    1.4239    0.2088
+	    0.9029    1.2234    0.2406
+	    0.8744    1.2606    0.1499
+	Y(:,:,2) = 
+	    1.0127    0.5477         0
+	    1.4153    1.1141         0
+	    1.1989    1.0105         0
+	    1.6381    0.9835         0
+	Y(:,:,3) = 
+	    0.5923    0.6934    0.9184
+	    0.8260    0.9650    0.8641
+	    0.4955    0.7236    0.5323
+	    0.8762    0.9604    1.0351
+	Y(:,:,4) = 
+	    1.2906    0.7036    0.4899
+	    1.2638    0.8427    1.1469
+	    1.1332    0.8936    1.0464
+	    1.5305    0.9649    0.9588
+

Sometimes the product may be too large to reside in memory. For example, try the following: X = sptenrand([100 100 100 100], 1e4); A = rand(1000,100); ttm(X,A,1); %<-- too large for memory
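
Before attempting such a product, a rough memory estimate can help. The following is a minimal, hedged sketch; it assumes the result would be stored densely and uses the usual 8-bytes-per-double figure for MATLAB storage:

X = sptenrand([100 100 100 100], 1e4);
A = rand(1000,100);
newsize = size(X); newsize(1) = size(A,1); %<-- Size that ttm(X,A,1) would have.
prod(newsize)*8                            %<-- Approximate bytes if stored densely (about 8 GB here).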

Matricized Khatri-Rao product of a tensor.

mttkrp computes the matricized Khatri-Rao product of a tensor X with a cell array of matrices U. The operation first matricizes (i.e., flattens) a tensor X with m modes in a given mode n. Then the Khatri-Rao product of the cell array of matrices U={U1,...,Um} is computed, omitting the nth term in the array. The returned value is the product of the matricized tensor X and the Khatri-Rao product of the remaining matrices. This operation is useful in many numerical procedures, e.g., formulating the subproblems in an alternating least squares CP decomposition of a tensor.

Each matrix in the cell array U must have the same number of columns. The number of rows of the matrix Ui equals the dimension of X in mode i. In the example that follows, we verify that mttkrp performs the calculation indicated above.

U = {rand(2,3), 2*rand(3,3), 3*rand(4,3)}; %<--the cell array
+X = tensor(rand(2,3,4)); %<--the tensor
+n = 2; %<--the dimension to matricize with respect to.
+
+KRP = khatrirao(U{1}, U{3}); %<--Khatri-Rao product, omitting U{2}
+M = permute(X.data, [n:size(X,n), 1:n-1]);
+M = reshape(M,size(X,n),[]); %<--Matricized tensor data
+
+norm(M*KRP-mttkrp(X,U,n)) < 1e-14 %<--They are equal, within machine precision
+
+ans =
+
+  logical
+
+   1
+
+
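
For context, mttkrp is the computational kernel of the factor-matrix update in CP-ALS. The following is a minimal, hedged sketch of a single mode-n update, reusing X, U, and n from the example above; it solves the standard ALS normal equations, and the variable names G, V, and Unew are purely illustrative:

G = mttkrp(X,U,n);               %<-- Matricized tensor times Khatri-Rao product.
V = ones(size(U{n},2));          %<-- Hadamard product of the Gram matrices ...
for i = [1:n-1, n+1:length(U)]   %    ... of all factors except the nth.
    V = V .* (U{i}'*U{i});
end
Unew = G / V;                    %<-- Updated mode-n factor matrix.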

Kruskal tensor times matrix (ttm for ktensor)

The special structure of a ktensor allows an efficient implementation of matrix multiplication. The arguments are the same as for the dense case.

X = ktensor({rand(5,1) rand(3,1) rand(4,1) rand(2,1)});
+
Y = ttm(X, A, 1);         %<-- X times A in mode-1.
+Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above.
+Y = ttm(X, A', 1, 't')    %<-- Same as above.
+
Y is a ktensor of size 4 x 3 x 4 x 2
+	Y.lambda = [ 1 ]
+	Y.U{1} = 
+		    0.9195
+		    1.0464
+		    1.0804
+		    1.2324
+	Y.U{2} = 
+		    0.3348
+		    0.3762
+		    0.9522
+	Y.U{3} = 
+		    0.7193
+		    0.7793
+		    0.6177
+		    0.6492
+	Y.U{4} = 
+		    0.7563
+		    0.1478
+
Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply.
+Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above.
+Y = ttm(X, {A,B,C,D});            %<-- Same as above.
+Y = ttm(X, {A',B',C',D'}, 't')    %<-- Same as above.
+
Y is a ktensor of size 4 x 4 x 3 x 3
+	Y.lambda = [ 1 ]
+	Y.U{1} = 
+		    0.9195
+		    1.0464
+		    1.0804
+		    1.2324
+	Y.U{2} = 
+		    0.6186
+		    0.8281
+		    0.9655
+		    0.6314
+	Y.U{3} = 
+		    1.2905
+		    1.3673
+		    0.9019
+	Y.U{4} = 
+		    0.7582
+		    0.3193
+		    0.3461
+
Y = ttm(X, {C,D}, [3 4]);    %<-- X times C in mode-3 & D in mode-4.
+Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above.
+
Y is a ktensor of size 5 x 3 x 3 x 3
+	Y.lambda = [ 1 ]
+	Y.U{1} = 
+		    0.9971
+		    0.3462
+		    0.1761
+		    0.0679
+		    0.3094
+	Y.U{2} = 
+		    0.3348
+		    0.3762
+		    0.9522
+	Y.U{3} = 
+		    1.2905
+		    1.3673
+		    0.9019
+	Y.U{4} = 
+		    0.7582
+		    0.3193
+		    0.3461
+
Y = ttm(X, {A,B,D}, [1 2 4]);   %<-- 3-way multiply.
+Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above.
+Y = ttm(X, {A,B,D}, -3);        %<-- Same as above.
+Y = ttm(X, {A,B,C,D}, -3)       %<-- Same as above.
+
Y is a ktensor of size 4 x 4 x 4 x 3
+	Y.lambda = [ 1 ]
+	Y.U{1} = 
+		    0.9195
+		    1.0464
+		    1.0804
+		    1.2324
+	Y.U{2} = 
+		    0.6186
+		    0.8281
+		    0.9655
+		    0.6314
+	Y.U{3} = 
+		    0.7193
+		    0.7793
+		    0.6177
+		    0.6492
+	Y.U{4} = 
+		    0.7582
+		    0.3193
+		    0.3461
+

Tucker tensor times matrix (ttm for ttensor)

The special structure of a ttensor allows an efficient implementation of matrix multiplication.

X = ttensor(tensor(rand(2,2,2,2)),{rand(5,2) rand(3,2) rand(4,2) rand(2,2)});
+
Y = ttm(X, A, 1);         %<-- computes X times A in mode-1.
+Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above.
+Y = ttm(X, A', 1, 't')    %<-- Same as above.
+
Y is a ttensor of size 4 x 3 x 4 x 2
+	Y.core is a tensor of size 2 x 2 x 2 x 2
+		Y.core(:,:,1,1) = 
+	    0.5995    0.1719
+	    0.8986    0.8189
+		Y.core(:,:,2,1) = 
+	    0.0693    0.3173
+	    0.9557    0.0052
+		Y.core(:,:,1,2) = 
+	    0.7599    0.7153
+	    0.3087    0.0809
+		Y.core(:,:,2,2) = 
+	    0.8459    0.8704
+	    0.7184    0.8722
+	Y.U{1} = 
+		    1.3576    0.9672
+		    2.5857    1.7605
+		    2.2392    1.4629
+		    2.4193    1.6863
+	Y.U{2} = 
+		    0.7233    0.4236
+		    0.2397    0.7957
+		    0.4899    0.7634
+	Y.U{3} = 
+		    0.2389    0.2685
+		    0.6351    0.9912
+		    0.2315    0.7603
+		    0.6159    0.4822
+	Y.U{4} = 
+		    0.9452    0.0844
+		    0.3607    0.9485
+
Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply.
+Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above.
+Y = ttm(X, {A,B,C,D});            %<-- Same as above.
+Y = ttm(X, {A',B',C',D'}, 't')    %<-- Same as above.
+
Y is a ttensor of size 4 x 4 x 3 x 3
+	Y.core is a tensor of size 2 x 2 x 2 x 2
+		Y.core(:,:,1,1) = 
+	    0.5995    0.1719
+	    0.8986    0.8189
+		Y.core(:,:,2,1) = 
+	    0.0693    0.3173
+	    0.9557    0.0052
+		Y.core(:,:,1,2) = 
+	    0.7599    0.7153
+	    0.3087    0.0809
+		Y.core(:,:,2,2) = 
+	    0.8459    0.8704
+	    0.7184    0.8722
+	Y.U{1} = 
+		    1.3576    0.9672
+		    2.5857    1.7605
+		    2.2392    1.4629
+		    2.4193    1.6863
+	Y.U{2} = 
+		    0.4280    0.8254
+		    0.9494    1.2692
+		    0.9504    1.2097
+		    0.5811    0.6974
+	Y.U{3} = 
+		    0.7658    1.1522
+		    0.8756    1.1782
+		    0.5976    0.9002
+	Y.U{4} = 
+		    1.0017    0.3714
+		    0.4715    0.4192
+		    0.4829    0.3051
+
Y = ttm(X, {C,D}, [3 4]);    %<-- X times C in mode-3 & D in mode-4
+Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above.
+
Y is a ttensor of size 5 x 3 x 3 x 3
+	Y.core is a tensor of size 2 x 2 x 2 x 2
+		Y.core(:,:,1,1) = 
+	    0.5995    0.1719
+	    0.8986    0.8189
+		Y.core(:,:,2,1) = 
+	    0.0693    0.3173
+	    0.9557    0.0052
+		Y.core(:,:,1,2) = 
+	    0.7599    0.7153
+	    0.3087    0.0809
+		Y.core(:,:,2,2) = 
+	    0.8459    0.8704
+	    0.7184    0.8722
+	Y.U{1} = 
+		    0.7616    0.6399
+		    0.6695    0.1562
+		    0.9020    0.7832
+		    0.8215    0.7314
+		    0.8327    0.5647
+	Y.U{2} = 
+		    0.7233    0.4236
+		    0.2397    0.7957
+		    0.4899    0.7634
+	Y.U{3} = 
+		    0.7658    1.1522
+		    0.8756    1.1782
+		    0.5976    0.9002
+	Y.U{4} = 
+		    1.0017    0.3714
+		    0.4715    0.4192
+		    0.4829    0.3051
+
Y = ttm(X, {A,B,D}, [1 2 4]);   %<-- 3-way multiply
+Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above.
+Y = ttm(X, {A,B,D}, -3);        %<-- Same as above.
+Y = ttm(X, {A,B,C,D}, -3)       %<-- Same as above.
+
Y is a ttensor of size 4 x 4 x 4 x 3
+	Y.core is a tensor of size 2 x 2 x 2 x 2
+		Y.core(:,:,1,1) = 
+	    0.5995    0.1719
+	    0.8986    0.8189
+		Y.core(:,:,2,1) = 
+	    0.0693    0.3173
+	    0.9557    0.0052
+		Y.core(:,:,1,2) = 
+	    0.7599    0.7153
+	    0.3087    0.0809
+		Y.core(:,:,2,2) = 
+	    0.8459    0.8704
+	    0.7184    0.8722
+	Y.U{1} = 
+		    1.3576    0.9672
+		    2.5857    1.7605
+		    2.2392    1.4629
+		    2.4193    1.6863
+	Y.U{2} = 
+		    0.4280    0.8254
+		    0.9494    1.2692
+		    0.9504    1.2097
+		    0.5811    0.6974
+	Y.U{3} = 
+		    0.2389    0.2685
+		    0.6351    0.9912
+		    0.2315    0.7603
+		    0.6159    0.4822
+	Y.U{4} = 
+		    1.0017    0.3714
+		    0.4715    0.4192
+		    0.4829    0.3051
+

Tensor times tensor (ttt for tensor)

X = tensor(rand(4,2,3)); Y = tensor(rand(3,4,2));
+Z = ttt(X,Y); %<-- Outer product of X and Y.
+size(Z)
+
+ans =
+
+     4     2     3     3     4     2
+
+
Z = ttt(X,X,1:3) %<-- Inner product of X with itself.
+
+Z =
+
+    8.9942
+
+
Z = ttt(X,Y,[1 2 3],[2 3 1]) %<-- Inner product of X & Y.
+
+Z =
+
+    6.3410
+
+
Z = innerprod(X,permute(Y, [2 3 1])) %<-- Same as above.
+
+Z =
+
+    6.3410
+
+
Z = ttt(X,Y,[1 3],[2 1]) %<-- Product of X & Y along specified dims.
+
Z is a tensor of size 2 x 2
+	Z(:,:) = 
+	    2.4886    2.4765
+	    3.2827    3.8524
+

Sparse tensor times sparse tensor (ttt for sptensor)

X = sptenrand([4 2 3],3); Y = sptenrand([3 4 2],3);
+Z = ttt(X,Y) %<--Outer product of X and Y.
+
Z is a sparse tensor of size 4 x 2 x 3 x 3 x 4 x 2 with 9 nonzeros
+	(1,1,3,1,3,1)    0.1445
+	(1,1,3,3,1,1)    0.4935
+	(1,1,3,3,4,1)    0.5292
+	(2,2,3,1,3,1)    0.1181
+	(2,2,3,3,1,1)    0.4033
+	(2,2,3,3,4,1)    0.4325
+	(3,2,1,1,3,1)    0.1132
+	(3,2,1,3,1,1)    0.3867
+	(3,2,1,3,4,1)    0.4147
+
norm(full(Z)-ttt(full(X),full(Y))) %<-- Same as dense.
+
+ans =
+
+     0
+
+
Z = ttt(X,X,1:3) %<-- Inner product of X with itself.
+
+Z =
+
+    2.2738
+
+
X = sptenrand([2 3],1); Y = sptenrand([3 2],1);
+Z = ttt(X, Y) %<-- Sparse result.
+
Z is a sparse tensor of size 2 x 3 x 3 x 2 with 1 nonzeros
+	(2,1,2,1)    0.7262
+
X = sptenrand([2 3],20); Y = sptenrand([3 2],20);
+Z = ttt(X, Y) %<-- Dense result.
+
Z is a tensor of size 2 x 3 x 3 x 2
+	Z(:,:,1,1) = 
+	         0    0.1584    0.1237
+	    0.5586    0.2357    0.0374
+	Z(:,:,2,1) = 
+	         0    0.1367    0.1067
+	    0.4820    0.2034    0.0322
+	Z(:,:,3,1) = 
+	         0    0.1968    0.1536
+	    0.6939    0.2928    0.0464
+	Z(:,:,1,2) = 
+	         0    0.0550    0.0430
+	    0.1940    0.0819    0.0130
+	Z(:,:,2,2) = 
+	         0    0.0418    0.0326
+	    0.1473    0.0622    0.0099
+	Z(:,:,3,2) = 
+	         0    0.1293    0.1009
+	    0.4559    0.1924    0.0305
+
Z = ttt(X,Y,[1 2],[2 1]) %<-- inner product of X & Y
+
+Z =
+
+    0.5770
+
+

Inner product (innerprod)

The function innerprod efficiently computes the inner product between two tensors X and Y. The computation is specialized to the classes of X and Y so that the most efficient method is used.

X = tensor(rand(2,2,2))
+Y = ktensor({rand(2,2),rand(2,2),rand(2,2)})
+
X is a tensor of size 2 x 2 x 2
+	X(:,:,1) = 
+	    0.6496    0.3348
+	    0.0418    0.4334
+	X(:,:,2) = 
+	    0.0986    0.0271
+	    0.6809    0.3208
+Y is a ktensor of size 2 x 2 x 2
+	Y.lambda = [ 1  1 ]
+	Y.U{1} = 
+		    0.0525    0.3839
+		    0.3696    0.3359
+	Y.U{2} = 
+		    0.8907    0.0810
+		    0.2381    0.3015
+	Y.U{3} = 
+		    0.1800    0.7903
+		    0.7162    0.3313
+
z = innerprod(X,Y)
+
+z =
+
+    0.3010
+
+
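
As a hedged cross-check, reusing X and Y from above, the same value can be obtained by converting Y to a dense tensor and contracting over all modes with ttt; innerprod avoids forming the dense tensor when it can:

ttt(X, full(Y), 1:3) %<-- Should match z, up to roundoff.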

Contraction on tensors (contract for tensor)

The function contract sums the entries of X along dimensions I and J. Contraction is a generalization of matrix trace. In other words, the trace is performed along the two-dimensional slices defined by dimensions I and J. It is possible to implement tensor multiplication as an outer product followed by a contraction.

X = sptenrand([4 3 2],5);
+Y = sptenrand([3 2 4],5);
+
Z1 = ttt(X,Y,1,3); %<-- Normal tensor multiplication
+
Z2 = contract(ttt(X,Y),1,6); %<-- Outer product + contract
+
norm(Z1-Z2) %<-- Should be zero
+
+ans =
+
+     0
+
+
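
The trace interpretation can also be checked directly. Here is a small hedged example: contracting the two equal-sized modes of a dense tensor should return one matrix trace per remaining slice (the variable names are illustrative):

T = tensor(rand(3,3,2));            %<-- Two 3 x 3 frontal slices.
W = contract(T,1,2);                %<-- Sums T(i,i,k) over i.
[W(1)-trace(T.data(:,:,1)), W(2)-trace(T.data(:,:,2))] %<-- Both entries should be ~0.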

Using contract on either sparse or dense tensors gives the same result

X = sptenrand([4 2 3 4],20);
+Z1 = contract(X,1,4)        % sparse version of contract
+
Z1 is a tensor of size 2 x 3
+	Z1(:,:) = 
+	    0.0047    0.2498    0.2491
+	    0.5821    0.4739    0.3943
+
Z2 = contract(full(X),1,4)  % dense version of contract
+
Z2 is a tensor of size 2 x 3
+	Z2(:,:) = 
+	    0.0047    0.2498    0.2491
+	    0.5821    0.4739    0.3943
+
norm(full(Z1) - Z2) %<-- Should be zero
+
+ans =
+
+     0
+
+

The result may be dense or sparse, depending on its density.

X = sptenrand([4 2 3 4],8);
+Y = contract(X,1,4) %<-- should be sparse
+
Y is a sparse tensor of size 2 x 3 with 2 nonzeros
+	(1,3)    0.5531
+	(2,2)    0.7281
+
X = sptenrand([4 2 3 4],80);
+Y = contract(X,1,4) %<-- should be dense
+
Y is a tensor of size 2 x 3
+	Y(:,:) = 
+	    1.0451    1.9378    0.9447
+	    1.9514    1.3196    1.6253
+

Relationships among ttv, ttm, and ttt

The three "tensor times _" functions (ttv, ttm, ttt) all perform specialized calculations, but they are all related to some degree. Here are several relationships among them:

X = tensor(rand(4,3,2));
+A = rand(4,1);
+

Tensor times vector gives a 3 x 2 result

Y1 = ttv(X,A,1)
+
Y1 is a tensor of size 3 x 2
+	Y1(:,:) = 
+	    1.3897    1.2870
+	    0.3184    0.6756
+	    0.5032    1.3496
+

When ttm is used with the transpose option, the result is almost the same as ttv

Y2 = ttm(X,A,1,'t')
+
Y2 is a tensor of size 1 x 3 x 2
+	Y2(:,:,1) = 
+	    1.3897    0.3184    0.5032
+	Y2(:,:,2) = 
+	    1.2870    0.6756    1.3496
+

We can use squeeze to remove the singleton dimension left over from ttm to give the same answer as ttv

squeeze(Y2)
+
ans is a tensor of size 3 x 2
+	ans(:,:) = 
+	    1.3897    1.2870
+	    0.3184    0.6756
+	    0.5032    1.3496
+

Tensor outer product may be used in conjunction with contract to produce the result of ttm. Please note that this is more expensive than using ttm.

Z = ttt(tensor(A),X);
+size(Z)
+
+ans =
+
+     4     1     4     3     2
+
+
Y3 = contract(Z,1,3)
+
Y3 is a tensor of size 1 x 3 x 2
+	Y3(:,:,1) = 
+	    1.3897    0.3184    0.5032
+	Y3(:,:,2) = 
+	    1.2870    0.6756    1.3496
+

Finally, use squeeze to remove the singleton dimension to get the same result as ttv.

squeeze(Y3)
+
ans is a tensor of size 3 x 2
+	ans(:,:) = 
+	    1.3897    1.2870
+	    0.3184    0.6756
+	    0.5032    1.3496
+

Frobenius norm of a tensor

The Frobenius norm of any type of tensor may be computed with the function norm. Each class is optimized to calculate the norm in the most efficient manner.

X = sptenrand([4 3 2],5)
+norm(X)
+norm(full(X))
+
X is a sparse tensor of size 4 x 3 x 2 with 5 nonzeros
+	(2,2,1)    0.8295
+	(2,3,2)    0.7905
+	(3,2,2)    0.8002
+	(3,3,1)    0.9895
+	(4,1,1)    0.8698
+
+ans =
+
+    1.9206
+
+
+ans =
+
+    1.9206
+
+
X = ktensor({rand(4,2),rand(3,2)})
+norm(X)
+
X is a ktensor of size 4 x 3
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.8201    0.6458
+		    0.9506    0.2407
+		    0.5689    0.0888
+		    0.1064    0.9876
+	X.U{2} = 
+		    0.7440    0.7862
+		    0.5461    0.8098
+		    0.4219    0.6288
+
+ans =
+
+    2.6010
+
+
X = ttensor(tensor(rand(2,2)),{rand(4,2),rand(3,2)})
+norm(X)
+
X is a ttensor of size 4 x 3
+	X.core is a tensor of size 2 x 2
+		X.core(:,:) = 
+	    0.3186    0.0491
+	    0.3027    0.6021
+	X.U{1} = 
+		    0.2244    0.6691
+		    0.8587    0.8619
+		    0.7536    0.8412
+		    0.2630    0.0178
+	X.U{2} = 
+		    0.8048    0.1238
+		    0.5318    0.0792
+		    0.3100    0.3897
+
+ans =
+
+    1.0605
+
+
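
Since the Frobenius norm is the square root of a tensor's inner product with itself, norm and innerprod can be cross-checked. A small hedged example for a dense tensor:

X = tenrand([4 3 2]);
norm(X) - sqrt(innerprod(X,X)) %<-- Should be ~0, up to roundoff.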
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/nvecs_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/nvecs_doc.html new file mode 100644 index 0000000..ee478db --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/nvecs_doc.html @@ -0,0 +1,276 @@ + + + + + Generating the leading mode-n vectors

Generating the leading mode-n vectors

The leading mode-n vectors are those vectors that span the subspace of the mode-n fibers. In other words, they are the left singular vectors of the mode-n matricization of X.

Contents

Using nvecs to calculate the leading mode-n vectors

The nvecs command efficiently computes the leading mode-n vectors.

rand('state',0);
+X = sptenrand([4,3,2],6) %<-- A sparse tensor
+
X is a sparse tensor of size 4 x 3 x 2 with 6 nonzeros
+	(1,2,1)    0.8385
+	(2,3,1)    0.5681
+	(3,2,1)    0.3704
+	(3,3,1)    0.7027
+	(4,2,2)    0.5466
+	(4,3,2)    0.4449
+
nvecs(X,1,2) %<-- The 2 leading mode-1 vectors
+
+ans =
+
+    0.5810    0.7687
+    0.3761   -0.5451
+    0.7219   -0.3347
+   -0.0000   -0.0000
+
+
nvecs(X,1,3) % <-- The 3 leading mode-1 vectors
+
+ans =
+
+    0.5810    0.7687    0.0000
+    0.3761   -0.5451   -0.0000
+    0.7219   -0.3347   -0.0000
+    0.0000   -0.0000    1.0000
+
+
nvecs(full(X),1,3) %<-- The same thing for a dense tensor
+
+ans =
+
+    0.5810    0.7687    0.0000
+    0.3761   -0.5451   -0.0000
+    0.7219   -0.3347   -0.0000
+   -0.0000   -0.0000    1.0000
+
+
X = ktensor({rand(3,2),rand(3,2),rand(2,2)}) %<-- A random ktensor
+
X is a ktensor of size 3 x 3 x 2
+	X.lambda = [ 1  1 ]
+	X.U{1} = 
+		    0.1365    0.1991
+		    0.0118    0.2987
+		    0.8939    0.6614
+	X.U{2} = 
+		    0.2844    0.9883
+		    0.4692    0.5828
+		    0.0648    0.4235
+	X.U{3} = 
+		    0.5155    0.4329
+		    0.3340    0.2259
+
nvecs(X,2,1) %<-- The 1 leading mode-2 vector
+
+ans =
+
+    0.7147
+    0.6480
+    0.2633
+
+
nvecs(full(X),2,1) %<-- Same thing for a dense tensor
+
+ans =
+
+    0.7147
+    0.6480
+    0.2633
+
+
X = ttensor(tenrand([2,2,2,2]),{rand(3,2),rand(3,2),rand(2,2),rand(2,2)}); %<-- A random ttensor
+
nvecs(X,4,2) %<-- The 2 leading mode-4 vectors
+
+ans =
+
+    0.7401    0.6725
+   -0.6725    0.7401
+
+
nvecs(full(X),4,2) %<-- Same thing for a dense tensor
+
+ans =
+
+    0.7401    0.6725
+   -0.6725    0.7401
+
+
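
As noted at the top of this page, the leading mode-n vectors are the left singular vectors of the mode-n matricization, so nvecs can be cross-checked against an explicit SVD of the unfolding. This is a hedged sketch (it assumes tenmat and double behave as in the matricization examples elsewhere in these docs, and it compares columns only up to sign):

X = tenrand([4 3 2]);                        %<-- Random dense tensor.
U = nvecs(X,1,2);                            %<-- Two leading mode-1 vectors.
[Us,~,~] = svd(double(tenmat(X,1)),'econ');  %<-- Left singular vectors of the mode-1 unfolding.
norm(abs(U) - abs(Us(:,1:2)))                %<-- Should be ~0 when the leading singular values are distinct.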

Using nvecs for the HOSVD

X = tenrand([4 3 2]) %<-- Generate data
+
X is a tensor of size 4 x 3 x 2
+	X(:,:,1) = 
+	    0.0272    0.6831    0.6085
+	    0.3127    0.0928    0.0158
+	    0.0129    0.0353    0.0164
+	    0.3840    0.6124    0.1901
+	X(:,:,2) = 
+	    0.5869    0.7176    0.4418
+	    0.0576    0.6927    0.3533
+	    0.3676    0.0841    0.1536
+	    0.6315    0.4544    0.6756
+
U1 = nvecs(X,1,4); %<-- Mode 1
+U2 = nvecs(X,2,3); %<-- Mode 2
+U3 = nvecs(X,3,2); %<-- Mode 3
+S = ttm(X,{pinv(U1),pinv(U2),pinv(U3)}); %<-- Core
+Y = ttensor(S,{U1,U2,U3}) %<-- HOSVD of X
+
Y is a ttensor of size 4 x 3 x 2
+	Y.core is a tensor of size 4 x 3 x 2
+		Y.core(:,:,1) = 
+	    0.0974   -0.0452   -0.1786
+	   -0.3189    0.0200   -0.0393
+	   -0.0932    0.5059   -0.2194
+	   -0.0911   -0.2314    0.0560
+		Y.core(:,:,2) = 
+	   -0.0282    0.0852    0.0013
+	    0.0848    0.3216   -0.0229
+	   -0.1012   -0.1193    0.0669
+	   -0.0009    0.0684    1.9279
+	Y.U{1} = 
+		    0.0424   -0.6031   -0.4132    0.6809
+		    0.1799   -0.2230    0.8973    0.3358
+		    0.9126    0.3452   -0.1551    0.1548
+		   -0.3647    0.6836    0.0064    0.6321
+	Y.U{2} = 
+		   -0.1521    0.8780    0.4538
+		   -0.5319   -0.4597    0.7111
+		    0.8330   -0.1332    0.5370
+	Y.U{3} = 
+		    0.8409    0.5412
+		   -0.5412    0.8409
+
norm(full(Y) - X) %<-- Reproduces the same result.
+
+ans =
+
+   9.0558e-16
+
+
U1 = nvecs(X,1,2); %<-- Mode 1
+U2 = nvecs(X,2,2); %<-- Mode 2
+U3 = nvecs(X,3,2); %<-- Mode 3
+S = ttm(X,{pinv(U1),pinv(U2),pinv(U3)}); %<-- Core
+Y = ttensor(S,{U1,U2,U3}) %<-- Rank-(2,2,2) HOSVD approximation of X
+
Y is a ttensor of size 4 x 3 x 2
+	Y.core is a tensor of size 2 x 2 x 2
+		Y.core(:,:,1) = 
+	    0.0560   -0.2314
+	   -0.2194    0.5059
+		Y.core(:,:,2) = 
+	    1.9279    0.0684
+	    0.0669   -0.1193
+	Y.U{1} = 
+		    0.6809   -0.4132
+		    0.3358    0.8973
+		    0.1548   -0.1551
+		    0.6321    0.0064
+	Y.U{2} = 
+		    0.4538    0.8780
+		    0.7111   -0.4597
+		    0.5370   -0.1332
+	Y.U{3} = 
+		    0.8409    0.5412
+		   -0.5412    0.8409
+
100*(1-norm(full(Y)-X)/norm(X)) %<-- Percentage explained by approximation
+
+ans =
+
+   74.1571
+
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptenmat_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptenmat_doc.html new file mode 100644 index 0000000..856d917 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptenmat_doc.html @@ -0,0 +1,564 @@ + + + + + Converting sparse tensors to matrices and vice versa

Converting sparse tensors to matrices and vice versa

We show how to convert a sptensor to a matrix stored in coordinate format and with extra information so that it can be converted back to a sptensor.

Contents

Creating a sptenmat (sparse tensor as sparse matrix) object

A sparse tensor can be converted to a sparse matrix. The matrix, however, is not stored as a MATLAB sparse matrix because that format is sometimes inefficient for converted sparse tensors. Instead, the row and column indices are stored explicitly.

First, we create a sparse tensor to be converted.

X = sptenrand([10 10 10 10],10) %<-- Generate some data.
+
X is a sparse tensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	( 1, 5, 2, 3)    0.4712
+	( 1,10, 2,10)    0.1493
+	( 3, 5, 1, 4)    0.1359
+	( 3, 9,10, 8)    0.5325
+	( 4, 6,10, 6)    0.7258
+	( 5, 2, 1, 9)    0.3987
+	( 5, 7, 7, 2)    0.3584
+	( 9, 4, 6, 2)    0.2853
+	( 9, 7, 6, 5)    0.8686
+	(10, 6, 9,10)    0.6264
+

All the same options that are available for tenmat are also available for sptenmat.

A = sptenmat(X,1) %<-- Mode-1 matricization.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,215)	0.471156
+	( 1,920)	0.14931
+	( 3,305)	0.135864
+	( 3,799)	0.532498
+	( 4,596)	0.725789
+	( 5,802)	0.398703
+	( 5,167)	0.358419
+	( 9,154)	0.285279
+	( 9,457)	0.868635
+	(10,986)	0.626413
+
A = sptenmat(X,[2 3]) %<-- More than one mode is mapped to the columns.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 2  3 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  4 ] (modes of tensor corresponding to columns)
+	(15, 21)	0.471156
+	(20, 91)	0.14931
+	( 5, 33)	0.135864
+	(99, 73)	0.532498
+	(96, 54)	0.725789
+	( 2, 85)	0.398703
+	(67, 15)	0.358419
+	(54, 19)	0.285279
+	(57, 49)	0.868635
+	(86,100)	0.626413
+
A = sptenmat(X,[2 3],'t') %<-- Specify column dimensions (transpose).
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 1  4 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3 ] (modes of tensor corresponding to columns)
+	( 21,15)	0.471156
+	( 91,20)	0.14931
+	( 33, 5)	0.135864
+	( 73,99)	0.532498
+	( 54,96)	0.725789
+	( 85, 2)	0.398703
+	( 15,67)	0.358419
+	( 19,54)	0.285279
+	( 49,57)	0.868635
+	(100,86)	0.626413
+
A = sptenmat(X,1:4) %<-- All modes mapped to rows, i.e., vectorize.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 1  2  3  4 ] (modes of tensor corresponding to rows)
+	A.cindices = [  ] (modes of tensor corresponding to columns)
+	(2141,1)	0.471156
+	(9191,1)	0.14931
+	(3043,1)	0.135864
+	(7983,1)	0.532498
+	(5954,1)	0.725789
+	(8015,1)	0.398703
+	(1665,1)	0.358419
+	(1539,1)	0.285279
+	(4569,1)	0.868635
+	(9860,1)	0.626413
+
A = sptenmat(X,2) %<-- By default, columns are ordered as [1 3 4].
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  3  4 ] (modes of tensor corresponding to columns)
+	( 5,211)	0.471156
+	(10,911)	0.14931
+	( 5,303)	0.135864
+	( 9,793)	0.532498
+	( 6,594)	0.725789
+	( 2,805)	0.398703
+	( 7,165)	0.358419
+	( 4,159)	0.285279
+	( 7,459)	0.868635
+	( 6,990)	0.626413
+
A = sptenmat(X,2,[3 1 4]) %<-- Explicit column ordering.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 3  1  4 ] (modes of tensor corresponding to columns)
+	( 5,202)	0.471156
+	(10,902)	0.14931
+	( 5,321)	0.135864
+	( 9,730)	0.532498
+	( 6,540)	0.725789
+	( 2,841)	0.398703
+	( 7,147)	0.358419
+	( 4,186)	0.285279
+	( 7,486)	0.868635
+	( 6,999)	0.626413
+
A = sptenmat(X,2,'fc') %<-- Forward cyclic.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 3  4  1 ] (modes of tensor corresponding to columns)
+	( 5, 22)	0.471156
+	(10, 92)	0.14931
+	( 5,231)	0.135864
+	( 9,280)	0.532498
+	( 6,360)	0.725789
+	( 2,481)	0.398703
+	( 7,417)	0.358419
+	( 4,816)	0.285279
+	( 7,846)	0.868635
+	( 6,999)	0.626413
+
A = sptenmat(X,2,'bc') %<-- Backward cyclic.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	( 5,121)	0.471156
+	(10,191)	0.14931
+	( 5, 33)	0.135864
+	( 9,973)	0.532498
+	( 6,954)	0.725789
+	( 2, 85)	0.398703
+	( 7,615)	0.358419
+	( 4,519)	0.285279
+	( 7,549)	0.868635
+	( 6,900)	0.626413
+

Constituent parts of a sptenmat

A.subs %<-- Subscripts of the nonzeros.
+
+ans =
+
+     5   121
+    10   191
+     5    33
+     9   973
+     6   954
+     2    85
+     7   615
+     4   519
+     7   549
+     6   900
+
+
A.vals %<-- The corresponding nonzero values.
+
+ans =
+
+    0.4712
+    0.1493
+    0.1359
+    0.5325
+    0.7258
+    0.3987
+    0.3584
+    0.2853
+    0.8686
+    0.6264
+
+
A.tsize %<-- Size of the original tensor.
+
+ans =
+
+    10    10    10    10
+
+
A.rdims %<-- Dimensions that were mapped to the rows.
+
+ans =
+
+     2
+
+
A.cdims %<-- Dimensions that were mapped to the columns.
+
+ans =
+
+     1     4     3
+
+

Creating a sptenmat from its constituent parts

B = sptenmat(A.subs,A.vals,A.rdims,A.cdims,A.tsize) %<-- Copies A
+
B is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	B.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	( 2, 85)	0.398703
+	( 4,519)	0.285279
+	( 5, 33)	0.135864
+	( 5,121)	0.471156
+	( 6,900)	0.626413
+	( 6,954)	0.725789
+	( 7,549)	0.868635
+	( 7,615)	0.358419
+	( 9,973)	0.532498
+	(10,191)	0.14931
+
B = sptenmat(double(A),A.rdims,A.cdims,A.tsize) %<-- More efficient to pass a matrix.
+
B is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	B.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	( 5, 33)	0.135864
+	( 2, 85)	0.398703
+	( 5,121)	0.471156
+	(10,191)	0.14931
+	( 4,519)	0.285279
+	( 7,549)	0.868635
+	( 7,615)	0.358419
+	( 6,900)	0.626413
+	( 6,954)	0.725789
+	( 9,973)	0.532498
+

Creating a sptenmat with no nonzeros

A = sptenmat([],[],A.rdims,A.cdims,A.tsize) %<-- An empty sptenmat.
+
A is an all-zero sptenmat from an sptensor of size 10 x 10 x 10 x 10
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+

Creating an empty sptenmat

A = sptenmat %<-- A really empty sptenmat.
+
A is an all-zero sptenmat from an sptensor of size [empty tensor]
+	A.rindices = [  ] (modes of tensor corresponding to rows)
+	A.cindices = [  ] (modes of tensor corresponding to columns)
+

Use double to convert a sptenmat to a MATLAB sparse matrix

X = sptenrand([10 10 10 10],10); %<-- Create a tensor.
+A = sptenmat(X,1) %<-- Convert it to a sptenmat
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	A.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,384)	0.608106
+	( 2,763)	0.175996
+	( 2,499)	0.00202556
+	( 3,111)	0.790224
+	( 3,451)	0.513609
+	( 3,781)	0.213229
+	( 7,852)	0.10345
+	( 7,636)	0.157337
+	( 7,658)	0.407515
+	(10,129)	0.407757
+
B = double(A) %<-- Convert it to a MATLAB sparse matrix
+
+B =
+
+   (3,111)     0.7902
+  (10,129)     0.4078
+   (1,384)     0.6081
+   (3,451)     0.5136
+   (2,499)     0.0020
+   (7,636)     0.1573
+   (7,658)     0.4075
+   (2,763)     0.1760
+   (3,781)     0.2132
+   (7,852)     0.1034
+
+
whos A B %<-- The storage for B (the sparse matrix) is larger than for A.
+
  Name       Size              Bytes  Class       Attributes
+
+  A         10x1000             1184  sptenmat              
+  B         10x1000             8168  double      sparse    
+
+
C = B'; %<-- Transposing reduces the storage (the sparse format stores a pointer per column, and C has far fewer columns).
+whos C
+
  Name         Size            Bytes  Class     Attributes
+
+  C         1000x10              248  double    sparse    
+
+

Use full to convert a sptenmat to a tenmat

B = sptenmat(sptenrand([3 3 3], 3), 1) %<-- Create a sptenmat
+
B is a sptenmat from an sptensor of size 3 x 3 x 3 with 3 nonzeros
+	B.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 2  3 ] (modes of tensor corresponding to columns)
+	(1,7)	0.410904
+	(1,8)	0.399794
+	(3,1)	0.505522
+
C = full(B) %<-- Convert to a tenmat
+
C is a matrix corresponding to a tensor of size 3 x 3 x 3
+	C.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	C.cindices = [ 2  3 ] (modes of tensor corresponding to columns)
+	C.data = 
+		  Columns 1 through 7
+		         0         0         0         0         0         0    0.4109
+		         0         0         0         0         0         0         0
+		    0.5055         0         0         0         0         0         0
+		  Columns 8 through 9
+		    0.3998         0
+		         0         0
+		         0         0
+

Use sptensor to convert a sptenmat to a sptensor

Y = sptensor(A) %<-- Convert a sptenmat to a sptensor
+
Y is a sparse tensor of size 10 x 10 x 10 x 10 with 10 nonzeros
+	( 1,4, 9,4)    0.6081
+	( 2,3, 7,8)    0.1760
+	( 2,9,10,5)    0.0020
+	( 3,1, 2,2)    0.7902
+	( 3,1, 6,5)    0.5136
+	( 3,1, 9,8)    0.2132
+	( 7,2, 6,9)    0.1034
+	( 7,6, 4,7)    0.1573
+	( 7,8, 6,7)    0.4075
+	(10,9, 3,2)    0.4078
+

Use size and tsize for the dimensions of a sptenmat

size(A) %<-- Matrix size
+tsize(A) %<-- Corresponding tensor size
+
+ans =
+
+          10        1000
+
+
+ans =
+
+    10    10    10    10
+
+

Subscripted reference for a sptenmat

This is not supported beyond getting the constituent parts.

Subscripted assignment for a sptenmat

A(1:2,1:2) = ones(2) %<-- Replace part of the matrix.
+
A is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 14 nonzeros
+	A.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,  1)	1
+	( 1,  2)	1
+	( 1,384)	0.608106
+	( 2,  1)	1
+	( 2,  2)	1
+	( 2,499)	0.00202556
+	( 2,763)	0.175996
+	( 3,111)	0.790224
+	( 3,451)	0.513609
+	( 3,781)	0.213229
+	( 7,636)	0.157337
+	( 7,658)	0.407515
+	( 7,852)	0.10345
+	(10,129)	0.407757
+

Use end for the last index

End is not supported.

Basic operations for sptenmat

norm(A) %<-- Norm of the matrix.
+
+ans =
+
+    2.3879
+
+
+A %<-- Calls uplus.
+
ans is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 14 nonzeros
+	ans.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,  1)	1
+	( 1,  2)	1
+	( 1,384)	0.608106
+	( 2,  1)	1
+	( 2,  2)	1
+	( 2,499)	0.00202556
+	( 2,763)	0.175996
+	( 3,111)	0.790224
+	( 3,451)	0.513609
+	( 3,781)	0.213229
+	( 7,636)	0.157337
+	( 7,658)	0.407515
+	( 7,852)	0.10345
+	(10,129)	0.407757
+
-A %<-- Calls uminus.
+
ans is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 14 nonzeros
+	ans.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,  1)	-1
+	( 1,  2)	-1
+	( 1,384)	-0.608106
+	( 2,  1)	-1
+	( 2,  2)	-1
+	( 2,499)	-0.00202556
+	( 2,763)	-0.175996
+	( 3,111)	-0.790224
+	( 3,451)	-0.513609
+	( 3,781)	-0.213229
+	( 7,636)	-0.157337
+	( 7,658)	-0.407515
+	( 7,852)	-0.10345
+	(10,129)	-0.407757
+

Use aatx to efficiently compute A * A' * x for a sptenmat

x = ones(10,1); %<-- Create vector
+aatx(A,x) %<-- Compute A * A' * x
+
+ans =
+
+    4.3698
+    4.0310
+    0.9337
+         0
+         0
+         0
+    0.2015
+         0
+         0
+    0.1663
+
+
double(A) * double(A)' * x %<-- Same as above but less efficient
+
+ans =
+
+    4.3698
+    4.0310
+    0.9337
+         0
+         0
+         0
+    0.2015
+         0
+         0
+    0.1663
+
+

Displaying a sptenmat

Shows the original tensor dimensions, the modes mapped to rows, the modes mapped to columns, and the matrix.

disp(A)
+
ans is a sptenmat from an sptensor of size 10 x 10 x 10 x 10 with 14 nonzeros
+	ans.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	( 1,  1)	1
+	( 1,  2)	1
+	( 1,384)	0.608106
+	( 2,  1)	1
+	( 2,  2)	1
+	( 2,499)	0.00202556
+	( 2,763)	0.175996
+	( 3,111)	0.790224
+	( 3,451)	0.513609
+	( 3,781)	0.213229
+	( 7,636)	0.157337
+	( 7,658)	0.407515
+	( 7,852)	0.10345
+	(10,129)	0.407757
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptensor_doc.html new file mode 100644 index 0000000..1cdb91a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sptensor_doc.html @@ -0,0 +1,756 @@ + + + + + Sparse tensors

Sparse tensors

MATLAB has no native ability to store sparse multidimensional arrays, only sparse matrices. Moreover, the compressed sparse column storage format for MATLAB sparse matrices is not readily adaptable to sparse tensors. Instead, the sptensor class stores the data in coordinate format.

Contents

Creating a sptensor

A sparse tensor can be created by passing in a list of subscripts and values. For example, here we pass in three subscripts and a corresponding list of values. The resulting sparse tensor has three nonzero entries, and its size is determined by the largest subscript in each dimension.

rand('state',0); %<-- Setup for the script
+subs = [1,1,1;1,2,1;3,4,2]; %<-- Subscripts of the nonzeros.
+vals = [1; 2; 3]; %<-- The values of the nonzeros.
+X = sptensor(subs,vals) %<-- Create a sparse tensor with 3 nonzeros.
+
X is a sparse tensor of size 3 x 4 x 2 with 3 nonzeros
+	(1,1,1)     1
+	(1,2,1)     2
+	(3,4,2)     3
+
X = sptensor(subs,vals,[3 5 2]) %<-- Or, specify the size explicitly.
+
X is a sparse tensor of size 3 x 5 x 2 with 3 nonzeros
+	(1,1,1)     1
+	(1,2,1)     2
+	(3,4,2)     3
+

Values corresponding to repeated subscripts are summed. Also note that we can use a scalar as the second argument.

subs = [1 1 1; 1 1 3; 2 2 2; 4 4 4; 1 1 1; 1 1 1]; %<-- (1,1,1) is repeated.
+X = sptensor(subs,2) %<-- Equivalent to X = sptensor(subs,2*ones(6,1)).
+
X is a sparse tensor of size 4 x 4 x 4 with 4 nonzeros
+	(1,1,1)     6
+	(1,1,3)     2
+	(2,2,2)     2
+	(4,4,4)     2
+

Specifying the accumulation method for the constructor

By default, values corresponding to repeated elements are summed. However, it is possible to specify other actions to be taken.

X = sptensor(subs,2*ones(6,1),[4 4 4],@max) %<-- Maximum element.
+
X is a sparse tensor of size 4 x 4 x 4 with 4 nonzeros
+	(1,1,1)     2
+	(1,1,3)     2
+	(2,2,2)     2
+	(4,4,4)     2
+
myfun = @(x) sum(x) / 3; %<-- Total sum divided by three.
+X = sptensor(subs,2*ones(6,1),[4 4 4],myfun) %<-- Custom accumulation function.
+
X is a sparse tensor of size 4 x 4 x 4 with 4 nonzeros
+	(1,1,1)    2.0000
+	(1,1,3)    0.6667
+	(2,2,2)    0.6667
+	(4,4,4)    0.6667
+

Creating a one-dimensional sptensor.

X = sptensor([1;3;5],1,10) %<-- Same as X = sptensor([1;3;5],[1;1;1],10).
+
X is a sparse tensor of size 10 with 3 nonzeros
+	(1)     1
+	(3)     1
+	(5)     1
+
X = sptenrand(50,5) %<-- A random, sparse, order-1 tensor with 5 nonzeros.
+
X is a sparse tensor of size 50 with 5 nonzeros
+	(12)    0.7621
+	(25)    0.4565
+	(31)    0.0185
+	(45)    0.8214
+	(48)    0.4447
+

Creating an all-zero sptensor

X = sptensor([],[],[10 10 10]) %<-- Creates an all-zero tensor.
+
X is an all-zero sparse tensor of size 10 x 10 x 10
+
X = sptensor([10 10 10]) %<-- Same as above.
+
X is an all-zero sparse tensor of size 10 x 10 x 10
+

Constituent parts of a sptensor

X = sptenrand([40 30 20],5); %<-- Create data.
+X.subs %<-- Subscripts of nonzeros.
+
+ans =
+
+     8    27     3
+    25    13     2
+    30    13     1
+    32    29     8
+    37    28    17
+
+
X.vals %<-- Corresponding nonzero values.
+
+ans =
+
+    0.2028
+    0.1987
+    0.6038
+    0.2722
+    0.1988
+
+
X.size %<-- The size.
+
+ans =
+
+    40    30    20
+
+

Creating a sparse tensor from its constituent parts

Y = sptensor(X.subs,X.vals,X.size) %<-- Copies X.
+
Y is a sparse tensor of size 40 x 30 x 20 with 5 nonzeros
+	( 8,27, 3)    0.2028
+	(25,13, 2)    0.1987
+	(30,13, 1)    0.6038
+	(32,29, 8)    0.2722
+	(37,28,17)    0.1988
+

Creating an empty sptensor

An empty constructor exists, primarily to support loading previously saved data.

Y = sptensor %<-- Create an empty sptensor.
+
Y is an all-zero sparse tensor of size [empty tensor]
+

Use sptenrand to create a random sptensor

X = sptenrand([10 10 10],0.01) %<-- Create a tensor with 1% nonzeroes.
+
X is a sparse tensor of size 10 x 10 x 10 with 10 nonzeros
+	( 1,9,2)    0.4966
+	( 3,4,9)    0.8998
+	( 5,6,7)    0.8216
+	( 5,7,4)    0.6449
+	( 5,9,2)    0.8180
+	( 6,5,9)    0.6602
+	( 7,2,6)    0.3420
+	( 8,1,7)    0.2897
+	( 9,8,4)    0.3412
+	(10,4,6)    0.5341
+

It is also possible to specify the precise number of nonzeros rather than a percentage.

X = sptenrand([10 10 10],10) %<-- Create a tensor with 10 nonzeros.
+
X is a sparse tensor of size 10 x 10 x 10 with 10 nonzeros
+	(4, 2, 3)    0.5828
+	(4,10, 1)    0.4235
+	(5, 3, 5)    0.5155
+	(6, 3, 3)    0.3340
+	(6, 9, 2)    0.4329
+	(7, 8,10)    0.2259
+	(7, 9, 1)    0.5798
+	(8, 8, 2)    0.7604
+	(8,10, 7)    0.5298
+	(9, 6, 9)    0.6405
+

Use squeeze to remove singleton dimensions from a sptensor

Y = sptensor([1 1 1; 2 1 1], 1, [2 1 1]) %<-- Create a sparse tensor.
+squeeze(Y) %<-- Remove singleton dimensions.
+
Y is a sparse tensor of size 2 x 1 x 1 with 2 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+ans is a sparse tensor of size 2 with 2 nonzeros
+	(1)     1
+	(2)     1
+

Use full or tensor to convert a sptensor to a (dense) tensor

X = sptensor([1 1 1; 2 2 2], [1; 1]); %<-- Create a sparse tensor.
+Y = full(X) %<-- Convert it to a (dense) tensor.
+
Y is a tensor of size 2 x 2 x 2
+	Y(:,:,1) = 
+	     1     0
+	     0     0
+	Y(:,:,2) = 
+	     0     0
+	     0     1
+
Y = tensor(X) %<-- Same as above.
+
Y is a tensor of size 2 x 2 x 2
+	Y(:,:,1) = 
+	     1     0
+	     0     0
+	Y(:,:,2) = 
+	     0     0
+	     0     1
+

Use sptensor to convert a (dense) tensor to a sptensor

Z = sptensor(Y) %<-- Convert a tensor to a sptensor.
+
Z is a sparse tensor of size 2 x 2 x 2 with 2 nonzeros
+	(1,1,1)     1
+	(2,2,2)     1
+

Use double to convert a sptensor to a (dense) multidimensional array

Y = double(X) %<-- Creates a MATLAB array.
+
+Y(:,:,1) =
+
+     1     0
+     0     0
+
+
+Y(:,:,2) =
+
+     0     0
+     0     1
+
+

Use find to extract nonzeros from a tensor and then create a sptensor

The find command can be used to extract specific elements and then convert those into a sptensor.

X = tensor(rand(5,4,2),[5 4 2]) %<-- Create a tensor.
+S = find(X > 0.9) %<-- Extract subscripts of values greater than 0.9.
+V = X(S) %<-- Extract the corresponding values.
+Y = sptensor(S,V,[5 4 2]) %<-- Create a new tensor.
+
X is a tensor of size 5 x 4 x 2
+	X(:,:,1) = 
+	    0.2091    0.5678    0.4154    0.9708
+	    0.3798    0.7942    0.3050    0.9901
+	    0.7833    0.0592    0.8744    0.7889
+	    0.6808    0.6029    0.0150    0.4387
+	    0.4611    0.0503    0.7680    0.4983
+	X(:,:,2) = 
+	    0.2140    0.4120    0.6833    0.2071
+	    0.6435    0.7446    0.2126    0.6072
+	    0.3200    0.2679    0.8392    0.6299
+	    0.9601    0.4399    0.6288    0.3705
+	    0.7266    0.9334    0.1338    0.5751
+
+S =
+
+     1     4     1
+     2     4     1
+     4     1     2
+     5     2     2
+
+
+V =
+
+    0.9708
+    0.9901
+    0.9601
+    0.9334
+
+Y is a sparse tensor of size 5 x 4 x 2 with 4 nonzeros
+	(1,4,1)    0.9708
+	(2,4,1)    0.9901
+	(4,1,2)    0.9601
+	(5,2,2)    0.9334
+

Use ndims and size to get the size of a sptensor

ndims(Y) %<-- Number of dimensions or modes.
+
+ans =
+
+     3
+
+
size(Y) %<-- Size of Y.
+
+ans =
+
+     5     4     2
+
+
size(Y,3) %<-- Size of mode 3 of Y.
+
+ans =
+
+     2
+
+

Use nnz to get the number of nonzeros of a sptensor

nnz(Y) %<-- Number of nonzeros in Y.
+
+ans =
+
+     4
+
+

Subscripted reference for a sptensor

X = sptensor([4,4,4;2,2,1;2,3,2],[3;5;1],[4 4 4]) %<-- Create a sptensor.
+
X is a sparse tensor of size 4 x 4 x 4 with 3 nonzeros
+	(2,2,1)     5
+	(2,3,2)     1
+	(4,4,4)     3
+
X(1,2,1) %<-- Extract the (1,2,1) element, which is zero.
+
+ans =
+
+     0
+
+
X(4,4,4) %<-- Extract the (4,4,4) element, which is nonzero.
+
+ans =
+
+     3
+
+
X(1:2,2:4,:) %<-- Extract a 2 x 3 x 4 subtensor.
+
ans is a sparse tensor of size 2 x 3 x 4 with 2 nonzeros
+	(2,1,1)     5
+	(2,2,2)     1
+
X([1 1 1; 2 2 1]) %<-- Extract elements by subscript.
+
+ans =
+
+     0
+     5
+
+
X([1;6]) %<-- Same as above but with linear indices.
+
+ans =
+
+     0
+     5
+
+

As with a tensor, subscripted reference may be ambiguous for one-dimensional tensors.

X = sptensor([1;3;5],1,7) %<-- Create a sparse tensor.
+
X is a sparse tensor of size 7 with 3 nonzeros
+	(1)     1
+	(3)     1
+	(5)     1
+
X(3) %<-- Fully specified, single elements are always returned as scalars.
+
+ans =
+
+     1
+
+
X([3;6]) %<-- Returns a subtensor.
+
ans is a sparse tensor of size 2 with 1 nonzeros
+	(1)     1
+
X([3;6],'extract') %<-- Same as above *but* returns an array.
+
+ans =
+
+     1
+     0
+
+

Subscripted assignment for a sptensor

X = sptensor([30 40 20]) %<-- Create an empty 30 x 40 x 20 sptensor.
+
X is an all-zero sparse tensor of size 30 x 40 x 20
+
X(30,40,20) = 7 %<-- Assign a single element.
+
X is a sparse tensor of size 30 x 40 x 20 with 1 nonzeros
+	(30,40,20)     7
+
X([1,1,1;2,2,2]) = [1;1] %<-- Assign a list of elements.
+
X is a sparse tensor of size 30 x 40 x 20 with 3 nonzeros
+	(30,40,20)     7
+	( 1, 1, 1)     1
+	( 2, 2, 2)     1
+
X(11:20,11:20,11:20) = sptenrand([10,10,10],10) %<-- Assign a subtensor.
+
X is a sparse tensor of size 30 x 40 x 20 with 13 nonzeros
+	(30,40,20)    7.0000
+	( 1, 1, 1)    1.0000
+	( 2, 2, 2)    1.0000
+	(12,13,15)    0.9342
+	(13,12,11)    0.2644
+	(13,12,16)    0.1603
+	(13,17,14)    0.8729
+	(15,13,14)    0.2379
+	(18,11,14)    0.6458
+	(19,11,14)    0.9669
+	(19,12,15)    0.6649
+	(19,19,12)    0.8704
+	(20,20,19)    0.0099
+
X(31,41,21) = 4 %<-- Grows the size of the sptensor.
+
X is a sparse tensor of size 31 x 41 x 21 with 14 nonzeros
+	(30,40,20)    7.0000
+	( 1, 1, 1)    1.0000
+	( 2, 2, 2)    1.0000
+	(12,13,15)    0.9342
+	(13,12,11)    0.2644
+	(13,12,16)    0.1603
+	(13,17,14)    0.8729
+	(15,13,14)    0.2379
+	(18,11,14)    0.6458
+	(19,11,14)    0.9669
+	(19,12,15)    0.6649
+	(19,19,12)    0.8704
+	(20,20,19)    0.0099
+	(31,41,21)    4.0000
+
X(111:120,111:120,111:120) = sptenrand([10,10,10],10) %<-- Grow more.
+
X is a sparse tensor of size 120 x 120 x 120 with 24 nonzeros
+	( 30, 40, 20)    7.0000
+	(  1,  1,  1)    1.0000
+	(  2,  2,  2)    1.0000
+	( 12, 13, 15)    0.9342
+	( 13, 12, 11)    0.2644
+	( 13, 12, 16)    0.1603
+	( 13, 17, 14)    0.8729
+	( 15, 13, 14)    0.2379
+	( 18, 11, 14)    0.6458
+	( 19, 11, 14)    0.9669
+	( 19, 12, 15)    0.6649
+	( 19, 19, 12)    0.8704
+	( 20, 20, 19)    0.0099
+	( 31, 41, 21)    4.0000
+	(112,111,118)    0.3759
+	(112,115,112)    0.0099
+	(112,115,113)    0.4199
+	(112,120,117)    0.7537
+	(114,115,115)    0.7939
+	(115,115,117)    0.9200
+	(117,115,116)    0.8447
+	(118,115,120)    0.3678
+	(119,119,111)    0.6208
+	(119,119,117)    0.7313
+

Use end as the last index.

X(end-10:end,end-10:end,end-5:end)  %<-- Same as X(108:118,110:120,115:120)
+
ans is a sparse tensor of size 11 x 11 x 6 with 7 nonzeros
+	( 3, 2,4)    0.3759
+	( 3,11,3)    0.7537
+	( 5, 6,1)    0.7939
+	( 6, 6,3)    0.9200
+	( 8, 6,2)    0.8447
+	( 9, 6,6)    0.3678
+	(10,10,3)    0.7313
+

Use elemfun to manipulate the nonzeros of a sptensor

The function elemfun is similar to spfun for sparse matrices.

X = sptenrand([10,10,10],3) %<-- Create some data.
+
X is a sparse tensor of size 10 x 10 x 10 with 3 nonzeros
+	( 2,7,10)    0.3919
+	( 6,6, 7)    0.6273
+	(10,3, 4)    0.6991
+
Z = elemfun(X, @sqrt) %<-- Square root of every nonzero.
+
Z is a sparse tensor of size 10 x 10 x 10 with 3 nonzeros
+	( 2,7,10)    0.6260
+	( 6,6, 7)    0.7920
+	(10,3, 4)    0.8361
+
Z = elemfun(X, @(x) x+1) %<-- Use a custom function.
+
Z is a sparse tensor of size 10 x 10 x 10 with 3 nonzeros
+	( 2,7,10)    1.3919
+	( 6,6, 7)    1.6273
+	(10,3, 4)    1.6991
+
Z = elemfun(X, @(x) x~=0) %<-- Set every nonzero to one.
+
Z is a sparse tensor of size 10 x 10 x 10 with 3 nonzeros
+	( 2,7,10)   1
+	( 6,6, 7)   1
+	(10,3, 4)   1
+
Z = ones(X) %<-- An easier way to change every nonzero to one.
+
Z is a sparse tensor of size 10 x 10 x 10 with 3 nonzeros
+	( 2,7,10)     1
+	( 6,6, 7)     1
+	(10,3, 4)     1
+

Basic operations (plus, minus, times, etc.) on a sptensor

A = sptensor(tensor(floor(5*rand(2,2,2)))) %<-- Create data.
+B = sptensor(tensor(floor(5*rand(2,2,2)))) %<-- Create more data.
+
A is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)     1
+	(2,1,1)     2
+	(1,2,1)     3
+	(2,2,1)     4
+	(1,1,2)     1
+	(2,1,2)     2
+	(1,2,2)     2
+	(2,2,2)     2
+B is a sparse tensor of size 2 x 2 x 2 with 7 nonzeros
+	(1,1,1)     3
+	(2,1,1)     2
+	(1,2,1)     3
+	(2,2,1)     2
+	(2,1,2)     3
+	(1,2,2)     4
+	(2,2,2)     4
+
+A %<-- Calls uplus.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)     1
+	(2,1,1)     2
+	(1,2,1)     3
+	(2,2,1)     4
+	(1,1,2)     1
+	(2,1,2)     2
+	(1,2,2)     2
+	(2,2,2)     2
+
-A %<-- Calls uminus.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)    -1
+	(2,1,1)    -2
+	(1,2,1)    -3
+	(2,2,1)    -4
+	(1,1,2)    -1
+	(2,1,2)    -2
+	(1,2,2)    -2
+	(2,2,2)    -2
+
A+B %<-- Calls plus.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)     4
+	(1,1,2)     1
+	(1,2,1)     6
+	(1,2,2)     6
+	(2,1,1)     4
+	(2,1,2)     5
+	(2,2,1)     6
+	(2,2,2)     6
+
A-B %<-- Calls minus.
+
ans is a sparse tensor of size 2 x 2 x 2 with 6 nonzeros
+	(1,1,1)    -2
+	(1,1,2)     1
+	(1,2,2)    -2
+	(2,1,2)    -1
+	(2,2,1)     2
+	(2,2,2)    -2
+
A.*B %<-- Calls times.
+
ans is a sparse tensor of size 2 x 2 x 2 with 7 nonzeros
+	(1,1,1)     3
+	(1,2,1)     9
+	(1,2,2)     8
+	(2,1,1)     4
+	(2,1,2)     6
+	(2,2,1)     8
+	(2,2,2)     8
+
5*A %<-- Calls mtimes.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)     5
+	(1,1,2)     5
+	(1,2,1)    15
+	(1,2,2)    10
+	(2,1,1)    10
+	(2,1,2)    10
+	(2,2,1)    20
+	(2,2,2)    10
+
A./2 %<-- Calls rdivide.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)    0.5000
+	(1,1,2)    0.5000
+	(1,2,1)    1.5000
+	(1,2,2)    1.0000
+	(2,1,1)    1.0000
+	(2,1,2)    1.0000
+	(2,2,1)    2.0000
+	(2,2,2)    1.0000
+

Elementwise division by another sptensor is allowed, but the sparsity pattern of the denominator should be a superset of that of the numerator.

A./(A+B) %<-- Calls rdivide.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)    0.2500
+	(1,1,2)    1.0000
+	(1,2,1)    0.5000
+	(1,2,2)    0.3333
+	(2,1,1)    0.5000
+	(2,1,2)    0.4000
+	(2,2,1)    0.6667
+	(2,2,2)    0.3333
+
A./B %<-- Uh-oh. Getting a divide by zero.
+
ans is a sparse tensor of size 2 x 2 x 2 with 8 nonzeros
+	(1,1,1)    0.3333
+	(1,1,2)       Inf
+	(1,2,1)    1.0000
+	(1,2,2)    0.5000
+	(2,1,1)    1.0000
+	(2,1,2)    0.6667
+	(2,2,1)    2.0000
+	(2,2,2)    0.5000
+

Use permute to reorder the modes of a sptensor

A = sptenrand([30 40 20 1], 5) %<-- Create data.
+
A is a sparse tensor of size 30 x 40 x 20 x 1 with 5 nonzeros
+	( 4,33, 8,1)    0.7505
+	(11,40, 6,1)    0.7400
+	(15,23, 2,1)    0.4319
+	(20,27,11,1)    0.6343
+	(22, 6,20,1)    0.8030
+
permute(A,[4 3 2 1]) %<-- Reorder the modes.
+
ans is a sparse tensor of size 1 x 20 x 40 x 30 with 5 nonzeros
+	(1, 8,33, 4)    0.7505
+	(1, 6,40,11)    0.7400
+	(1, 2,23,15)    0.4319
+	(1,11,27,20)    0.6343
+	(1,20, 6,22)    0.8030
+

Permute works correctly for a 1-dimensional sptensor.

X = sptenrand(40,4) %<-- Create data.
+
X is a sparse tensor of size 40 with 4 nonzeros
+	( 4)    0.2536
+	(25)    0.8735
+	(37)    0.5134
+	(38)    0.7327
+
permute(X,1) %<-- Permute.
+
ans is a sparse tensor of size 40 with 4 nonzeros
+	( 4)    0.2536
+	(25)    0.8735
+	(37)    0.5134
+	(38)    0.7327
+

Displaying a tensor

The function disp handles small and large elements appropriately, as well as aligning the indices.

X = sptensor([1 1 1]); %<-- Create an empty sptensor.
+X(1,1,1) = rand(1)*1e15; %<-- Insert a very big element.
+X(4,3,2) = rand(1)*1e-15; %<-- Insert a very small element.
+X(2,2,2) = rand(1); %<-- Insert a 'normal' element.
+disp(X)
+
ans is a sparse tensor of size 4 x 3 x 2 with 3 nonzeros
+   1.0e+14 *
+	(1,1,1)    4.2223
+	(4,3,2)    0.0000
+	(2,2,2)    0.0000
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sshopm_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sshopm_doc.html new file mode 100644 index 0000000..a0aecb6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sshopm_doc.html @@ -0,0 +1,123 @@ + + + + + Shifted symmetric higher-order power method

Shifted symmetric higher-order power method

Contents

Data tensor

From Example 1 in E. Kofidis and P. A. Regalia, On the best rank-1 approximation of higher-order supersymmetric tensors, SIAM J. Matrix Anal. Appl., 23 (2002), pp. 863–884, DOI: 10.1137/S0895479801387413.

A = tenzeros([3 3 3 3]);
+A(perms([1 1 1 1])) = 0.2883;
+A(perms([1 1 1 2])) = -0.0031;
+A(perms([1 1 1 3])) = 0.1973;
+A(perms([1 1 2 2])) = -0.2485;
+A(perms([1 1 2 3])) = -0.2939;
+A(perms([1 1 3 3])) = 0.3847;
+A(perms([1 2 2 2])) = 0.2972;
+A(perms([1 2 2 3])) = 0.1862;
+A(perms([1 2 3 3])) = 0.0919;
+A(perms([1 3 3 3])) = -0.3619;
+A(perms([2 2 2 2])) = 0.1241;
+A(perms([2 2 2 3])) = -0.3420;
+A(perms([2 2 3 3])) = 0.2127;
+A(perms([2 3 3 3])) = 0.2727;
+A(perms([3 3 3 3])) = -0.3054;
+

Call eig_sshopm with no shift

The method with no shift will fail to converge.

[lambda, x, flag, it] = eig_sshopm(A, 'MaxIts', 100);
+
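
Since the outputs are suppressed above, a simple way to see what happened is to print the returned values (a sketch; the precise meaning of flag is documented in eig_sshopm itself).

fprintf('lambda = %g, flag = %d, iterations = %d\n', lambda, flag, it);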

Call eig_sshopm with shift

[lambda, x, flag, it] = eig_sshopm(A, 'MaxIts', 100, 'Shift', 1);
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sumtensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sumtensor_doc.html new file mode 100644 index 0000000..1419729 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/sumtensor_doc.html @@ -0,0 +1,439 @@ + + + + + Sums of structured tensors

Sums of structured tensors

When certain operations are performed on a tensor which is formed as a sum of tensors, it can be beneficial to avoid explicitly forming the sum. For example, if a tensor is formed as a sum of a low rank tensor and a sparse tensor, the structure of the summands can make storage, decomposition and operations with other tensors significantly more efficient. The tensor toolbox supports a sumtensor object designed to exploit this structure. Here we explain the basics of defining and using sumtensors.

Contents

Creating sumtensors

A sumtensor T can only be declared as a sum of same-sized tensors T1, T2,...,TN. The summand tensors are stored in a cell array, which defines the "parts" of the sumtensor. The parts of a sumtensor can be (generic) tensors (as tensor), sparse tensors (as sptensor), Kruskal tensors (as ktensor), or Tucker tensors (as ttensor). An example of the use of the sumtensor constructor follows.

T1 = tensor(ones(3,3,3)); %<--A tensor
+T2 = sptensor([1 1 1; 2 2 2; 3 3 2; 2 1 1], 1, [3,3,3]); %<--A sparse tensor
+
+T = sumtensor(T1,T2)
+
T is a sumtensor of size 3 x 3 x 3 with 2 parts
+T.part{1} is a tensor of size 3 x 3 x 3
+	T.part{1}(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	T.part{1}(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	T.part{1}(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+T.part{2} is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,2,2)     1
+	(3,3,2)     1
+

A Large-Scale Example

For large-scale problems, the sumtensor class may make the difference as to whether or not a tensor can be stored in memory. Consider the following example, where $\mathcal{T}$ is of size $500 \times 500 \times 500$ and is formed as the sum of a ktensor and an sptensor.

X1 = rand(500, 3); %Generating some factor matrices
+X2 = rand(500, 3);
+X3 = rand(500, 3);
+K = ktensor([1; 1; 1], X1, X2, X3);
+S = sptenrand([500, 500, 500], 1e-100);
+
+ST = sumtensor(K,S); %<-- Declare the sumtensor
+TT = full(ST); %<-- Form the sum of the tensors explicitly
+
+whos ST TT %<--Output the storage information for these variables
+
  Name        Size                      Bytes  Class        Attributes
+
+  ST        500x500x500                 37696  sumtensor              
+  TT        500x500x500            1000000376  tensor                 
+
+

The difference in memory between the full tensor and the sumtensor is more than four orders of magnitude! Hence we prefer to use the sumtensor object whenever possible.

Further examples of the sumtensor constructor

The sumtensor constructor can declare an empty sumtensor object, having no parts, as follows

P = sumtensor()
+
P is an empty sumtensor
+

sumtensor also supports use as a copy constructor.

S = sumtensor(P)
+
S is an empty sumtensor
+

Use ndims and size for the dimensions of a sumtensor

For a given sumtensor, ndims returns the number of modes of a sumtensor. Similarly, size returns a size array of the sumtensor.

ndims(T)
+size(T)
+
+ans =
+
+     3
+
+
+ans =
+
+     3     3     3
+
+

Use full to convert a sumtensor to a "generic" tensor

The full function can be used to convert a sumtensor to a generic tensor. Note that for large-scale tensors, this can require a large amount of memory because each part of the sumtensor will be expanded and then summed.

full(T)
+
ans is a tensor of size 3 x 3 x 3
+	ans(:,:,1) = 
+	     2     1     1
+	     2     1     1
+	     1     1     1
+	ans(:,:,2) = 
+	     1     1     1
+	     1     2     1
+	     1     1     2
+	ans(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+

Use double to convert a sumtensor to a multidimensional array

The double function can be used to convert a sumtensor to a multidimensional array. Similarly to the full expansion, this can use a prohibitive amount of memory for large-scale problems.

double(T)
+
+ans(:,:,1) =
+
+     2     1     1
+     2     1     1
+     1     1     1
+
+
+ans(:,:,2) =
+
+     1     1     1
+     1     2     1
+     1     1     2
+
+
+ans(:,:,3) =
+
+     1     1     1
+     1     1     1
+     1     1     1
+
+

Matricized Khatri-Rao product of a sumtensor

The mttkrp function computes the product of the mode-n matricization of a tensor with the Khatri-Rao product of the matrices in all other modes. The required arguments are: a sumtensor X, a cell array of matrices U={U1,...,Um}, and a mode n. The cell array must consist of m matrices, where m is the number of modes in X. These matrices must all have the same number of columns, and the number of rows of matrix Ui should match the dimension of the tensor X in mode i. On a sumtensor, the operation distributes over the summands. For details of this specific computation, see the mttkrp documentation for a generic tensor. An example of the use of mttkrp follows.

U={eye(3), ones(3,3), randn(3,3)}; %<--The cell array of matrices
+mttkrp(T,U,2)
+
+ans =
+
+   -1.8319   -2.6403    1.1656
+   -0.7620   -0.2925    1.1656
+   -0.7620   -1.3297    1.2842
+
+

Use innerprod to compute the inner product of a sumtensor

The innerprod function computes the inner product of a sumtensor T and any type of tensor. The operation is performed by distributing across each of the sumtensor's parts.

S = sptensor([1 1 1; 2 1 3; 3 2 2; 2 1 1], 1, [3,3,3]);
+innerprod(T,S)
+
+ans =
+
+     6
+
+
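
Since the inner product is linear, distributing it over the parts of T gives the same value. A minimal sketch:

innerprod(T.part{1},S) + innerprod(T.part{2},S) %<-- Should also return 6.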

Use norm for compatibility with the other types of tensors.

The norm function returns 0 and a warning when called on a sumtensor. The procedure of computing the Frobenius norm of a sumtensor does not distribute across its parts, and hence is not supported for sumtensors. This default behavior is provided in order to ensure compatibility of the sumtensor class with existing decomposition routines.

norm(T)
+
Warning: The NORM function is not supported by SUMTENSOR. Returning zero. 
+
+ans =
+
+     0
+
+

In order to avoid this default behavior and obtain the Frobenius norm of a sumtensor, it can first be converted to a tensor using full.

norm(full(T))
+
+ans =
+
+    6.2450
+
+

Use cp_als to find a CP decomposition of a sumtensor

One of the primary motivations for defining the sumtensor class is for efficient decomposition. In particular, when trying to find a CP decomposition of a tensor using alternating least squares, the subproblems can be efficiently created and solved using mttkrp and innerprod. Both of these operations can be performed more efficiently by exploiting extra structure in the tensors which form the sum, so the performance of cp_als is also improved. Consider the following example, where cp_als is run on a sumtensor.

cp_als(T, 2)
+
Warning: The NORM function is not supported by SUMTENSOR. Returning zero. 
+
+CP_ALS:
+ Iter  1: f = -3.742762e+01 f-delta = 3.7e+01
+ Iter  2: f = -3.774831e+01 f-delta = 3.2e-01
+ Iter  3: f = -3.778260e+01 f-delta = 3.4e-02
+ Iter  4: f = -3.779716e+01 f-delta = 1.5e-02
+ Iter  5: f = -3.780460e+01 f-delta = 7.4e-03
+ Iter  6: f = -3.780837e+01 f-delta = 3.8e-03
+ Iter  7: f = -3.781066e+01 f-delta = 2.3e-03
+ Iter  8: f = -3.781237e+01 f-delta = 1.7e-03
+ Iter  9: f = -3.781382e+01 f-delta = 1.5e-03
+ Iter 10: f = -3.781512e+01 f-delta = 1.3e-03
+ Iter 11: f = -3.781631e+01 f-delta = 1.2e-03
+ Iter 12: f = -3.781740e+01 f-delta = 1.1e-03
+ Iter 13: f = -3.781841e+01 f-delta = 1.0e-03
+ Iter 14: f = -3.781933e+01 f-delta = 9.3e-04
+ Iter 15: f = -3.782019e+01 f-delta = 8.5e-04
+ Iter 16: f = -3.782097e+01 f-delta = 7.9e-04
+ Iter 17: f = -3.782170e+01 f-delta = 7.2e-04
+ Iter 18: f = -3.782236e+01 f-delta = 6.6e-04
+ Iter 19: f = -3.782297e+01 f-delta = 6.1e-04
+ Iter 20: f = -3.782353e+01 f-delta = 5.6e-04
+ Iter 21: f = -3.782405e+01 f-delta = 5.1e-04
+ Iter 22: f = -3.782452e+01 f-delta = 4.7e-04
+ Iter 23: f = -3.782495e+01 f-delta = 4.3e-04
+ Iter 24: f = -3.782535e+01 f-delta = 3.9e-04
+ Iter 25: f = -3.782571e+01 f-delta = 3.6e-04
+ Iter 26: f = -3.782604e+01 f-delta = 3.3e-04
+ Iter 27: f = -3.782634e+01 f-delta = 3.0e-04
+ Iter 28: f = -3.782661e+01 f-delta = 2.7e-04
+ Iter 29: f = -3.782686e+01 f-delta = 2.5e-04
+ Iter 30: f = -3.782709e+01 f-delta = 2.3e-04
+ Iter 31: f = -3.782730e+01 f-delta = 2.1e-04
+ Iter 32: f = -3.782749e+01 f-delta = 1.9e-04
+ Iter 33: f = -3.782766e+01 f-delta = 1.7e-04
+ Iter 34: f = -3.782782e+01 f-delta = 1.6e-04
+ Iter 35: f = -3.782796e+01 f-delta = 1.4e-04
+ Iter 36: f = -3.782809e+01 f-delta = 1.3e-04
+ Iter 37: f = -3.782820e+01 f-delta = 1.2e-04
+ Iter 38: f = -3.782831e+01 f-delta = 1.1e-04
+ Iter 39: f = -3.782841e+01 f-delta = 9.6e-05
+ Final f = -3.782841e+01 
+ans is a ktensor of size 3 x 3 x 3
+	ans.lambda = [ 4.622      2.6197 ]
+	ans.U{1} = 
+		    0.4651    0.7251
+		    0.5891    0.6429
+		    0.6608    0.2468
+	ans.U{2} = 
+		    0.3998    0.9451
+		    0.6327    0.2576
+		    0.6632    0.2010
+	ans.U{3} = 
+		    0.4069    0.9333
+		    0.7497    0.1635
+		    0.5218    0.3196
+

It follows that in cases where $\mathcal{T}$ is too large for its full expansion to be stored in memory, we may still be able find a CP decomposition by exploiting the sumtensor structure.

Note that the fit returned by cp_als is not correct for sumtensors, because the norm operation is not supported.
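
If the full tensor fits in memory, the usual relative fit can be recomputed after the fact. A minimal sketch, where M denotes the ktensor returned by cp_als and the fit is the standard relative one (not necessarily the quantity cp_als reports):

M = cp_als(T, 2); %<-- Capture the returned ktensor.
fit = 1 - norm(full(T) - full(M)) / norm(full(T)) %<-- Relative fit computed on the full tensor.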

Basic operations (plus) for sumtensors

Sumtensors can be added to any other type of tensor. The result is a new sumtensor with the tensor appended to the parts of the original sumtensor. Note that the tensor is always appended, regardless of the order of the operands.

T+S %<--S is appended to the parts of T
+S+T %<--S is still the last part of T, despite order
+
ans is a sumtensor of size 3 x 3 x 3 with 3 parts
+ans.part{1} is a tensor of size 3 x 3 x 3
+	ans.part{1}(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans.part{1}(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans.part{1}(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+ans.part{2} is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,2,2)     1
+	(3,3,2)     1
+ans.part{3} is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,1,3)     1
+	(3,2,2)     1
+ans is a sumtensor of size 3 x 3 x 3 with 3 parts
+ans.part{1} is a tensor of size 3 x 3 x 3
+	ans.part{1}(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans.part{1}(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans.part{1}(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+ans.part{2} is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,2,2)     1
+	(3,3,2)     1
+ans.part{3} is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,1,3)     1
+	(3,2,2)     1
+

Subscripted reference for sumtensors

Subscripted reference can be used to return the individual parts of a sumtensor.

T.part{1}
+T.part{2}
+
ans is a tensor of size 3 x 3 x 3
+	ans(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	ans(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+ans is a sparse tensor of size 3 x 3 x 3 with 4 nonzeros
+	(1,1,1)     1
+	(2,1,1)     1
+	(2,2,2)     1
+	(3,3,2)     1
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symktensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symktensor_doc.html new file mode 100644 index 0000000..e92bdae --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symktensor_doc.html @@ -0,0 +1,539 @@ + + + + + Symmetric Kruskal tensors

Symmetric Kruskal tensors

A symmetric Kruskal tensor is a decomposition of a tensor into a sum of vector outer products. The symmetric structure means that each term in the summand is the outer product of a single vector with itself $m$ times, where $m$ is the number of modes of the decomposed tensor. This contrasts with a generic Kruskal tensor, where each summand is an outer product of m different vectors. More concisely, a symmetric Kruskal tensor decomposition of a tensor $\mathcal{A}$ has the following form:

$$\mathcal{A} = \sum_{i=1}^{r} x_{i}^{m}$$

In this notation, a subscript refers to a column index. A superscript refers to the outer product of a single vector with itself $m$ times.

$$x^{m} = \underbrace{x \circ x \circ ... \circ x}_{\mbox{m-times}}.$$

The number of summands in the decomposition, $r$, is referred to as the number of components of the symmetric Kruskal tensor.

An alternative, often equivalent expression for a symmetric Kruskal tensor decomposition specifies a real-valued weight for each of the summands in the outer product. The $r$-vector formed by these weights is referred to as the weight or lambda vector of the symmetric Kruskal decomposition.

$$\mathcal{A} = \sum_{i=1}^{r} \lambda_{i} \; x_{i}^{m}$$

In certain cases the lambda vector is required in order for a symmetric Kruskal decomposition to exist, e.g., when a symmetric Kruskal tensor has an even number of modes and the tensor to be decomposed has a negative element on its main diagonal. In many other cases, the lambda vector is optional and the symmetric Kruskal decomposition can be represented without specifying a lambda vector.

The symktensor class stores symmetric Kruskal tensors, and exploits the extra symmetric structure to perform many calculations more efficiently.

Contents

Declaring a symmetric Kruskal tensor with symktensor

The symktensor format stores the vectors and weights of a symmetric Kruskal tensor decomposition. The vectors in the decomposition are collected as the columns of a matrix X, referred to as the factor matrix. The lambda vector, containing the (often optional) weights, is input into the constructor as a column vector. The lambda vector and factor matrix are collectively referred to as the constituent parts in the declaration of a symktensor. For example, consider the decomposition of a tensor $\mathcal{A}$.

$$\mathcal{A} = \sum_{i=1}^{r} \lambda_{i} \; x_{i}^{m}$$

In the example that follows, we form a symmetric Kruskal decomposition by specifying a factor matrix, lambda vector, and the number of modes of the decomposed tensor. We pass all three arguments to the symktensor constructor. This can be stored as a symmetric Kruskal tensor as follows.

n = 4; %The dimension in each mode of the tensor A
+m = 3; %The number of modes of A
+r = 2; %The rank of the decomposition
+X = reshape(1:n*r,n,r); %The columns of this matrix are the vectors in decomposition
+L = [1; -1]; %the weights (should be a column vector of length r)
+S = symktensor(L, X, m) %Declare a symktensor object
+
S is a symktensor of order 3 and dimension 4
+	S.lambda = [ 1 -1 ]
+	S.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+

A symktensor object can be declared without a weight vector by passing the factor matrix, the number of modes, the rank, and an additional 'nolambda' flag set to true. In this case, the lambda vector is set to a vector of all ones.

S2 = symktensor(X, m, r, true)
+
S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+

A random symktensor object can be declared by passing the constructor two arguments: the rank of the decomposition and a tensor or symtensor (for size). The lambda vector is taken to be all ones, and the factor matrix has elements drawn uniformly from (0,1).

T1 = tensor(n*ones(1,m)); %<-- Declare a tensor for size
+T2 = symtensor(@ones, n,m); %<-- Declare a symtensor for size
+
+S2 = symktensor(r, T1) %<--Declare a random symktensor from tensor for size
+S2 = symktensor(r, T2) %<--Declare a random symktensor from symtensor for size
+
S2 is a symktensor of order 2 and dimension 1
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		    0.1035    0.1838
+S2 is a symktensor of order 4 and dimension 3
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		    0.4516    0.7426
+		    0.1448    0.3310
+		    0.8133    0.9775
+

This method of randomly generating a symktensor is useful when setting an initialization point in symmetric decomposition methods (e.g., cp_sym).

Lastly, a symktensor object can be declared from a vectorized version of the factor matrix and lambda vector, in which the lambda vector is stacked on top of a vectorized version of the factor matrix. The shape of the tensor must also be specified, by either passing a tensor/symtensor or listing the number of modes and the rank of the decomposition explicitly. Additionally, a 'nolambda' option can be added to any of these constructions, in which case the lambda vector should not be stacked onto the factor matrix.

V = [L; X(:)]; %<--Forming the vectorized version
+S2 = symktensor(V, symtensor(@ones,m,n)) %<--size specified from symtensor
+
+S2 = symktensor(X(:), symtensor(@ones,m,n), true) %<--'nolambda' option
+
+S2 = symktensor(V, m, r) %<--size specified from modes and dimension
+
+S2 = symktensor(X(:), m, r, true) %<--size from modes and dimension, 'nolambda' option
+
S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1 -1 ]
+	S2.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1 -1 ]
+	S2.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+

A symmetric Kruskal tensor can also be constructed directly from a generic Kruskal tensor in the ktensor format. If the Kruskal tensor is not symmetric, it is symmetrized by averaging the factor matrices and taking care to get the signs aligned.

K = ktensor(L, X-1, X+2, 2*X);
+S2 = symktensor(K)
+
S2 is a symktensor of order 3 and dimension 4
+	S2.lambda = [ 1  1 ]
+	S2.U = 
+		    1.2220   -6.5523
+		    2.5685   -7.8311
+		    3.9151   -9.1100
+		    5.2616  -10.3889
+

This method of declaring a symktensor is useful in comparing decomposition methods: this constructor allows any decomposition method which generates a ktensor CP model to also generate a symktensor. In this way, decomposition methods which are non-symmetric in nature may easily be applied to symmetric problems.

Use ndims and size for the dimensions of a symktensor

For a given symktensor, ndims returns the number of dimensions (i.e. the number of modes) of the symmetric Kruskal tensor. size returns a size array of the symmetric Kruskal tensor.

%Declaring a symmetric Kruskal tensor
+ndims(S)
+size(S)
+
+ans =
+
+     3
+
+
+ans =
+
+     4     4     4
+
+

Use ncomponents for the rank of symktensor

The function ncomponents returns the number of components of a symktensor object. This is $r$ in the symktensor's definition, the number of outer-product summands in the symmetric Kruskal tensor decomposition.

ncomponents(S)
+
+ans =
+
+     2
+
+

Use full to convert a symktensor to a tensor

The function full converts a symktensor to a tensor.

full(S)
+
ans is a symmetric tensor with 3 modes of dimension 4
+	(1,1,1)  -124
+	(1,1,2)  -148
+	(1,1,3)  -172
+	(1,1,4)  -196
+	(1,2,2)  -176
+	(1,2,3)  -204
+	(1,2,4)  -232
+	(1,3,3)  -236
+	(1,3,4)  -268
+	(1,4,4)  -304
+	(2,2,2)  -208
+	(2,2,3)  -240
+	(2,2,4)  -272
+	(2,3,3)  -276
+	(2,3,4)  -312
+	(2,4,4)  -352
+	(3,3,3)  -316
+	(3,3,4)  -356
+	(3,4,4)  -400
+	(4,4,4)  -448
+

Use double to convert a symktensor to a multi-dimensional array

The function double converts a symktensor to a multi-dimensional array.

double(S)
+
+ans(:,:,1) =
+
+  -124  -148  -172  -196
+  -148  -176  -204  -232
+  -172  -204  -236  -268
+  -196  -232  -268  -304
+
+
+ans(:,:,2) =
+
+  -148  -176  -204  -232
+  -176  -208  -240  -272
+  -204  -240  -276  -312
+  -232  -272  -312  -352
+
+
+ans(:,:,3) =
+
+  -172  -204  -236  -268
+  -204  -240  -276  -312
+  -236  -276  -316  -356
+  -268  -312  -356  -400
+
+
+ans(:,:,4) =
+
+  -196  -232  -268  -304
+  -232  -272  -312  -352
+  -268  -312  -356  -400
+  -304  -352  -400  -448
+
+

Basic operations with symktensors

Symktensors support multiplication by scalars. The result is the symktensor with the weight vector multiplied by the scalar.

4*S
+
ans is a symktensor of order 3 and dimension 4
+	ans.lambda = [ 4 -4 ]
+	ans.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+

Use norm to compute the Frobenius norm of a symktensor

The function norm returns the Frobenius norm of a symktensor.

norm(S)
+
+ans =
+
+   2.1469e+03
+
+

Use normalize to normalize the components of a symktensor.

The function normalize divides each of the columns in the factor matrix by its vector 2-norm. The 2-norm of each column, raised to the power $m$, is then absorbed into the weight of that column.

normalize(S)
+
ans is a symktensor of order 3 and dimension 4
+	ans.lambda = [ 164.31677      2295.2176 ]
+	ans.U = 
+		    0.1826   -0.3790
+		    0.3651   -0.4549
+		    0.5477   -0.5307
+		    0.7303   -0.6065
+
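
As a quick check of the absorbed weights (a sketch using the S defined above, whose order is m = 3), the column 2-norms raised to the third power reproduce the lambda values shown, up to the sign that is moved into the factor column.

nrms = sqrt(sum(S.U.^2,1)); %<-- Column 2-norms of the factor matrix.
abs(S.lambda') .* nrms.^3 %<-- Matches the normalized lambda above.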

By passing an additional $0$ argument to the normalize function, the weight vector is set to $\pm 1$ and the weights are absorbed into the factor matrix.

normalize(S,0)
+
ans is a symktensor of order 3 and dimension 4
+	ans.lambda = [ 1  1 ]
+	ans.U = 
+		    1.0000   -5.0000
+		    2.0000   -6.0000
+		    3.0000   -7.0000
+		    4.0000   -8.0000
+

Use arrange to normalize and sort a symktensor

The function arrange normalizes the components of a symktensor and sorts them according to the weight vector, in descending order.

arrange(S)
+% Additionally, one can pass a permutation array of number of components of
+% S. In this case the components are arranged according to the permutation.
+arrange(S,[2 1])
+
ans is a symktensor of order 3 and dimension 4
+	ans.lambda = [ 2295.2176      164.31677 ]
+	ans.U = 
+		   -0.3790    0.1826
+		   -0.4549    0.3651
+		   -0.5307    0.5477
+		   -0.6065    0.7303
+ans is a symktensor of order 3 and dimension 4
+	ans.lambda = [ -1  1 ]
+	ans.U = 
+		     5     1
+		     6     2
+		     7     3
+		     8     4
+

Computing the score of the match between two symktensors

The function score provides a measure of similarity between two symktensors. Given two symktensors $R1$ and $R2$, we denote by $\lambda_{R1}$ and $\lambda_{R2}$ their respective weight vectors and by $X$ and $Y$ their respective factor matrices. The function score(R1,R2) first normalizes both symktensors. It then attempts to match the symktensor $R1$ to $R2$ and returns the following numeric quantification of their similarity.

$$\frac{1 - ||\lambda_{R1}-\lambda_{R2}||}{\max(\lambda_{R1}, \lambda_{R2})} \prod_{i=1}^{r} X_{i}' Y_{i}$$

In the above formula, $r$ is the number of components of $R1$. $R1$ must have at least as many components as $R2$; any additional components are ignored in the score calculation. Since the formula for score depends on the arrangement of the components of $R1$, score rearranges $R1$ and tries a number of permutations. By default, $R1$ is rearranged by permuting indices greedily to increase the score.

R1 = symktensor([1; -1; 1], reshape(1:9, 3, 3), 3); %Declare some symtensors
+R2 = symktensor([1; -1], reshape(1:6, 3,2), 3);
+
+score(R1, R2) %The score is 1 (perfect match) because the 1st 2 components of R1 match those of R2
+
+ans =
+
+     1
+
+

Calling score on two symktensors converts the symktensors to ktensors and calls the score function for ktensor. See the ktensor/score documentation for more information.

Subscripted reference for symktensors

After defining a symktensor, one can reference its weight vector, factor matrix, or element using the following conventions. Note that elements are queried using multi-dimensional subscript notation, as opposed to linear.

S.lambda %<-- The weight vector
+S.X %<-- The factor matrix
+
+S(1,2,1) %<-- Generate the element of index (1,2,1) from the factorization
+
+ans =
+
+     1
+    -1
+
+
+ans =
+
+     1     5
+     2     6
+     3     7
+     4     8
+
+
+ans =
+
+  -148
+
+
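
The value above can be reproduced directly from the factorization, since entry (i,j,k) equals $\sum_{p} \lambda_{p} X(i,p)X(j,p)X(k,p)$. A quick check using the current S:

sum(S.lambda' .* (S.U(1,:) .* S.U(2,:) .* S.U(1,:))) %<-- Reproduces S(1,2,1) = -148.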

Subscripted assignment for symktensors

Subscripted assignment can be used to change the order, weight vector, or factor matrix of a symktensor. First, we change the weight vector

S.lambda = [1;1]
+
S is a symktensor of order 3 and dimension 4
+	S.lambda = [ 1  1 ]
+	S.U = 
+		     1     5
+		     2     6
+		     3     7
+		     4     8
+

Next, we alter the factor matrix. U can be used instead of X in the notation that follows

S.X = [1 0; 0 1; 1 0; 0 1]
+
S is a symktensor of order 3 and dimension 4
+	S.lambda = [ 1  1 ]
+	S.U = 
+		     1     0
+		     0     1
+		     1     0
+		     0     1
+

Lastly, we alter the order. This changes $m$, in the $m$-way outer product expansion of a symmetric Kruskal tensor.

S.m = 4
+
S is a symktensor of order 4 and dimension 4
+	S.lambda = [ 1  1 ]
+	S.U = 
+		     1     0
+		     0     1
+		     1     0
+		     0     1
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symtensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symtensor_doc.html new file mode 100644 index 0000000..cc11c23 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/symtensor_doc.html @@ -0,0 +1,472 @@ + + + + + Symmetric tensors

Symmetric tensors

A symmetric tensor is a tensor that is invariant under all permutations of its modes. Because many of the elements of a symmetric tensor are guaranteed to be equal, we can simplify the storage requirements by only storing the unique values of the symmetric tensor. There are $\binom{n+m-1}{m}$ such values for an $m$-way tensor of dimension $n$. The symtensor class is designed to take advantage of this symmetric structure by only storing the unique values of the tensor.

Contents

Definition of a symmetric tensor

A symmetric tensor is invariant under any permutation of the indices. Here is a small example. The issymmetric function checks symmetry of a dense tensor.

T(:,:,1) = [1 2; 2 3]; T(:,:,2)= [2 3; 3 6];
+T = tensor(T)
+issymmetric(T)
+
T is a tensor of size 2 x 2 x 2
+	T(:,:,1) = 
+	     1     2
+	     2     3
+	T(:,:,2) = 
+	     2     3
+	     3     6
+
+ans =
+
+  logical
+
+   1
+
+

Creating a symtensor from a symmetric tensor

We can construct a symtensor object from a symmetric tensor. This object only stores the unique entries of the tensor. For the 2 x 2 x 2 tensor, this means there are only four unique entries. Everything else comes from permuting the indices of those four entries.

S = symtensor(T)
+
S is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     1
+	(1,1,2)     2
+	(1,2,2)     3
+	(2,2,2)     6
+
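
This matches the storage count mentioned above: for m = 3 modes of dimension n = 2, the number of unique entries is

nchoosek(2+3-1,3) %<-- Returns 4, the number of values stored in S.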

Unique entries of a tensor

Note from TGK: This needs to be added. It should have some discussion of all the return values from indices. What is a monomial description, etc. (From the output below, I appears to list the unique subscripts; C gives the monomial representation, i.e., how many times each dimension index appears in a subscript; W counts the distinct permutations of each subscript; and Q is the number of unique subscripts.)

[I,C,W,Q] = indices(S)
+
+I =
+
+     1     1     1
+     1     1     2
+     1     2     2
+     2     2     2
+
+
+C =
+
+     3     0
+     2     1
+     1     2
+     0     3
+
+
+W =
+
+     1
+     3
+     3
+     1
+
+
+Q =
+
+     4
+
+

Creating a symtensor from a nonsymmetric tensor

A symmetric tensor can be created from the symmetrization of a nonsymmetric tensor, so long as it is the same size in every mode. If the input is not symmetric, it is symmetrized by averaging the elements in each permutation class. For instance, this example starts with a nonsymmetric tensor and symmetrizes it:

T2 = tensor([1:8],[2 2 2])
+S2 = symtensor(T2)
+
T2 is a tensor of size 2 x 2 x 2
+	T2(:,:,1) = 
+	     1     3
+	     2     4
+	T2(:,:,2) = 
+	     5     7
+	     6     8
+S2 is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)    1.0000
+	(1,1,2)    3.3333
+	(1,2,2)    5.6667
+	(2,2,2)    8.0000
+
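
As a check of the averaging, the permutation class of index (1,1,2) contains the entries T2(1,1,2), T2(1,2,1), and T2(2,1,1), and their mean reproduces the value 3.3333 shown above.

mean([T2(1,1,2) T2(1,2,1) T2(2,1,1)]) %<-- (5 + 3 + 2)/3 = 3.3333, matches S2(1,1,2).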

Converting the symtensor back to a generic tensor is equivalent to running symmetrize on the original tensor. In the following example, the full command converts a symtensor to a tensor.

full(S2)
+symmetrize(T2)
+
ans is a tensor of size 2 x 2 x 2
+	ans(:,:,1) = 
+	    1.0000    3.3333
+	    3.3333    5.6667
+	ans(:,:,2) = 
+	    3.3333    5.6667
+	    5.6667    8.0000
+ans is a tensor of size 2 x 2 x 2
+	ans(:,:,1) = 
+	    1.0000    3.3333
+	    3.3333    5.6667
+	ans(:,:,2) = 
+	    3.3333    5.6667
+	    5.6667    8.0000
+

Create an all ones symtensor

The first argument is the generating function, the second argument is the number of modes, and the third argument is the size of each mode.

S3 = symtensor(@ones, 3, 2)
+
S3 is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     1
+	(1,1,2)     1
+	(1,2,2)     1
+	(2,2,2)     1
+

Create a random symtensor

S4 = symtensor(@randn, 3, 2)
+
S4 is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)   -1.0112
+	(1,1,2)   -0.2374
+	(1,2,2)   -0.2810
+	(2,2,2)    1.4135
+

Using a generating function to populate a symmetric tensor

In general, a symmetric tensor can also have its entries created by any generating function. This is done by passing a function handle, the number of modes, and the dimension. The function is expected to take a two-element size as input and return a matrix of that shape; in fact, the second argument to the function will always be 1.

% For example, we can also declare a binary symmetric tensor as follows:
+S5 = symtensor(@(x,y) double(rand(x,y)>.25), 3, 3)
+
S5 is a symmetric tensor with 3 modes of dimension 3
+	(1,1,1)     1
+	(1,1,2)     1
+	(1,1,3)     1
+	(1,2,2)     1
+	(1,2,3)     1
+	(1,3,3)     1
+	(2,2,2)     1
+	(2,2,3)     1
+	(2,3,3)     1
+	(3,3,3)     1
+

Use ndims and size to get the size of a symmetric tensor

ndims(S) %<-- Number of modes of the symmetric tensor
+
+ans =
+
+     3
+
+
size(S) %<-- Size of a symmetric tensor
+
+ans =
+
+     2     2     2
+
+

Use full to convert a symmetric tensor to a tensor

full(S) %<-- Converts from a symmetric tensor to a tensor
+
ans is a tensor of size 2 x 2 x 2
+	ans(:,:,1) = 
+	     1     2
+	     2     3
+	ans(:,:,2) = 
+	     2     3
+	     3     6
+

Subscripted reference of a symmetric tensor

Subindex notation extracts the tensor value.

S(1,2,2)
+S(2,1,2) %<-- Equal to above, by symmetry
+
+ans =
+
+     3
+
+
+ans =
+
+     3
+
+

This works the same as applying it to the full tensor.

T(1,2,2)
+T(2,1,2)
+
+ans =
+
+     3
+
+
+ans =
+
+     3
+
+

Multiple indices can be queried by combining these indices into the rows of a matrix. Consider the following example, which returns a vector consisting of the values of S at indices indicated by the rows of the input matrix.

S([1 2 1; 2 1 2])
+
+ans =
+
+     2
+     3
+
+

Single indices are interpreted as indices into the unique value array, which is stored in order of increasing indices. This is very different from using linear indexing on the full tensor.

S(3) %<- Third unique entry corresponding to (1,2,2)
+S(4) %<- Fourth unique entry, corresponding to (2,2,2)
+T(3) %<- Third entry in the tensor, i.e., (1,2,1) = (1,1,2)
+T(4) %<- Fourth entry in the tensor, i.e., (2,2,1) = (1,2,2)
+
+ans =
+
+     3
+
+
+ans =
+
+     6
+
+
+ans =
+
+     2
+
+
+ans =
+
+     3
+
+

Multiple entries can be obtained at once as well.

S([3:4]')
+
+ans =
+
+     3
+     6
+
+

Subscripted assignment

Symmetric tensors also support subscripted assignment. Either linear or subindex notation is valid. Multiple values can be assigned the same quantity, but assigning a subset of a symmetric tensor from a multidimensional array, tensor, or symtensor is not allowed.

S5(1) = 7 %<-- Linear indexing
+S5(2,1,2) = 6 %<-- Subindex indexing
+
S5 is a symmetric tensor with 3 modes of dimension 3
+	(1,1,1)     7
+	(1,1,2)     1
+	(1,1,3)     1
+	(1,2,2)     1
+	(1,2,3)     1
+	(1,3,3)     1
+	(2,2,2)     1
+	(2,2,3)     1
+	(2,3,3)     1
+	(3,3,3)     1
+S5 is a symmetric tensor with 3 modes of dimension 3
+	(1,1,1)     7
+	(1,1,2)     1
+	(1,1,3)     1
+	(1,2,2)     6
+	(1,2,3)     1
+	(1,3,3)     1
+	(2,2,2)     1
+	(2,2,3)     1
+	(2,3,3)     1
+	(3,3,3)     1
+

Symmetric tensors do not support enlargement with the assignment operator, so assigning a value to an index other than those which have already been declared produces an error.
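
A minimal sketch of guarding against this, assuming only that such an assignment throws an error, as stated above:

try
    S5(4,4,4) = 1; %<-- Index outside the declared 3 x 3 x 3 range.
catch
    disp('Assignment outside the declared range is not allowed for a symtensor.');
end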

Basic operations (plus, minus, and, or, etc.) on a symmetric tensor

The symtensor object supports many basic operations, illustrated here.

A = symtensor(@(x,y) rand(x,y)>.5, 3, 2)
+B = symtensor(@(x,y) rand(x,y)>.5, 3, 2)
+
A is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     0
+	(1,1,2)     1
+	(1,2,2)     1
+	(2,2,2)     0
+B is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     0
+	(1,1,2)     1
+	(1,2,2)     0
+	(2,2,2)     1
+
A==B %<-- Calls eq.
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)   1
+	(1,1,2)   1
+	(1,2,2)   0
+	(2,2,2)   0
+
A<B %<-- Calls lt.
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)   0
+	(1,1,2)   0
+	(1,2,2)   0
+	(2,2,2)   1
+
A.*B %<-- Calls times. (elementwise multiply)
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     0
+	(1,1,2)     1
+	(1,2,2)     0
+	(2,2,2)     0
+
5*A %<-- Calls mtimes. (scalar multiply)
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     0
+	(1,1,2)     5
+	(1,2,2)     5
+	(2,2,2)     0
+

The symtensor class supports the following additional MATLAB binary operations: and, or, xor, ne, gt, ge, le, plus, minus, power, ldivide, and rdivide. Supported unary operations include: not, uplus, uminus.

Using symtenfun for elementwise operations on one or more symmetric tensors

The function symtenfun applies a function elementwise to one or more symtensors. This function mirrors the capability of tenfun for tensors.

tenfun(@min, S, S2, S+1) %<-- Symtensor formed from elementwise minimization
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)     1
+	(1,1,2)     2
+	(1,2,2)     3
+	(2,2,2)     6
+
tenfun(@(x)(x-1.5),S) %<-- Subtract 1.5 from each element of S
+
ans is a symmetric tensor with 3 modes of dimension 2
+	(1,1,1)   -0.5000
+	(1,1,2)    0.5000
+	(1,2,2)    1.5000
+	(2,2,2)    4.5000
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tenmat_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tenmat_doc.html new file mode 100644 index 0000000..0c71f25 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tenmat_doc.html @@ -0,0 +1,424 @@ + + + + + Converting a tensor to a matrix and vice versa

Converting a tensor to a matrix and vice versa

We show how to convert a tensor to a matrix stored with extra information so that it can be converted back to a tensor. Converting to a matrix requires an ordered mapping of the tensor indices to the rows and the columns of the matrix.

Contents

Creating a tenmat (tensor as matrix) object

X = tensor(1:24,[3 2 2 2]) %<-- Create a tensor.
+
X is a tensor of size 3 x 2 x 2 x 2
+	X(:,:,1,1) = 
+	     1     4
+	     2     5
+	     3     6
+	X(:,:,2,1) = 
+	     7    10
+	     8    11
+	     9    12
+	X(:,:,1,2) = 
+	    13    16
+	    14    17
+	    15    18
+	X(:,:,2,2) = 
+	    19    22
+	    20    23
+	    21    24
+
A = tenmat(X,[1 2],[3 4]) %<-- Dims [1 2] map to rows, [3 4] to columns.
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 1  2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 3  4 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     7    13    19
+		     2     8    14    20
+		     3     9    15    21
+		     4    10    16    22
+		     5    11    17    23
+		     6    12    18    24
+
B = tenmat(X,[2 1],[3 4]) %<-- Order matters!
+
B is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	B.rindices = [ 2  1 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 3  4 ] (modes of tensor corresponding to columns)
+	B.data = 
+		     1     7    13    19
+		     4    10    16    22
+		     2     8    14    20
+		     5    11    17    23
+		     3     9    15    21
+		     6    12    18    24
+
C = tenmat(X,[1 2],[4 3]) %<-- Order matters!
+
C is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	C.rindices = [ 1  2 ] (modes of tensor corresponding to rows)
+	C.cindices = [ 4  3 ] (modes of tensor corresponding to columns)
+	C.data = 
+		     1    13     7    19
+		     2    14     8    20
+		     3    15     9    21
+		     4    16    10    22
+		     5    17    11    23
+		     6    18    12    24
+

Creating a tenmat by specifying the dimensions mapped to the rows

If just the row indices are specified, then the columns are arranged in increasing order.

A = tenmat(X,1) %<-- Same as A = tenmat(X,1,2:4)
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3  4 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     4     7    10    13    16    19    22
+		     2     5     8    11    14    17    20    23
+		     3     6     9    12    15    18    21    24
+

Creating a tenmat by specifying the dimensions mapped to the columns

Likewise, just the columns can be specified if the 3rd argument is a 't'. The rows are arranged in increasing order.

A = tenmat(X, [2 3], 't') %<-- Same as A = tenmat(X,[1 4],[2 3]).
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 1  4 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 2  3 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     4     7    10
+		     2     5     8    11
+		     3     6     9    12
+		    13    16    19    22
+		    14    17    20    23
+		    15    18    21    24
+

Vectorize via tenmat

All the dimensions can be mapped to the rows or the columns.

A = tenmat(X,1:4,'t') %<-- Map all the dimensions to the columns
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [  ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  2  3  4 ] (modes of tensor corresponding to columns)
+	A.data = 
+		  Columns 1 through 13
+		     1     2     3     4     5     6     7     8     9    10    11    12    13
+		  Columns 14 through 24
+		    14    15    16    17    18    19    20    21    22    23    24
+

Alternative ordering for the columns for mode-n matricization

Mode-n matricization means that only mode n is mapped to the rows. Different column orderings are available.

A = tenmat(X,2) %<-- By default, columns are ordered as [1 3 4].
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  3  4 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     2     3     7     8     9    13    14    15    19    20    21
+		     4     5     6    10    11    12    16    17    18    22    23    24
+
A = tenmat(X,2,[3 1 4]) %<-- Explicit specification.
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 3  1  4 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     7     2     8     3     9    13    19    14    20    15    21
+		     4    10     5    11     6    12    16    22    17    23    18    24
+
A = tenmat(X,2,'fc') %<-- Forward cyclic, i.e., [3 4 1].
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 3  4  1 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     7    13    19     2     8    14    20     3     9    15    21
+		     4    10    16    22     5    11    17    23     6    12    18    24
+
A = tenmat(X,2,'bc') %<-- Backward cyclic, i.e., [1 4 3].
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     2     3    13    14    15     7     8     9    19    20    21
+		     4     5     6    16    17    18    10    11    12    22    23    24
+

Constituent parts of a tenmat

A.data %<-- The matrix itself.
+
+ans =
+
+     1     2     3    13    14    15     7     8     9    19    20    21
+     4     5     6    16    17    18    10    11    12    22    23    24
+
+
A.tsize %<-- Size of the original tensor.
+
+ans =
+
+     3     2     2     2
+
+
A.rdims %<-- Dimensions that were mapped to the rows.
+
+ans =
+
+     2
+
+
A.cdims %<-- Dimensions that were mapped to the columns.
+
+ans =
+
+     1     4     3
+
+

Creating a tenmat from its constituent parts

B = tenmat(A.data,A.rdims,A.cdims,A.tsize) %<-- Recreates A
+
B is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	B.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	B.data = 
+		     1     2     3    13    14    15     7     8     9    19    20    21
+		     4     5     6    16    17    18    10    11    12    22    23    24
+

Creating an empty tenmat

B = tenmat %<-- Empty tenmat.
+
B is a matrix corresponding to a tensor of size [empty tensor]
+	B.rindices = [  ] (modes of tensor corresponding to rows)
+	B.cindices = [  ] (modes of tensor corresponding to columns)
+	B.data = []
+

Use double to convert a tenmat to a MATLAB matrix

double(A) %<-- converts A to a standard matrix
+
+ans =
+
+     1     2     3    13    14    15     7     8     9    19    20    21
+     4     5     6    16    17    18    10    11    12    22    23    24
+
+

Use tensor to convert a tenmat to a tensor

Y = tensor(A)
+
Y is a tensor of size 3 x 2 x 2 x 2
+	Y(:,:,1,1) = 
+	     1     4
+	     2     5
+	     3     6
+	Y(:,:,2,1) = 
+	     7    10
+	     8    11
+	     9    12
+	Y(:,:,1,2) = 
+	    13    16
+	    14    17
+	    15    18
+	Y(:,:,2,2) = 
+	    19    22
+	    20    23
+	    21    24
+

Use size and tsize for the dimensions of a tenmat

size(A) %<-- Matrix size
+tsize(A) %<-- Corresponding tensor size
+
+ans =
+
+     2    12
+
+
+ans =
+
+     3     2     2     2
+
+

Subscripted reference for a tenmat

A(2,1) %<-- returns the (2,1) element of the matrix.
+
+ans =
+
+     4
+
+

Subscripted assignment for a tenmat

A(1:2,1:2) = ones(2) %<-- Replace part of the matrix.
+
A is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	A.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	A.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	A.data = 
+		     1     1     3    13    14    15     7     8     9    19    20    21
+		     1     1     6    16    17    18    10    11    12    22    23    24
+

Use end for the last index

A(end,end) %<-- Same as A(2,12)
+
+ans =
+
+    24
+
+

Basic operations for tenmat

norm(A) %<-- Norm of the matrix.
+
+ans =
+
+   69.6994
+
+
A' %<-- Calls ctranspose (also swaps mapped dimensions).
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 1  4  3 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 2 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		     1     1
+		     1     1
+		     3     6
+		    13    16
+		    14    17
+		    15    18
+		     7    10
+		     8    11
+		     9    12
+		    19    22
+		    20    23
+		    21    24
+
+A %<-- Calls uplus.
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		     1     1     3    13    14    15     7     8     9    19    20    21
+		     1     1     6    16    17    18    10    11    12    22    23    24
+
-A %<-- Calls uminus.
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		    -1    -1    -3   -13   -14   -15    -7    -8    -9   -19   -20   -21
+		    -1    -1    -6   -16   -17   -18   -10   -11   -12   -22   -23   -24
+
A+A %<-- Calls plus.
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		     2     2     6    26    28    30    14    16    18    38    40    42
+		     2     2    12    32    34    36    20    22    24    44    46    48
+
A-A %<-- Calls minus.
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		     0     0     0     0     0     0     0     0     0     0     0     0
+		     0     0     0     0     0     0     0     0     0     0     0     0
+

Multiplying two tenmats

It is possible to compute the product of two tenmats and have a result that can be converted into a tensor.

B = A * A' %<-- Tenmat that is the product of two tenmats.
+
B is a matrix corresponding to a tensor of size 2 x 2
+	B.rindices = [ 1 ] (modes of tensor corresponding to rows)
+	B.cindices = [ 2 ] (modes of tensor corresponding to columns)
+	B.data = 
+		        1997        2384
+		        2384        2861
+
tensor(B) %<-- Corresponding tensor.
+
ans is a tensor of size 2 x 2
+	ans(:,:) = 
+	        1997        2384
+	        2384        2861
+
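
The same numbers can be obtained directly from the underlying matrices. A quick check using the data field:

A.data * A.data' %<-- Reproduces B.data above.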

Displaying a tenmat

Shows the original tensor dimensions, the modes mapped to rows, the modes mapped to columns, and the matrix.

disp(A)
+
ans is a matrix corresponding to a tensor of size 3 x 2 x 2 x 2
+	ans.rindices = [ 2 ] (modes of tensor corresponding to rows)
+	ans.cindices = [ 1  4  3 ] (modes of tensor corresponding to columns)
+	ans.data = 
+		     1     1     3    13    14    15     7     8     9    19    20    21
+		     1     1     6    16    17    18    10    11    12    22    23    24
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_doc.html new file mode 100644 index 0000000..f6055d6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_doc.html @@ -0,0 +1,906 @@ + + + + + Tensors + + +

Tensors

Tensors are extensions of multidimensional arrays with additional operations defined on them. Here we explain the basics of creating and working with tensors.

Contents

Creating a tensor from an array

The tensor command converts a (multidimensional) array to a tensor object.

M = ones(4,3,2); %<-- A 4 x 3 x 2 array.
+X = tensor(M) %<-- Convert to a tensor object.
+
X is a tensor of size 4 x 3 x 2
+	X(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	X(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	     1     1     1
+	     1     1     1
+

Optionally, you can specify a different shape for the tensor, so long as the input array has the right number of elements.

X = tensor(M,[2 3 4]) %<-- M has 24 elements.
+
X is a tensor of size 2 x 3 x 4
+	X(:,:,1) = 
+	     1     1     1
+	     1     1     1
+	X(:,:,2) = 
+	     1     1     1
+	     1     1     1
+	X(:,:,3) = 
+	     1     1     1
+	     1     1     1
+	X(:,:,4) = 
+	     1     1     1
+	     1     1     1
+

Creating a one-dimensional tensor

The tensor class explicitly supports order-one tensors as well as trailing singleton dimensions, but the size must be explicit in the constructor. By default, a column array produces a 2-way tensor.

X = tensor(rand(5,1)) %<-- Creates a 2-way tensor.
+
X is a tensor of size 5 x 1
+	X(:,:) = 
+	    0.8147
+	    0.9058
+	    0.1270
+	    0.9134
+	    0.6324
+

This is fixed by specifying the size explicitly.

X = tensor(rand(5,1),5) %<-- Creates a 1-way tensor.
+
X is a tensor of size 5
+	X(:) = 
+	    0.0975
+	    0.2785
+	    0.5469
+	    0.9575
+	    0.9649
+

Specifying trailing singleton dimensions in a tensor

Likewise, trailing singleton dimensions must be explicitly specified.

Y = tensor(rand(4,3,1)) %<-- Creates a 2-way tensor.
+
Y is a tensor of size 4 x 3
+	Y(:,:) = 
+	    0.1576    0.8003    0.7922
+	    0.9706    0.1419    0.9595
+	    0.9572    0.4218    0.6557
+	    0.4854    0.9157    0.0357
+
Y = tensor(rand(4,3,1),[4 3 1]) %<-- Creates a 3-way tensor.
+
Y is a tensor of size 4 x 3 x 1
+	Y(:,:,1) = 
+	    0.8491    0.7431    0.7060
+	    0.9340    0.3922    0.0318
+	    0.6787    0.6555    0.2769
+	    0.7577    0.1712    0.0462
+

Unfortunately, the whos command does not report the size of 1D objects correctly (last checked for MATLAB 2006a).

whos X Y %<-- Doesn't report the right size for X!
+
  Name      Size             Bytes  Class     Attributes
+
+  X         1x1                400  tensor              
+  Y         4x3x1              472  tensor              
+
+

The constituent parts of a tensor

X = tenrand([4 3 2]); %<-- Create data.
+X.data %<-- The array.
+
+ans(:,:,1) =
+
+    0.0971    0.9502    0.7655
+    0.8235    0.0344    0.7952
+    0.6948    0.4387    0.1869
+    0.3171    0.3816    0.4898
+
+
+ans(:,:,2) =
+
+    0.4456    0.2760    0.1190
+    0.6463    0.6797    0.4984
+    0.7094    0.6551    0.9597
+    0.7547    0.1626    0.3404
+
+
X.size %<-- The size.
+
+ans =
+
+     4     3     2
+
+

Creating a tensor from its constituent parts

Y = tensor(X.data,X.size) %<-- Copies X.
+
Y is a tensor of size 4 x 3 x 2
+	Y(:,:,1) = 
+	    0.0971    0.9502    0.7655
+	    0.8235    0.0344    0.7952
+	    0.6948    0.4387    0.1869
+	    0.3171    0.3816    0.4898
+	Y(:,:,2) = 
+	    0.4456    0.2760    0.1190
+	    0.6463    0.6797    0.4984
+	    0.7094    0.6551    0.9597
+	    0.7547    0.1626    0.3404
+

Creating an empty tensor

An empty constructor exists, primarily to support loading previously saved data in MAT-files.

X = tensor %<-- Creates an empty tensor.
+
X is a tensor of size [empty tensor]
+	X = []
+

Use tenones to create a tensor of all ones

X = tenones([3 4 2]) %<-- Creates a 3 x 4 x 2 tensor of ones.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	     1     1     1     1
+	     1     1     1     1
+	     1     1     1     1
+	X(:,:,2) = 
+	     1     1     1     1
+	     1     1     1     1
+	     1     1     1     1
+

Use tenzeros to create a tensor of all zeros

X = tenzeros([1 4 2]) %<-- Creates a 1 x 4 x 2 tensor of zeros.
+
X is a tensor of size 1 x 4 x 2
+	X(:,:,1) = 
+	     0     0     0     0
+	X(:,:,2) = 
+	     0     0     0     0
+

Use tenrand to create a random tensor

X = tenrand([5 4 2]) %<-- Creates a random 5 x 4 x 2 tensor.
+
X is a tensor of size 5 x 4 x 2
+	X(:,:,1) = 
+	    0.5853    0.6991    0.1493    0.2435
+	    0.2238    0.8909    0.2575    0.9293
+	    0.7513    0.9593    0.8407    0.3500
+	    0.2551    0.5472    0.2543    0.1966
+	    0.5060    0.1386    0.8143    0.2511
+	X(:,:,2) = 
+	    0.6160    0.5497    0.3804    0.7792
+	    0.4733    0.9172    0.5678    0.9340
+	    0.3517    0.2858    0.0759    0.1299
+	    0.8308    0.7572    0.0540    0.5688
+	    0.5853    0.7537    0.5308    0.4694
+

Use squeeze to remove singleton dimensions from a tensor

squeeze(Y) %<-- Removes singleton dimensions.
+
ans is a tensor of size 4 x 3 x 2
+	ans(:,:,1) = 
+	    0.0971    0.9502    0.7655
+	    0.8235    0.0344    0.7952
+	    0.6948    0.4387    0.1869
+	    0.3171    0.3816    0.4898
+	ans(:,:,2) = 
+	    0.4456    0.2760    0.1190
+	    0.6463    0.6797    0.4984
+	    0.7094    0.6551    0.9597
+	    0.7547    0.1626    0.3404
+

Use double to convert a tensor to a (multidimensional) array

double(Y) %<-- Converts Y to a standard MATLAB array.
+
+ans(:,:,1) =
+
+    0.0971    0.9502    0.7655
+    0.8235    0.0344    0.7952
+    0.6948    0.4387    0.1869
+    0.3171    0.3816    0.4898
+
+
+ans(:,:,2) =
+
+    0.4456    0.2760    0.1190
+    0.6463    0.6797    0.4984
+    0.7094    0.6551    0.9597
+    0.7547    0.1626    0.3404
+
+
Y.data %<-- Same thing.
+
+ans(:,:,1) =
+
+    0.0971    0.9502    0.7655
+    0.8235    0.0344    0.7952
+    0.6948    0.4387    0.1869
+    0.3171    0.3816    0.4898
+
+
+ans(:,:,2) =
+
+    0.4456    0.2760    0.1190
+    0.6463    0.6797    0.4984
+    0.7094    0.6551    0.9597
+    0.7547    0.1626    0.3404
+
+

Use ndims and size to get the size of a tensor

ndims(Y) %<-- Number of dimensions (or ways).
+
+ans =
+
+     3
+
+
size(Y) %<-- Row vector with the sizes of all dimensions.
+
+ans =
+
+     4     3     2
+
+
size(Y,3) %<-- Size of a single dimension.
+
+ans =
+
+     2
+
+

Subscripted reference for a tensor

X = tenrand([3 4 2 1]); %<-- Create a 3 x 4 x 2 x 1 random tensor.
+X(1,1,1,1) %<-- Extract a single element.
+
+ans =
+
+    0.0119
+
+

It is possible to extract a subtensor that contains a single element. Observe that singleton dimensions are not dropped unless they are explicitly specified, e.g., as above.

X(1,1,1,:) %<-- Produces a tensor of order 1 and size 1.
+
ans is a tensor of size 1
+	ans(:) = 
+	    0.0119
+

In general, specified dimensions are dropped from the result. Here we specify the second and third dimension.

X(:,1,1,:) %<-- Produces a tensor of size 3 x 1.
+
ans is a tensor of size 3 x 1
+	ans(:,:) = 
+	    0.0119
+	    0.3371
+	    0.1622
+

Moreover, the subtensor is automatically renumbered/resized in the same way that MATLAB works for arrays except that singleton dimensions are handled explicitly.

X(1:2,[2 4],1,:) %<-- Produces a tensor of size 2 x 2 x 1.
+
ans is a tensor of size 2 x 2 x 1
+	ans(:,:,1) = 
+	    0.7943    0.6541
+	    0.3112    0.6892
+

It's also possible to extract a list of elements by passing in an array of subscripts or a column array of linear indices.

subs = [1,1,1,1; 3,4,2,1]; X(subs) %<-- Extract 2 values by subscript.
+
+ans =
+
+    0.0119
+    0.9619
+
+
inds = [1; 24]; X(inds) %<-- Same thing with linear indices.
+
+ans =
+
+    0.0119
+    0.9619
+
+

The difference between extracting a subtensor and a list of linear indices is ambiguous for 1-dimensional tensors. We can specify 'extract' as a second argument whenever we are using a list of subscripts.

X = tenrand(10); %<-- Create a random tensor.
+X([1:6]') %<-- Extract a subtensor.
+
ans is a tensor of size 6
+	ans(:) = 
+	    0.0046
+	    0.7749
+	    0.8173
+	    0.8687
+	    0.0844
+	    0.3998
+
X([1:6]','extract') %<-- Same thing *but* result is a vector.
+
+ans =
+
+    0.0046
+    0.7749
+    0.8173
+    0.8687
+    0.0844
+    0.3998
+
+

Subscripted assignment for a tensor

We can assign a single element, an entire subtensor, or a list of values for a tensor.

X = tenrand([3,4,2]); %<-- Create some data.
+X(1,1,1) = 0 %<-- Replaces the (1,1,1) element.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	         0    0.1361    0.5499    0.6221
+	    0.2638    0.8693    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    0.4018    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+
X(1:2,1:2,1) = ones(2,2) %<-- Replaces a 2 x 2 subtensor.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	    1.0000    1.0000    0.5499    0.6221
+	    1.0000    1.0000    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    0.4018    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+
X([1 1 1;1 1 2]) = [5;7] %<-- Replaces the (1,1,1) and (1,1,2) elements.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	    5.0000    1.0000    0.5499    0.6221
+	    1.0000    1.0000    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    7.0000    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+
X([1;13]) = [5;7] %<-- Same as above using linear indices.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	    5.0000    1.0000    0.5499    0.6221
+	    1.0000    1.0000    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    7.0000    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+

It is possible to grow the tensor automatically by assigning elements outside the original range of the tensor.

X(1,1,3) = 1 %<-- Grows the size of the tensor.
+
X is a tensor of size 3 x 4 x 3
+	X(:,:,1) = 
+	    5.0000    1.0000    0.5499    0.6221
+	    1.0000    1.0000    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    7.0000    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+	X(:,:,3) = 
+	     1     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+

Using end for the last array index.

X(end,end,end)  %<-- Same as X(3,4,3).
+
+ans =
+
+     0
+
+
X(1,1,1:end-1)  %<-- Same as X(1,1,1:2).
+
ans is a tensor of size 2
+	ans(:) = 
+	     5
+	     7
+

It is also possible to use end to index past the end of an array.

X(1,1,end+1) = 5 %<-- Same as X(1,1,4).
+
X is a tensor of size 3 x 4 x 4
+	X(:,:,1) = 
+	    5.0000    1.0000    0.5499    0.6221
+	    1.0000    1.0000    0.1450    0.3510
+	    0.1455    0.5797    0.8530    0.5132
+	X(:,:,2) = 
+	    7.0000    0.1233    0.4173    0.9448
+	    0.0760    0.1839    0.0497    0.4909
+	    0.2399    0.2400    0.9027    0.4893
+	X(:,:,3) = 
+	     1     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+	X(:,:,4) = 
+	     5     0     0     0
+	     0     0     0     0
+	     0     0     0     0
+

Use find for subscripts of nonzero elements of a tensor

The find function returns a list of nonzero subscripts for a tensor. Note that this differs from the standard MATLAB version, which returns linear indices.

X = tensor(floor(3*rand(2,2,2))) %<-- Generate some data.
+
X is a tensor of size 2 x 2 x 2
+	X(:,:,1) = 
+	     1     1
+	     2     0
+	X(:,:,2) = 
+	     2     0
+	     1     1
+
[S,V] = find(X) %<-- Find all the nonzero subscripts and values.
+
+S =
+
+     1     1     1
+     2     1     1
+     1     2     1
+     1     1     2
+     2     1     2
+     2     2     2
+
+
+V =
+
+     1
+     2
+     1
+     2
+     1
+     1
+
+
S = find(X >= 2) %<-- Find subscripts of values >= 2.
+
+S =
+
+     2     1     1
+     1     1     2
+
+
V = X(S) %<-- Extract the corresponding values from X.
+
+V =
+
+     2
+     2
+
+

Computing the Frobenius norm of a tensor

norm computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor.

T = tensor(randn(2,3,3));
+norm(T)
+
+ans =
+
+    4.9219
+
+
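As a quick sanity check of that statement, here is a minimal sketch (not part of the original output) comparing norm against the vectorized data; it assumes T from the example above is still in scope:

% Minimal sketch: the Frobenius norm equals the Euclidean norm of the
% vectorized entries (up to round-off).
nrm1 = norm(T);                    % Frobenius norm via the tensor class
nrm2 = sqrt(sum(T.data(:).^2));    % Euclidean norm of the vectorized data
nrm1 - nrm2                        %<-- should be (numerically) zero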

Using reshape to rearrange elements in a tensor

reshape reshapes a tensor into a given size array. The total number of elements in the tensor cannot change.

X = tensor(randi(10,3,2,3));
+reshape(X,[3,3,2]);
+
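A minimal sketch (not part of the original page) making the element-count constraint explicit; it reuses X from the line above:

% Minimal sketch: reshape preserves the total number of elements.
X2 = reshape(X,[3,3,2]);
prod(size(X)) == prod(size(X2))    %<-- true: 18 elements either way
% A request such as reshape(X,[4 4]) would error, since 16 ~= 18.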

Basic operations (plus, minus, and, or, etc.) on a tensor

The tensor object supports many basic operations, illustrated here.

A = tensor(floor(3*rand(2,3,2)))
+B = tensor(floor(3*rand(2,3,2)))
+
A is a tensor of size 2 x 3 x 2
+	A(:,:,1) = 
+	     1     2     1
+	     1     1     2
+	A(:,:,2) = 
+	     2     1     0
+	     1     1     0
+B is a tensor of size 2 x 3 x 2
+	B(:,:,1) = 
+	     1     2     0
+	     0     0     0
+	B(:,:,2) = 
+	     0     0     1
+	     1     2     0
+
A & B %<-- Calls and.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   1   1   0
+	   0   0   0
+	ans(:,:,2) = 
+	   0   0   0
+	   1   1   0
+
A | B %<-- Calls or.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   1   1   1
+	   1   1   1
+	ans(:,:,2) = 
+	   1   1   1
+	   1   1   0
+
xor(A,B) %<-- Calls xor.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   0   0   1
+	   1   1   1
+	ans(:,:,2) = 
+	   1   1   1
+	   0   0   0
+
A==B %<-- Calls eq.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   1   1   0
+	   0   0   0
+	ans(:,:,2) = 
+	   0   0   0
+	   1   0   1
+
A~=B %<-- Calls neq.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   0   0   1
+	   1   1   1
+	ans(:,:,2) = 
+	   1   1   1
+	   0   1   0
+
A>B %<-- Calls gt.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   0   0   1
+	   1   1   1
+	ans(:,:,2) = 
+	   1   1   0
+	   0   0   0
+
A>=B %<-- Calls ge.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   1   1   1
+	   1   1   1
+	ans(:,:,2) = 
+	   1   1   0
+	   1   0   1
+
A<B %<-- Calls lt.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   0   0   0
+	   0   0   0
+	ans(:,:,2) = 
+	   0   0   1
+	   0   1   0
+
A<=B %<-- Calls le.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   1   1   0
+	   0   0   0
+	ans(:,:,2) = 
+	   0   0   1
+	   1   1   1
+
~A %<-- Calls not.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	   0   0   0
+	   0   0   0
+	ans(:,:,2) = 
+	   0   0   1
+	   0   0   1
+
+A %<-- Calls uplus.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     2     1
+	     1     1     2
+	ans(:,:,2) = 
+	     2     1     0
+	     1     1     0
+
-A %<-- Calls uminus.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	    -1    -2    -1
+	    -1    -1    -2
+	ans(:,:,2) = 
+	    -2    -1     0
+	    -1    -1     0
+
A+B %<-- Calls plus.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     2     4     1
+	     1     1     2
+	ans(:,:,2) = 
+	     2     1     1
+	     2     3     0
+
A-B %<-- Calls minus.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     0     0     1
+	     1     1     2
+	ans(:,:,2) = 
+	     2     1    -1
+	     0    -1     0
+
A.*B %<-- Calls times.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     4     0
+	     0     0     0
+	ans(:,:,2) = 
+	     0     0     0
+	     1     2     0
+
5*A %<-- Calls mtimes.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     5    10     5
+	     5     5    10
+	ans(:,:,2) = 
+	    10     5     0
+	     5     5     0
+
A.^B %<-- Calls power.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     4     1
+	     1     1     1
+	ans(:,:,2) = 
+	     1     1     0
+	     1     1     1
+
A.^2 %<-- Calls power.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     4     1
+	     1     1     4
+	ans(:,:,2) = 
+	     4     1     0
+	     1     1     0
+
A.\B %<-- Calls ldivide.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     1     0
+	     0     0     0
+	ans(:,:,2) = 
+	     0     0   Inf
+	     1     2   NaN
+
A./2 %<-- Calls rdivide.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	    0.5000    1.0000    0.5000
+	    0.5000    0.5000    1.0000
+	ans(:,:,2) = 
+	    1.0000    0.5000         0
+	    0.5000    0.5000         0
+
A./B %<-- Calls rdivide (but beware divides by zero!)
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     1   Inf
+	   Inf   Inf   Inf
+	ans(:,:,2) = 
+	       Inf       Inf         0
+	    1.0000    0.5000       NaN
+

Using tenfun for elementwise operations on one or more tensors

The function tenfun applies a specified function to a number of tensors. This can be used for any function that is not predefined for tensors.

tenfun(@(x)(x+1),A) %<-- Increment every element of A by one.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     2     3     2
+	     2     2     3
+	ans(:,:,2) = 
+	     3     2     1
+	     2     2     1
+
tenfun(@max,A,B) %<-- Max of A and B, elementwise.
+
ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     2     1
+	     1     1     2
+	ans(:,:,2) = 
+	     2     1     1
+	     1     2     0
+
C = tensor(floor(5*rand(2,3,2))) %<-- Create another tensor.
+tenfun(@median,A,B,C) %<-- Elementwise medians of A, B, and C.
+
C is a tensor of size 2 x 3 x 2
+	C(:,:,1) = 
+	     4     2     1
+	     4     0     2
+	C(:,:,2) = 
+	     2     3     1
+	     1     3     0
+ans is a tensor of size 2 x 3 x 2
+	ans(:,:,1) = 
+	     1     2     1
+	     1     0     2
+	ans(:,:,2) = 
+	     2     1     1
+	     1     2     0
+

Use permute to reorder the modes of a tensor

X = tensor(1:24,[3 4 2]) %<-- Create a tensor.
+
X is a tensor of size 3 x 4 x 2
+	X(:,:,1) = 
+	     1     4     7    10
+	     2     5     8    11
+	     3     6     9    12
+	X(:,:,2) = 
+	    13    16    19    22
+	    14    17    20    23
+	    15    18    21    24
+
permute(X,[3 2 1]) %<-- Reverse the modes.
+
ans is a tensor of size 2 x 4 x 3
+	ans(:,:,1) = 
+	     1     4     7    10
+	    13    16    19    22
+	ans(:,:,2) = 
+	     2     5     8    11
+	    14    17    20    23
+	ans(:,:,3) = 
+	     3     6     9    12
+	    15    18    21    24
+

Permuting a 1-dimensional tensor works correctly.

X = tensor(1:4,4); %<-- Create a 1-way tensor.
+permute(X,1) %<-- Call permute with *only* one dimension.
+
ans is a tensor of size 4
+	ans(:) = 
+	     1
+	     2
+	     3
+	     4
+

Symmetrizing and checking for symmetry in a tensor

A tensor can be symmetrized in a collection of modes with the command symmetrize. The new, symmetric tensor is formed by averaging over all elements in the tensor which are required to be equal.

W = tensor(rand(4,4,4));
+Y = symmetrize(W);
+
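To illustrate the averaging, here is a minimal sketch (not part of the original page) that assumes W and Y from the lines above:

% Minimal sketch: after full symmetrization, an entry equals the average of
% W over all permutations of its subscripts.
avg = mean([W(1,2,3) W(1,3,2) W(2,1,3) W(2,3,1) W(3,1,2) W(3,2,1)]);
Y(1,2,3) - avg                     %<-- should be (numerically) zero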

A second argument can also be passed to symmetrize which specifies an array of modes with respect to which the tensor should be symmetrized.

X = tensor(rand(3,2,3));
+Z = symmetrize(X,[1,3]);
+

Additionally, one can check for symmetry in tensors with the issymmetric function. Similar to symmetrize, a collection of modes can be passed as a second argument.

issymmetric(Y)
+issymmetric(Z,[1,3])
+
+ans =
+
+  logical
+
+   1
+
+
+ans =
+
+  logical
+
+   1
+
+

Displaying a tensor

The function disp can be used to display a tensor and correctly displays very small and large elements.

X = tensor(1:24,[3 4 2]); %<-- Create a 3 x 4 x 2 tensor.
+X(:,:,1) = X(:,:,1) * 1e15; %<-- Make the first slice very large.
+X(:,:,2) = X(:,:,2) * 1e-15; %<-- Make the second slice very small.
+disp(X)
+
ans is a tensor of size 3 x 4 x 2
+	ans(:,:,1) = 
+	   1.0e+16 *
+	    0.1000    0.4000    0.7000    1.0000
+	    0.2000    0.5000    0.8000    1.1000
+	    0.3000    0.6000    0.9000    1.2000
+	ans(:,:,2) = 
+	   1.0e-13 *
+	    0.1300    0.1600    0.1900    0.2200
+	    0.1400    0.1700    0.2000    0.2300
+	    0.1500    0.1800    0.2100    0.2400
+
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_types.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_types.html new file mode 100644 index 0000000..b007500 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tensor_types.html @@ -0,0 +1,47 @@

Tensor Types

The Tensor Toolbox provides the following tensor classes (a short constructor sketch follows the list):

  • tensor - Dense tensors, extending MATLAB's native multidimensional array capabilities.
  • sptensor - Sparse tensors, only stores the nonzeros and their indices.
  • symtensor - Symmetric tensor, only stores the unique entries.
  • ttensor - Tucker decomposed tensor, stored as a core and factor matrices.
  • ktensor - Kruskal decomposed tensor, stored as weight and factor matrices.
  • symktensor - Kruskal decomposed symmetric tensor, stored as weight and factor matrix.
  • sumtensor - Sum of different types of tensors, never formed explicitly.
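Here is that sketch, a minimal illustration (not part of the original page) constructing a few of the classes above with constructors that appear elsewhere in this documentation:

% Minimal sketch: construct instances of several of the classes listed above.
T  = tenrand([3 3 3]);                                % dense tensor
S  = sptenrand([3 3 3], 4);                           % sparse tensor, ~4 nonzeros
K  = ktensor({rand(3,2), rand(3,2), rand(3,2)});      % Kruskal (CP) format
TT = ttensor(tenrand([2 2 2]), {rand(3,2), rand(3,2), rand(3,2)});  % Tucker format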
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/test_problems_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/test_problems_doc.html new file mode 100644 index 0000000..43183f3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/test_problems_doc.html @@ -0,0 +1,693 @@

Creating test problems and initial guesses

We demonstrate how to use Tensor Toolbox create_problem and create_guess functions to create test problems for fitting algorithms.

Contents

Creating a CP test problem

The create_problem function allows a user to generate a test problem with a known solution having pre-specified characteristics (e.g., size and number of factors). The create_problem function generates both the solution (as a ktensor for CP) and the test data (as a tensor). We later show that a pre-specified solution can be supplied as well.

% Create a problem
+info = create_problem('Size', [5 4 3], 'Num_Factors', 3, 'Noise', 0.10);
+
% Display the solution created by create_problem
+soln = info.Soln
+
soln is a ktensor of size 5 x 4 x 3
+	soln.lambda = [ 0.67955     0.77677     0.43693 ]
+	soln.U{1} = 
+		    1.2659   -1.8263    0.1394
+		    1.2285   -0.0288    1.3588
+		    0.3697   -1.2996    0.7381
+		   -1.6088   -0.6544    0.5026
+		   -1.0133   -0.7654    0.7669
+	soln.U{2} = 
+		    0.4703   -0.4736    0.0978
+		   -0.1044    1.0593   -0.6853
+		   -1.0185   -1.3621   -1.1100
+		    0.2280    0.8114   -0.5553
+	soln.U{3} = 
+		    0.6467    0.4165    0.5587
+		   -1.5888    1.0288    0.5788
+		    0.0075   -1.5957   -0.9820
+
% Display the data created by create_problem
+data = info.Data
+
data is a tensor of size 5 x 4 x 3
+	data(:,:,1) = 
+	    0.6249   -0.8121    0.1202   -0.3563
+	    0.2137   -0.4577   -0.9183   -0.0238
+	    0.1200   -0.3537    0.2580   -0.4708
+	   -0.3357   -0.2214    0.6465   -0.5086
+	   -0.1006   -0.3410    0.4312   -0.3290
+	data(:,:,2) = 
+	   -0.0748   -1.3344    3.2810   -1.3842
+	   -0.5581   -0.1288    0.9371   -0.3729
+	    0.4566   -1.0594    1.4483   -0.8353
+	    1.0755   -0.8393   -1.1800   -0.2076
+	    0.7119   -0.8674   -0.4740   -0.2466
+	data(:,:,3) = 
+	   -1.1341    2.5656   -3.1061    1.9383
+	   -0.2261    0.4430    0.6498    0.4329
+	   -0.9159    1.9396   -1.9271    1.4065
+	   -0.5931    0.9982   -0.6803    0.8009
+	   -0.5887    1.2216   -1.0430    1.1270
+
% The difference between true solution and measured data should match the
+% specified 10% noise.
+diff = norm(full(info.Soln) - info.Data)/norm(full(info.Soln))
+
+diff =
+
+    0.1000
+
+

Creating a Tucker test problem

The create_problem function can also be used to create Tucker problems by specifying the 'Type' as 'Tucker'. In this case, the create_problem function generates both the solution (as a ttensor for Tucker) and the test data (as a tensor).

% Create a problem
+info = create_problem('Type', 'Tucker', 'Size', [5 4 3], 'Num_Factors', [3 3 2]);
+
% Display the Tucker-type solution created by create_problem
+soln = info.Soln
+
soln is a ttensor of size 5 x 4 x 3
+	soln.core is a tensor of size 3 x 3 x 2
+		soln.core(:,:,1) = 
+	    1.6229    0.3461    0.3951
+	   -1.2964   -0.9288    0.6931
+	    1.8595   -1.1376   -1.3269
+		soln.core(:,:,2) = 
+	    0.0635   -0.2179    1.8158
+	   -0.2798   -0.5542    1.6557
+	   -0.8710   -0.6151   -1.5281
+	soln.U{1} = 
+		    0.1595    0.4311   -0.0799
+		    0.0920    0.0801    1.7521
+		    1.7787    0.4889    1.3487
+		   -0.0043    0.6331   -0.1599
+		   -1.0328   -0.0141   -0.1235
+	soln.U{2} = 
+		   -0.5839    1.2308   -0.9507
+		    0.9942    0.9231   -0.0393
+		    1.5200   -0.8304    0.2026
+		    0.2689   -0.4069   -0.1487
+	soln.U{3} = 
+		   -0.3726    1.2228
+		    0.4328   -0.3496
+		    0.2976    0.1372
+
% Difference between true solution and measured data (default noise is 10%)
+diff = norm(full(info.Soln) - info.Data)/norm(full(info.Soln))
+
+diff =
+
+    0.1000
+
+

Recreating the same test problem

We can recreate exactly the same test problem when we use the same random seed and other parameters.

% Set-up, including specifying random state
+sz = [5 4 3]; %<- Size
+nf = 2; %<- Number of components
+state = RandStream.getGlobalStream.State; %<- Random state
+
% Generate first test problem
+info1 = create_problem('Size', sz, 'Num_Factors', nf, 'State', state);
+
% Generate second identical test problem
+info2 = create_problem('Size', sz, 'Num_Factors', nf, 'State', state);
+
% Check that the solutions are identical
+tf = isequal(info1.Soln, info2.Soln)
+
+tf =
+
+  logical
+
+   1
+
+
% Check that the data are identical
+diff = norm(info1.Data - info2.Data)
+
+diff =
+
+     0
+
+

Checking default parameters and recreating the same test problem

The create_problem function returns the parameters that were used to generate it. These can be used to see the defaults. Additionally, if these are saved, they can be used to recreate the same test problems for future experiments.

% Generate test problem and use second output argument for parameters.
+[info1,params] = create_problem('Size', [5 4 3], 'Num_Factors', 2);
+
% Here are the parameters
+params
+
+params = 
+
+  struct with fields:
+
+       Core_Generator: 'randn'
+     Factor_Generator: 'randn'
+     Lambda_Generator: 'rand'
+                    M: 0
+                Noise: 0.1000
+          Num_Factors: 2
+                 Size: [5 4 3]
+                 Soln: []
+    Sparse_Generation: 0
+             Sparse_M: 0
+                State: {1×6 cell}
+            Symmetric: []
+                 Type: 'CP'
+
+
% Recreate an identical test problem
+info2 = create_problem(params);
+
% Check that the solutions are identical
+tf = isequal(info1.Soln, info2.Soln)
+
+tf =
+
+  logical
+
+   1
+
+
% Check that the data are identical
+diff = norm(info1.Data - info2.Data)
+
+diff =
+
+     0
+
+

Options for creating factor matrices, core tensors, and lambdas

Any function with two arguments specifying the size can be used to generate the factor matrices. This is specified by the 'Factor_Generator' option to create_problem.

Pre-defined options for 'Factor_Generator' for creating factor matrices (for CP or Tucker) include:

  • 'rand' - Uniform on [0,1]
  • 'randn' - Gaussian with mean 0 and std 1
  • 'orthogonal' - Generates a random orthogonal matrix. This option only works when the number of factors is less than or equal to the smallest dimension.
  • 'stochastic' - Generates nonnegative factor matrices so that each column sums to one.

Pre-defined options for 'Lambda_Generator' for creating lambda vector (for CP) include:

  • 'rand' - Uniform on [0,1]
  • 'randn' - Gaussian with mean 0 and std 1
  • 'orthogonal' - Creates a random vector with norm one.
  • 'stochastic' - Creates a random nonnegative vector whose entries sum to one.

Pre-defined options for 'Core_Generator' for creating core tensors (for Tucker) include:

  • 'rand' - Uniform on [0,1]
  • 'randn' - Gaussian with mean 0 and std 1
% Here is an example of a custom factor generator
+factor_generator = @(m,n) 100*rand(m,n);
+info = create_problem('Size', [5 4 3], 'Num_Factors', 2, ...
+    'Factor_Generator', factor_generator, 'Lambda_Generator', @ones);
+first_factor_matrix = info.Soln.U{1}
+
+first_factor_matrix =
+
+    1.3961   66.6972
+   18.7962   63.1040
+   58.2546   63.2120
+   82.3247   11.2026
+   25.4430   79.1349
+
+
% Here is an example of a custom core generator for Tucker:
+info = create_problem('Type', 'Tucker', 'Size', [5 4 3], ...
+    'Num_Factors', [2 2 2], 'Core_Generator', @tenones);
+core = info.Soln.core
+
core is a tensor of size 2 x 2 x 2
+	core(:,:,1) = 
+	     1     1
+	     1     1
+	core(:,:,2) = 
+	     1     1
+	     1     1
+
% Here's another example for CP, this time using a function to create
+% factor matrices such that the inner products of the columns are
+% prespecified.
+info = create_problem('Size', [5 4 3], 'Num_Factors', 3, ...
+    'Factor_Generator', @(m,n) matrandcong(m,n,.9));
+U = info.Soln.U{1};
+congruences = U'*U
+
+congruences =
+
+    1.0000    0.9000    0.9000
+    0.9000    1.0000    0.9000
+    0.9000    0.9000    1.0000
+
+

Generating data from an existing solution

It's possible to skip the solution generation altogether and instead just generate appropriate test data.

% Manually generate a test problem (or it comes from some
+% previous call to |create_problem|.
+soln = ktensor({rand(50,3), rand(40,3), rand(30,3)});
+
+% Use that soln to create new test problem.
+info = create_problem('Soln', soln);
+
+% Check whether solutions is equivalent to the input
+iseq = isequal(soln,info.Soln)
+
+iseq =
+
+  logical
+
+   1
+
+

Creating dense missing data problems

It's possible to create problems that have a percentage of missing data. The problem generator randomly creates the pattern of missing data.

% Specify 25% missing data as follows:
+[info,params] = create_problem('Size', [5 4 3], 'M', 0.25);
+
% Here is the pattern of known data (1 = known, 0 = unknown)
+info.Pattern
+
ans is a tensor of size 5 x 4 x 3
+	ans(:,:,1) = 
+	     1     1     0     1
+	     1     1     1     1
+	     1     1     1     0
+	     1     1     1     1
+	     0     1     1     1
+	ans(:,:,2) = 
+	     1     1     1     1
+	     1     0     1     1
+	     1     1     0     1
+	     1     1     1     1
+	     1     0     1     0
+	ans(:,:,3) = 
+	     1     1     1     0
+	     1     0     1     0
+	     0     1     1     0
+	     0     1     1     1
+	     0     0     1     1
+
% Here is the data (incl. noise) with missing entries zeroed out
+info.Data
+
ans is a tensor of size 5 x 4 x 3
+	ans(:,:,1) = 
+	   -0.1793   -0.2310         0    0.0429
+	   -0.3749   -0.1852    0.2674    0.0013
+	   -1.1206   -0.7617    1.1015         0
+	   -0.1343   -0.0524    0.2058   -0.0447
+	         0    0.2174   -0.2209    0.0131
+	ans(:,:,2) = 
+	   -0.0177   -0.3066    0.3508   -0.1254
+	   -0.5150         0    0.2864   -0.0218
+	   -1.7729   -1.1326         0    0.0851
+	   -0.2139   -0.2570    0.4128   -0.1203
+	    0.1954         0   -0.4493         0
+	ans(:,:,3) = 
+	   -0.2604   -0.1968    0.2981         0
+	   -0.4944         0    0.4080         0
+	         0   -1.1708    1.3813         0
+	         0   -0.1662    0.2489    0.0002
+	         0         0   -0.4103    0.0034
+

Creating sparse missing data problems.

If Sparse_M is set to true, then the data returned is sparse. Moreover, the dense versions are never explicitly created. This option only works when M >= 0.8.

% Specify 80% missing data and sparse
+info = create_problem('Size', [5 4 3], 'M', 0.80, 'Sparse_M', true);
+
% Here is the pattern of known data
+info.Pattern
+
ans is a sparse tensor of size 5 x 4 x 3 with 12 nonzeros
+	(1,1,2)     1
+	(1,2,1)     1
+	(1,3,3)     1
+	(1,4,1)     1
+	(2,3,2)     1
+	(2,4,3)     1
+	(3,4,2)     1
+	(3,4,3)     1
+	(4,3,1)     1
+	(5,1,2)     1
+	(5,2,1)     1
+	(5,2,3)     1
+
% Here is the data (incl. noise) with missing entries zeroed out
+info.Data
+
ans is a sparse tensor of size 5 x 4 x 3 with 12 nonzeros
+	(1,1,2)    0.0208
+	(1,2,1)    0.3199
+	(1,3,3)   -0.4194
+	(1,4,1)    0.2348
+	(2,3,2)    0.6854
+	(2,4,3)    1.9965
+	(3,4,2)    0.0126
+	(3,4,3)    0.3155
+	(4,3,1)    0.1286
+	(5,1,2)   -0.0514
+	(5,2,1)    1.2025
+	(5,2,3)   -2.8011
+

Create missing data problems with a pre-specified pattern

It's also possible to provide a specific pattern (dense or sparse) to be used to specify where data should be missing.

% Create pattern
+P = tenrand([5 4 3]) > 0.5;
+% Create test problem with that pattern
+info = create_problem('Size', size(P), 'M', P);
+% Show the data
+info.Data
+
ans is a tensor of size 5 x 4 x 3
+	ans(:,:,1) = 
+	    0.0019         0   -0.3582         0
+	         0    0.0407         0         0
+	         0    0.4171         0    0.1042
+	    0.0482         0   -0.1365         0
+	   -0.0504         0         0         0
+	ans(:,:,2) = 
+	   -0.0867    0.9799         0   -0.0395
+	         0         0   -0.1445         0
+	    0.0842         0   -0.1508   -0.0263
+	         0   -0.6211         0   -0.1826
+	    0.0999   -1.3011   -0.7726         0
+	ans(:,:,3) = 
+	         0         0    0.0384    0.0309
+	         0         0    0.1013         0
+	         0         0   -0.3412    0.0237
+	    0.1481   -0.8781         0    0.2671
+	   -0.0145         0         0   -0.1125
+

Creating sparse problems (CP only)

If we assume each model parameter is the input to a Poisson process, then we can generate sparse test problems. This requires that all the factor matrices and lambda be nonnegative. The default factor generator ('randn') won't work here since it produces both positive and negative values.

% Generate factor matrices with a few large entries in each column; this
+% will be the basis of our soln.
+sz = [20 15 10];
+nf = 4;
+A = cell(3,1);
+for n = 1:length(sz)
+    A{n} = rand(sz(n), nf);
+    for r = 1:nf
+        p = randperm(sz(n));
+        idx = p(1:round(.2*sz(n)));
+        A{n}(idx,r) = 10 * A{n}(idx,r);
+    end
+end
+S = ktensor(A);
+S = normalize(S,'sort',1);
+
% Create sparse test problem based on provided solution. The
+% 'Sparse_Generation' says how many insertions to make based on the
+% provided solution S. The lambda vector of the solution is automatically
+% rescaled to match the number of insertions.
+info = create_problem('Soln', S, 'Sparse_Generation', 500);
+num_nonzeros = nnz(info.Data)
+total_insertions = sum(info.Data.vals)
+orig_lambda_vs_rescaled = S.lambda ./ info.Soln.lambda
+
+num_nonzeros =
+
+   327
+
+
+total_insertions =
+
+   500
+
+
+orig_lambda_vs_rescaled =
+
+   59.6414
+   59.6414
+   59.6414
+   59.6414
+
+

Generating an initial guess

The create_guess function creates a random initial guess as a cell array of matrices. Its behavior is very similar to create_problem. A nice option is that you can generate an initial guess that is a perturbation of the solution.

info = create_problem;
+
+% Create an initial guess to go with the problem that is just a 5%
+% perturbation of the correct solution.
+U = create_guess('Soln', info.Soln, 'Factor_Generator', 'pertubation', ...
+    'Pertubation', 0.05);
+
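As a rough check (not part of the original page), one can compare the guess to the true factors; the exact scaling of the perturbation is defined inside create_guess, so the value below is only indicative:

% Minimal sketch: relative difference between the guessed and true first factor.
relerr = norm(U{1} - info.Soln.U{1},'fro') / norm(info.Soln.U{1},'fro')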
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttb.css b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttb.css new file mode 100644 index 0000000..0fcc0e3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttb.css @@ -0,0 +1,71 @@ +html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,font,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td{margin:0;padding:0;border:0;outline:0;font-size:100%;vertical-align:baseline;background:transparent}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:'';content:none}:focus{outine:0}ins{text-decoration:none}del{text-decoration:line-through}table{border-collapse:collapse;border-spacing:0} + +html { min-height:100%; margin-bottom:1px; } +html body { height:100%; margin:0px; font-family:Arial, Helvetica, sans-serif; font-size:10px; color:#000; line-height:140%; background:#fff none; overflow-y:scroll; } +html body td { vertical-align:top; text-align:left; } + +h1 { padding:0px; margin:0px 0px 25px; font-family:Arial, Helvetica, sans-serif; font-size:1.5em; color:#d55000; line-height:100%; font-weight:normal; } +h2 { padding:0px; margin:0px 0px 8px; font-family:Arial, Helvetica, sans-serif; font-size:1.2em; color:#000; font-weight:bold; line-height:140%; border-bottom:1px solid #d6d4d4; display:block; } +h3 { padding:0px; margin:0px 0px 5px; font-family:Arial, Helvetica, sans-serif; font-size:1.1em; color:#000; font-weight:bold; line-height:140%; } + +a { color:#005fce; text-decoration:none; } +a:hover { color:#005fce; text-decoration:underline; } +a:visited { color:#004aa0; text-decoration:none; } + +a.name { color:default; text-decoration:none; } +a.name:hover { color:default; text-decoration:none; } +a.name:visited { color:default; text-decoration:none; } + + +p { padding:0px; margin:0px 0px 20px; } +img { padding:0px; margin:0px 0px 20px; border:none; } +p img, pre img, tt img, li img, h1 img, h2 img { margin-bottom:0px; } + +ul { padding:0px; margin:0px 0px 20px 23px; list-style:square; } +ul li { padding:0px; margin:0px 0px 7px 0px; } +ul li ul { padding:5px 0px 0px; margin:0px 0px 7px 23px; } +ul li ol li { list-style:decimal; } +ol { padding:0px; margin:0px 0px 20px 0px; list-style:decimal; } +ol li { padding:0px; margin:0px 0px 7px 23px; list-style-type:decimal; } +ol li ol { padding:5px 0px 0px; margin:0px 0px 7px 0px; } +ol li ol li { list-style-type:lower-alpha; } +ol li ul { padding-top:7px; } +ol li ul li { list-style:square; } + +.content { font-size:1.2em; line-height:140%; padding: 20px; } +.banner{ background-color:#15243c; text-align:center;} + +pre, code { font-size:12px; } +tt { font-size: 1.2em; } +pre { margin:0px 0px 20px; } +pre.codeinput { padding:10px; border:1px solid #d3d3d3; background:#f7f7f7; } +pre.codeoutput { padding:10px 11px; margin:0px 0px 20px; color:#4c4c4c; } +pre.error { color:red; } + +@media print { pre.codeinput, pre.codeoutput { word-wrap:break-word; width:100%; } } + +span.keyword { color:#0000FF } +span.comment { color:#228B22 } +span.string { color:#A020F0 } +span.untermstring { color:#B20000 } +span.syscmd { color:#B28C00 } + +.footer { width:auto; padding:10px 0px; margin:25px 0px 0px; border-top:1px dotted #878787; font-size:0.8em; line-height:140%; font-style:italic; color:#878787; 
text-align:left; float:none; } +.footer p { margin:0px; } +.footer a { color:#878787; } +.footer a:hover { color:#878787; text-decoration:underline; } +.footer a:visited { color:#878787; } + +table th { padding:7px 5px; text-align:left; vertical-align:middle; border: 1px solid #d6d4d4; font-weight:bold; } +table td { padding:7px 5px; text-align:left; vertical-align:top; border:1px solid #d6d4d4; } + +.portrait { + background-image: url('../img/portrait.jpg'); + width: 200px; + height: 200px; + margin: 0 auto; + border-radius: 50%; + background-size: cover; + -webkit-background-size: cover; + -moz-background-size: cover; +} diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttensor_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttensor_doc.html new file mode 100644 index 0000000..0a63c13 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/ttensor_doc.html @@ -0,0 +1,626 @@ + + + + + Tucker tensors

Tucker tensors

Tucker format is a decomposition of a tensor X as the product of a core tensor G and matrices (e.g., A,B,C) in each dimension. In other words, a tensor X is expressed as:

$${\mathcal X} = {\mathcal G} \times_1 A \times_2 B \times_3 C$$

In MATLAB notation, X=ttm(G,{A,B,C}). The ttensor class stores the components of the tensor X and can perform many operations, e.g., ttm, without explicitly forming the tensor X.
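A minimal sketch (with arbitrary small sizes, not part of the original page) verifying that a ttensor assembled with full agrees with the explicit n-mode products:

% Minimal sketch: full(ttensor(G,{A,B,C})) matches ttm(G,{A,B,C}).
G = tensor(rand(2,2,2));
A = rand(4,2); B = rand(3,2); C = rand(2,2);
X1 = full(ttensor(G,{A,B,C}));   %<-- assemble from the ttensor
X2 = ttm(G,{A,B,C});             %<-- G x_1 A x_2 B x_3 C formed explicitly
norm(X1 - X2)                    %<-- should be (numerically) zero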

Contents

Creating a ttensor with a tensor core

core = tensor(rand(3,2,1),[3 2 1]); %<-- The core tensor.
+U = {rand(5,3), rand(4,2), rand(3,1)}; %<-- The matrices.
+X = ttensor(core,U) %<-- Create the ttensor.
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 3 x 2 x 1
+		X.core(:,:,1) = 
+	    0.1298    0.8117
+	    0.1048    0.6696
+	    0.0632    0.6540
+	X.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	X.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	X.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+

Alternate core formats: sptensor, ktensor, or ttensor

core1 = sptenrand([3 2 1],3); %<-- Create a 3 x 2 x 1 sptensor.
+Y = ttensor(core1,U) %<-- Core is a sptensor.
+
Y is a ttensor of size 5 x 4 x 3
+	Y.core is a sparse tensor of size 3 x 2 x 1 with 3 nonzeros
+	(1,1,1)    0.9871
+	(2,2,1)    0.5015
+	(3,2,1)    0.8833
+	Y.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	Y.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	Y.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+
V = {rand(3,2),rand(2,2),rand(1,2)}; %<-- Create some random matrices.
+core2 = ktensor(V); %<-- Create a 3 x 2 x 1 ktensor.
+Y = ttensor(core2,U) %<-- Core is a ktensor.
+
Y is a ttensor of size 5 x 4 x 3
+	Y.core is a ktensor of size 3 x 2 x 1
+		Y.core.lambda = [ 1  1 ]
+		Y.core.U{1} = 
+		    0.8746    0.7261
+		    0.6178    0.2917
+		    0.9611    0.4665
+		Y.core.U{2} = 
+		    0.9439    0.0119
+		    0.0943    0.3723
+		Y.core.U{3} = 
+		    0.3542    0.0820
+	Y.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	Y.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	Y.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+
core3 = ttensor(tensor(1:8,[2 2 2]),V); %<-- Create a 3 x 2 x 1 ttensor.
+Y = ttensor(core3,U) %<-- Core is a ttensor.
+
Y is a ttensor of size 5 x 4 x 3
+	Y.core is a ttensor of size 3 x 2 x 1
+		Y.core.core is a tensor of size 2 x 2 x 2
+			Y.core.core(:,:,1) = 
+	     1     3
+	     2     4
+			Y.core.core(:,:,2) = 
+	     5     7
+	     6     8
+		Y.core.U{1} = 
+		    0.8746    0.7261
+		    0.6178    0.2917
+		    0.9611    0.4665
+		Y.core.U{2} = 
+		    0.9439    0.0119
+		    0.0943    0.3723
+		Y.core.U{3} = 
+		    0.3542    0.0820
+	Y.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	Y.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	Y.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+

Creating a one-dimensional ttensor

Z = ttensor(tensor(rand(2,1),2), rand(4,2)) %<-- One-dimensional ttensor.
+
Z is a ttensor of size 4
+	Z.core is a tensor of size 2
+		Z.core(:) = 
+	    0.3109
+	    0.2558
+	Z.U{1} = 
+		    0.1048    0.3074
+		    0.2903    0.7715
+		    0.4985    0.2026
+		    0.8205    0.9396
+

Constituent parts of a ttensor

X.core %<-- Core tensor.
+
ans is a tensor of size 3 x 2 x 1
+	ans(:,:,1) = 
+	    0.1298    0.8117
+	    0.1048    0.6696
+	    0.0632    0.6540
+
X.U %<-- Cell array of matrices.
+
+ans =
+
+  1×3 cell array
+
+    [5×3 double]    [4×2 double]    [3×1 double]
+
+

Creating a ttensor from its constituent parts

Y = ttensor(X.core,X.U) %<-- Recreate a tensor from its parts.
+
Y is a ttensor of size 5 x 4 x 3
+	Y.core is a tensor of size 3 x 2 x 1
+		Y.core(:,:,1) = 
+	    0.1298    0.8117
+	    0.1048    0.6696
+	    0.0632    0.6540
+	Y.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	Y.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	Y.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+

Creating an empty ttensor.

X = ttensor %<-- empty ttensor
+
X is a ttensor of size [empty tensor]
+	X.core is a tensor of size [empty tensor]
+		X.core = []
+

Use full or tensor to convert a ttensor to a tensor

X = ttensor(core,U) %<-- Create a tensor
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 3 x 2 x 1
+		X.core(:,:,1) = 
+	    0.1298    0.8117
+	    0.1048    0.6696
+	    0.0632    0.6540
+	X.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	X.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	X.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+
full(X) %<-- Converts to a tensor.
+
ans is a tensor of size 5 x 4 x 3
+	ans(:,:,1) = 
+	    0.8341    0.7370    0.3768    0.6139
+	    0.4723    0.4201    0.2122    0.3489
+	    0.2987    0.2622    0.1356    0.2190
+	    0.5438    0.4827    0.2447    0.4012
+	    0.7929    0.6988    0.3589    0.5827
+	ans(:,:,2) = 
+	    0.9627    0.8507    0.4349    0.7086
+	    0.5451    0.4849    0.2450    0.4027
+	    0.3447    0.3026    0.1566    0.2528
+	    0.6277    0.5572    0.2825    0.4631
+	    0.9152    0.8066    0.4143    0.6726
+	ans(:,:,3) = 
+	    0.4980    0.4401    0.2250    0.3665
+	    0.2820    0.2508    0.1267    0.2083
+	    0.1783    0.1566    0.0810    0.1308
+	    0.3247    0.2882    0.1461    0.2396
+	    0.4734    0.4172    0.2143    0.3479
+
tensor(X) %<-- Also converts to a tensor.
+
ans is a tensor of size 5 x 4 x 3
+	ans(:,:,1) = 
+	    0.8341    0.7370    0.3768    0.6139
+	    0.4723    0.4201    0.2122    0.3489
+	    0.2987    0.2622    0.1356    0.2190
+	    0.5438    0.4827    0.2447    0.4012
+	    0.7929    0.6988    0.3589    0.5827
+	ans(:,:,2) = 
+	    0.9627    0.8507    0.4349    0.7086
+	    0.5451    0.4849    0.2450    0.4027
+	    0.3447    0.3026    0.1566    0.2528
+	    0.6277    0.5572    0.2825    0.4631
+	    0.9152    0.8066    0.4143    0.6726
+	ans(:,:,3) = 
+	    0.4980    0.4401    0.2250    0.3665
+	    0.2820    0.2508    0.1267    0.2083
+	    0.1783    0.1566    0.0810    0.1308
+	    0.3247    0.2882    0.1461    0.2396
+	    0.4734    0.4172    0.2143    0.3479
+

Use double to convert a ttensor to a (multidimensional) array

double(X) %<-- Converts to a MATLAB array
+
+ans(:,:,1) =
+
+    0.8341    0.7370    0.3768    0.6139
+    0.4723    0.4201    0.2122    0.3489
+    0.2987    0.2622    0.1356    0.2190
+    0.5438    0.4827    0.2447    0.4012
+    0.7929    0.6988    0.3589    0.5827
+
+
+ans(:,:,2) =
+
+    0.9627    0.8507    0.4349    0.7086
+    0.5451    0.4849    0.2450    0.4027
+    0.3447    0.3026    0.1566    0.2528
+    0.6277    0.5572    0.2825    0.4631
+    0.9152    0.8066    0.4143    0.6726
+
+
+ans(:,:,3) =
+
+    0.4980    0.4401    0.2250    0.3665
+    0.2820    0.2508    0.1267    0.2083
+    0.1783    0.1566    0.0810    0.1308
+    0.3247    0.2882    0.1461    0.2396
+    0.4734    0.4172    0.2143    0.3479
+
+

Use ndims and size to get the size of a ttensor

ndims(X) %<-- Number of dimensions.
+
+ans =
+
+     3
+
+
size(X) %<-- Row vector of the sizes.
+
+ans =
+
+     5     4     3
+
+
size(X,2) %<-- Size of the 2nd mode.
+
+ans =
+
+     4
+
+

Subscripted reference to a ttensor

X.core(1,1,1) %<-- Access an element of the core.
+
+ans =
+
+    0.1298
+
+
X.U{2} %<-- Extract a matrix.
+
+ans =
+
+    0.9079    0.9561
+    0.4013    0.9000
+    0.5772    0.4089
+    0.4832    0.7291
+
+
X{2} %<-- Same as above.
+
+ans =
+
+    0.9079    0.9561
+    0.4013    0.9000
+    0.5772    0.4089
+    0.4832    0.7291
+
+

Subscripted assignment for a ttensor

X.core = tenones(size(X.core)) %<-- Insert a new core.
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 3 x 2 x 1
+		X.core(:,:,1) = 
+	     1     1
+	     1     1
+	     1     1
+	X.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	X.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	X.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+
X.core(2,2,1) = 7 %<-- Change a single element.
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 3 x 2 x 1
+		X.core(:,:,1) = 
+	     1     1
+	     1     7
+	     1     1
+	X.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	X.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	X.U{3} = 
+		    0.4853
+		    0.5602
+		    0.2898
+
X{3}(1:2,1) = [1;1] %<-- Change the matrix for mode 3.
+
X is a ttensor of size 5 x 4 x 3
+	X.core is a tensor of size 3 x 2 x 1
+		X.core(:,:,1) = 
+	     1     1
+	     1     7
+	     1     1
+	X.U{1} = 
+		    0.5678    0.8964    0.8084
+		    0.3669    0.1125    0.8239
+		    0.2715    0.4471    0.0643
+		    0.3697    0.3141    0.8189
+		    0.6985    0.8726    0.5384
+	X.U{2} = 
+		    0.9079    0.9561
+		    0.4013    0.9000
+		    0.5772    0.4089
+		    0.4832    0.7291
+	X.U{3} = 
+		    1.0000
+		    1.0000
+		    0.2898
+

Using end for last index

X{end}  %<-- The same as X{3}.
+
+ans =
+
+    1.0000
+    1.0000
+    0.2898
+
+

Basic operations (uplus, uminus, mtimes) for a ttensor.

X = ttensor(tenrand([2 2 2]),{rand(3,2),rand(1,2),rand(2,2)}) %<-- Data.
++X %<-- Calls uplus.
+
X is a ttensor of size 3 x 1 x 2
+	X.core is a tensor of size 2 x 2 x 2
+		X.core(:,:,1) = 
+	    0.2107    0.6356
+	    0.9670    0.4252
+		X.core(:,:,2) = 
+	    0.2262    0.7426
+	    0.9325    0.5133
+	X.U{1} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+	X.U{2} = 
+		    0.0968    0.1922
+	X.U{3} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+ans is a ttensor of size 3 x 1 x 2
+	ans.core is a tensor of size 2 x 2 x 2
+		ans.core(:,:,1) = 
+	    0.2107    0.6356
+	    0.9670    0.4252
+		ans.core(:,:,2) = 
+	    0.2262    0.7426
+	    0.9325    0.5133
+	ans.U{1} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+	ans.U{2} = 
+		    0.0968    0.1922
+	ans.U{3} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+
-X %<-- Calls uminus.
+
ans is a ttensor of size 3 x 1 x 2
+	ans.core is a tensor of size 2 x 2 x 2
+		ans.core(:,:,1) = 
+	   -0.2107   -0.6356
+	   -0.9670   -0.4252
+		ans.core(:,:,2) = 
+	   -0.2262   -0.7426
+	   -0.9325   -0.5133
+	ans.U{1} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+	ans.U{2} = 
+		    0.0968    0.1922
+	ans.U{3} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+
5*X %<-- Calls mtimes.
+
ans is a ttensor of size 3 x 1 x 2
+	ans.core is a tensor of size 2 x 2 x 2
+		ans.core(:,:,1) = 
+	    1.0536    3.1781
+	    4.8349    2.1261
+		ans.core(:,:,2) = 
+	    1.1311    3.7129
+	    4.6625    2.5665
+	ans.U{1} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+	ans.U{2} = 
+		    0.0968    0.1922
+	ans.U{3} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+

Use permute to reorder the modes of a ttensor

permute(X,[3 2 1]) %<-- Reverses the modes of X
+
ans is a ttensor of size 2 x 1 x 3
+	ans.core is a tensor of size 2 x 2 x 2
+		ans.core(:,:,1) = 
+	    0.2107    0.6356
+	    0.2262    0.7426
+		ans.core(:,:,2) = 
+	    0.9670    0.4252
+	    0.9325    0.5133
+	ans.U{1} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+	ans.U{2} = 
+		    0.0968    0.1922
+	ans.U{3} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+

Displaying a ttensor

A ttensor is displayed by showing the core and each of the factor matrices.

disp(X) %<-- Prints out the ttensor.
+
ans is a ttensor of size 3 x 1 x 2
+	ans.core is a tensor of size 2 x 2 x 2
+		ans.core(:,:,1) = 
+	    0.2107    0.6356
+	    0.9670    0.4252
+		ans.core(:,:,2) = 
+	    0.2262    0.7426
+	    0.9325    0.5133
+	ans.U{1} = 
+		    0.5417    0.6280
+		    0.2143    0.0907
+		    0.8007    0.8121
+	ans.U{2} = 
+		    0.0968    0.1922
+	ans.U{3} = 
+		    0.0639    0.4619
+		    0.4969    0.8735
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker.html new file mode 100644 index 0000000..c24a948 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker.html @@ -0,0 +1,26 @@

Tucker Decompositions

  • hosvd - Higher-order SVD (HOSVD), including sequentially-truncated HOSVD
  • tucker_als - Higher-order orthogonal iteration

diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker_als_doc.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker_als_doc.html new file mode 100644 index 0000000..c02e838 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/tucker_als_doc.html @@ -0,0 +1,274 @@

Alternating least squares for Tucker model

The function tucker_als computes the best rank-(R1,R2,...,Rn) approximation of tensor X, according to the ranks specified in the vector R. The input X can be a tensor, sptensor, ktensor, or ttensor. The result returned in T is a ttensor.

The method is originally from Tucker (1966) and later revisited in De Lathauwer et al. (2000).

  • Tucker, L. R. Some mathematical notes on three-mode factor analysis. Psychometrika, 1966, 31, 279-311.
  • De Lathauwer, L.; De Moor, B. & Vandewalle, J. On the best rank-1 and rank-(R_1, R_2, ..., R_N) approximation of higher-order tensors. SIAM Journal on Matrix Analysis and Applications, 2000, 21, 1324-1342.

Note: Oftentimes it's better to use hosvd instead.
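For comparison, here is a minimal sketch of calling hosvd; the interface shown (a relative error tolerance plus an optional 'ranks' argument) is our assumption and should be checked against the hosvd documentation:

% Minimal sketch (assumed hosvd interface): rank-[2 2 2] Tucker approximation
% of a small dense tensor via the sequentially-truncated HOSVD.
Xd = tensor(randn(5,4,3));
Td = hosvd(Xd, 2*sqrt(eps), 'ranks', [2 2 2]);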

Contents

Create a data tensor of size [5 4 3]

rng('default'); rng(0); %<-- Set seed for reproducibility
+X = sptenrand([5 4 3], 10)
+
X is a sparse tensor of size 5 x 4 x 3 with 10 nonzeros
+	(1,2,3)    0.0759
+	(1,3,2)    0.0540
+	(2,2,2)    0.5308
+	(2,2,3)    0.7792
+	(3,1,3)    0.9340
+	(3,4,2)    0.1299
+	(4,1,2)    0.5688
+	(4,4,2)    0.4694
+	(5,2,1)    0.0119
+	(5,4,3)    0.3371
+

Create a [2 2 2] approximation

T = tucker_als(X,2)        %<-- best rank(2,2,2) approximation
+
+Tucker Alternating Least-Squares:
+ Iter  1: fit = 3.266855e-01 fitdelta = 3.3e-01
+ Iter  2: fit = 4.285677e-01 fitdelta = 1.0e-01
+ Iter  3: fit = 4.707375e-01 fitdelta = 4.2e-02
+ Iter  4: fit = 4.728036e-01 fitdelta = 2.1e-03
+ Iter  5: fit = 4.728492e-01 fitdelta = 4.6e-05
+T is a ttensor of size 5 x 4 x 3
+	T.core is a tensor of size 2 x 2 x 2
+		T.core(:,:,1) = 
+	    0.9045    0.0007
+	   -0.0007    0.8920
+		T.core(:,:,2) = 
+	    0.2732    0.0006
+	    0.0006   -0.2771
+	T.U{1} = 
+		    0.0666    0.0001
+		    0.9978    0.0008
+		   -0.0008    1.0000
+		   -0.0001    0.0007
+		   -0.0001    0.0018
+	T.U{2} = 
+		   -0.0015    1.0000
+		    1.0000    0.0015
+		    0.0021    0.0000
+		   -0.0001    0.0007
+	T.U{3} = 
+		   -0.0000   -0.0000
+		    0.2971    0.9548
+		    0.9548   -0.2971
+
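The printed fit can be related to the relative reconstruction error; the sketch below (not part of the original output) reflects our reading of that definition and reuses X and T from above:

% Minimal sketch: fit is understood as 1 minus the relative error.
relerr = norm(full(X) - full(T)) / norm(full(X));
1 - relerr                        %<-- expected to be close to the final fit above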

Create a [2 2 1] approximation

T = tucker_als(X,[2 2 1])  %<-- best rank(2,2,1) approximation
+
+Tucker Alternating Least-Squares:
+ Iter  1: fit = 2.363442e-01 fitdelta = 2.4e-01
+ Iter  2: fit = 3.907381e-01 fitdelta = 1.5e-01
+ Iter  3: fit = 4.304797e-01 fitdelta = 4.0e-02
+ Iter  4: fit = 4.328533e-01 fitdelta = 2.4e-03
+ Iter  5: fit = 4.331455e-01 fitdelta = 2.9e-04
+ Iter  6: fit = 4.331975e-01 fitdelta = 5.2e-05
+T is a ttensor of size 5 x 4 x 3
+	T.core is a tensor of size 2 x 2 x 1
+		T.core(:,:,1) = 
+	    0.9283    0.0000
+	   -0.0000    0.8930
+	T.U{1} = 
+		    0.0753   -0.0000
+		    0.9972   -0.0000
+		    0.0000    0.9613
+		    0.0001    0.2723
+		    0.0001    0.0414
+	T.U{2} = 
+		   -0.0000    0.9921
+		    1.0000   -0.0000
+		    0.0017   -0.0000
+		    0.0001    0.1252
+	T.U{3} = 
+		    0.0000
+		    0.3959
+		    0.9183
+

Use a different ordering of the dimensions

T = tucker_als(X,2,struct('dimorder',[3 2 1]))
+
+Tucker Alternating Least-Squares:
+ Iter  1: fit = 3.954350e-01 fitdelta = 4.0e-01
+ Iter  2: fit = 4.650831e-01 fitdelta = 7.0e-02
+ Iter  3: fit = 4.724949e-01 fitdelta = 7.4e-03
+ Iter  4: fit = 4.728343e-01 fitdelta = 3.4e-04
+ Iter  5: fit = 4.728495e-01 fitdelta = 1.5e-05
+T is a ttensor of size 5 x 4 x 3
+	T.core is a tensor of size 2 x 2 x 2
+		T.core(:,:,1) = 
+	    0.9036   -0.0394
+	    0.0389    0.8910
+		T.core(:,:,2) = 
+	    0.2730   -0.0146
+	   -0.0149   -0.2769
+	T.U{1} = 
+		    0.0665   -0.0004
+		    0.9978   -0.0055
+		    0.0055    1.0000
+		    0.0007    0.0000
+		    0.0005    0.0000
+	T.U{2} = 
+		    0.0491    0.9988
+		    0.9988   -0.0491
+		    0.0021   -0.0001
+		    0.0016    0.0000
+	T.U{3} = 
+		    0.0000    0.0000
+		    0.2970    0.9549
+		    0.9549   -0.2970
+

Use the n-vecs initialization method

This initialization is more expensive but generally works very well.

T = tucker_als(X,2,struct('dimorder',[3 2 1],'init','eigs'))
+
  Computing 2 leading e-vectors for factor 2.
+  Computing 2 leading e-vectors for factor 1.
+
+Tucker Alternating Least-Squares:
+ Iter  1: fit = 4.726805e-01 fitdelta = 4.7e-01
+ Iter  2: fit = 4.728466e-01 fitdelta = 1.7e-04
+ Iter  3: fit = 4.728501e-01 fitdelta = 3.5e-06
+T is a ttensor of size 5 x 4 x 3
+	T.core is a tensor of size 2 x 2 x 2
+		T.core(:,:,1) = 
+	    0.9045    0.0000
+	   -0.0000    0.8918
+		T.core(:,:,2) = 
+	    0.2731    0.0000
+	    0.0000   -0.2775
+	T.U{1} = 
+		    0.0666   -0.0000
+		    0.9978   -0.0000
+		    0.0000    1.0000
+		    0.0000    0.0001
+		   -0.0000    0.0002
+	T.U{2} = 
+		   -0.0000    1.0000
+		    1.0000    0.0000
+		    0.0021   -0.0000
+		    0.0000    0.0005
+	T.U{3} = 
+		    0.0000    0.0000
+		    0.2973    0.9548
+		    0.9548   -0.2973
+

Specify the initial guess manually

U0 = {rand(5,2),rand(4,2),[]}; %<-- Initial guess for factors of T
+T = tucker_als(X,2,struct('dimorder',[3 2 1],'init',{U0}))
+
+Tucker Alternating Least-Squares:
+ Iter  1: fit = 3.733166e-01 fitdelta = 3.7e-01
+ Iter  2: fit = 4.397339e-01 fitdelta = 6.6e-02
+ Iter  3: fit = 4.717403e-01 fitdelta = 3.2e-02
+ Iter  4: fit = 4.728257e-01 fitdelta = 1.1e-03
+ Iter  5: fit = 4.728497e-01 fitdelta = 2.4e-05
+T is a ttensor of size 5 x 4 x 3
+	T.core is a tensor of size 2 x 2 x 2
+		T.core(:,:,1) = 
+	    0.9047    0.0100
+	   -0.0099    0.8916
+		T.core(:,:,2) = 
+	    0.2725    0.0037
+	    0.0038   -0.2779
+	T.U{1} = 
+		    0.0666    0.0001
+		    0.9978    0.0014
+		   -0.0014    1.0000
+		   -0.0002    0.0002
+		   -0.0001    0.0005
+	T.U{2} = 
+		   -0.0125    0.9999
+		    0.9999    0.0125
+		    0.0021    0.0000
+		   -0.0004    0.0013
+	T.U{3} = 
+		   -0.0000   -0.0000
+		    0.2978    0.9546
+		    0.9546   -0.2978
+
\ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/working.html b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/working.html new file mode 100644 index 0000000..39af498 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/html/working.html @@ -0,0 +1,39 @@

Working with Tensors

  • Multiplying Tensors - Covers many types of tensor multiplication, including ttv, ttm, ttt, mttkrp, innerprod, contract, and norm
  • Mode-n Vectors - Generating the leading mode-n vectors using nvecs
  • Collapsing and Scaling Tensors - Computing sums, means, mins, maxs, and so on for portions of a tensor, and conversely scaling portions of a tensor
  • Generating Test Problems - Covers create_problem and create_guess for generating random test problems with specified characteristics
  • Identities - Exploration of identities and connections among tensor and matrix operations
+ + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/identities_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/identities_doc.m new file mode 100644 index 0000000..ab49842 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/identities_doc.m @@ -0,0 +1,236 @@ +%% Identities and relationships of tensors +% There are many mathematical relationships, identities, and +% connections among tensors. These identities are presented here and +% show the versatility of the Tensor Toolbox. +% The propositions indicated below are references to the following +% report: +% +% T.G. Kolda, "Multilinear operators for higher-order decompositions", +% Tech. Rep. SAND2006-2081, Sandia National Laboratories, April 2006, +% http://csmr.ca.sandia.gov/~tgkolda/pubs/index.html#SAND2006-2081. + +%% N-mode product properties +% Create some data. +Y = tenrand([4 3 2]); +A = rand(3,4); +B = rand(3,3); +%% +% Prop 3.4(a): The order of the multiplication in different modes is irrelevant. +% +% $$(Y \times_1 A) \times_2 B = (Y \times_2 B) \times_1 A$$ +% +X1 = ttm( ttm(Y,A,1), B, 2); %<-- Y x_1 A x_2 B +X2 = ttm( ttm(Y,B,2), A, 1); %<-- Y x_2 B x_1 A +norm(X1 - X2) %<-- difference is zero +%% N-mode product and matricization +% Generate some data to work with. +Y = tenrand([5 4 3]); +A = rand(4,5); B = rand(3,4); C = rand(2,3); U = {A,B,C}; +%% +% Prop. 3.7a: N-mode multiplication can be expressed in terms of matricized +% tensors. +% +% $$X = Y \times_n U \Leftrightarrow X_{(n)} = UY_{(n)} $$ +% +for n = 1:ndims(Y) + X = ttm(Y,U,n); %<-- X = Y x_n U{n} + Xn = U{n} * tenmat(Y,n); %<-- Xn = U{n} * Yn + norm(tenmat(X,n) - Xn) % <-- should be zero +end +%% +% Prop. 3.7b: We can do matricizations in various ways and still be +% equivalent. +X = ttm(Y,U); %<-- X = Y x_1 A x_2 B x_3 C +Xm1 = kron(B,A)*tenmat(Y,[1 2])*C'; %<-- Kronecker product version +Xm2 = tenmat(X,[1 2]); %<-- Matriczed version +norm(Xm1 - Xm2) % <-- should be zero +Xm1 = B * tenmat(Y,2,[3 1]) * kron(A,C)'; %<-- Kronecker product version +Xm2 = tenmat(X,2,[3 1]); %<-- Matricized version +norm(Xm1 - Xm2) % <-- should be zero +Xm1 = tenmat(Y,[],[1 2 3]) * kron(kron(C,B),A)'; %<-- Vectorized via Kronecker +Xm2 = tenmat(X,[],[1 2 3]); %<-- Vectorized via matricize +norm(Xm1 - Xm2) + +%% Norm of difference between two tensors +% Prop. 3.9: For tensors X and Y, we have: +% +% $$\|X-Y\|^2 = \|X\|^2 + \|Y\|^2 - 2 $$ +% +X = tenrand([5 4 3]); Y = tenrand([5 4 3]); +% The following 2 results should be equal +norm(X-Y) +sqrt(norm(X)^2 - 2*innerprod(X,Y) + norm(Y)^2) +%% +% This relationship makes it more convenient to compare the norm of +% the difference between two different tensor objects. Imagine if we +% have a |sptensor| and a |ktensor| and we want the norm of the +% difference, which may be needed to check for convergence, for +% example, but which is very expensive to convert to a full (dense) +% tensor. Because |innerprod| and |norm| are defined for all types of +% tensor objects, this is a handy formula. +X = sptensor(X); +Y = ktensor({[1:5]',[1:4]',[1:3]'}); +% The following 2 results should be equal +norm(full(X)-full(Y)) +sqrt(norm(X)^2 - 2*innerprod(X,Y) + norm(Y)^2) + +%% Tucker tensor properties +% The properties of the Tucker operator follow directly from the +% properties of n-mode multiplication. 
+ +% Initialize data +Y = tensor(1:24,[4 3 2]); +A1 = reshape(1:20,[5 4]); +A2 = reshape(1:12,[4 3]); +A3 = reshape(1:6,[3 2]); +A = {A1,A2,A3}; +B1 = reshape(1:20,[4 5]); +B2 = reshape(1:12,[3 4]); +B3 = reshape(1:6,[2 3]); +B = {B1,B2,B3}; +%% +% Proposition 4.2a +X = ttensor(ttensor(Y,A),B) +%% +AB = {B1*A1, B2*A2, B3*A3}; +Y = ttensor(Y,AB) +%% +norm(full(X)-full(Y)) %<-- should be zero +%% +% Proposition 4.2b +Y = tensor(1:24,[4 3 2]); +X = ttensor(Y,A); +Apinv = {pinv(A1),pinv(A2),pinv(A3)}; +Y2 = ttensor(full(X),Apinv); +norm(full(Y)-full(Y2)) %<-- should be zero +%% +% Proposition 4.2c +Y = tensor(1:24,[4 3 2]); +rand('state',0); +Q1 = orth(rand(5,4)); +Q2 = orth(rand(4,3)); +Q3 = orth(rand(3,2)); +Q = {Q1,Q2,Q3}; +X = ttensor(Y,Q) +%% +Qt = {Q1',Q2',Q3'}; +Y2 = ttensor(full(X),Qt) +norm(full(Y)-full(Y2)) %<-- should be zero + +%% Tucker operator and matricized tensors +% The Tucker operator also has various epressions in terms of +% matricized tensors and the Kronecker product. +% Proposition 4.3a +Y = tensor(1:24,[4 3 2]); +A1 = reshape(1:20,[5 4]); +A2 = reshape(1:12,[4 3]); +A3 = reshape(1:6,[3 2]); +A = {A1,A2,A3}; +X = ttensor(Y,A) +for n = 1:ndims(Y) + rdims = n; + cdims = setdiff(1:ndims(Y),rdims); + Xn = A{n} * tenmat(Y,rdims,cdims) * kron(A{cdims(2)}, A{cdims(1)})'; + norm(tenmat(full(X),rdims,cdims) - Xn) % <-- should be zero +end + +%% Orthogonalization of Tucker factors +% Proposition 4.4 +Y = tensor(1:24,[4 3 2]); +A1 = rand(5,4); +A2 = rand(4,3); +A3 = rand(3,2); +A = {A1,A2,A3}; +X = ttensor(Y,A) +%% +[Q1,R1] = qr(A1); +[Q2,R2] = qr(A2); +[Q3,R3] = qr(A3); +R = {R1,R2,R3}; +Z = ttensor(Y,R); +norm(X) - norm(Z) %<-- should be zero + +%% Kruskal operator properties +% Proposition 5.2 +A1 = reshape(1:10,[5 2]); +A2 = reshape(1:8,[4 2]); +A3 = reshape(1:6,[3 2]); +K = ktensor({A1,A2,A3}); +B1 = reshape(1:20,[4 5]); +B2 = reshape(1:12,[3 4]); +B3 = reshape(1:6,[2 3]); +X = ttensor(K,{B1,B2,B3}) + +Y = ktensor({B1*A1, B2*A2, B3*A3}); +norm(full(X) - full(Y)) %<-- should be zero + +%% +% Proposition 5.3a (second part) +A1 = reshape(1:10,[5 2]); +A2 = reshape(1:8,[4 2]); +A3 = reshape(1:6,[3 2]); +A = {A1,A2,A3}; +X = ktensor(A); +rdims = 1:ndims(X); +Z = double(tenmat(full(X), rdims, [])); +Xn = khatrirao(A{rdims},'r') * ones(length(X.lambda),1); +norm(Z - Xn) % <-- should be zero +%% +cdims = 1:ndims(X); +Z = double(tenmat(full(X), [], cdims)); +Xn = ones(length(X.lambda),1)' * khatrirao(A{cdims},'r')'; +norm(Z - Xn) % <-- should be zero +%% +% Proposition 5.3b +A1 = reshape(1:10,[5 2]); +A2 = reshape(1:8,[4 2]); +A3 = reshape(1:6,[3 2]); +A = {A1,A2,A3}; +X = ktensor(A); +for n = 1:ndims(X) + rdims = n; + cdims = setdiff(1:ndims(X),rdims); + Xn = khatrirao(A{rdims}) * khatrirao(A{cdims},'r')'; + Z = double(tenmat(full(X),rdims,cdims)); + norm(Z - Xn) % <-- should be zero +end +%% +% Proposition 5.3a (first part) +X = ktensor(A); +for n = 1:ndims(X) + cdims = n; + rdims = setdiff(1:ndims(X),cdims); + Xn = khatrirao(A{rdims},'r') * khatrirao(A{cdims})'; + Z = double(tenmat(full(X),rdims,cdims)); + norm(Z - Xn) % <-- should be zero +end + +%% Norm of Kruskal operator +% The norm of a ktensor has a special form because it can be +% reduced to summing the entries of the Hadamard product of N +% matrices of size R x R. 
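+%
+% In formula form (a sketch for the unit-weight case checked below), with
+% factor matrices $A_1,\ldots,A_N$,
+%
+% $$\|X\|^2 = \sum_{i,j}\left(A_1^\top A_1 \ast \cdots \ast A_N^\top A_N\right)_{ij}$$
+%
+% where $\ast$ denotes the elementwise (Hadamard) product.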
+% Proposition 5.4 +A1 = reshape(1:10,[5 2]); +A2 = reshape(1:8,[4 2]); +A3 = reshape(1:6,[3 2]); +A = {A1,A2,A3}; +X = ktensor(A); +M = ones(size(A{1},2), size(A{1},2)); +for i = 1:numel(A) + M = M .* (A{i}'*A{i}); +end +norm(X) - sqrt(sum(M(:))) %<-- should be zero + +%% Inner product of Kruskal operator with a tensor +% The inner product of a ktensor with a tensor yields +% Proposition 5.5 +X = tensor(1:60,[5 4 3]); +A1 = reshape(1:10,[5 2]); +A2 = reshape(2:9,[4 2]); +A3 = reshape(3:8,[3 2]); +A = {A1,A2,A3}; +K = ktensor(A); +v = khatrirao(A,'r') * ones(size(A{1},2),1); +% The following 2 results should be equal +double(tenmat(X,1:ndims(X),[]))' * v +innerprod(X,K) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/images/banner-background.jpg b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/images/banner-background.jpg new file mode 100644 index 0000000..549ee8b Binary files /dev/null and b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/images/banner-background.jpg differ diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ktensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ktensor_doc.m new file mode 100644 index 0000000..3f14702 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ktensor_doc.m @@ -0,0 +1,147 @@ +%% Kruskal tensors +% Kruskal format is a decomposition of a tensor X as the sum of the outer +% products as the columns of matrices. For example, we might write +% +% $${\mathcal X} = \sum_r a_r \circ b_r \circ c_r$$ +% +% where a subscript denotes column index and a circle denotes outer +% product. In other words, the tensor X is built from the columns of the +% matrices A,B, and C. It's often helpful to explicitly specify a weight +% for each outer product, which we do here: +% +% $${\mathcal X} = \sum_r \lambda_r \; a_r \circ b_r \circ c_r$$ +% +% The |ktensor| class stores the components of the tensor X and can perform +% many operations, e.g., |ttm|, without explicitly forming the tensor X. + +%% Kruskal tensor format via ktensor +% Kruskal format stores a tensor as a sum of rank-1 outer products. For +% example, consider a tensor of the following form. +% +% $$X = a_1 \circ b_1 \circ c_1 + a_2 \circ b_2 \circ c_2$$ +% +% This can be stored in Kruskal form as follows. +rand('state',0); +A = rand(4,2); %<-- First column is a_1, second is a_2. +B = rand(3,2); %<-- Likewise for B. +C = rand(2,2); %<-- Likewise for C. +X = ktensor({A,B,C}) %<-- Create the ktensor. +%% +% For Kruskal format, there can be any number of matrices, but every matrix +% must have the same number of columns. The number of rows can vary. +Y = ktensor({rand(4,1),rand(2,1),rand(3,1)}) %<-- Another ktensor. +%% Specifying weights in a ktensor +% Weights for each rank-1 tensor can be specified by passing in a +% column vector. For example, +% +% $$X = \lambda_1 \; a_1 \circ b_1 \circ c_1 + \lambda_2 \; a_2 \circ b_2 \circ c_2$$ +% +lambda = [5.0; 0.25]; %<-- Weights for each factor. +X = ktensor(lambda,{A,B,C}) %<-- Create the ktensor. +%% Creating a one-dimensional ktensor +Y = ktensor({rand(4,5)}) %<-- A one-dimensional ktensor. +%% Constituent parts of a ktensor +X.lambda %<-- Weights or multipliers. +%% +X.U %<-- Cell array of matrices. +%% Creating a ktensor from its constituent parts +Y = ktensor(X.lambda,X.U) %<-- Recreate X. +%% Creating an empty ktensor +Z = ktensor %<-- Empty ktensor. +%% Use full or tensor to convert a ktensor to a tensor +full(X) %<-- Converts to a tensor. +%% +tensor(X) %<-- Same as above. 
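+%%
+% A minimal sketch (reusing lambda, A, B, and C from above; the variable
+% Xcheck is introduced only for this check): the dense tensor should equal
+% the weighted sum of the two rank-1 terms assembled column by column.
+Xcheck = full(ktensor(lambda(1),{A(:,1),B(:,1),C(:,1)})) + ...
+    full(ktensor(lambda(2),{A(:,2),B(:,2),C(:,2)}));
+norm(full(X) - Xcheck) %<-- Should be zero.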
+%% Use double to convert a ktensor to a multidimensional array +double(X) %<-- Converts to an array. +%% Use tendiag or sptendiag to convert a ktensor to a ttensor. +% A ktensor can be regarded as a ttensor with a diagonal core. +R = length(X.lambda); %<-- Number of factors in X. +core = tendiag(X.lambda, repmat(R,1,ndims(X))); %<-- Create a diagonal core. +Y = ttensor(core, X.u) %<-- Assemble the ttensor. +%% +norm(full(X)-full(Y)) %<-- They are the same. +%% +core = sptendiag(X.lambda, repmat(R,1,ndims(X))); %<-- Sparse diagonal core. +Y = ttensor(core, X.u) %<-- Assemble the ttensor +%% +norm(full(X)-full(Y)) %<-- They are the same. +%% Use ndims and size for the dimensions of a ktensor +ndims(X) %<-- Number of dimensions. +%% +size(X) %<-- Row vector of the sizes. +%% +size(X,2) %<-- Size of the 2nd mode. +%% Subscripted reference for a ktensor +X(1,1,1) %<-- Assemble the (1,1,1) element (requires computation). +%% +X.lambda(2) %<-- Weight of 2nd factor. +%% +X.U{2} %<-- Extract a matrix. +%% +X{2} %<-- Same as above. +%% Subscripted assignment for a ktensor +X.lambda = ones(size(X.lambda)) %<-- Insert new multipliers. +%% +X.lambda(1) = 7 %<-- Change a single element of lambda. +%% +X{3}(1:2,1) = [1;1] %<-- Change the matrix for mode 3. +%% Use end for the last array index. +X(3:end,1,1) %<-- Calculated X(3,1,1) and X((4,1,1). +%% +X(1,1,1:end-1) %<-- Calculates X(1,1,1). +%% +X{end} %<-- Or use inside of curly braces. This is X{3}. +%% Adding and subtracting ktensors +% Adding two ktensors is the same as concatenating the matrices +X = ktensor({rand(4,2),rand(2,2),rand(3,2)}) %<-- Data. +Y = ktensor({rand(4,2),rand(2,2),rand(3,2)}) %<-- More data. +%% +Z = X + Y %<-- Concatenates the factor matrices. +%% +Z = X - Y %<-- Concatenates as with plus, but changes the weights. +%% +norm( full(Z) - (full(X)-full(Y)) ) %<-- Should be zero. +%% Basic operations with a ktensor ++X %<-- Calls uplus. +%% +-X %<-- Calls uminus. +%% +5*X %<-- Calls mtimes. +%% Use permute to reorder the modes of a ktensor +permute(X,[2 3 1]) %<-- Reorders modes of X +%% Use arrange to normalize the factors of a ktensor +% The function |arrange| normalizes the columns of the factors and then +% arranges the rank-one pieces in decreasing order of size. +X = ktensor({rand(3,2),rand(4,2),rand(2,2)}) % <-- Unit weights. +%% +arrange(X) %<-- Normalized and rearranged. +%% Use fixsigns for sign indeterminacies in a ktensor +% The largest magnitude entry for each factor is changed to be +% positive provided that we can flip the signs of _pairs_ of vectors in +% that rank-1 component. +Y = X; +Y.u{1}(:,1) = -Y.u{1}(:,1); % switch the sign on a pair of columns +Y.u{2}(:,1) = -Y.u{2}(:,1) +%% +fixsigns(Y) +%% Use ktensor to store the 'skinny' SVD of a matrix +A = rand(4,3) %<-- A random matrix. +%% +[U,S,V] = svd(A,0); %<-- Compute the SVD. +X = ktensor(diag(S),{U,V}) %<-- Store the SVD as a ktensor. +%% +double(X) %<-- Reassemble the original matrix. +%% Displaying a ktensor +disp(X) %<-- Displays the vector lambda and each factor matrix. +%% Displaying data +% The |datadisp| function allows the user to associate meaning to the modes +% and display those modes with the most meaning (i.e., corresponding to the +% largest values). +X = ktensor({[0.8 0.1 1e-10]',[1e-5 2 3 1e-4]',[0.5 0.5]'}); %<-- Create tensor. +X = arrange(X) %<-- Normalize the factors. +%% +labelsDim1 = {'one','two','three'}; %<-- Labels for mode 1. +labelsDim2 = {'A','B','C','D'}; %<-- Labels for mode 2. +labelsDim3 = {'on','off'}; %<-- Labels for mode 3. 
+datadisp(X,{labelsDim1,labelsDim2,labelsDim3}) %<-- Display. \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/multiply_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/multiply_doc.m new file mode 100644 index 0000000..6ce5e7e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/multiply_doc.m @@ -0,0 +1,297 @@ +%% Multiplying tensors + +%% Tensor times vector (ttv for tensor) +% Compute a tensor times a vector (or vectors) in one (or more) modes. +rand('state',0); +X = tenrand([5,3,4,2]); %<-- Create a dense tensor. +A = rand(5,1); B = rand(3,1); C = rand(4,1); D = rand(2,1); %<-- Some vectors. +%% +Y = ttv(X, A, 1) %<-- X times A in mode 1. +%% +Y = ttv(X, {A,B,C,D}, 1) %<-- Same as above. +%% +Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply produces a scalar. +%% +Y = ttv(X, {D,C,B,A}, [4 3 2 1]) %<-- Same as above. +%% +Y = ttv(X, {A,B,C,D}) %<-- Same as above. +%% +Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4. +%% +Y = ttv(X, {A,B,C,D}, [3 4]) %<-- Same as above. +%% +Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication. +%% +Y = ttv(X, {A,B,C,D}, [1 2 4]) %<-- Same as above. +%% +Y = ttv(X, {A,B,D}, -3) %<-- Same as above. +%% +Y = ttv(X, {A,B,C,D}, -3) %<-- Same as above. +%% Sparse tensor times vector (ttv for sptensor) +% This is the same as in the dense case, except that the result may be +% either dense or sparse (or a scalar). +X = sptenrand([5,3,4,2],5); %<-- Create a sparse tensor. +%% +Y = ttv(X, A, 1) %<-- X times A in mode 1. Result is sparse. +%% +Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply. +%% +Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4. +%% +Y = ttv(X, {A,B,D}, -3) %<-- 3-way multiplication. Result is *dense*! +%% Kruskal tensor times vector (ttv for ktensor) +% The special structure of a ktensor allows an efficient implementation of +% vector multiplication. The result is a ktensor or a scalar. +X = ktensor([10;1],rand(5,2),rand(3,2),rand(4,2),rand(2,2)); %<-- Ktensor. +Y = ttv(X, A, 1) %<-- X times A in mode 1. Result is a ktensor. +%% +norm(full(Y) - ttv(full(X),A,1)) %<-- Result is the same as dense case. +%% +Y = ttv(X, {A,B,C,D}) %<-- All-mode multiply -- scalar result. +%% +Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4. +%% +Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication. +%% Tucker tensor times vector (ttv for ttensor) +% The special structure of a ttensor allows an efficient implementation of +% vector multiplication. The result is a ttensor or a scalar. +X = ttensor(tenrand([2,2,2,2]),rand(5,2),rand(3,2),rand(4,2),rand(2,2)); +Y = ttv(X, A, 1) %<-- X times A in mode 1. +%% +norm(full(Y) - ttv(full(X),A, 1)) %<-- Same as dense case. +%% +Y = ttv(X, {A,B,C,D}, [1 2 3 4]) %<-- All-mode multiply -- scalar result. +%% +Y = ttv(X, {C,D}, [3 4]) %<-- X times C in mode-3 & D in mode-4. +%% +Y = ttv(X, {A,B,D}, [1 2 4]) %<-- 3-way multiplication. +%% Tensor times matrix (ttm for tensor) +% Compute a tensor times a matrix (or matrices) in one (or more) modes. +X = tensor(rand(5,3,4,2)); +A = rand(4,5); B = rand(4,3); C = rand(3,4); D = rand(3,2); +%% +Y = ttm(X, A, 1); %<-- X times A in mode-1. +Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above. +Y = ttm(X, A', 1, 't') %<-- Same as above. +%% +Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way mutliply. +Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above. +Y = ttm(X, {A,B,C,D}); %<-- Same as above. +Y = ttm(X, {A',B',C',D'}, 't') %<-- Same as above. 
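+%%
+% A small sketch of the matricized view of |ttm| (reusing the X and A
+% defined above): multiplying in mode 1 is the same as multiplying the
+% mode-1 unfolding by A, i.e.
+%
+% $$Y = X \times_1 A \Leftrightarrow Y_{(1)} = A X_{(1)}$$
+%
+Y = ttm(X, A, 1); %<-- X times A in mode-1.
+norm(tenmat(Y,1) - A * tenmat(X,1)) %<-- Should be zero.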
+%% +Y = ttm(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4 +Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above. +%% +Y = ttm(X, {A,B,D}, [1 2 4]); %<-- 3-way multiply. +Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above. +Y = ttm(X, {A,B,D}, -3); %<-- Same as above. +Y = ttm(X, {A,B,C,D}, -3) %<-- Same as above. +%% Sparse tensor times matrix (ttm for sptensor) +% It is also possible to multiply an sptensor times a matrix or series of +% matrices. The arguments are the same as for the dense case. The result may +% be dense or sparse, depending on its density. +X = sptenrand([5 3 4 2],10); +Y = ttm(X, A, 1); %<-- X times A in mode-1. +Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above. +Y = ttm(X, A', 1, 't') %<-- Same as above +%% +norm(full(Y) - ttm(full(X),A, 1) ) %<-- Same as dense case. +%% +Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply. +Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above. +Y = ttm(X, {A,B,C,D}); %<-- Same as above. +Y = ttm(X, {A',B',C',D'}, 't') %<-- Same as above. +%% +Y = ttm(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4 +Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above. +%% +Y = ttm(X, {A,B,D}, [1 2 4]); %<-- 3-way multiply. +Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above. +Y = ttm(X, {A,B,D}, -3); %<-- Same as above. +Y = ttm(X, {A,B,C,D}, -3) %<-- Same as above. +%% +% The result may be dense or sparse. +X = sptenrand([5 3 4],1); +Y = ttm(X, A, 1) %<-- Sparse result. +%% +X = sptenrand([5 3 4],50); +Y = ttm(X, A, 1) %<-- Dense result. +%% +% Sometimes the product may be too large to reside in memory. For +% example, try the following: +% X = sptenrand([100 100 100 100], 1e4); +% A = rand(1000,100); +% ttm(X,A,1); %<-- too large for memory +%% Matricized Khatri-Rao product of a tensor. +% |mttkrp| computes the matricized Khatri-Rao product of a tensor X with +% a cell array of matrices U. The operation first matricizes (i.e. flattens) a +% tensor X with m modes, in a given mode n. Then the Khatri-Rao +% product of a cell array of matrices U={U1,...,Um} is computed, omitting +% the nth term in the array. The returned value is then the product +% matricized tensor X and Khatri-Rao product of the cell array. This +% operation is useful in many numerical procedures, e.g. formulating the +% subproblems in an alternating least squares CP decomposition of tensor. +% +% Each matrix in the cell array U must have the same number of columns. The +% number of rows of the matrix Ui equal the dimension of X in mode i. In +% the example that follows we will verify that |mttkrp| performs the +% calculation indicated above. +U = {rand(2,3), 2*rand(3,3), 3*rand(4,3)}; %<--the cell array +X = tensor(rand(2,3,4)); %<--the tensor +n = 2; %<--the dimension to matricize with respect to. + +KRP = khatrirao(U{1}, U{3}); %<--Khatri-Rao product, omitting U{2} +M = permute(X.data, [n:size(X,n), 1:n-1]); +M = reshape(M,size(X,n),[]); %<--Matricized tensor data + +norm(M*KRP-mttkrp(X,U,n)) < 1e-14 %<--They are equal, within machine precision +%% Kruskal tensor times matrix (ttm for ktensor) +% The special structure of a ktensor allows an efficient implementation of +% matrix multiplication. The arguments are the same as for the dense case. +X = ktensor({rand(5,1) rand(3,1) rand(4,1) rand(2,1)}); +%% +Y = ttm(X, A, 1); %<-- X times A in mode-1. +Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above. +Y = ttm(X, A', 1, 't') %<-- Same as above. +%% +Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way mutliply. +Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above. 
+Y = ttm(X, {A,B,C,D}); %<-- Same as above. +Y = ttm(X, {A',B',C',D'}, 't') %<-- Same as above. +%% +Y = ttm(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4. +Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above. +%% +Y = ttm(X, {A,B,D}, [1 2 4]); %<-- 3-way multiply. +Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above. +Y = ttm(X, {A,B,D}, -3); %<-- Same as above. +Y = ttm(X, {A,B,C,D}, -3) %<-- Same as above. +%% Tucker tensor times matrix (ttm for ttensor) +% The special structure of a ttensor allows an efficient implementation of +% matrix multiplication. +X = ttensor(tensor(rand(2,2,2,2)),{rand(5,2) rand(3,2) rand(4,2) rand(2,2)}); +%% +Y = ttm(X, A, 1); %<-- computes X times A in mode-1. +Y = ttm(X, {A,B,C,D}, 1); %<-- Same as above. +Y = ttm(X, A', 1, 't') %<-- Same as above. +%% +Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply. +Y = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- Same as above. +Y = ttm(X, {A,B,C,D}); %<-- Same as above. +Y = ttm(X, {A',B',C',D'}, 't') %<-- Same as above. +%% +Y = ttm(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4 +Y = ttm(X, {A,B,C,D}, [3 4]) %<-- Same as above. +%% +Y = ttm(X, {A,B,D}, [1 2 4]); %<-- 3-way multiply +Y = ttm(X, {A,B,C,D}, [1 2 4]); %<-- Same as above. +Y = ttm(X, {A,B,D}, -3); %<-- Same as above. +Y = ttm(X, {A,B,C,D}, -3) %<-- Same as above. +%% Tensor times tensor (ttt for tensor) +X = tensor(rand(4,2,3)); Y = tensor(rand(3,4,2)); +Z = ttt(X,Y); %<-- Outer product of X and Y. +size(Z) +%% +Z = ttt(X,X,1:3) %<-- Inner product of X with itself. +%% +Z = ttt(X,Y,[1 2 3],[2 3 1]) %<-- Inner product of X & Y. +%% +Z = innerprod(X,permute(Y, [2 3 1])) %<-- Same as above. +%% +Z = ttt(X,Y,[1 3],[2 1]) %<-- Product of X & Y along specified dims. +%% Sparse tensor times sparse tensor (ttt for sptensor) +X = sptenrand([4 2 3],3); Y = sptenrand([3 4 2],3); +Z = ttt(X,Y) %<--Outer product of X and Y. +%% +norm(full(Z)-ttt(full(X),full(Y))) %<-- Same as dense. +%% +Z = ttt(X,X,1:3) %<-- Inner product of X with itself. +%% +X = sptenrand([2 3],1); Y = sptenrand([3 2],1); +Z = ttt(X, Y) %<-- Sparse result. +%% +X = sptenrand([2 3],20); Y = sptenrand([3 2],20); +Z = ttt(X, Y) %<-- Dense result. +%% +Z = ttt(X,Y,[1 2],[2 1]) %<-- inner product of X & Y +%% Inner product (innerprod) +% The function |innerprod| efficiently computes the inner product +% between two tensors X and Y. The code does this efficiently +% depending on what types of tensors X and Y. +X = tensor(rand(2,2,2)) +Y = ktensor({rand(2,2),rand(2,2),rand(2,2)}) +%% +z = innerprod(X,Y) +%% Contraction on tensors (contract for tensor) +% The function |contract| sums the entries of X along dimensions I and +% J. Contraction is a generalization of matrix trace. In other words, +% the trace is performed along the two-dimensional slices defined by +% dimensions I and J. It is possible to implement tensor +% multiplication as an outer product followed by a contraction. +X = sptenrand([4 3 2],5); +Y = sptenrand([3 2 4],5); +%% +Z1 = ttt(X,Y,1,3); %<-- Normal tensor multiplication +%% +Z2 = contract(ttt(X,Y),1,6); %<-- Outer product + contract +%% +norm(Z1-Z2) %<-- Should be zero +%% +% Using |contract| on either sparse or dense tensors gives the same +% result +X = sptenrand([4 2 3 4],20); +Z1 = contract(X,1,4) % sparse version of contract +%% +Z2 = contract(full(X),1,4) % dense version of contract +%% +norm(full(Z1) - Z2) %<-- Should be zero +%% +% The result may be dense or sparse, depending on its density. 
+X = sptenrand([4 2 3 4],8); +Y = contract(X,1,4) %<-- should be sparse +%% +X = sptenrand([4 2 3 4],80); +Y = contract(X,1,4) %<-- should be dense +%% Relationships among ttv, ttm, and ttt +% The three "tensor times ___" functions (|ttv|, |ttm|, |ttt|) all perform +% specialized calculations, but they are all related to some degree. +% Here are several relationships among them: +%% +X = tensor(rand(4,3,2)); +A = rand(4,1); +%% +% Tensor times vector gives a 3 x 2 result +Y1 = ttv(X,A,1) +%% +% When |ttm| is used with the transpose option, the result is almost +% the same as |ttv| +Y2 = ttm(X,A,1,'t') +%% +% We can use |squeeze| to remove the singleton dimension left over +% from |ttm| to give the same answer as |ttv| +squeeze(Y2) +%% +% Tensor outer product may be used in conjuction with contract to +% produce the result of |ttm|. Please note that this is more expensive +% than using |ttm|. +Z = ttt(tensor(A),X); +size(Z) +%% +Y3 = contract(Z,1,3) +%% +% Finally, use |squeeze| to remove the singleton dimension to get +% the same result as |ttv|. +squeeze(Y3) +%% Frobenius norm of a tensor +% The Frobenius norm of any type of tensor may be computed with the +% function |norm|. Each class is optimized to calculate the norm +% in the most efficient manner. +X = sptenrand([4 3 2],5) +norm(X) +norm(full(X)) +%% +X = ktensor({rand(4,2),rand(3,2)}) +norm(X) +%% +X = ttensor(tensor(rand(2,2)),{rand(4,2),rand(3,2)}) +norm(X) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/nvecs_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/nvecs_doc.m new file mode 100644 index 0000000..54fedd3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/nvecs_doc.m @@ -0,0 +1,45 @@ +%% Generating the leading mode-n vectors +% The leading mode-n vectors are those vectors that span the subspace of +% the mode-n fibers. In other words, the left singular vectors of the +% n-mode matricization of X. +%% Using nvecs to calculate the leading mode-n vectors +% The |nvecs| command efficient computes the leading n-mode vectors. +rand('state',0); +X = sptenrand([4,3,2],6) %<-- A sparse tensor +%% +nvecs(X,1,2) %<-- The 2 leading mode-1 vectors +%% +nvecs(X,1,3) % <-- The 3 leading mode-1 vectors +%% +nvecs(full(X),1,3) %<-- The same thing for a dense tensor +%% +X = ktensor({rand(3,2),rand(3,2),rand(2,2)}) %<-- A random ktensor +%% +nvecs(X,2,1) %<-- The 1 leading mode-2 vector +%% +nvecs(full(X),2,1) %<-- Same thing for a dense tensor +%% +X = ttensor(tenrand([2,2,2,2]),{rand(3,2),rand(3,2),rand(2,2),rand(2,2)}); %<-- A random ttensor +%% +nvecs(X,4,2) %<-- The 1 leading mode-2 vector +%% +nvecs(full(X),4,2) %<-- Same thing for a dense tensor +%% Using nvecs for the HOSVD +X = tenrand([4 3 2]) %<-- Generate data +%% +U1 = nvecs(X,1,4); %<-- Mode 1 +U2 = nvecs(X,2,3); %<-- Mode 2 +U3 = nvecs(X,3,2); %<-- Mode 3 +S = ttm(X,{pinv(U1),pinv(U2),pinv(U3)}); %<-- Core +Y = ttensor(S,{U1,U2,U3}) %<-- HOSVD of X +%% +norm(full(Y) - X) %<-- Reproduces the same result. 
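+%%
+% A brief sketch (S2 is introduced only for this check): the columns
+% returned by |nvecs| are orthonormal, so the pseudoinverses above reduce
+% to transposes and the core can equivalently be formed with the
+% transpose ('t') option of |ttm|.
+S2 = ttm(X,{U1,U2,U3},'t'); %<-- Same core as above.
+norm(S - S2) %<-- Should be zero.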
+ +%% +U1 = nvecs(X,1,2); %<-- Mode 1 +U2 = nvecs(X,2,2); %<-- Mode 2 +U3 = nvecs(X,3,2); %<-- Mode 3 +S = ttm(X,{pinv(U1),pinv(U2),pinv(U3)}); %<-- Core +Y = ttensor(S,{U1,U2,U3}) %<-- Rank-(2,2,2) HOSVD approximation of X +%% +100*(1-norm(full(Y)-X)/norm(X)) %<-- Percentage explained by approximation \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptenmat_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptenmat_doc.m new file mode 100644 index 0000000..69d079c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptenmat_doc.m @@ -0,0 +1,88 @@ +%% Converting sparse tensors to matrices and vice versa +% We show how to convert a sptensor to a matrix stored in _coordinate_ +% format and with extra information so that it can be converted back to a +% sptensor. + +%% Creating a sptenmat (sparse tensor as sparse matrix) object +% A sparse tensor can be converted to a sparse matrix. The matrix, however, +% is not stored as a MATLAB sparse matrix because that format is sometimes +% inefficient for converted sparse tensors. Instead, the row and column +% indices are stored explicitly. +%% +% First, we create a sparse tensor to be converted. +X = sptenrand([10 10 10 10],10) %<-- Generate some data. +%% +% All the same options for tenmat are available as for tenmat. +A = sptenmat(X,1) %<-- Mode-1 matricization. +%% +A = sptenmat(X,[2 3]) %<-- More than one mode is mapped to the columns. +%% +A = sptenmat(X,[2 3],'t') %<-- Specify column dimensions (transpose). +%% +A = sptenmat(X,1:4) %<-- All modes mapped to rows, i.e., vectorize. +%% +A = sptenmat(X,2) %<-- By default, columns are ordered as [1 3 4]. +%% +A = sptenmat(X,2,[3 1 4]) %<-- Explicit column ordering. +%% +A = sptenmat(X,2,'fc') %<-- Foward cyclic. +%% +A = sptenmat(X,2,'bc') %<-- Backward cyclic. +%% Constituent parts of a sptenmat +A.subs %<-- Subscripts of the nonzeros. +%% +A.vals %<-- The corresponding nonzero values. +%% +A.tsize %<-- Size of the original tensor. +%% +A.rdims %<-- Dimensions that were mapped to the rows. +%% +A.cdims %<-- Dimensions that were mapped to the columns. +%% Creating a sptenmat from its constituent parts +B = sptenmat(A.subs,A.vals,A.rdims,A.cdims,A.tsize) %<-- Copies A +%% +B = sptenmat(double(A),A.rdims,A.cdims,A.tsize) %<-- More efficient to pass a matrix. +%% Creating a sptenmat with no nonzeros +A = sptenmat([],[],A.rdims,A.cdims,A.tsize) %<-- An empty sptenmat. +%% Creating an emtpy sptenmat +A = sptenmat %<-- A really empty sptenmat. +%% Use double to convert a sptenmat to a MATLAB sparse matrix +X = sptenrand([10 10 10 10],10); %<-- Create a tensor. +A = sptenmat(X,1) %<-- Convert it to a sptenmat +%% +B = double(A) %<-- Convert it to a MATLAB sparse matrix +%% +whos A B %<-- The storage for B (the sparse matrix) is larger than for A. +%% +C = B'; %<-- Transposing the result fixes the problem. +whos C +%% Use full to convert a sptenmat to a tenmat +B = sptenmat(sptenrand([3 3 3], 3), 1) %<-- Create a sptenmat +%% +C = full(B) %<-- Convert to a tenmat +%% Use sptensor to convert a sptenmat to a sptensor +Y = sptensor(A) %<-- Convert a sptenmat to a sptensor +%% Use size and tsize for the dimensions of a sptenmat +size(A) %<-- Matrix size +tsize(A) %<-- Corresponding tensor size +%% Subscripted reference for a sptenmat +% This is not supported beyond getting the constituent parts. +%% Subscripted assignment for a sptenmat +A(1:2,1:2) = ones(2) %<-- Replace part of the matrix. +%% Use end for the last index +% End is not supported. 
+%% Basic operations for sptenmat +norm(A) %<-- Norm of the matrix. +%% ++A %<-- Calls uplus. +%% +-A %<-- Calls uminus. +%% Use aatx to efficiently compute A * A' * x for a sptenmat +x = ones(10,1); %<-- Create vector +aatx(A,x) %<-- Compute A * A' * x +%% +double(A) * double(A)' * x %<-- Same as above but less efficient +%% Displaying a tenmat +% Shows the original tensor dimensions, the modes mapped to rows, the modes +% mapped to columns, and the matrix. +disp(A) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptensor_doc.m new file mode 100644 index 0000000..a207078 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sptensor_doc.m @@ -0,0 +1,177 @@ +%% Sparse tensors +% MATLAB has no native ability to store sparse multidimensional arrays, +% only sparse matrices. Moreover, the compressed sparse column storage +% format for MATLAB sparse matrices is not readily adaptable to sparse +% tensors. Instead, the |sptensor| class stores the data in coordinate +% format. The |sptensor| class is best described in the following +% reference: +% +% * B. W. Bader and T. G. Kolda. *Efficient MATLAB Computations with Sparse +% and Factored Tensors*, _SIAM J. Scientific Computing_ 30:205-231, 2007. +% . +% +%% Creating a sptensor +% A sparse tensor can be created by passing in a list of subscripts and +% values. For example, here we pass in three subscripts and a scalar value. +% The resuling sparse tensor has three nonzero entries, and the size is the +% size of the largest subscript in each dimension. +rand('state',0); %<-- Setup for the script +subs = [1,1,1;1,2,1;3,4,2]; %<-- Subscripts of the nonzeros. +vals = [1; 2; 3]; %<-- The values of the nonzeros. +X = sptensor(subs,vals) %<-- Create a sparse tensor with 3 nonzeros. +%% +X = sptensor(subs,vals,[3 5 2]) %<-- Or, specify the size explicitly. +%% +% Values corresponding to repeated subscripts are summed. Also note that we +% can use a scalar as the second argument. +subs = [1 1 1; 1 1 3; 2 2 2; 4 4 4; 1 1 1; 1 1 1]; %<-- (1,1,1) is repeated. +X = sptensor(subs,2) %<-- Equivalent to X = sptensor(subs,2*ones(6,1)). +%% Specifying the accumulation method for the constructor +% By default, values corresponding to repeated elements are summed. +% However, it is possible to specify other actions to be taken. +X = sptensor(subs,2*ones(6,1),[4 4 4],@max) %<-- Maximum element. +%% +myfun = @(x) sum(x) / 3; %<-- Total sum divided by three. +X = sptensor(subs,2*ones(6,1),[4 4 4],myfun) %<-- Custom accumulation function. +%% Creating a one-dimensional sptensor. +X = sptensor([1;3;5],1,10) %<-- Same as X = sptensor([1;3;5],[1;1;1],1,10). +%% +X = sptenrand(50,5) %<-- A random, sparse, order-1 tensor with 5 nonzeros. +%% Creating an all-zero sptensor +X = sptensor([],[],[10 10 10]) %<-- Creates an all-zero tensor. +%% +X = sptensor([10 10 10]) %<-- Same as above. +%% Constituent parts of a sptensor +X = sptenrand([40 30 20],5); %<-- Create data. +X.subs %<-- Subscripts of nonzeros. +%% +X.vals %<-- Corresponding nonzero values. +%% +X.size %<-- The size. +%% Creating a sparse tensor from its constituent parts +Y = sptensor(X.subs,X.vals,X.size) %<-- Copies X. +%% Creating an empty sptensor +% An empty constructor exists, primarily to support loads of previously +% saved data. +Y = sptensor %<-- Create an empty sptensor. +%% Use sptenrand to create a random sptensor +X = sptenrand([10 10 10],0.01) %<-- Create a tensor with 1% nonzeroes. 
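+%%
+% A quick check (sketch): 1% of the 10 x 10 x 10 = 1000 entries is about
+% 10 nonzeros; the exact count may differ slightly depending on how
+% duplicate random subscripts are handled.
+nnz(X) %<-- Approximately 10.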
+%% +% It is also posible to specify the precise number of nonzeros rather than +% a percentage. +X = sptenrand([10 10 10],10) %<-- Create a tensor with 10 nonzeros. +%% Use squeeze to remove singleton dimensions from a sptensor +Y = sptensor([1 1 1; 2 1 1], 1, [2 1 1]) %<-- Create a sparse tensor. +squeeze(Y) %<-- Remove singleton dimensions. +%% Use full or tensor to convert a sptensor to a (dense) tensor +X = sptensor([1 1 1; 2 2 2], [1; 1]); %<-- Create a sparse tensor. +Y = full(X) %<-- Convert it to a (dense) tensor. +%% +Y = tensor(X) %<-- Same as above. +%% Use sptensor to convert a (dense) tensor to a sptensor +Z = sptensor(Y) %<-- Convert a tensor to a sptensor. +%% Use double to convert a sptensor to a (dense) multidimensional array +Y = double(X) %<-- Creates a MATLAB array. +%% Use find to extract nonzeros from a tensor and then create a sptensor +% The |find| command can be used to extract specific elements and then +% convert those into a sptensor. +X = tensor(rand(5,4,2),[5 4 2]) %<-- Create a tensor. +S = find(X > 0.9) %<-- Extract subscipts of values greater than 0.9. +V = X(S) %<-- Extract the corresponding values. +Y = sptensor(S,V,[5 4 2]) %<-- Create a new tensor. +%% Use ndims and size to get the size of a sptensor +ndims(Y) %<-- Number of dimensions or modes. +%% +size(Y) %<-- Size of Y. +%% +size(Y,3) %<-- Size of mode 3 of Y. +%% Use nnz to get the number of nonzeros of a sptensor +nnz(Y) %<-- Number of nonzeros in Y. +%% Subscripted reference for a sptensor +X = sptensor([4,4,4;2,2,1;2,3,2],[3;5;1],[4 4 4]) %<-- Create a sptensor. +%% +X(1,2,1) %<-- Extract the (1,2,1) element, which is zero. +%% +X(4,4,4) %<-- Extract the (4,4,4) element, which is nonzero. +%% +X(1:2,2:4,:) %<-- Extract a 2 x 3 x 4 subtensor. +%% +X([1 1 1; 2 2 1]) %<-- Extract elements by subscript. +%% +X([1;6]) %<-- Same as above but with linear indices. +%% +% As with a tensor, subscriped reference may be ambiguous for +% one-dimensional tensors. +X = sptensor([1;3;5],1,7) %<-- Create a sparse tensor. +%% +X(3) %<-- Fully specified, single elements are always returned as scalars. +%% +X([3;6]) %<-- Returns a subtensor. +%% +X([3;6],'extract') %<-- Same as above *but* returns an array. +%% Subscripted assignment for a sptensor +X = sptensor([30 40 20]) %<-- Create an emtpy 30 x 40 x 20 sptensor. +%% +X(30,40,20) = 7 %<-- Assign a single element. +%% +X([1,1,1;2,2,2]) = [1;1] %<-- Assign a list of elements. +%% +X(11:20,11:20,11:20) = sptenrand([10,10,10],10) %<-- Assign a subtensor. +%% +X(31,41,21) = 4 %<-- Grows the size of the sptensor. +%% +X(111:120,111:120,111:120) = sptenrand([10,10,10],10) %<-- Grow more. +%% Use end as the last index. +X(end-10:end,end-10:end,end-5:end) %<-- Same as X(108:118,110:120,115:120) +%% Use elemfun to manipulate the nonzeros of a sptensor +% The function |elemfun| is similar to |spfun| for sparse matrices. +X = sptenrand([10,10,10],3) %<-- Create some data. +%% +Z = elemfun(X, @sqrt) %<-- Square root of every nonzero. +%% +Z = elemfun(X, @(x) x+1) %<-- Use a custom function. +%% +Z = elemfun(X, @(x) x~=0) %<-- Set every nonzero to one. +%% +Z = ones(X) %<-- An easier way to change every nonzero to one. +%% Basic operations (plus, minus, times, etc.) on a sptensor +A = sptensor(tensor(floor(5*rand(2,2,2)))) %<-- Create data. +B = sptensor(tensor(floor(5*rand(2,2,2)))) %<-- Create more data. +%% ++A %<-- Calls uplus. +%% +-A %<-- Calls uminus. +%% +A+B %<-- Calls plus. +%% +A-B %<-- Calls minus. +%% +A.*B %<-- Calls times. +%% +5*A %<-- Calls mtimes. 
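+%%
+% A small sketch: the elementwise product is nonzero only where both
+% operands are nonzero, so its nonzero count is bounded by either factor.
+nnz(A.*B) <= min(nnz(A),nnz(B)) %<-- Should be true (logical 1).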
+%% +A./2 %<-- Calls rdivide. +%% +% Elementwise divsion by another sptensor is allowed, but +% if the sparsity pattern of the denominator should be a +% superset of the numerator. +A./(A+B) %<-- Calls rdivide. +%% +A./B %<-- Uh-oh. Getting a divide by zero. +%% Use permute to reorder the modes of a sptensor +A = sptenrand([30 40 20 1], 5) %<-- Create data. +%% +permute(A,[4 3 2 1]) %<-- Reorder the modes. +%% +% Permute works correctly for a 1-dimensional sptensor. +X = sptenrand(40,4) %<-- Create data. +%% +permute(X,1) %<-- Permute. +%% Displaying a tensor +% The function |disp| handles small and large elements appropriately, as +% well as aligning the indices. +X = sptensor([1 1 1]); %<-- Create an empty sptensor. +X(1,1,1) = rand(1)*1e15; %<-- Insert a very big element. +X(4,3,2) = rand(1)*1e-15; %<-- Insert a very small element. +X(2,2,2) = rand(1); %<-- Insert a 'normal' element. +disp(X) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sshopm_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sshopm_doc.m new file mode 100644 index 0000000..758c410 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sshopm_doc.m @@ -0,0 +1,32 @@ +%% Shifted symmetric higher-order power method + +%% Data tensor +% From Example 1 in E. Kofidis and P. A. Regalia, On the best rank-1 +% approximation of higher-order supersymmetric tensors, SIAM J. Matrix +% Anal. Appl., 23 (2002), pp. 863–884, DOI: 10.1137/S0895479801387413. +A = tenzeros([3 3 3 3]); +A(perms([1 1 1 1])) = 0.2883; +A(perms([1 1 1 2])) = -0.0031; +A(perms([1 1 1 3])) = 0.1973; +A(perms([1 1 2 2])) = -0.2485; +A(perms([1 1 2 3])) = -0.2939; +A(perms([1 1 3 3])) = 0.3847; +A(perms([1 2 2 2])) = 0.2972; +A(perms([1 2 2 3])) = 0.1862; +A(perms([1 2 3 3])) = 0.0919; +A(perms([1 3 3 3])) = -0.3619; +A(perms([2 2 2 2])) = 0.1241; +A(perms([2 2 2 3])) = -0.3420; +A(perms([2 2 3 3])) = 0.2127; +A(perms([2 3 3 3])) = 0.2727; +A(perms([3 3 3 3])) = -0.3054; + +%% Call eig_sshopm with no shift +% The method with no shift will fail to converge. +[lambda, x, flag, it] = eig_sshopm(A, 'MaxIts', 100); + + +%% Call eig_sshopm with shift + +[lambda, x, flag, it] = eig_sshopm(A, 'MaxIts', 100, 'Shift', 1); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sumtensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sumtensor_doc.m new file mode 100644 index 0000000..18f1ffa --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/sumtensor_doc.m @@ -0,0 +1,119 @@ +%% Sums of structured tensors +% When certain operations are performed on a tensor which is formed as a +% sum of tensors, it can be beneficial to avoid explicitly forming the sum. +% For example, if a tensor is formed as a sum of a low rank tensor and a +% sparse tensor, the structure of the summands can make storage, decomposition and +% operations with other tensors significantly more efficient. The tensor +% toolbox supports a |sumtensor| object designed to exploit this structure. +% Here we explain the basics of defining and using sumtensors. +%% Creating sumtensors +% A sumtensor T can only be delared as a sum of same-sized tensors |T1, +% T2,...,TN|. The summand tensors are stored in a cell array, which define +% the "parts" of the sumtensor. The parts of a sumtensor can be (generic) +% tensors (as |tensor|), sparse tensors (as |sptensor|), Kruskal tensors +% (as |ktensor|), or Tucker tensors (as |ttensor|). An example of the use +% of the |sumtensor| constructor follows. 
+T1 = tensor(ones(3,3,3)); %<--A tensor +T2 = sptensor([1 1 1; 2 2 2; 3 3 2; 2 1 1], 1, [3,3,3]); %<--A sparse tensor + +T = sumtensor(T1,T2) + +%% An Large-Scale Example +% For large-scale problems, the |sumtensor| class may make the difference +% as to whether or not a tensor can be stored in memory. Consider the +% following example, where $\mathcal{T}$ is of size $1000 x 1000 x 1000$, +% formed from the sum of a |ktensor| and an |sptensor|. +X1 = rand(500, 3); %Generating some factor matrices +X2 = rand(500, 3); +X3 = rand(500, 3); +K = ktensor([1; 1; 1], X1, X2, X3); +S = sptenrand([500, 500, 500], 1e-100); + +ST = sumtensor(K,S); %<-- Declare the sumtensor +TT = full(ST); %<-- Form the sum of the tensors explicitly + +whos ST TT %<--Output the storage information for these variables + +%% +% The difference in memory between the full and sumtensor is a factor of 10^5! +% Hence we prefer to use the sumtensor object whenever possible. +%% Further examples of the sumtensor constructer +% The sumtensor constructor can declare an empty sumtensor object, having +% no parts, as follows +P = sumtensor() +%% +% |sumtensor| also supports use as a copy constructor. +S = sumtensor(P) +%% Use ndims and size for the dimensions of a sumtensor +% For a given sumtensor, |ndims| returns the number of modes of a sumtensor. +% Similarly, |size| returns a size array of the sumtensor. +ndims(T) +size(T) +%% Use full to convert a sumtensor to a "generic" tensor +% The |full| function can be used to convert a sumtensor to a generic tensor. Note that +% for large-scale tensors, this can a large amount of memory because each part of +% the sumtensor will be expanded and then summed. +full(T) +%% Use double to convert a sumtensor to a multidimensional array +% The |double| function can be used to convert a sumtensor to a multidimensional array. +% Similarly to the |full| expansion, this can use a prohibitive amount of +% memory for large-scale problems. +double(T) +%% Matricized Khatri-Rao product of a sumtensor +% The |mttkrp| function computes the Khatri-Rao product of a matricized tensor and a +% sumtensor. The required arguments are: a sumtensor X, a cell array of +% matrices U={U1,...,Um}, and a mode n. The cell array must consist of m matrices, +% where m is the number of modes in X. The number of columns of these matrices +% should be constant, and number of rows of matrix Ui should match the dimension +% of the tensor X in mode i. The matricized Khatri-Rao product operation on +% sumtensor distributes the operation to the summands of the sumtensor. +% For details of this specific computation, see the mttkrp documentation +% for a generic tensor. An example of the use of |mttkrp| follows. +U={eye(3), ones(3,3), randn(3,3)}; %<--The cell array of matrices +mttkrp(T,U,2) +%% Use innerprod to compute the inner product of a sumtensor +% The |innerprod| function computes the inner product of a sumtensor T and any type of +% tensor. The operation is performed by distributing across each of the +% sumtensor's parts. +S = sptensor([1 1 1; 2 1 3; 3 2 2; 2 1 1], 1, [3,3,3]); +innerprod(T,S) +%% Use norm for compatibility with the other types of tensors. +% The |norm| function returns 0 and a warning when called on a sumtensor. +% The procedure of computing the Frobenius norm of a sumtensor +% does not distribute across its parts, and hence is not supported for +% sumtensors. This default behavior is provided in order to ensure +% compatibility of the sumtensor class with existing decomposition routines. 
+norm(T) +%% +% In order avoid this default behavior and return the Frobenius norm of a +% sumtensor, it can be converted to a tensor using |full|. +norm(full(T)) +%% Use cp_als to find a CP decomposition of a sumtensor +% One of the primary motivations for defining the |sumtensor| class is for +% efficient decomposition. In particular, when trying to find a CP +% decomposition of a tensor using alternating least squares, the +% subproblems can be efficiently created and solved using |mttkrp| and +% |innerprod|. Both of these operations can be performed more efficiently +% by exploiting extra structure in the tensors which form the sum, so the +% performance of |cp_als| is also improved. Consider the following example, +% where a |cp_als| is run on a sumtensor. +cp_als(T, 2) +%% +% It follows that in cases where $\mathcal{T}$ is too large for its full expansion to be +% stored in memory, we may still be able find a CP decomposition by exploiting the +% sumtensor structure. +%% +% Note that the fit returned by cp_als is not correct for sumtensors, +% because the norm operation is not supported. +%% Basic operations (plus) for sumtensors +% Sumtensors can be added to any other type of tensor. The result is a new +% sumtensor with the tensor appended to the parts of the original +% sumtensor. Note that the tensor is always appended, despite the order of +% the operation. +T+S %<--S is appended to the parts of T +S+T %<--S is still the last part of T, despite order +%% Subscripted reference for sumtensors +% Subscripted reference can be used to return the individual parts of a +% sumtensor. +T.part{1} +T.part{2} diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symktensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symktensor_doc.m new file mode 100644 index 0000000..74e15b6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symktensor_doc.m @@ -0,0 +1,208 @@ +%% Symmetric Kruskal tensors +% A symmetric Kruskal tensor is a decomposition of a tensor into a sum of +% vector outer products. The symmetric structure means that each term in +% the summand is the outer product of a single vector with itself $m$ times, +% where $m$ is the number of modes of the decomposed tensor. This contrasts +% with a generic , where each summand +% is an outer product of m different vectors. More concisely, a symmetric +% Kruskal tensor decomposition of a tensor $\mathcal{A}$ has the following form: +% +% $$\mathcal{A} = \sum_{i=1}^{r} x_{i}^{m}$$ +% +% In this notation, a subscript refers to a column index. A superscript +% refers to the outer product of a single vector with itself $m$ times. +% +% $$x^{m} = \underbrace{x \circ x \circ ... \circ x}_{\mbox{m-times}}.$$ +% +% The number of summands in the decomposition, $r$, is referred to as the +% number of components of the symmetric Kruskal tensor. +% +% An alternative, often equivalent expression for a symmetric Kruskal tensor +% decomposition specifies a real-valued weight for each of the summands +% in the outer product. The $r$-vector formed by these weights is referred +% to as the weight or lambda vector of the symmetric Kruskal decomposition. +% +% $$\mathcal{A} = \sum_{i=1}^{r} \lambda_{i} \; x_{i}^{m}$$ +% +% In certain cases the lambda vector is required in order for a symmetric +% Kruskal decomposition to exist, e.g. when a symmetric Kruskal tensor has +% an even number of components and the tensor to be decomposed has a negative +% element on its main diagonal. 
In many other cases, the lambda vector is +% optional and the symmetric Kruskal decomposition can be represented without +% specifying a lambda vector. +% +% The |symktensor| class stores symmetric Kruskal tensors, and exploits +% the extra symmetric structure to perform many calculations more +% efficiently. + +%% Declaring a symmetric Kruskal tensor with symktensor +% The |symktensor| format stores the vectors and weights of a symmetric +% Kruskal tensor decomposition. The vectors in the decomposition are +% collected as the columns of a matrix |X|, referred to as the factor matrix. +% The lambda vector, containing the (often optional) weights is input into the +% constructor as a column vector. The lambda vector and factor matrix are +% collectively referred to as the constituent parts in the declaration of a +% |symktensor|. For example, consider the decomposition of a tensor +% $\mathcal{A}$. +% +% $$\mathcal{A} = \sum_{r} \lambda_{r} \; x_{r}^{m}$$ +% +% In the example that follows, we form a symmetric Kruskal decomposition by +% specifying a factor matrix, lambda vector, and the number of modes of +% the decomposed tensor. We pass all three arguments to the |symktensor| +% constructor. +% This can be stored as a symmetric Kruskal tensor as follows. +n = 4; %The dimension in each mode of the tensor A +m = 3; %The number of modes of A +r = 2; %The rank of the decomposition +X = reshape(1:n*r,n,r); %The columns of this matrix are the vectors in decomposition +L = [1; -1]; %the weights (should be a column vector of length r) +S = symktensor(L, X, m) %Declare a symktensor object + +%% +% A |symktensor| object can be declared without a weight vector by +% specifiying the number of modes, the rank, and an additional 'nolambda' +% option. In this case, the lambda vector is set to a vector of all ones. +S2 = symktensor(X, m, r, true) +%% +% A random |symktensor| object can be declared by passing the +% constructor two arguments: the rank of the decomposition and a tensor or +% symtensor (for size). The lambda vector is taken to be all ones, and the +% factor matrix has elements drawn uniformly from (0,1). +T1 = tensor(n*ones(1,m)); %<-- Declare a tensor for size +T2 = symtensor(@ones, n,m); %<-- Declare a symtensor for size + +S2 = symktensor(r, T1) %<--Declare a random symktensor from tensor for size +S2 = symktensor(r, T2) %<--Declare a random symktensor from symtensor for size + + +%% +% This method of randomly generating a symktensor is useful when setting +% an initialization point in symmetric decomposition methods (i.e. +% |cp_sym|). +%% +% Lastly, a |symktensor| object can be declared from a vectorized +% version of the factor matrix and lambda vector, in which the lambda +% vector is stacked on top of a vectorized version of the factor matrix. +% The shape of the tensor must also be specified, by either passing a +% tensor/symtensor or listing the number of modes and the rank of the +% decomposition explicitly. Additionally, a 'nolambda' option can be added +% to any of these constructions, in which case the lambda vector should not +% be stacked onto the factor matrix. 
+V = [L; X(:)]; %<--Forming the vectorized version +S2 = symktensor(V, symtensor(@ones,m,n)) %<--size specified from symtensor + +S2 = symktensor(X(:), symtensor(@ones,m,n), true) %<--'nolambda' option + +S2 = symktensor(V, m, r) %<--size specified from modes and dimension + +S2 = symktensor(X(:), m, r, true) %<--size from modes and dimension, 'nolambda' option + +%% +% A symmetric Kruskal tensor can also be constructed directly from a generic +% Kruskal tensor in the |ktensor| format. If the Kruskal tensor is not +% symmetric, it is symmetrized by averaging the factor matrices and taking +% care to get the signs aligned. +K = ktensor(L, X-1, X+2, 2*X); +S2 = symktensor(K) +%% +% This method of declaring a symktensor is useful in comparing +% decomposition methods: this constructor allows any decomposition method +% which generates a ktensor CP model to also generate a symktensor. In this +% way, decomposition methods which are non-symmetric in nature may easily +% be applied to symmetric problems. +% +%% Use ndims and size for the dimensions of a symktensor +% For a given symktensor, |ndims| returns the number of dimensions (i.e. the +% number of modes) of the symmetric Kruskal tensor. |size| returns a size +% array of the symmetric Kruskal tensor. + +%Declaring a symmetric Kruskal tensor +ndims(S) +size(S) + +%% Use ncomponents for the rank of symktensor +% The function |ncomponents| returns the number of components of a +% |symktensor| object. This is $r$ in the symktensor's definition, the number +% of outer-product summands in the symmetric Kruskal tensor decomposition. +ncomponents(S) +%% Use full to convert a symktensor to a tensor +% The function |full| converts a symktensor to a tensor. +full(S) +%% Use double to convert a symktensor to a multi-dimensional array +% The function |double| converts a symktensor to a multi-dimensional array. +double(S) +%% Basic operations with symktensors +% Symktensors support multiplication by scalars. The result is the symktensor +% with the weight vector multiplied by the scalar. +4*S + +%% Use norm to compute the Frobenius norm of a symktensor +% The function |norm| returns the Frobenius norm of a symktensor. +norm(S) +%% Use normalize to normalize the components of a symktensor. +% The function |normalize| divides each of the columns in a factor matrix by its +% vector 2-norm. The 2-norm weight is then absorbed into the weight vector of +% that column. +normalize(S) + +%% +% By passing an additional $0$ argument to the normalize function, the +% weight vector is set to $\pm 1$ and the weights are absorbed into the +% factor matrix. +normalize(S,0) +%% Use arrange to normalize and sort a symktensor +% The function |arrange| normalizes the components of symktensor and sorts them +% according to the weight vector, in descending order. +arrange(S) +% Additionally, one can pass a permutation array of number of components of +% S. In this case the components are arranged according to the permutation. +arrange(S,[2 1]) +%% Computing the score of the match between two symktensors +% The function |score| provides a measure of similarity between two symktensors. +% Given two symktensors $R1$ and $R2$, we denote by $\lambda_{R1}$ and +% $\lambda_{R2}$ their respective weight vectors and |X| and |Y| their respective +% factor matrices. The function |score(R1,R2)| first normalizes the symtensor. +% It then attempts to match the symktensor $R1$ to $R2$ and returns the +% following numeric quantification of their similarity. 
+% +% $$\frac{1 - ||\lambda_{R1}-\lambda_{R2}||}{\max(\lambda_{R1}, \lambda_{R2})} \prod_{i=1}^{r} X_{i}' Y_{i}$$ +% +% In the above formula, $r$ is the number of components of $R1$. $R1$ must +% have at least as many components as $R2$. Any additional components are +% ignored in the score calculation. Since the formula for score depends on +% the arrangement of the components of $R1$, score rearranges $R1$ and tries a +% number of permuations. By default, $R1$ is rearranged by permuting indices +% greedily to increase the score. Calling |score| on two symktensors +% converts the symktensors to ktensors and calls the |score +R1 = symktensor([1; -1; 1], reshape(1:9, 3, 3), 3); %Declare some symtensors +R2 = symktensor([1; -1], reshape(1:6, 3,2), 3); + +score(R1, R2) %The score is 1 (perfect match) because the 1st 2 components of R1 match those of R2 +%% +% Calling |score| on two symktensor converts the symktensors to ktensors +% and calls the |score| function for ktensor. See the |ktensor/score| +% documentation for more information. +%% Subscripted reference for symktensors +% After defining a symktensor, one can reference its weight vector, factor +% matrix, or element using the following conventions. Note that elements +% are queried using multi-dimensional subscript notation, as opposed to +% linear. +S.lambda %<-- The weight vector +S.X %<-- The factor matrix + +S(1,2,1) %<-- Generate the element of index (1,2,1) from the factorization + +%% Subscripted assignment for symktensors +% Subscripted assignment can be used to change the order, weight vector, or +% factor matrix of a symktensor. First, we change the weight vector +S.lambda = [1;1] + +%% +% Next, we alter the factor matrix. U can be used instead of |X| in the +% notation that follows +S.X = [1 0; 0 1; 1 0; 0 1] +%% +% Lastly, we alter the order. This changes $m$, in the $m$-way outer product +% expansion of a symmetric Kruskal tensor. +S.m = 4 \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symtensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symtensor_doc.m new file mode 100644 index 0000000..f324b3e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/symtensor_doc.m @@ -0,0 +1,137 @@ +%% Symmetric tensors +% A symmetric tensor is a tensor that is invariant under all permutations +% of its modes. Because many of the elements of a symmetric tensor are +% guaranteed to be equal, we can simplify the storage requirements by only +% storing the unique values of the symmetric tensor. There are +% ${n+m-1}\choose{m}$ such values for an m-way tensor of dimension n. +% The |symtensor| class is designed to take advantage of this symmetric +% structure by only storing the unique values of the tensor. + +%% Definition of a symmetric tensor +% A symmetric tensor is invariant under any permutation of the indices. +% Here is a small example. The |issymmetric| function checks symmetry of a +% dense tensor. +T(:,:,1) = [1 2; 2 3]; T(:,:,2)= [2 3; 3 6]; +T = tensor(T) +issymmetric(T) + +%% Creating a symtensor from a symmetric tensor +% We can construct a |symtensor| object from a symmetric tensor. This +% object only stores the unique entries of the tensor. For the 2 x 2 x 2 +% tensor, this means there are only four unique entries. Everything else +% comes from permuting the indices of those four entries. +S = symtensor(T) + +%% Unique entries of a tensor +% Note from TGK: This needs to be added. It should have some discussion of +% all the return values from indices. 
What is a monomial description, etc. +[I,C,W,Q] = indices(S) + +%% Creating a symtensor from a nonsymmetric tensor +% A symmetric tensors can be created from the symmetrization of +% nonsymmetric tensor so long as it is the same size in every mode. +% If the input is not symmetric, it is symmetrized by creating an average +% of elements in the same permutation class. For instance, this example +% starts with a nonsymmetric tensor and symmetrizes it: +T2 = tensor([1:8],[2 2 2]) +S2 = symtensor(T2) + +%% +% Converting the symtensor back to a generic tensor is equivalent to +% running |symmetrize| on the original tensor. In the following example, +% the full command converts a symtensor to a tensor. +full(S2) +symmetrize(T2) + +%% Create an all ones symtensor +% The first argument is the generating function, the second argument is the +% number of modes, and the third argument is the size of each mode. +S3 = symtensor(@ones, 3, 2) + +%% Create a random symtensor +S4 = symtensor(@randn, 3, 2) + +%% Using a generating function to populate a symmetric tensor +% In general, a symmetric tensor can also have its entries created by any +% generating function. This is done by passing a function handle, the +% number of modes, and the dimension. The function is expected to take a +% two-dimension size as input and return a matrix of that shape. In fact, +% the second argument to the function will always be 1. + +% For example, we can also declare a binary symmetric tensor as follows: +S5 = symtensor(@(x,y) double(rand(x,y)>.25), 3, 3) + +%% Use ndims and size to get the size of a symmetric tensor +ndims(S) %<-- Number of modes of the symmetric tensor + +%% +size(S) %<-- Size of a symmetric tensor + +%% Use full to convert a symmetric tensor to a multidimensional array +full(S) %<-- Converts from a symmetric tensor to a tensor + +%% Subscripted reference of a symmetric tensor +% Subindex notation extracts the tensor value. +S(1,2,2) +S(2,1,2) %<-- Equal to above, by symmetry + +%% +% This works the same as applying it to the full tensor. +T(1,2,2) +T(2,1,2) + +%% +% Multiple indices can be queried by combining these indices into the rows +% of a matrix. Consider the following example, which returns a vector +% consisting of the values of S at indices indicated by the rows of the +% input matrix. +S([1 2 1; 2 1 2]) + +%% +% Single indices are interpretted as an index into the unique value array, +% which is stored with respect to increasing indices. This is very +% different than using linear indexing on the full tensor. +S(3) %<- Third unique entry corresponding to (1,2,2) +S(4) %<- Fourth unique entry, corresponding to (2,2,2) +T(3) %<- Third entry in the tensor, i.e., (1,2,1) = (1,1,2) +T(4) %<- Fourth entry in the tensor, i.e., (2,2,1) = (1,2,2) + +%% +% Mulitple entries can be obtained at once as well. +S([3:4]') + +%% +%% Subscripted assignment +% Symmetric tensors also support subscripted assignment. Either linear or +% subindex notation is valid. Multiple values can be assigned the same +% quantity, but assigning a subset of a symmetric tensor from a +% multidimensional arrays, tensor, or symtensor is not allowed. +S5(1) = 7 %<-- Linear indexing +S5(2,1,2) = 6 %<-- Subindex indexing +%% +% Symmetric tensors do not support enlargement with the assignment +% operator, so assigning a value to an index other than those which have +% already been declared produces an error. +%% Basic operations (plus, minus, and, or, etc.) 
on a symmetric tensor +% The tensor object supports many basic operations, illustrated here. +A = symtensor(@(x,y) rand(x,y)>.5, 3, 2) +B = symtensor(@(x,y) rand(x,y)>.5, 3, 2) +%% +A==B %<-- Calls eq. +%% +A. +% +%% Creating a tensor from an array +% The |tensor| command converts a (multidimensional) array to a tensor +% object. +M = ones(4,3,2); %<-- A 4 x 3 x 2 array. +X = tensor(M) %<-- Convert to a tensor object. +%% +% Optionally, you can specify a different shape for the tensor, so +% long as the input array has the right number of elements. +X = tensor(M,[2 3 4]) %<-- M has 24 elements. +%% Creating a one-dimensional tensor +% The tensor class explicitly supports order-one tensors as well as +% trailing singleton dimensions, but the size must be explicit in the +% constructor. By default, a column array produces a 2-way tensor. +X = tensor(rand(5,1)) %<-- Creates a 2-way tensor. +%% +% This is fixed by specifying the size explicitly. +X = tensor(rand(5,1),5) %<-- Creates a 1-way tensor. +%% Specifying trailing singleton dimensions in a tensor +% Likewise, trailing singleton dimensions must be explictly specified. +Y = tensor(rand(4,3,1)) %<-- Creates a 2-way tensor. +%% +Y = tensor(rand(4,3,1),[4 3 1]) %<-- Creates a 3-way tensor. +%% +% Unfortunately, the |whos| command does not report the size of 1D +% objects correctly (last checked for MATLAB 2006a). +whos X Y %<-- Doesn't report the right size for X! +%% The constituent parts of a tensor +X = tenrand([4 3 2]); %<-- Create data. +X.data %<-- The array. +%% +X.size %<-- The size. +%% Creating a tensor from its constituent parts +Y = tensor(X.data,X.size) %<-- Copies X. +%% Creating an empty tensor +% An empty constructor exists, primarily to support loading previously +% saved data in MAT-files. +X = tensor %<-- Creates an empty tensor. +%% Use tenone to create a tensor of all ones +X = tenones([3 4 2]) %<-- Creates a 3 x 4 x 2 tensor of ones. +%% Use tenzeros to create a tensor of all zeros +X = tenzeros([1 4 2]) %<-- Creates a 1 x 4 x 2 tensor of zeros. +%% Use tenrand to create a random tensor +X = tenrand([5 4 2]) %<-- Creates a random 5 x 4 x 2 tensor. +%% Use squeeze to remove singleton dimensions from a tensor +squeeze(Y) %<-- Removes singleton dimensions. +%% Use double to convert a tensor to a (multidimensional) array +double(Y) %<-- Converts Y to a standard MATLAB array. +%% +Y.data %<-- Same thing. +%% Use ndims and size to get the size of a tensor +ndims(Y) %<-- Number of dimensions (or ways). +%% +size(Y) %<-- Row vector with the sizes of all dimension. +%% +size(Y,3) %<-- Size of a single dimension. +%% Subscripted reference for a tensor +X = tenrand([3 4 2 1]); %<-- Create a 3 x 4 x 2 x 1 random tensor. +X(1,1,1,1) %<-- Extract a single element. +%% +% It is possible to extract a subtensor that contains a single +% element. Observe that singleton dimensions are *not* dropped unless +% they are specifically specified, e.g., as above. +X(1,1,1,:) %<-- Produces a tensor of order 1 and size 1. +%% +% In general, specified dimensions are dropped from the result. Here +% we specify the second and third dimension. +X(:,1,1,:) %<-- Produces a tensor of size 3 x 1. +%% +% Moreover, the subtensor is automatically renumbered/resized in the +% same way that MATLAB works for arrays except that singleton +% dimensions are handled explicitly. +X(1:2,[2 4],1,:) %<-- Produces a tensor of size 2 x 2 x 1. 
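+%%
+% As an added check (a sketch, not part of the original documentation),
+% |size| confirms that the scalar index in mode 3 is dropped while the
+% explicitly requested singleton in mode 4 is kept.
+size(X(1:2,[2 4],1,:)) %<-- Expected to be [2 2 1].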
+%% +% It's also possible to extract a list of elements by passing in an +% array of subscripts or a column array of linear indices. +subs = [1,1,1,1; 3,4,2,1]; X(subs) %<-- Extract 2 values by subscript. +%% +inds = [1; 24]; X(inds) %<-- Same thing with linear indices. +%% +% The difference between extracting a subtensor and a list of linear +% indices is ambiguous for 1-dimensional tensors. We can specify +% 'extract' as a second argument whenever we are using a list of +% subscripts. +X = tenrand(10); %<-- Create a random tensor. +X([1:6]') %<-- Extract a subtensor. +%% +X([1:6]','extract') %<-- Same thing *but* result is a vector. +%% Subscripted assignment for a tensor +% We can assign a single element, an entire subtensor, or a list of +% values for a tensor. +X = tenrand([3,4,2]); %<-- Create some data. +X(1,1,1) = 0 %<-- Replaces the (1,1,1) element. +%% +X(1:2,1:2,1) = ones(2,2) %<-- Replaces a 2 x 2 subtensor. +%% +X([1 1 1;1 1 2]) = [5;7] %<-- Replaces the (1,1,1) and (1,1,2) + %elements. +%% +X([1;13]) = [5;7] %<-- Same as above using linear indices. +%% +% It is possible to *grow* the tensor automatically by assigning +% elements outside the original range of the tensor. +X(1,1,3) = 1 %<-- Grows the size of the tensor. +%% Using end for the last array index. +X(end,end,end) %<-- Same as X(3,4,3). +%% +X(1,1,1:end-1) %<-- Same as X(1,1,1:2). +%% +% It is also possible to use |end| to index past the end of an array. +X(1,1,end+1) = 5 %<-- Same as X(1,1,4). +%% Use find for subscripts of nonzero elements of a tensor +% The |find| function returns a list of nonzero *subscripts* for a +% tensor. Note that differs from the standard version, which returns +% linear indices. +X = tensor(floor(3*rand(2,2,2))) %<-- Generate some data. +%% +[S,V] = find(X) %<-- Find all the nonzero subscripts and values. +%% +S = find(X >= 2) %<-- Find subscripts of values >= 2. +%% +V = X(S) %<-- Extract the corresponding values from X. +%% Computing the Frobenius norm of a tensor +% |norm| computes the Frobenius norm of a tensor. This corresponds to +% the Euclidean norm of the vectorized tensor. +T = tensor(randn(2,3,3)); +norm(T) +%% Using reshape to rearrange elements in a tensor +% |reshape| reshapes a tensor into a given size array. The total +% number of elements in the tensor cannot change. +X = tensor(randi(10,3,2,3)); +reshape(X,[3,3,2]); +%% Basic operations (plus, minus, and, or, etc.) on a tensor +% The tensor object supports many basic operations, illustrated here. +A = tensor(floor(3*rand(2,3,2))) +B = tensor(floor(3*rand(2,3,2))) +%% +A & B %<-- Calls and. +%% +A | B %<-- Calls or. +%% +xor(A,B) %<-- Calls xor. +%% +A==B %<-- Calls eq. +%% +A~=B %<-- Calls neq. +%% +A>B %<-- Calls gt. +%% +A>=B %<-- Calls ge. +%% +A= 0.8. + +% Specify 80% missing data and sparse +info = create_problem('Size', [5 4 3], 'M', 0.80, 'Sparse_M', true); + +%% + +% Here is the pattern of known data +info.Pattern + +%% + +% Here is the data (incl. noise) with missing entries zeroed out +info.Data + +%% Create missing data problems with a pre-specified pattern +% It's also possible to provide a specific pattern (dense or sparse) to be +% used to specify where data should be missing. + +% Create pattern +P = tenrand([5 4 3]) > 0.5; +% Create test problem with that pattern +info = create_problem('Size', size(P), 'M', P); +% Show the data +info.Data + +%% Creating sparse problems (CP only) +% If we assume each model parameter is the input to a Poisson process, then +% we can generate a sparse test problems. 
This requires that all the factor +% matrices and lambda be nonnegative. The default factor generator +% ('randn') won't work since it produces both positive and negative values. + +% Generate factor matrices with a few large entries in each column; this +% will be the basis of our soln. +sz = [20 15 10]; +nf = 4; +A = cell(3,1); +for n = 1:length(sz) + A{n} = rand(sz(n), nf); + for r = 1:nf + p = randperm(sz(n)); + idx = p(1:round(.2*sz(n))); + A{n}(idx,r) = 10 * A{n}(idx,r); + end +end +S = ktensor(A); +S = normalize(S,'sort',1); +%% + +% Create sparse test problem based on provided solution. The +% 'Sparse_Generation' says how many insertions to make based on the +% provided solution S. The lambda vector of the solution is automatically +% rescaled to match the number of insertions. +info = create_problem('Soln', S, 'Sparse_Generation', 500); +num_nonzeros = nnz(info.Data) +total_insertions = sum(info.Data.vals) +orig_lambda_vs_rescaled = S.lambda ./ info.Soln.lambda + +%% Generating an initial guess +% The |create_guess| function creates a random initial guess as a cell +% array of matrices. Its behavior is very similar to |create_problem|. A +% nice option is that you can generate an initial guess that is a +% pertubation of the solution. + +info = create_problem; + +% Create an initial guess to go with the problem that is just a 5% +% pertubation of the correct solution. +U = create_guess('Soln', info.Soln, 'Factor_Generator', 'pertubation', ... + 'Pertubation', 0.05); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ttensor_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ttensor_doc.m new file mode 100644 index 0000000..b4bb692 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/ttensor_doc.m @@ -0,0 +1,77 @@ +%% Tucker tensors +% Tucker format is a decomposition of a tensor X as the product of a core +% tensor G and matrices (e.g., A,B,C) in each dimension. In other words, a +% tensor X is expressed as: +% +% $${\mathcal X} = {\mathcal G} \times_1 A \times_2 B \times_2 C$$ +% +% In MATLAB notation, |X=ttm(G,{A,B,C})|. The |ttensor| class stores the +% components of the tensor X and can perform many operations, e.g., |ttm|, +% without explicitly forming the tensor X. +%% Creating a ttensor with a tensor core +core = tensor(rand(3,2,1),[3 2 1]); %<-- The core tensor. +U = {rand(5,3), rand(4,2), rand(3,1)}; %<-- The matrices. +X = ttensor(core,U) %<-- Create the ttensor. +%% Alternate core formats: sptensor, ktensor, or ttensor +core1 = sptenrand([3 2 1],3); %<-- Create a 3 x 2 x 1 sptensor. +Y = ttensor(core1,U) %<-- Core is a sptensor. +%% +V = {rand(3,2),rand(2,2),rand(1,2)}; %<-- Create some random matrices. +core2 = ktensor(V); %<-- Create a 3 x 2 x 1 ktensor. +Y = ttensor(core2,U) %<-- Core is a ktensor. +%% +core3 = ttensor(tensor(1:8,[2 2 2]),V); %<-- Create a 3 x 2 x 1 ttensor. +Y = ttensor(core3,U) %<-- Core is a ttensor. +%% Creating a one-dimensional ttensor +Z = ttensor(tensor(rand(2,1),2), rand(4,2)) %<-- One-dimensional ttensor. +%% Constituent parts of a ttensor +X.core %<-- Core tensor. +%% +X.U %<-- Cell array of matrices. +%% Creating a ttensor from its constituent parts +Y = ttensor(X.core,X.U) %<-- Recreate a tensor from its parts. +%% Creating an empty ttensor. +X = ttensor %<-- empty ttensor +%% Use full or tensor to convert a ttensor to a tensor +X = ttensor(core,U) %<-- Create a tensor +%% +full(X) %<-- Converts to a tensor. +%% +tensor(X) %<-- Also converts to a tensor. 
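+%%
+% As a quick consistency check (an added sketch, not part of the original
+% documentation), the dense tensor returned by |full| should agree with the
+% explicit mode-n products |ttm(G,{A,B,C})| described in the introduction.
+norm(full(X) - ttm(X.core, X.U)) %<-- Should be zero up to roundoff.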
+%% Use double to convert a ttensor to a (multidimensional) array +double(X) %<-- Converts to a MATLAB array +%% Use ndims and size to get the size of a ttensor +ndims(X) %<-- Number of dimensions. +%% +size(X) %<-- Row vector of the sizes. +%% +size(X,2) %<-- Size of the 2nd mode. +%% Subscripted reference to a ttensor +X.core(1,1,1) %<-- Access an element of the core. +%% +X.U{2} %<-- Extract a matrix. +%% +X{2} %<-- Same as above. +%% Subscripted assignment for a ttensor +X.core = tenones(size(X.core)) %<-- Insert a new core. +%% +X.core(2,2,1) = 7 %<-- Change a single element. +%% +X{3}(1:2,1) = [1;1] %<-- Change the matrix for mode 3. +%% Using end for last index +X{end} %<-- The same as X{3}. +%% Basic operations (uplus, uminus, mtimes) for a ttensor. +X = ttensor(tenrand([2 2 2]),{rand(3,2),rand(1,2),rand(2,2)}) %<-- Data. ++X %<-- Calls uplus. +%% +-X %<-- Calls uminus. +%% +5*X %<-- Calls mtimes. +%% Use permute to reorder the modes of a ttensor +permute(X,[3 2 1]) %<-- Reverses the modes of X +%% Displaying a ttensor +% The tensor displays by displaying the core and each of the component +% matrices. +disp(X) %<-- Prints out the ttensor. + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/doc/tucker_als_doc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/tucker_als_doc.m new file mode 100644 index 0000000..5b35c91 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/doc/tucker_als_doc.m @@ -0,0 +1,34 @@ +%% Alternating least squares for Tucker model +% The function |tucker_als| computes the best rank(R1,R2,..,Rn) +% approximation of tensor X, according to the specified dimensions in +% vector R. The input X can be a tensor, sptensor, ktensor, or +% ttensor. The result returned in T is a ttensor. +% +% The method is originally from Tucker (1966) and later revisited in +% De Lathauwer et al. (2000). +% +% * Tucker, L. R. +% Some mathematical notes on three-mode factor analysis. +% Psychometrika, 1966, 31, 279-311. +% * De Lathauwer, L.; De Moor, B. & Vandewalle, J. +% On the best rank-1 and rank-(R_1, R_2, R_N) approximation of +% higher-order tensors. +% SIAM Journal on Matrix Analysis and Applications, 2000, 21, 1324-1342. +% +% Note: Oftentimes it's better to use |hosvd| instead. + +%% Create a data tensor of size [5 4 3] +rng('default'); rng(0); %<-- Set seed for reproducibility +X = sptenrand([5 4 3], 10) +%% Create a [2 2 2] approximation +T = tucker_als(X,2) %<-- best rank(2,2,2) approximation +%% Create a [2 2 1] approximation +T = tucker_als(X,[2 2 1]) %<-- best rank(2,2,1) approximation +%% Use a different ordering of the dimensions +T = tucker_als(X,2,struct('dimorder',[3 2 1])) +%% Use the n-vecs initialization method +% This initialization is more expensive but generally works very well. +T = tucker_als(X,2,struct('dimorder',[3 2 1],'init','eigs')) +%% Specify the initial guess manually +U0 = {rand(5,2),rand(4,2),[]}; %<-- Initial guess for factors of T +T = tucker_als(X,2,struct('dimorder',[3 2 1],'init',{U0})) diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/eig_geap.m b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_geap.m new file mode 100644 index 0000000..436b882 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_geap.m @@ -0,0 +1,385 @@ +function [varargout] = eig_geap(A,B,varargin) +%EIG_GEAP Shifted power method for generalized tensor eigenproblem. 
+% +% [LAMBDA,X] = EIG_GEAP(A,B) finds an eigenvalue (LAMBDA) and eigenvector +% (X) for the real tensor A and the positive definite tensor B such that +% Ax^{m-1} = lambda * Bx^{m-1}. +% +% [LAMBDA,X] = EIG_GEAP(A,B,parameter,value,...) can specify additional +% parameters as follows: +% +% 'Shift' : Shift for eigenvalue calculation (Default: 'Adaptive') +% 'Margin' : Margin for positive/negative definiteness in adaptive +% shift caluclation. (Default: 1e-6) +% 'MaxIts' : Maximum power method iterations (Default: 1000) +% 'Start' : Initial guess (Default: normal random vector) +% 'Tol' : Tolerance on norm of change in |lambda| (Default: 1e-15) +% 'Concave' : Treat the problem as concave rather than convex. +% (Default: true for negative shift; false otherwise.) +% 'Display' : Display every n iterations (Default: -1 for no display) +% +% [LAMBDA,X,FLAG] = EIG_GEAP(...) also returns a flag indicating +% convergence. +% +% FLAG = 0 => Succesfully terminated +% FLAG = -1 => Norm(X) = 0 +% FLAG = -2 => Maximum iterations exceeded +% +% INFO = EIG_GEAP(...) returns a structure with the above plus other +% information, including the starting guess, the number of iterations, +% the final shift, the number of monotinicity violations, and a trace of +% the lambdas. +% +% REFERENCE: T. G. Kolda and J. R. Mayo, An Adaptive Shifted Power Method +% for Computing Generalized Tensor Eigenpairs, SIAM Journal on Matrix +% Analysis and Applications 35(4):1563-1581, December 2014, +% http://dx.doi.org/10.1137/140951758 +% +% See also EIG_SSHOPM, TENSOR, SYMMETRIZE, ISSYMMETRIC. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Error checking on A +N = size(A,1); + + +%% Check inputs +p = inputParser; +p.addParamValue('Shift', 'adaptive', @(x) strcmpi(x,'adaptive') || isscalar(x)); +p.addParamValue('MaxIts', 1000, @(x) isscalar(x) && (x > 0)); +p.addParamValue('Start', [], @(x) isequal(size(x),[N 1])); +p.addParamValue('Tol', 1.0e-15, @(x) isscalar(x) && (x > 0)); +p.addParamValue('Display', -1, @isscalar); +p.addParamValue('Concave', false, @islogical); +p.addParamValue('Margin', 1e-6, @(x) isscalar(x) && (x > 0)); +p.addParamValue('SkipChecks', false); +p.parse(varargin{:}); + +%% Copy inputs +maxits = p.Results.MaxIts; +x0 = p.Results.Start; +shift = p.Results.Shift; +tol = p.Results.Tol; +display = p.Results.Display; +concave = p.Results.Concave; +margin = p.Results.Margin; +skipchecks = p.Results.SkipChecks; + +%% Check inputs +if ~skipchecks + if ~issymmetric(A) + error('Tensor A must be symmetric') + end + + if ~isempty(B) + if ~issymmetric(B) + error('Tensor B must be symmetric'); + end + + if ~isequal(size(A),size(B)) + error('A and B must be the same size'); + end + end +end + +%% Check shift +if ~isnumeric(shift) + adaptive = true; + shift = 0; +else + adaptive = false; +end + +%% Check starting vector +if isempty(x0) + x0 = 2*rand(N,1)-1; +end + +if norm(x0) < eps + error('Zero starting vector'); +end + +%% Check concavity +if concave + beta = -1; +else + beta = 1; +end + +if ~adaptive + if (shift < 0) && (beta == 1) + error('Set ''concave'' to true for a negative shift'); + elseif (shift > 0) && (beta == -1) + error('Set ''concave'' to false for a positive shift'); + end +end + +%% Execute power method +if (display >= 0) + fprintf('Generalized Adaptive Tensor Eigenpair Power Method: '); + if (beta == -1) + fprintf('Concave '); + else + fprintf('Convex '); + end + fprintf('\n'); + fprintf('---- --------- ----- ------------ -----\n'); + fprintf('Iter Lambda Diff |newx-x| 
Shift\n'); + fprintf('---- --------- ----- ------------ -----\n'); +end + +flag = -2; + +x = x0 / norm(x0); +data = geap_data(x,A,B); +lambda = data.Axm / data.Bxm; +nviols = 0; +lambdatrace = zeros(maxits+1,1); +lambdatrace(1) = lambda; +if adaptive + shifttrace = zeros(maxits,1); +else + shifttrace = shift * ones(maxits,1); +end + +for its = 1:maxits + + if adaptive + tmp = min( eig( beta * geap_hessian(data) ) ); + shift = beta * max(0, ( margin / data.m ) - tmp); + shifttrace(its) = shift; + end + if data.Bex + newx = beta * (data.Axm1 - lambda * data.Bxm1 + (shift + lambda) * data.Bxm * x); + else + newx = beta * (data.Axm1 + shift * x); + end + + nx = norm(newx); + if nx < eps, + flag = -1; + break; + end + newx = newx / nx; + newdata = geap_data(newx,A,B); + newlambda = newdata.Axm / newdata.Bxm; + + + if norm(abs(newlambda-lambda)) < tol + flag = 0; + elseif (beta == 1) && (newlambda < lambda) + if (display > 0) + warning('Lambda is decreasing by %e when it should be increasing', abs(lambda-newlambda)); + end + nviols = nviols + 1; + elseif (beta == -1) && (newlambda > lambda) + if (display > 0) + warning('Lambda is increasing by %e when it should be decreasing', abs(lambda-newlambda)); + end + nviols = nviols + 1; + end + + if (display > 0) && ((flag == 0) || (mod(its,display) == 0)) + + % Iteration Number + fprintf('%4d ', its); + + % Lambda + fprintf('%9.6f ', newlambda); + d = newlambda-lambda; + if (d ~= 0) + if (d < 0), c = '-'; else c = '+'; end + fprintf('%ce%+03d ', c, round(log10(abs(d)))); + else + fprintf(' '); + end + + % Change in X + fprintf('%8.6e ', norm(newx-x)); + + % Shift + fprintf('%5.2f', shift); + + % Line end + fprintf('\n'); + end + + x = newx; + data = newdata; + lambda = newlambda; + lambdatrace(its+1) = lambda; + + if flag == 0 + break + end +end + +%% Check results +if (display >=0) + switch(flag) + case 0 + fprintf('Successful Convergence'); + case -1 + fprintf('Converged to Zero Vector'); + case -2 + fprintf('Exceeded Maximum Iterations'); + otherwise + fprintf('Unrecognized Exit Flag'); + end + fprintf('\n'); +end + +%% Process output + +nout = max(nargout,1); +if nout == 1 + + % Save everything in info + info.lambda = lambda; + info.x = x; + info.flag = flag; + info.x0 = x0; + info.its = its; + info.nviols = nviols; + info.shift = shift; + info.lambdatrace = lambdatrace(1:its+1); + info.shifttrace = shifttrace(1:its); + + varargout{1} = info; + +elseif nout >= 2 + + varargout{1} = lambda; + varargout{2} = x; + + if nout == 3 + varargout{3} = flag; + end +end + +%% ---------------------------------------------------- +function data = geap_data(x,A,B) +%GEAP_DATA Computes values needed for Generalized Tensor Eigenproblem +% +% DATA = GEAP_DATA(X,A,B) assumes X is a vector and A and B are symmetric +% tensors of appropriate sizes. No checking for sizes or symmetry are +% enforced. The following quanties are computed... +% +% - DATA.x - original X vector +% - DATA.m - ndims(A) +% - DATA.normx - norm(X) +% - DATA.normxeq1 - True if |norm(X)-1|<10*eps +% - DATA.nxm - norm(X)^ndims(A) +% - DATA.Axm - ttsv(A,X) +% - DATA.Axm1 - ttsv(A,X,-1) +% - DATA.Axm2 - ttsv(A,X,-2) +% - DATA.Bex - true, incidating B tensor is specified. +% - DATA.Bxm - ttsv(B,X) +% - DATA.Bxm1 - ttsv(B,X,-1) +% - DATA.Bxm2 - ttsv(B,X,-2) +% +% Alternatively, if B is empty, then +% - DATA.Bex - false +% - DATA.Bxm - 1 +% - DATA.Bxm1 - X +% - DATA.Bxm2 - [] +% +% See also GEAP_FUNCTION, GEAP_GRADIENT, GEAP_HESSIAN, GEAP. 
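+%
+% Example of these quantities (an illustrative sketch, not part of the
+% original help; GEAP_DATA is a local function, so this only illustrates
+% the fields it returns):
+%   A = symmetrize(tenrand([3 3 3]));
+%   x = rand(3,1); x = x/norm(x);
+%   data = geap_data(x,A,[]);    % B empty, so data.Bex is false
+%   data.Axm - ttsv(A,x)         % should be zero up to roundoff
+%   data.Axm1 - ttsv(A,x,-1)     % should be a zero vector up to roundoff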
+ + +data.x = x; +data.m = ndims(A); +data.normx = norm(x); +data.normxeq1 = abs(data.normx-1) < 10*eps; +data.nxm = (data.normx)^(data.m); +data.Axm2 = ttsv(A,x,-2); +data.Axm1 = data.Axm2*x; +data.Axm = data.Axm1'*x; + +if isempty(B) + + data.Bex = false; + data.Bxm = 1; + data.Bxm1 = x; + data.Bxm2 = []; + +else + + data.Bex = true; + data.Bxm2 = ttsv(B,x,-2); + data.Bxm1 = data.Bxm2*x; + data.Bxm = data.Bxm1'*x; + + if data.Bxm < 0 + disp(data.x) + disp(data.Bxm) + error('B is not positive definite') + end + +end + +function H = geap_hessian(data,alpha,dividebym) +%GEAP_HESSIAN Computes Generalized Tensor Eigenproblem gradient. +% +% G = GEAP_HESSIAN(DATA) returns the GEAP function Hessian divided by +% DATA.m, where DATA is the result of calling the GEAP_DATA function. +% +% G = GEAP_FUNTION(DATA,ALPHA) returns the Hessian of the shifted GEAP +% function, where the shift if ALPHA. +% +% G = GEAP_FUNCTION(DATA,ALPHA,FALSE) does not divide the result by +% data.m. +% +% See also GEAP_DATA, GEAP_FUNCTION, GEAP_GRADIENT, GEAP. + +if ~exist('alpha','var') + alpha = 0; +end + +if ~exist('dividebym','var') + dividebym = true; +end + +if (~data.normxeq1) + warning('Norm(x) = %e, but should be 1.\n', data.normx); +end + +m = data.m; +x = data.x; +n = size(x,1); +Axm = data.Axm; +Axm1 = data.Axm1; +Axm2 = data.Axm2; +xxt = x*x'; +mat4 = eye(n) + (m-2) * xxt; + +if data.Bex + Bxm = data.Bxm; + Bxm1 = data.Bxm1; + Bxm2 = data.Bxm2; + + mat1 = symprod(Bxm1,Bxm1); + mat2 = symprod(Axm1,x); + mat3 = symprod(Axm1,Bxm1); + mat5 = symprod(Bxm1,x); + + H1dm = ((m*Axm)/(Bxm^3)) * mat1 ... + + (1/Bxm) * ( (m-1) * Axm2 + m * mat2 + Axm * mat4 ) ... + - (1/Bxm^2) * ( m * mat3 + (m-1) * Axm * Bxm2 + m * Axm * mat5); +else + H1dm = (m-1)*Axm2; +end + +H2dm = alpha * mat4; +H = H1dm + H2dm; + +if ~dividebym + H = m * H; +end + +function M = symprod(a,b) +M = a*b' + b*a'; + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopm.m new file mode 100644 index 0000000..398265a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopm.m @@ -0,0 +1,210 @@ +function [lambda,x,flag,its,x0] = eig_sshopm(A,varargin) +%EIG_SSHOPM Shifted power method for finding real eigenpair of real tensor. +% +% [LAMBDA,X]=EIG_SSHOPM(A) finds an eigenvalue (LAMBDA) and eigenvector +% (X) for the real tensor A such that Ax^{m-1} = lambda*x. +% +% [LAMBDA,X]=EIG_SSHOPM(A,parameter,value,...) can specify additional +% parameters as follows: +% +% 'Shift' : Shift for eigenvalue calculation (Default: 'Adaptive') +% 'Margin' : Margin for positive/negative definiteness in adaptive +% shift caluclation. (Default: 1e-6) +% 'MaxIts' : Maximum power method iterations (Default: 1000) +% 'Start' : Initial guess (Default: normal random vector) +% 'Tol' : Tolerance on norm of change in |lambda| (Default: 1e-15) +% 'Concave' : Treat the problem as concave rather than convex. +% (Default: true for negative shift; false otherwise.) +% 'Display' : Display every n iterations (Default: -1 for no display) +% +% [LAMBDA,X,FLAG]=EIG_SSHOPM(...) also returns a flag indicating convergence. +% +% FLAG = 0 => Succesfully terminated +% FLAG = -1 => Norm(X) = 0 +% FLAG = -2 => Maximum iterations exceeded +% +% [LAMBDA,X,FLAG,IT]=EIG_SSHOPM(...) also returns the number of iterations. +% +% [LAMBDA,X,FLAG,IT,X0]=EIG_SSHOPM(...) also returns the intial guess. +% +% REFERENCES: +% * T. G. Kolda and J. R. 
Mayo, Shifted Power Method for Computing Tensor +% Eigenpairs, SIAM Journal on Matrix Analysis and Applications +% 32(4):1095-1124, October 2011, http://dx.doi/org/10.1137/100801482 +% * T. G. Kolda and J. R. Mayo, An Adaptive Shifted Power Method for +% Computing Generalized Tensor Eigenpairs, SIAM Journal on Matrix +% Analysis and Applications 35(4):1563-1582, December 2014, +% http://dx.doi.org/0.1137/140951758 +% +% See also EIG_GEAP, EIG_SSHOPMC, TENSOR, SYMMETRIZE, ISSYMMETRIC. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + + +%% Error checking on A +P = ndims(A); +N = size(A,1); + +if ~issymmetric(A) + error('Tensor must be symmetric.') +end + +%% Check inputs +p = inputParser; +p.addParamValue('Shift', 'adaptive'); +p.addParamValue('MaxIts', 1000, @(x) isscalar(x) && (x > 0)); +p.addParamValue('Start', [], @(x) isequal(size(x),[N 1])); +p.addParamValue('Tol', 1.0e-15, @(x) isscalar(x) && (x > 0)); +p.addParamValue('Display', -1, @isscalar); +p.addParamValue('Concave', false, @islogical); +p.addParamValue('Margin', 1e-6, @(x) isscalar(x) && (x > 0)); +p.parse(varargin{:}); + +%% Copy inputs +maxits = p.Results.MaxIts; +x0 = p.Results.Start; +shift = p.Results.Shift; +tol = p.Results.Tol; +display = p.Results.Display; +concave = p.Results.Concave; +margin = p.Results.Margin; + +%% Check shift +if ~isnumeric(shift) + adaptive = true; + shift = 0; +else + adaptive = false; +end + +%% Check starting vector +if isempty(x0) + x0 = 2*rand(N,1)-1; +end + +if norm(x0) < eps + error('Zero starting vector'); +end + +%% Check concavity +if shift ~= 0 + concave = (shift < 0); +end + +%% Execute power method +if (display >= 0) + fprintf('TENSOR SHIFTED POWER METHOD: '); + if concave + fprintf('Concave '); + else + fprintf('Convex '); + end + fprintf('\n'); + fprintf('---- --------- ----- ------------ -----\n'); + fprintf('Iter Lambda Diff |newx-x| Shift\n'); + fprintf('---- --------- ----- ------------ -----\n'); +end + +flag = -2; +x = x0 / norm(x0); +lambda = x'*ttsv(A,x,-1); +if adaptive + shift = adapt_shift(A,x,margin,concave); +end + +for its = 1:maxits + + newx = ttsv(A,x,-1) + shift * x; + + if (concave) + newx = -newx; + end + + nx = norm(newx); + if nx < eps, + flag = -1; + break; + end + newx = newx / nx; + + newlambda = newx'* ttsv(A,newx,-1); + + if adaptive + newshift = adapt_shift(A,newx,margin,concave); + else + newshift = shift; + end + + if norm(abs(newlambda-lambda)) < tol + flag = 0; + end + + if (display > 0) && ((flag == 0) || (mod(its,display) == 0)) + fprintf('%4d ', its); + % Lambda + fprintf('%9.6f ', newlambda); + d = newlambda-lambda; + if (d ~= 0) + if (d < 0), c = '-'; else c = '+'; end + fprintf('%ce%+03d ', c, round(log10(abs(d)))); + else + fprintf(' '); + end + % Change in X + fprintf('%8.6e ', norm(newx-x)); + + % Shift + fprintf('%5.2f', shift); + + % Line end + fprintf('\n'); + end + + x = newx; + lambda = newlambda; + shift = newshift; + + if flag == 0 + break + end +end + +%% Check results +if (display >=0) + switch(flag) + case 0 + fprintf('Successful Convergence'); + case -1 + fprintf('Converged to Zero Vector'); + case -2 + fprintf('Exceeded Maximum Iterations'); + otherwise + fprintf('Unrecognized Exit Flag'); + end + fprintf('\n'); +end + + +%% ---------------------------------------------------- + +function alpha = adapt_shift(A,x,tau,concave) + +m = ndims(A); +Y = ttsv(A,x,-2); +e = eig(Y); + +if concave + if max(e) <= -tau/(m^2-m) + alpha = 0; + else + alpha = -(tau/m) - ((m-1)*max(e)); + end +else + if min(e) >= tau/(m^2-m) + 
alpha = 0; + else + alpha = (tau/m) - ((m-1)*min(e)); + end +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopmc.m b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopmc.m new file mode 100644 index 0000000..2352931 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/eig_sshopmc.m @@ -0,0 +1,168 @@ +function [lambda,x,flag,its,x0,trace] = eig_sshopmc(A,varargin) +%EIG_SSHOPMC Shifted power method for real/complex eigenpair of tensor. +% +% [LAMBDA,X]=EIG_SSHOPMC(A) finds an eigenvalue (LAMBDA) and eigenvector (X) +% for the real tensor A such that Ax^{m-1} = lambda*x. +% +% [LAMBDA,X]=EIG_SSHOPMC(A,parameter,value,...) can specify additional +% parameters as follows: +% +% 'Shift' : Shift in the eigenvalue calculation (Default: 0) +% 'MaxIts' : Maximum power method iterations (Default: 1000) +% 'Start' : Initial guess (Default: normal random vector) +% 'Tol' : Tolerance on norm of change in |lambda| (Default: 1e-16) +% 'Display' : Display every n iterations (Default: -1 for no display) +% +% [LAMBDA,X,FLAG]=EIG_SSHOPMC(...) also returns a flag indicating +% convergence. +% +% FLAG = 0 => Succesfully terminated with |lambda - lambda_old| < Tol +% FLAG = -1 => Norm(X) = 0 +% FLAG = -2 => Maximum iterations exceeded +% +% [LAMBDA,X,FLAG,IT]=EIG_SSHOPMC(...) also returns the number of +% iterations. +% +% [LAMBDA,X,FLAG,IT,X0]=EIG_SSHOPMC(...) also returns the intial guess. +% +% [LAMBDA,X,FLAG,IT,X0,TRACE]=EIG_SSHOPMC(...) also returns a trace of the +% |lambda| values at each iteration. +% +% REFERENCE: T. G. Kolda and J. R. Mayo, Shifted Power Method for +% Computing Tensor Eigenpairs, SIAM Journal on Matrix Analysis and +% Applications 32(4):1095-1124, October 2011 (doi:10.1137/100801482) +% +% See also EIG_SSHOPM, TENSOR, SYMMETRIZE, ISSYMMETRIC. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
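+%
+% Example (an illustrative sketch, not part of the original help):
+%   A = symmetrize(tenrand([3 3 3]));
+%   [lambda,x,flag] = eig_sshopmc(A,'Shift',1,'MaxIts',500);
+%   norm(ttsv(A,x,-1) - lambda*x)   % small residual when FLAG is 0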
+ + +%% Error checking on A +P = ndims(A); +N = size(A,1); + +if ~issymmetric(A) + error('Tensor must be symmetric.') +end + +%% Check inputs +p = inputParser; +p.addParamValue('Shift', 0); +p.addParamValue('MaxIts', 1000, @(x) x > 0); +p.addParamValue('Start', [], @(x) isequal(size(x),[N 1])); +p.addParamValue('Tol', 1.0e-16); +p.addParamValue('Display', -1, @isscalar); +p.parse(varargin{:}); + +%% Copy inputs +maxits = p.Results.MaxIts; +x0 = p.Results.Start; +shift = p.Results.Shift; +tol = p.Results.Tol; +display = p.Results.Display; + +%% Check starting vector +if isempty(x0) + x0 = 2*rand(N,1)-1 + 1i * (2*randn(N,1)-1); +end + +if norm(x0) < eps + error('Zero starting vector'); +end + +%% Execute power method +if (display >= 0) + fprintf('TENSOR SHIFTED POWER METHOD: '); + fprintf('Shift = %g\n', shift); + fprintf('---- --------- ----- --------- ----- -------- ----- --------\n'); + fprintf('Iter R(Lambda) Diff C(Lambda) Diff |Lambda| Diff |newx-x|\n'); + fprintf('---- --------- ----- --------- ----- -------- ----- --------\n'); +end + +flag = -2; +x = x0 / norm(x0); +lambda = x'*ttsv(A,x,-1); + +trace = zeros(maxits,1); +trace(1) = lambda; + +for its = 1:maxits + + newx = ttsv(A,x,-1) + shift * x; + newx = newx / (lambda + shift); + + nx = norm(newx); + if nx < eps, + flag = -1; + break; + end + newx = newx / nx; + + newlambda = newx'* ttsv(A,newx,-1); + + if norm(abs(newlambda) - abs(lambda)) < tol + flag = 0; + end + + + if (display > 0) && ((flag == 0) || (mod(its,display) == 0)) + fprintf('%4d ', its); + % Real Part + fprintf('%9.6f ', real(newlambda)); + d = real(newlambda-lambda); + if (d ~= 0) + if (d < 0), c = '-'; else c = '+'; end + fprintf('%ce%+03d ', c, round(log10(abs(d)))); + else + fprintf(' '); + end + % Imaginary Part + fprintf('%9.6f ', imag(newlambda)); + d = imag(newlambda-lambda); + if (d ~= 0) + if (d < 0), c = '-'; else c = '+'; end + fprintf('%ce%+03d ', c, round(log10(abs(d)))); + else + fprintf(' '); + end + % Absolute Value + fprintf('%8.6f ', abs(newlambda)); + d = abs(newlambda) - abs(lambda); + if (d ~= 0) + if (d < 0), c = '-'; else c = '+'; end + fprintf('%ce%+03d ', c, round(log10(abs(d)))); + else + fprintf(' '); + end + % Change in X + fprintf('%8.6f ', norm(newx-x)); + % Line end + fprintf('\n'); + end + + x = newx; + lambda = newlambda; + trace(its+1) = lambda; + + if flag == 0 + break + end +end + +%% Check results +if (display >=0) + switch(flag) + case 0 + fprintf('Successful Convergence'); + case -1 + fprintf('Converged to Zero Vector'); + case -2 + fprintf('Exceeded Maximum Iterations'); + otherwise + fprintf('Unrecognized Exit Flag'); + end + fprintf('\n'); +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/export_data.m b/ext/YetAnotherFEcode/external/tensor_toolbox/export_data.m new file mode 100644 index 0000000..bceba48 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/export_data.m @@ -0,0 +1,112 @@ +function export_data(A, fname) +%EXPORT_DATA Export tensor-related data to a file. +% +% EXPORT(A,FNAME) exports object A to the file named FNAME in plain ASCII +% text. Export currently supports exporting the following data types: +% +% - tensor +% - sptensor +% - matrix +% +% In the case of a tensor, the first three lines give details about the +% tensor. The format for a 4 x 3 x 2 tensor is as follows... +% +% tensor +% 3 +% 4 3 2 +% +% +% +% +% +% +% +% +% ... +% +% +% +% +% In the case of an sptensor, the first four lines give details about the +% sptensor. 
The format for a 4 x 3 x 2 sptensor with 10 nonzeros is as +% follows... +% +% sptensor +% 3 +% 4 3 2 +% 10 +% i1 j1 k1 +% i2 j2 k2 +% ... +% i10 j10 k10 +% +% A matrix is formatted the same as a 2-way tensor except that the first +% line says "matrix" rather than "tensor". +% +% See also TENSOR, SPTENSOR, IMPORT_DATA +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +%% Open file +fid = fopen(fname,'w'); +if (fid == -1) + error('Cannot open file %s',fname); +end + +%% Export the object + +if isa(A,'tensor') + + fprintf(fid, 'tensor\n'); + export_size(fid, size(A)); + export_array(fid, A.data); + +elseif isa(A,'sptensor') + + fprintf(fid, 'sptensor\n'); + export_sparse_size(fid, A); + export_sparse_array(fid, A); + +elseif isnumeric(A) && ndims(A) == 2 + + fprintf(fid, 'matrix\n'); + export_size(fid, size(A)); + export_array(fid, A); + +else + + error('Invalid data type for export'); + +end + + +%% Close file +fclose(fid); + +function export_size(fid, sz) +% Export the size of something to a file +fprintf(fid, '%d \n', length(sz)); % # of dimensions on one line +fprintf(fid, '%d ', sz); % # size of each dimensions on the next line +fprintf(fid, '\n'); + +function export_array(fid, data) +% Export dense data that supports numel and linear indexing +for i = 1:numel(data) + fprintf(fid, '%.16e\n', data(i)); +end + +function export_sparse_size(fid, A) +% Export the size of something to a file +fprintf(fid, '%d \n', length(size(A))); % # of dimensions on one line +fprintf(fid, '%d ', size(A)); % # size of each dimensions on the next line +fprintf(fid, '\n'); +fprintf(fid, '%d \n', nnz(A)); % # number of nonzeros on the next line + +function export_sparse_array(fid, A) +% Export sparse array data in coordinate format +for i = 1:nnz(A) + for s = 1:length(size(A)) + fprintf(fid,'%d ', A.subs(i,s)); + end + fprintf(fid,'%.16e\n',A.vals(i)); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/gcp_opt.m b/ext/YetAnotherFEcode/external/tensor_toolbox/gcp_opt.m new file mode 100644 index 0000000..68f7ca7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/gcp_opt.m @@ -0,0 +1,672 @@ +function [M, M0, info] = gcp_opt(X, nc, varargin) +%GCP_OPT Fits Generalized CP decomposition with user-specified function. +% +% M = GCP_OPT(X,R,'type',TYPE) computes an estimate of the best rank-R +% generalized CP (GCP) decomposition of the tensor X for the specified +% generalized loss function specified by TYPE. The input X can be a +% tensor or sptensor. The result M is a ktensor. A full set of types can +% be found in GCP_FG_SETUP, but popular options include +% +% 'binary' - Bernoulli distribution for binary data +% 'count' - Poisson distribution for count data (see also CP_APR) +% 'normal' - Gaussian distribution (see also CP_ALS and CP_OPT) +% 'huber (0.25)' - Similar to Gaussian but robust to outliers +% 'rayleigh' - Rayleigh distribution for nonnegative data +% 'gamma' - Gamma distribution for nonnegative data +% +% M = GCP_OPT(X,R,'func',FH,'grad',GH,'lower',LB) passes a user-specified +% choice for the elementwise loss function, corresponding gradient, and +% lower bound on the factor matrix entries. This is an alternative to +% specifying the 'type' as shown above. +% +% M = GCP_OPT(X,R,...,'opt',OPT) specifies the optimization method: +% 'lbfgsb' - Bound-constrained limited-memory BFGS +% 'sgd' - Stochastic gradient descent (SGD) +% 'adam' - Momentum-based SGD method +% If X is dense, any of the three options can be used, and 'lbfgsb' is +% the default. 
The X is sparse, only 'sgd' and 'adam' (default) are +% options. Each method has specific parameters, see the document for +% details. +% +% M = GCP_OPT(X,R,...,'mask',W) specifies a mask W that is 0 for missing +% entries and 1 everywhere else. This can only be used in the case that X +% is dense and 'opt' is the default ('lbfgsb'). The missing entries are +% ignored in the fitting of the model. +% +% M = GCP_OPT(X,R,'param',value,...) specifies additional optional +% parameters and values as follows, with the defaults in curly braces: +% +% 'maxiters' - Maximum number of outer iterations {1000} +% 'init' - Initialization for factor matrices {'rand'} +% 'printitn' - Print every n iterations; 0 for no printing {1} +% 'state' - Random state, to re-create the same outcome {[]} +% +% [M,M0,out] = GCP_OPT(...) also returns the initial guess (M0) and a +% structure with additional information. To reproduce the +% run exactly, use M_alt = gcp_opt(X,R,out.params.Results). +% +% Documentation page for GCP_OPT +% +% REFERENCES: +% * D. Hong, T. G. Kolda, J. A. Duersch. Generalized Canonical Polyadic +% Tensor Decomposition. SIAM Review, 2019. +% * T. G. Kolda, D. Hong, J. Duersch. Stochastic Gradients for +% Large-Scale Tensor Decomposition, 2019. +% +% % Documentation page for GCP-OPT +% +% See also CP_OPT, CP_APR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + +%% Timers +setupStartA = tic; + +%% Iniital setup +nd = ndims(X); +sz = size(X); +tsz = prod(sz); + +%% Random set-up +defaultStream = RandStream.getGlobalStream; + +%% Set algorithm parameters from input or by using defaults +params = inputParser; + +params.addParameter('type', [], @ischar); +params.addParameter('func', [], @(x) isa(x,'function_handle')); +params.addParameter('grad', [], @(x) isa(x,'function_handle')); +params.addParameter('lower', [], @isscalar); + +params.addParameter('opt', [], @ischar); +params.addParameter('mask', [], @(x) isa(x,'tensor')); + +params.addParameter('maxiters', 1000, @isscalar); +params.addParameter('init','rand'); +params.addParameter('printitn',1, @isscalar); +params.addParameter('state', defaultStream.State); + +params.addParameter('factr', 1e7, @isscalar); +params.addParameter('pgtol', 1e-4 * tsz, @isscalar); + +params.addParameter('fsamp',[]); +params.addParameter('gsamp',[]); +params.addParameter('oversample',1.1, @isscalar); +params.addParameter('sampler', []); +params.addParameter('fsampler', []); + +params.addParameter('rate', 1e-3, @isscalar); +params.addParameter('decay', 0.1, @isscalar); +params.addParameter('maxfails', 1, @isscalar); +params.addParameter('epciters', 1000, @isscalar); +params.addParameter('festtol', -Inf, @isscalar); + +params.addParameter('beta1', 0.9, @isscalar); +params.addParameter('beta2', 0.999, @isscalar); +params.addParameter('epsilon', 1e-8, @isscalar); + + +params.parse(varargin{:}); + +% Save info +info.params = params.Results; + +%% Initialize random number generator with specified state +defaultStream.State = params.Results.state; + +%% Extract remaining parameters +type = params.Results.type; +fh = params.Results.func; +gh = params.Results.grad; +lb = params.Results.lower; +opt = params.Results.opt; +W = params.Results.mask; +maxiters = params.Results.maxiters; +init = params.Results.init; +printitn = params.Results.printitn; +factr = params.Results.factr; +pgtol = params.Results.pgtol; +fsamp = params.Results.fsamp; +gsamp = 
params.Results.gsamp; +oversample = params.Results.oversample; +gsampler_type = params.Results.sampler; +fsampler_type = params.Results.fsampler; +rate = params.Results.rate; +decay = params.Results.decay; +maxfails = params.Results.maxfails; +epciters = params.Results.epciters; +festtol = params.Results.festtol; +beta1 = params.Results.beta1; +beta2 = params.Results.beta2; +epsilon = params.Results.epsilon; + +%% More setup +vecsz = sum(sz)*nc; +isdense = isa(X,'tensor'); +issparse = isa(X,'sptensor'); + +if isdense + if isempty(W) + nmissing = 0; + nnonzeros = nnz(X); + nzeros = tsz - nnonzeros; + else + X = X .* W; + nmissing = tsz - nnz(W); + nnonzeros = nnz(X); + nzeros = nnz(W) - nnz(X); + end +elseif issparse + if ~isempty(W) + error('Cannot specify missing entries for sparse tensors'); + end + nmissing = 0; + nnonzeros = nnz(X); + nzeros = tsz - nnonzeros; +else + error('Input tensor must be tensor or sptensor'); +end + +% Save info +info.tsz = tsz; +info.nmissing = nmissing; +info.nnonzeros = nnonzeros; +info.nzeros = nzeros; + +%% Set up function/gradient and bounds + +if ~isempty(fh) && ~isempty(gh) + if isempty(fh) || isempty(gh) + error('Must specify ''func'' and ''grad'' if either one is specified'); + end + if isempty(lb) + lb = -infty; + end + type = 'user-specified'; +else + [fh,gh,lb_] = tt_gcp_fg_setup(type,X); + if isempty(lb) + lb = lb_; + end +end + +% Save info +info.type = type; +info.fh = fh; +info.gh = gh; +info.lb = lb; + +%% Create initial guess, denoted M0 +if iscell(init) + Uinit = init; + M0 = ktensor(Uinit); + inittype = 'cell'; +elseif isa(init, 'ktensor') + M0 = init; + inittype = 'ktensor'; +elseif strcmp(init,'rand') + Uinit = cell(nd,1); + for k = 1:nd + Uinit{k} = rand(sz(k),nc); + end + M0 = ktensor(Uinit); + M0 = M0 * (norm(X)/norm(M0)); % normalize + inittype = 'rand'; +end + +% We assume throughout that the lambda weights are all ones. Make sure that +% the initial guess satisfies this property. 
+M0 = normalize(M0,0); + +% Save info +info.inittype = inittype; + +%% Setup the optimization + +if isempty(opt) + if isdense + opt = 'lbfgsb'; + elseif issparse + opt = 'adam'; + end +end + + +if ~ismember(opt,{'lbfgsb','sgd','adam','adagrad'}) + error('Invalid choice for ''opt'''); +end + +use_lbfgsb = strcmpi(opt,'lbfgsb'); +use_adam = strcmpi(opt,'adam'); +use_sgd = strcmpi(opt,'sgd'); +use_adagrad = strcmpi(opt,'adagrad'); + +use_stoc = use_adam || use_sgd || use_adagrad; + +if issparse && ~use_stoc + error('Must set ''opt'' to ''sgd'' or ''adam'' or ''adagrad'' for sparse tensor'); +end + +% Save info +info.opt = opt; + +%% Set up for Stochastic Optimization +if use_stoc + + if ~isempty(W) + error('Have not yet implemented stochastic optimization for the case of missing data'); + end + + crng = []; % Default value + xnzidx = []; % Only create the sorted indices if needed + + + % Set up fsampler + if isempty(fsampler_type) + if issparse + fsampler_type = 'stratified'; + else + fsampler_type = 'uniform'; + end + end + + if isa(fsampler_type,'function_handle') + + fsampler = fsampler_type; + fsampler_str = 'user-specified'; + + elseif strcmp(fsampler_type, 'stratified') + + if isempty(fsamp) + ftmp = max(ceil(nnonzeros/100), 10^5); + fsamp(1) = min(ftmp, nnonzeros); + fsamp(2) = min([ftmp, nnonzeros, nzeros]); + elseif length(fsamp) == 1 + tmp = fsamp; + fsamp(1) = tmp; + fsamp(2) = tmp; + end + + % Create and sort linear indices of X nonzeros for the sampler + if isempty(xnzidx) + xnzidx = tt_sub2ind64(sz,X.subs); + xnzidx = sort(xnzidx); + end + + fsampler = @() tt_sample_stratified(X, xnzidx, fsamp(1), fsamp(2), oversample); + fsampler_str = sprintf('stratified with %d nonzero and %d zero samples', fsamp); + + + elseif strcmp(fsampler_type, 'uniform') + + if isempty(fsamp) + fsamp = min( max(ceil(tsz/10), 10^6), tsz ); + end + + fsampler = @() tt_sample_uniform(X,fsamp); + fsampler_str = sprintf('uniform with %d samples', fsamp); + + else + + error('Invalid choice for ''fsampler'''); + + end + + % Set up gsampler + if isempty(gsampler_type) + if issparse + gsampler_type = 'stratified'; + else + gsampler_type = 'uniform'; + end + end + + if strcmp(gsampler_type, 'semi-stratified') || strcmp(gsampler_type, 'stratified') + + if isempty(gsamp) + gtmp = max(1000, ceil(3 * nnonzeros / maxiters)); + gsamp(1) = min(gtmp, nnonzeros); + gsamp(2) = min([gtmp, nnonzeros, nzeros]); + end + + if length(gsamp) == 1 + tmp = gsamp; + gsamp(1) = tmp; + gsamp(2) = tmp; + end + + if strcmp(gsampler_type, 'semi-stratified') + + gsampler = @() tt_sample_semistrat(X, gsamp(1), gsamp(2)); + gsampler_str = sprintf('semi-stratified with %d nonzero and %d zero samples', gsamp); + crng = 1:gsamp(1); + + else + + % Create and sort linear indices of X nonzeros for the sampler + if isempty(xnzidx) + xnzidx = tt_sub2ind64(sz,X.subs); + xnzidx = sort(xnzidx); + end + + gsampler = @() tt_sample_stratified(X, xnzidx, gsamp(1), gsamp(2), oversample); + gsampler_str = sprintf('stratified with %d nonzero and %d zero samples', gsamp); + end + + elseif strcmp(gsampler_type, 'uniform') + + if isempty(gsamp) + gsamp = min( max( 1000, ceil(10 * tsz / maxiters) ), tsz); + end + + if issparse + + exp_nonzeros = gsamp * nnonzeros / tsz; + exp_zeros = gsamp * nzeros / tsz; + + % Create and sort linear indices of X nonzeros for the sampler + if isempty(xnzidx) + xnzidx = tt_sub2ind64(sz,X.subs); + xnzidx = sort(xnzidx); + end + + gsampler = @() tt_sample_stratified(X, xnzidx, random('Poisson', exp_nonzeros), random('Poisson', 
exp_zeros), oversample); + gsampler_str = sprintf('pseudo-uniform with %d samples', gsamp); + + + else + + gsampler = @() tt_sample_uniform(X,gsamp); + gsampler_str = sprintf('uniform with %d samples', gsamp); + + end + else + + error('Invalid sampler: %s', gsampler_type); + + end + + + % Save info + info.fsampler = fsampler_str; + info.gsampler = gsampler_str; + info.fsamp = fsamp; + info.gsamp = gsamp; + +end + +setupTimeA = toc(setupStartA); + +%% Welcome message +if printitn > 0 + fprintf('\n'); + fprintf('GCP-OPT-%s (Generalized CP Tensor Decomposition)\n',upper(opt)); + fprintf('\n'); + fprintf('Tensor size: %s (%d total entries)\n', tt_size2str(size(X)), tsz); + if nmissing > 0 + fprintf('Missing entries: %d (%.2g%%)\n', nmissing, 100*nmissing / tsz); + end + if issparse + fprintf('Sparse tensor: %d (%.2g%%) Nonzeros and %d (%.2f%%) Zeros\n', nnonzeros, 100*nnonzeros/tsz, nzeros, 100*nzeros/tsz); + end + fprintf('Generalized function Type: %s\n', type); + fprintf('Objective function: %s\n', func2str(fh)); + fprintf('Gradient function: %s\n', func2str(gh)); + fprintf('Lower bound of factor matrices: %g\n', lb); + fprintf('Optimization method: %s\n', opt); + if use_stoc + fprintf('Max iterations (epochs): %d\n',maxiters); + fprintf('Iterations per epoch: %d\n', epciters); + fprintf('Learning rate / decay / maxfails: %g %g %g\n', rate, decay, maxfails); + fprintf('Function Sampler: %s\n', fsampler_str); + fprintf('Gradient Sampler: %s\n', gsampler_str); + else + fprintf('Max iterations: %d\n', maxiters); + fprintf('Projected gradient tolerance: %.4g\n', pgtol); + end + fprintf('\n'); +end + +%% L-BFGS-B Optimization +if use_lbfgsb + + setupStartB = tic; + + fcn = @(x) tt_gcp_fg(update(M0,1:nd,x), X, fh, gh, W, true, true, true); + + lbvec = lb * ones(vecsz,1); + ubvec = inf(vecsz,1); + + lbfgsbopts = struct; + lbfgsbopts.x0 = tovec(M0,false); + lbfgsbopts.printEvery = printitn; + lbfgsbopts.maxIts = maxiters; + lbfgsbopts.maxTotalIts = maxiters*10; + lbfgsbopts.factr = factr; + lbfgsbopts.pgtol = pgtol; + setupTimeB = toc(setupStartB); + + if (printitn > 0) + fprintf('Begin Main loop\n'); + end + + mainStart = tic; + lbfgsbopts.errFcn = @(x) toc(mainStart); + [x,finalf,lbfgsout] = lbfgsb(fcn, lbvec, ubvec, lbfgsbopts); + mainTime = toc(mainStart); + + M = update(M0,1:nd,x); + + info.fcn = fcn; + info.lbfgsbopts = lbfgsbopts; + info.lbfgsout = lbfgsout; + info.finalf = finalf; + + if printitn > 0 + fprintf('End Main Loop\n'); + fprintf('\n'); + fprintf('Final objective: %10.4e\n', finalf); + fprintf('Setup time: %.2f seconds\n', setupTimeA+setupTimeB); + fprintf('Main loop time: %.2f seconds\n', mainTime); + fprintf('Outer iterations: %d\n', lbfgsout.iterations); + fprintf('Total iterations: %d\n', lbfgsout.totalIterations); + fprintf('L-BFGS-B Exit message: %s\n', lbfgsout.lbfgs_message1); + end + +end + +%% Stochastic Optimization +if use_stoc + + setupStartB = tic; + + % Initialize moments + if use_adam + m = cell(nd,1); + v = cell(nd,1); + for k = 1:nd + m{k} = zeros(sz(k),nc); + v{k} = zeros(sz(k),nc); + end + else + m = []; + v = []; + end + + % Only used by Adagrad + gnormsum = 0; + + % Extract samples for estimating function value - these never change + [fsubs,fvals,fwgts] = fsampler(); + + % Compute initial estimated function value + fest = tt_gcp_fg_est(M0,fh,gh,fsubs,fvals,fwgts,true,false,false,false); + + % Set up loop variables + M = M0; % Copy initial guess + nfails = 0; % Counter # times fails to decrease + titers = 0; % Total iterations (used by ADAM) + + Msave = M; % 
Best model so far + msave = m; % Corresponding ADAM parameters + vsave = v; % Corresponding ADAM parameters + fest_prev = fest; % Corresponding function value + + % Tracing the progress in the function value by epoch + fest_trace = zeros(maxiters+1,1); + step_trace = zeros(maxiters+1,1); + time_trace = zeros(maxiters+1,1); + fest_trace(1) = fest; + + % Print status + if (printitn > 0) + fprintf('Begin Main loop\n'); + fprintf('Initial f-est: %e\n', fest); + end + + setupTimeB = toc(setupStartB); + mainStart = tic; + time_trace(1) = toc(setupStartA); + + + % Main loop outer iteration + for nepoch = 1:maxiters + + % Main loop inner iteration + step = decay^nfails * rate; % Ignored by Adagrad + for iter = 1:epciters + + % Tracking total iterations + titers = titers + 1; + + % Select subset for stochastic gradient + [gsubs,gvals,gwts] = gsampler(); + + % Compute gradients for each mode + Gest = tt_gcp_fg_est(M,fh,gh,gsubs,gvals,gwts,false,true,false,false,crng); + + % Check for inf + isinfgrad = cellfun(@(x) any(isinf(x(:))), Gest, 'UniformOutput', true); + if any(isinfgrad) + error('Infinite gradient encountered! (epoch = %g, iter = %g)', nepoch, iter); + end + + % Take a step + if use_adam + m = cellfun(@(mk,gk) beta1*mk + (1-beta1)*gk,m,Gest,'UniformOutput',false); + v = cellfun(@(vk,gk) beta2*vk + (1-beta2)*gk.^2,v,Gest,'UniformOutput',false); + mhat = cellfun(@(mk) mk/(1-beta1^titers),m,'UniformOutput',false); + vhat = cellfun(@(vk) vk/(1-beta2^titers),v,'UniformOutput',false); + M.u = cellfun(@(uk,mhk,vhk) max(lb,uk-step*mhk./(sqrt(vhk)+epsilon)),M.u,mhat,vhat,'UniformOutput',false); + elseif use_adagrad + gnormsum = gnormsum + sum(cellfun(@(gk) sum(gk(:).^2), Gest, 'UniformOutput',true)); + step = 1/sqrt(gnormsum); + M.u = cellfun(@(uk,gk) max(lb,uk-step*gk),M.u,Gest,'UniformOutput',false); + else + M.u = cellfun(@(uk,gk) max(lb,uk-step*gk),M.u,Gest,'UniformOutput',false); + end + end + + % Estimate objective function value + fest = tt_gcp_fg_est(M,fh,gh,fsubs,fvals,fwgts,true,false,false,false); + + % Save trace + fest_trace(nepoch+1) = fest; + step_trace(nepoch+1) = step; + + % Check convergence condition + failed_epoch = fest > fest_prev; + + if failed_epoch + nfails = nfails + 1; + end + + festtol_test = fest < festtol; + + % Reporting + if (printitn > 0) && (mod(nepoch,printitn) == 0 || failed_epoch || festtol_test) + fprintf('Epoch %2d: f-est = %e, step = %g', nepoch, fest, step); + if failed_epoch + fprintf(', nfails = %d (resetting to solution from last epoch)', nfails); + end + fprintf('\n'); + + end + + if failed_epoch + + % Back up to best solution so far! 
+ M = Msave; + m = msave; + v = vsave; + fest = fest_prev; + titers = titers - epciters; + + % Reset Adagrad + gnormsum = 0; + + else + + % Save current solution + Msave = M; + msave = m; + vsave = v; + fest_prev = fest; + + end + + % Save time + time_trace(nepoch+1) = toc(setupStartA); + + if (nfails > maxfails) || festtol_test + break; + end + + + end + + mainTime = toc(mainStart); + + info.fest_trace = fest_trace(1:nepoch+1); + info.step_trace = step_trace(1:nepoch+1); + info.time_trace = time_trace(1:nepoch+1); + info.nepoch = nepoch; + + if printitn > 0 + fprintf('End Main Loop\n'); + fprintf('\n'); + fprintf('Final f-est: %10.4e\n', fest); + fprintf('Setup time: %.2f seconds\n', setupTimeA+setupTimeB); + fprintf('Main loop time: %.2f seconds\n', mainTime); + fprintf('Total iterations: %d\n', nepoch * epciters); + end + + +end + +%% Wrap up + +% Save timings +info.mainTime = mainTime; +info.setupTimeA = setupTimeA; +info.setupTimeB = setupTimeB; +info.setupTime = setupTimeA+setupTimeB; + +% Arrange the final tensor so that the columns are normalized. +M = fixsigns(arrange(M)); + + + + + + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/hosvd.m b/ext/YetAnotherFEcode/external/tensor_toolbox/hosvd.m new file mode 100644 index 0000000..25fa448 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/hosvd.m @@ -0,0 +1,132 @@ +function T = hosvd(X,tol,varargin) +%HOSVD Compute sequentially-truncated higher-order SVD (Tucker). +% +% T = HOSVD(X,TOL) computes a Tucker decomposition with relative error +% specified by TOL, i.e., it computes a ttensor T such that +% +% ||X-T||/||X|| <= TOL. +% +% The method automatically determines the appropriate ranks of the +% Tucker decomposition. By default, the method computes the +% sequentially-truncated HOSVD. +% +% T = HOSVD(X,TOL,'param',value,...) specifies optional parameters and +% values. Valid parameters and their default values are: +% 'verbosity' - How much to print between 0 and 10. Default: 1. +% 'dimorder' - Order to loop through dimensions Default: 1:ndims(X). +% 'sequential' - Use sequentially-truncated version: Default: true. +% 'ranks' - Specify ranks (rather than computing). Default: []. +% +% Documentation page for HOSVD +% +% See also TUCKER_ALS, TTENSOR +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
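+%
+% Example (an illustrative sketch, not part of the original help):
+%   X = tenrand([5 4 3]);
+%   T = hosvd(X,0.1);           % ST-HOSVD with a 10% relative error target
+%   norm(X - full(T))/norm(X)   % should be at most 0.1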
+ +%% Read paramters +d = ndims(X); + +params = inputParser; +params.addParameter('verbosity',1); +params.addParameter('sequential',true); +params.addParameter('dimorder',1:d,@(x) isequal(sort(x),1:d)); +params.addParameter('ranks',[]); +params.parse(varargin{:}); + +verbosity = params.Results.verbosity; +sequential = params.Results.sequential; +dimorder = params.Results.dimorder; +ranks = params.Results.ranks; + +%% Setup +if verbosity > 0 + fprintf('Computing HOSVD...\n'); +end +normxsqr = collapse(X.^2); +eigsumthresh = tol.^2 * normxsqr / d; + +if verbosity > 2 + fprintf('||X||^2 = %g\n', normxsqr); + fprintf('tol = %g\n', tol); + fprintf('eigenvalue sum threshold = tol^2 ||X||^2 / d = %g\n', eigsumthresh); +end + +if ~isempty(ranks) + if ~isvector(ranks) || length(ranks) ~= d + error('Specified ranks must be a vector of length ndims(X)'); + end + r = ranks; +else + r = zeros(d,1); +end + +%% Main loop + +U = cell(d,1); % Allocate space for factor matrices +Y = X; % Copy input tensor, shrinks at each step for sequentially-truncated + +for k = dimorder + + % Compute Gram matrix + Yk = double(tenmat(Y,k)); + Z = Yk*Yk'; + + % Compute eigenvalue decomposition + [V,D] = eig(Z); + [eigvec,pi] = sort(diag(D),'descend'); + + % If rank is not prespecified, compute it. + if r(k) == 0 + + eigsum = cumsum(eigvec,'reverse'); + r(k) = find(eigsum > eigsumthresh, 1, 'last'); + + if verbosity > 5 + fprintf('Reverse cummulative sum of evals of Gram matrix:\n'); + for i = 1:length(eigsum) + fprintf('%d: %6.4f ',i,eigsum(i)); + if i == r(k) + fprintf('<-- Cutoff'); + end + fprintf('\n'); + end + end + + end + + % Extract factor matrix by picking out leading eigenvectors of V + U{k} = V(:,pi(1:r(k))); + + % Shrink! + if sequential + Y = ttm(Y,U{k}',k); + end +end + +% Extract final core +if sequential + G = Y; +else + G = ttm(Y,U,'t'); +end + +%% Final result +T = ttensor(G,U); + +if verbosity > 0 + diffnormsqr = collapse((X-full(T)).^2); + relnorm = sqrt(diffnormsqr/normxsqr); + fprintf('Size of core: %s\n', tt_size2str(size(G))); + if relnorm <= tol + fprintf('||X-T||/||X|| = %g <=', relnorm); + fprintf('%f (tol)\n',tol); + else + fprintf('Tolerance not satisfied!! '); + fprintf('||X-T||/||X|| = %g >=', relnorm); + fprintf('%f (tol)\n',tol); + warning('Specified tolerance was not achieved'); + end + fprintf('\n'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/import_data.m b/ext/YetAnotherFEcode/external/tensor_toolbox/import_data.m new file mode 100644 index 0000000..1ae4b7d --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/import_data.m @@ -0,0 +1,108 @@ +function A = import_data(fname) +%IMPORT_DATA Import tensor-related data to a file. +% +% A = IMPORT_DATA(FNAME) imports an object A from the file named FNAME. +% The supported data types and formatting of the file are explained in +% EXPORT_DATA. +% +% See also TENSOR, EXPORT_DATA +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
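+%
+% Example round trip with EXPORT_DATA (an illustrative sketch, not part of
+% the original help; the file name is arbitrary):
+%   X = tenrand([4 3 2]);
+%   export_data(X,'X.tensor.txt');
+%   Y = import_data('X.tensor.txt');
+%   norm(X - Y)   % should be zero up to the 16-digit text precision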
+ + +%% Open file +fid = fopen(fname,'r'); +if (fid == -1) + error('Cannot open file %s',fname); +end + +%% Get the type of object +type = import_type(fid); + +%% Import the object + +if strcmpi(type,'tensor') + + sz = import_size(fid); + data = import_array(fid, prod(sz)); + A = tensor(data, sz); + +elseif strcmpi(type,'sptensor') + + sz = import_size(fid); + nz = import_nnz(fid); + [subs, vals] = import_sparse_array(fid, length(sz), nz); + A = sptensor(subs, vals, sz); + +elseif strcmpi(type,'matrix') || strcmpi(type,'matrix') + + sz = import_size(fid); + data = import_array(fid, prod(sz)); + A = reshape(data, sz); + +elseif strcmpi(type,'ktensor') + + sz = import_size(fid); + r = import_rank(fid); + lambda = import_array(fid, r); + U = {}; + for n = 1:length(sz) + line = fgets(fid); + fac_type = import_type(fid); + fac_sz = import_size(fid); + fac_data = import_array(fid, prod(fac_sz)); + % row wise reshape + fac = reshape(fac_data, fliplr(fac_sz))'; + U{n} = fac; + end + A = ktensor(lambda,U); + +else + + error('Invalid data type for export'); + +end + + +%% Close file +fclose(fid); + +function type = import_type(fid) +% Import IO data type +line = fgets(fid); +typelist = regexp(line, '\s+', 'split'); +type = typelist(1); + +function sz = import_size(fid) +% Import the size of something from a file +line = fgets(fid); +n = sscanf(line, '%d'); +line = fgets(fid); +sz = sscanf(line, '%d'); +sz = sz'; +if (size(sz,2) ~= n) + error('Imported dimensions are not of expected size'); +end + +function nz = import_nnz(fid) +% Import the size of something from a file +line = fgets(fid); +nz = sscanf(line, '%d'); + +function r = import_rank(fid) +% Import the rank of something from a file +line = fgets(fid); +r = sscanf(line, '%d'); + +function data = import_array(fid, n) +% Import dense data that supports numel and linear indexing +data = fscanf(fid, '%e', n); + +function [subs, vals] = import_sparse_array(fid, n, nz) +% Import sparse data subs and vals from coordinate format data +data = textscan(fid,[repmat('%f',1,n) '%n']); +subs = cell2mat(data(1:n)); +vals = data{n+1}; +if (size(subs,1) ~= nz) + error('Imported nonzeros are not of expected size'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/info.xml b/ext/YetAnotherFEcode/external/tensor_toolbox/info.xml new file mode 100644 index 0000000..98b2ad6 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/info.xml @@ -0,0 +1,25 @@ + + + +R2018a +Tensor +toolbox +$toolbox/matlab/icons/unknownicon.gif +doc/html + + + + + doc tensor_toolbox + $toolbox/matlab/icons/webicon.gif + + + + web http://www.sandia.gov/~tgkolda/TensorToolbox + $toolbox/matlab/icons/webicon.gif + + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/khatrirao.m b/ext/YetAnotherFEcode/external/tensor_toolbox/khatrirao.m new file mode 100644 index 0000000..3ead962 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/khatrirao.m @@ -0,0 +1,74 @@ +function P = khatrirao(varargin) +%KHATRIRAO Khatri-Rao product of matrices. +% +% KHATRIRAO(A,B) computes the Khatri-Rao product of matrices A and +% B that have the same number of columns. The result is the +% column-wise Kronecker product +% [KRON(A(:,1),B(:,1)) ... KRON(A(:,n),B(:,n))] +% +% KHATRIRAO(A1,A2,...) computes the Khatri-Rao product of +% multiple matrices that have the same number of columns. +% +% KHATRIRAO(C) computes the Khatri-Rao product of +% the matrices in cell array C. +% +% KHATRIRAO(...,'r') computes the Khatri-Rao product in reverse +% order. 
+% +% NOTE: Updated to use BSXFUN per work of Phan Anh Huy. See Anh Huy Phan, +% Petr Tichavský, Andrzej Cichocki, On Fast Computation of Gradients for +% CANDECOMP/PARAFAC Algorithms, arXiv:1204.1586, 2012. +% +% Examples +% A = rand(5,2); B = rand(3,2); C = rand(2,2); +% khatrirao(A,B) %<-- Khatri-Rao of A and B +% khatrirao(B,A,'r') %<-- same thing as above +% khatrirao({C,B,A}) %<-- passing a cell array +% khatrirao({A,B,C},'r') %<-- same as above +% +% See also TENSOR, KTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Error checking on input and set matrix order +% Note that this next if/else check forces A to be a cell array. +if ischar(varargin{end}) && varargin{end} == 'r' + if nargin == 2 && iscell(varargin{1}) + % Input is a single cell array + A = varargin{1}; + else + % Input is a sequence of matrices + A = {varargin{1:end-1}}; + end + matorder = length(A):-1:1; +else + if nargin == 1 && iscell(varargin{1}) + % Input is a single cell array + A = varargin{1}; + else + % Input is a sequence of matrices + A = varargin; + end + matorder = 1:length(A); +end + +%% Error check on matrices and compute number of rows in result +ndimsA = cellfun(@ndims, A); +if(~all(ndimsA == 2)) + error('Each argument must be a matrix'); +end + +ncols = cellfun(@(x) size(x, 2), A); +if(~all(ncols == ncols(1))) + error('All matrices must have the same number of columns.'); +end + + +%% Computation +N = ncols(1); +P = A{matorder(1)}; +for i = matorder(2:end) + P = bsxfun(@times, reshape(A{i},[],1,N),reshape(P,1,[],N)); +end +P = reshape(P,[],N); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/capabilitychart.m b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/capabilitychart.m new file mode 100644 index 0000000..e427a75 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/capabilitychart.m @@ -0,0 +1,65 @@ +%% Script to print capability chart for all the tensor toolbox classes. +% Note that we exclude the tensor-as-matrix style classes. + +%% Manual class names + +nclasses = 7; +classnames = {'tensor'; 'sptensor'; 'symtensor'; 'ttensor'; 'ktensor'; 'symktensor'; 'sumtensor'}; + + +%% Get directory contents for each class (omitting constructor) +classmembers = cell(nclasses,1); +functionnames = {}; +for i = 1:nclasses + C = dir(['../@' classnames{i}]); + nc = length(C); + tf_tmp = false(nc,1); + for j = 1:nc + if (length(C(j).name) > 3) && strcmp(C(j).name(end-1:end),'.m') ... 
+ && ~strcmp(C(j).name(1:end-2),classnames{i}) + tf_tmp(j) = true; + end + end + C = C(tf_tmp); + C = arrayfun(@(x) x.name(1:end-2), C, 'UniformOutput', false); + classmembers{i} = C; + functionnames = union(functionnames,C); +end + +% get membership array +nfunctions = length(functionnames); +tf = false(nfunctions, nclasses); +for i = 1:nclasses + tf(:,i) = ismember(functionnames, classmembers{i}); +end + +%% Print out results +cnl = cellfun(@length,classnames); + +for i = 1:nfunctions + if mod(i,20)==1 + fprintf('function '); + for i = 1:nclasses + fprintf('%s ', classnames{i}); + end + fprintf('\n'); + end + fprintf('%-12s', functionnames{i}); + for j = 1:nclasses + for k = 1:4 + fprintf(' '); + end + if tf(i,j) + fprintf('X'); + else + fprintf('-'); + end + for k = 5:cnl(j) + fprintf(' '); + end + end + fprintf('\n'); +end + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_dircontents.m b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_dircontents.m new file mode 100644 index 0000000..c7d828f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_dircontents.m @@ -0,0 +1 @@ +function C = create_dircontents(dirname,varargin) %CREATE_DIRCONTENTS Scan a directory and create a contents list. % % C = CREATE_DIRCONTENTS(DIR) creates a list of function names and % descriptions that can be used to create a Contents.m file or be % inserted into the constructor for a class. % % C = CREATE_DIRCONTENTS(DIR,'Copyright',false) skips the copyright % check. % % C = CREATE_DIRCONTENTS(DIR,'Debug',true) prints out extra information. % % See also UPDATE_CLASSLIST, CREATE_TOPCONTENTS. %% Parse inputs params = inputParser; params.addParameter('Debug',false); params.addParameter('Copyright',true); params.parse(varargin{:}); debug = params.Results.Debug; copyright = params.Results.Copyright; %% Get the directory contents D = dir(dirname); if (numel(D) == 0) error('ERROR: Cannot find directory!'); end %% Check which files to keep if (debug) fprintf('\nDetermining the m-files.\n'); end cnt = 0; for i = 1:numel(D) fname = D(i).name; if D(i).isdir, continue, end if isempty(regexp(fname,'.*\.m$','once')), continue, end if regexp(fname,'^tmp_','once'), continue, end if strcmp(fname,'Contents.m'), continue, end if regexp(fname,'^tt_','once') if ~(strcmp(fname,'tt_ind2sub.m') || strcmp(fname,'tt_sub2ind.m')) continue end end if debug fprintf('Valid filename: %s\n', fname); end cnt = cnt + 1; F{cnt} = fname; end %% Extract the descriptions if (debug) fprintf('\nExtracing the M-file descriptions.\n'); end for i = 1:cnt % Open file fname = fullfile(dirname,F{i}); fid = fopen(fname); if (fid == -1) error('Unable to open file %s',fname); end % Find function declaration while 1 tline = fgetl(fid); if ~ischar(tline) error('No function declaration in %s', fname); end if regexp(tline,'^function.*') break; end end % Find title line while 1 tline = fgetl(fid); if tline == -1 break end if regexp(tline,'^%.*') fname = regexp(tline,'%([A-Z_0-9]*)\s*(.*)','tokens'); name{i} = lower(fname{1}{1}); desc{i} = fname{1}{2}; if ~isequal(name{i},F{i}(1:end-2)) warning('Filename/description mismatch for %s', name{i}); elseif isempty(regexp(desc{i},'\.\s*$','once')) warning('Missing final period for description in %s', name{i}); end break; end end % Find copyright if copyright c = 0; while 1 fline = fgetl(fid); if ~ischar(fline), break, end if strcmp(fline,'%MATLAB Tensor Toolbox. 
Copyright 2018, Sandia Corporation.') c = 2; break; elseif strcmp(fline,'%Copyright 2015, Sandia Corporation.') || ... strcmp(fline,'%Copyright 2012, Sandia Corporation.') c = 1; break; end end if c == 0 warning('Missing copyright in %s',[dirname '/' F{i}]); elseif c == 1 warning('Copyright out of date in %s',[dirname '/' F{i}]); end end fclose(fid); end %% Clean up contents lines w = max(cellfun(@length,name)); pat = sprintf('%%-%ds - %%s',w); for i = 1:cnt C{i} = sprintf(pat,name{i},desc{i}); if (debug) fprintf('Descp for %-30s: %s\n',F{i},C{i}); end end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_topcontents.m b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_topcontents.m new file mode 100644 index 0000000..407a811 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/create_topcontents.m @@ -0,0 +1,33 @@ +function create_topcontents +%CREATE_TOPCONTENTS Creates the top-level Contents.m file for toolbox + +%% Open main contents file +fidmain = fopen('../Contents.m','w'); +fprintf(fidmain,'%% Tensor Toolbox (Sandia National Labs)\n'); +fprintf(fidmain,'%% Version 3.1 %s\n', date); +fprintf(fidmain,'%% Tensor Toolbox for dense, sparse, and decomposed n-way arrays.\n'); +fprintf(fidmain,'%% \n'); +fprintf(fidmain,'%% Tensor Toolbox Classes:\n'); +fprintf(fidmain,'%% tensor - Dense tensor.\n'); +fprintf(fidmain,'%% sptensor - Sparse tensor.\n'); +fprintf(fidmain,'%% symtensor - Symmetric tensor.\n'); +fprintf(fidmain,'%% ktensor - Kruskal decomposed tensor.\n'); +fprintf(fidmain,'%% symktensor - Kruskal decomposed symmetric tensor.\n'); +fprintf(fidmain,'%% sumtensor - Sum of different types of tensors.\n'); +fprintf(fidmain,'%% ttensor - Tucker decomposed tensor.\n'); +fprintf(fidmain,'%% tenmat - Tensor as matrix.\n'); +fprintf(fidmain,'%% sptenmat - Sparse tensor as matrix.\n'); +fprintf(fidmain,'%% \n'); + +%% Get contents of main directory +fprintf(fidmain,'%% Tensor Toolbox Functions:\n'); +C = create_dircontents('..'); +for i = 1:numel(C) + fprintf(fidmain,'%% %s\n',C{i}); +end +fprintf(fidmain,'%%\n'); +fprintf(fidmain,'%% Documentation page for Tensor Toolbox\n'); + + +%% Close main contents file +fclose(fidmain); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/helptoc_template.xml b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/helptoc_template.xml new file mode 100644 index 0000000..b536bef --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/helptoc_template.xml @@ -0,0 +1,14 @@ + + + + + +Tensor Toolbox + +INSERT LIST HERE + +Tensor Toolbox Site + + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/mxdom2simplehtml_ttb.xsl b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/mxdom2simplehtml_ttb.xsl new file mode 100644 index 0000000..e3cdcea --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/mxdom2simplehtml_ttb.xsl @@ -0,0 +1,400 @@ + + + + + ]> + + + + + + + + + + + + + + + + + + + +This HTML was auto-generated from MATLAB code. +To make changes, update the MATLAB code and republish this document. + + + <xsl:value-of select="$title"/> + + + MATLAB + + + + + + + .m + + + + + + + + + + +
diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/tensor_toolbox_product_page_template.html b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/tensor_toolbox_product_page_template.html new file mode 100644 index 0000000..5bd9ce2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/tensor_toolbox_product_page_template.html @@ -0,0 +1,207 @@
+MATLAB Tensor Toolbox
+
+Sandia National Laboratories
+
+Tensor Toolbox
+
+The MATLAB Tensor Toolbox enables the creation and manipulation of dense and sparse multidimensional arrays.
+
+Documentation
+
+[INSERT LIST HERE]
+
+Home Page
+
+For more information, including how to cite the toolbox, please visit the Tensor Toolbox homepage at https://gitlab.com/tensors/tensor_toolbox.
+
+Legal Stuff
+
+Questions or Comments?
+
+Please send us email.
+
+Contact
+Tamara Kolda
+tgkolda@sandia.gov
+(925)294-4769
    + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_classlist.m b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_classlist.m new file mode 100644 index 0000000..7984e67 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_classlist.m @@ -0,0 +1,105 @@ +function update_classlist(varargin) +%UPDATE_CLASSLIST Updates constructor m-file with list of classes. +% +% This method assumes there are two lines at the top of the file that +% define the class. It keeps those in tact. It then searchers for the +% first line that hase the string "a href" in it, and it replaces all the +% lines inbetween with the list of functions for the class. +% +% UPDATE_CLASSLIST('Class',CLASSNAME) updates the filelist at the top of +% the class constructor file, which is used as documentation for the +% class when typing 'HELP CLASSNAME'. +% +% UPDATE_CLASSLIST('Class',CLASSNAME,'Debug',true) doesn't actually +% overwrite the constructor file. Instead, it just creates the file +% tmp_CLASSNAME.m in the class directory. +% +% UPDATE_CLASSLIST updates every class. +% +% See also CREATE_DIRCONTENTS, CREATE_TOPCONTENTS. +% +%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation. + + +%% Parse inputs +params = inputParser; +params.addParameter('Class',[]); +params.addParameter('Debug',false); +params.addParameter('Copyright',false); +params.parse(varargin{:}); + +classlist = params.Results.Class; +if isempty(classlist) + classlist = {'tensor','sptensor','ttensor','ktensor','tenmat',... + 'sptenmat','sumtensor','symtensor','symktensor'}; +elseif ~iscell(classlist) + classlist = {classlist}; +end + +debug = params.Results.Debug; + +ttbdir = getfield(what('tensor_toolbox'),'path'); + +%% +for j = 1:numel(classlist) + + % Extract contents + classname = classlist{j}; + classdir = fullfile(ttbdir,strcat('@',classname)); + fname = fullfile(classdir,strcat(classname,'.m')); + fnametmp = fullfile(classdir,strcat('tmp_',classname,'.m')); + + % Extract directory contents + C = create_dircontents(classdir,'Copyright',params.Results.Copyright); + + % Write to main class file + fprintf('Replacing list of functions in file %s\n', fname); + fidold = fopen(fname, 'r'); + fidnew = fopen(fnametmp, 'w'); + + % Copy first two lines, which are just the class name and + % description, plus a blank line. + for i = 1:2 + oldline = fgetl(fidold); + fprintf(fidnew, '%s\n', oldline); + end + + % Insert new contents into temporary file. 
+ fprintf(fidnew,'%%%s Methods:\n',upper(classname)); + for i = 1:numel(C) + fprintf(fidnew,'%% %s\n',C{i}); + end + fprintf(fidnew,'%%\n'); + + % Skip until href found + while 1 + oldline = fgetl(fidold); + if ~ischar(oldline) + error('Never found href line') + end + if contains(oldline,'a href=') + break; + end + end + + % Just copy everything else + while 1 + fprintf(fidnew, '%s\n', oldline); + oldline = fgetl(fidold); + if ~ischar(oldline), break, end + end + + fclose(fidold); + fclose(fidnew); + + if ~debug + [s,m] = movefile(fnametmp,fname); + if (s == 0) + fprintf('Error renaming file: %s\n',m); + end + end +end + +% + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_copyright.m b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_copyright.m new file mode 100644 index 0000000..6c84cf9 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/maintenance/update_copyright.m @@ -0,0 +1,49 @@ +function update_copyright(fname,varargin) +%UPDATE_COPYRIGHT Update pre-3.0 copyright to new version. + +%% Setup + +%% Parse inputs +params = inputParser; +params.addParameter('Verbose',true); +params.addParameter('Debug',true); +params.parse(varargin{:}); +verbose = params.Results.Verbose; +debug = params.Results.Debug; + +%% Set up files +[pathstr,name,ext] = fileparts(fname); +fnametmp = fullfile(pathstr, [name '_tmp' ext]); +if (verbose) + fprintf('Replacing copyright in file %s\n', fname); +end + +%% Open files +fidold = fopen(fname, 'r'); +fidnew = fopen(fnametmp, 'w'); + +%% Copy over and replace copyright +while 1 + tline = fgetl(fidold); + if ~ischar(tline), break, end + + if regexp(tline,'.MATLAB Tensor Toolbox.\w*$') + fprintf(fidnew,'%%MATLAB Tensor Toolbox. Copyright 2017, Sandia Corporation.\n'); + fprintf(fidnew,'%%https://gitlab.com/tensors/tensor_toolbox.\n'); + continue; + end + if regexp(tline,'Copyright 2015, Sandia Corporation'), continue, end + if regexp(tline,'This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.\w*') + for i = 1:6 + tline = fgetl(fidold); + end + continue; + end + fprintf(fidnew, '%s\n', tline); + +end + +%% Close files +fclose(fidold); +fclose(fidnew); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/matrandcong.m b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandcong.m new file mode 100644 index 0000000..0a9d9e1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandcong.m @@ -0,0 +1,20 @@ +function X = matrandcong(m,n,gamma) +%MATRANDCONG Create a random matrix with a fixed congruence. +% +% X = MATRANDCONG(M,N,GAMMA) creates a matrix X of size M x N such +% that each column of X has norm 1 and any two columns of X have an inner +% product equal to GAMMA. +% +% Based on code from Evrim Acar and the paper G. Tomasi and R. Bro, A +% comparison of algorithms for fitting the PARAFAC model, Computational +% Statistics & Data Analysis, 50: 1700-1734, 2006. +% +% See also MATRANDORTH, MATRANDNORM, CREATE_PROBLEM, CREATE_GUESS. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
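+% As a quick illustration (the dimensions below are arbitrary), the imposed
+% congruence can be checked via the Gram matrix:
+%
+%    X = matrandcong(10, 4, 0.5);
+%    G = X'*X;   % ones on the diagonal, 0.5 off the diagonal (up to rounding)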
+ +CG = gamma * ones(n,n) + (1-gamma) * eye(n); +CGR = chol(CG); +X = randn(m,n); +[Q,~] = qr(X,0); +X = Q * CGR; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/matrandnorm.m b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandnorm.m new file mode 100644 index 0000000..a74f7c5 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandnorm.m @@ -0,0 +1,28 @@ +function X = matrandnorm(varargin) +%MATRANDNORM Normalizes columns of X so that each is unit 2-norm. +% +% X = MATRANDNORM(M,N) creates a random M x N matrix with randomly using +% normally distributed enries and then rescales the columsn so that each +% has a unit 2-norm. +% +% X = MATRANDNORM(X) rescales the columns of X so that each +% column has a unit 2-norm. +% +% Examples +% X = MATRANDNORM(rand(5,5)); +% X = MATRANDNORM(3,2); +% X = MATRANDNORM(ones(4)); +% +% See also MATRANDORTH, MATRANDNORM, CREATE_PROBLEM, CREATE_GUESS. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +if nargin == 2 + X = randn(varargin{1}, varargin{2}); +else + X = varargin{1}; +end + +norms = sqrt(sum(X.^2,1)); +X = bsxfun(@rdivide,X,norms); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/matrandorth.m b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandorth.m new file mode 100644 index 0000000..44ba47e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/matrandorth.m @@ -0,0 +1,63 @@ +function M=matrandorth(n, tol) +%MATRANDORTH Generates random n x n orthogonal real matrix. +% +% M = MATRANDORTH(N) generates a random N x N orthogonal real matrix. +% +% M = MATRANDORTH(M,TOL) explicitly specifies a threshold value, TOL, +% that measures linear dependence of a newly formed column with the +% existing columns. Defaults to 1e-6. +% +% In this version the generated matrix distribution *is* uniform over the +% manifold O(n) w.r.t. the induced R^(n^2) Lebesgue measure, at a slight +% computational overhead (randn + normalization, as opposed to rand ). +% +% NOTE: This code is renamed from RandOrthMat by Ofek Shilon. +% https://www.mathworks.com/matlabcentral/fileexchange/11783-randorthmat +% +% (c) Ofek Shilon, 2006. +% +%This code is *not* copyrighted by Sandia, but it is distributed with: +% +% See also MATRANDNORM, MATRANDCONG, CREATE_PROBLEM, CREATE_GUESS. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + if exist('tol','var') + if (tol >= 1) + ncols = tol; + tol = 1e-6; + else + ncols = n; + end + else + tol=1e-6; + ncols = n; + end + + + M = zeros(n); % prealloc + + % gram-schmidt on random column vectors + + vi = randn(n,1); + % the n-dimensional normal distribution has spherical symmetry, which implies + % that after normalization the drawn vectors would be uniformly distributed on the + % n-dimensional unit sphere. 
+ + M(:,1) = vi ./ norm(vi); + + for i=2:n + nrm = 0; + while nrm= 1, K >= 0 +% PERMUTATIONS WITHOUT REPETITION/REPLACEMENT +% COMBINATOR(N,K,'p') -- N >= 1, N >= K >= 0 +% COMBINATIONS WITH REPETITION/REPLACEMENT +% COMBINATOR(N,K,'c','r') -- N >= 1, K >= 0 +% COMBINATIONS WITHOUT REPETITION/REPLACEMENT +% COMBINATOR(N,K,'c') -- N >= 1, N >= K >= 0 +% +% Example: +% +% To see the subset relationships, do this: +% combinator(4,2,'p','r') % Permutations with repetition +% combinator(4,2,'p') % Permutations without repetition +% combinator(4,2,'c','r') % Combinations with repetition +% combinator(4,2,'c') % Combinations without repetition +% +% +% If it is desired to use a set other than 1:N, simply use the output from +% COMBINATOR as an index into the set of interest. For example: +% +% MySet = ['a' 'b' 'c' 'd']; +% MySetperms = combinator(length(MySet),3,'p','r'); % Take 3 at a time. +% MySetperms = MySet(MySetperms) +% +% +% Class support for input N: +% float: double, single +% integers: int8,int16,int32 +% +% +% Notes: +% All of these algorithms have the potential to create VERY large outputs. +% In each subfunction there is an anonymous function which can be used to +% calculate the number of row which will appear in the output. If a rather +% large output is expected, consider using an integer class to conserve +% memory. For example: +% +% M = combinator(int8(30),3,'p','r'); % NOT uint8(30) +% +% will take up 1/8 the memory as passing the 30 as a double. See the note +% below on using the MEX-File. +% +% To make your own code easier to read, the fourth argument can be any +% string. If the string begins with an 'r' (or 'R'), the function +% will be called with the replacement/repetition algorithm. If not, the +% string will be ignored. +% For instance, you could use: 'No replacement', or 'Repetition allowed' +% If only two inputs are used, the function will assume 'p','r'. +% The third argument must begin with either a 'p' or a 'c' but can be any +% string beyond that. +% +% The permutations with repetitions algorithm uses cumsum. So does the +% combinations without repetition algorithm for the special case of K=2. +% Unfortunately, MATLAB does not allow cumsum to work with integer classes. +% Thus a subfunction has been placed at the end for the case when these +% classes are passed. The subfunction will automatically pass the +% necessary matrix to the built-in cumsum when a single or double is used. +% When an integer class is used, the subfunction first looks to see if the +% accompanying MEX-File (cumsumall.cpp) has been compiled. If not, +% then a MATLAB For loop is used to perform the cumsumming. This is +% VERY slow! Therefore it is recommended to compile the MEX-File when +% using integer classes. +% The MEX-File was tested by the author using the Borland 5.5 C++ compiler. +% +% See also, perms, nchoosek, npermutek (on the FEX) +% +% Author: Matt Fig +% Contact: popkenai@yahoo.com +% Date: 5/30/2009 +% +% Reference: http://mathworld.wolfram.com/BallPicking.html +% +%This code is *not* copyrighted by Sandia, but it is distributed with: +%MATLAB Tensor Toolbox. +%Copyright 2015, Sandia Corporation. + +ng = nargin; + +if ng == 2 + s1 = 'p'; + s2 = 'r'; +elseif ng == 3 + s2 = 'n'; +elseif ng ~= 4 + error('Only 2, 3 or 4 inputs are allowed. See help.') +end + +if isempty(N) || K == 0 + A = []; + return +elseif numel(N)~=1 || N<=0 || ~isreal(N) || floor(N) ~= N + error('N should be one real, positive integer. 
See help.') +elseif numel(K)~=1 || K<0 || ~isreal(K) || floor(K) ~= K + error('K should be one real non-negative integer. See help.') +end + +STR = lower(s1(1)); % We are only interested in the first letter. + +if ~strcmpi(s2(1),'r') + STR = [STR,'n']; +else + STR = [STR,'r']; +end + +try + switch STR + case 'pr' + A = perms_rep(N,K); % strings + case 'pn' + A = perms_no_rep(N,K); % permutations + case 'cr' + A = combs_rep(N,K); % multichoose + case 'cn' + A = combs_no_rep(N,K); % choose + otherwise + error('Unknown option passed. See help') + end +catch + rethrow(lasterror) % Throw error from here, not subfunction. + % The only error thrown should be K>N for non-replacement calls. +end + + + + +function PR = perms_rep(N,K) +% This is (basically) the same as npermutek found on the FEX. It is the +% fastest way to calculate these (in MATLAB) that I know. +% pr = @(N,K) N^K; Number of rows. +% A speed comparison could be made with COMBN.m, found on the FEX. This +% is an excellent code which uses ndgrid. COMBN is written by Jos. +% +% % All timings represent the best of 4 consecutive runs. +% % All timings shown in subfunction notes used this configuration: +% % 2007a 64-bit, Intel Xeon, win xp 64, 16 GB RAM +% tic,Tc = combinator(single(9),7,'p','r');toc +% %Elapsed time is 0.199397 seconds. Allow Ctrl+T+C+R on block +% tic,Tj = combn(single(1:9),7);toc +% %Elapsed time is 0.934780 seconds. +% isequal(Tc,Tj) % Yes + +if N==1 + PR = ones(1,K,class(N)); + return +elseif K==1 + PR = (1:N).'; + return +end + +CN = class(N); +M = double(N); % Single will give us trouble on indexing. +L = M^K; % This is the number of rows the outputs will have. +PR = zeros(L,K,CN); % Preallocation. +D = ones(1,N-1,CN); % Use this for cumsumming later. +LD = M-1; % See comment on N. +VL = [-(N-1) D].'; % These values will be put into PR. +% Now start building the matrix. +TMP = VL(:,ones(L/M,1,CN)); % Instead of repmatting. +PR(:,K) = TMP(:); % We don't need to do two these in loop. +PR(1:M^(K-1):L,1) = VL; % The first column is the simplest. +% Here we have to build the cols of PR the rest of the way. +for ii = K-1:-1:2 + ROWS = 1:M^(ii-1):L; % Indices into the rows for this col. + TMP = VL(:,ones(length(ROWS)/(LD+1),1,CN)); % Match dimension. + PR(ROWS,K-ii+1) = TMP(:); % Build it up, insert values. +end + +PR(1,:) = 1; % For proper cumsumming. +PR = cumsum2(PR); % This is the time hog. + + + + +function PN = perms_no_rep(N,K) +% Subfunction: permutations without replacement. +% Uses the algorithm in combs_no_rep as a basis, then permutes each row. +% pn = @(N,K) prod(1:N)/(prod(1:(N-K))); Number of rows. + +if N==K + PN = perms_loop(N); % Call helper function. +% [id,id] = sort(PN(:,1)); %#ok Not nec., uncomment for nice order. +% PN = PN(id,:); % Return values. + return +elseif K==1 + PN = (1:N).'; % Easy case. + return +end + +if K>N % Since there is no replacement, this cannot happen. + error(['When no repetitions are allowed, '... + 'K must be less than or equal to N']) +end + +M = double(N); % Single will give us trouble on indexing. +WV = 1:K; % Working vector. +lim = K; % Sets the limit for working index. +inc = 1; % Controls which element of WV is being worked on. +BC = prod(M-K+1:M); % Pre-allocation of return arg. +BC1 = BC / ( prod(1:K)); % Number of comb blocks. +PN = zeros(round(BC),K,class(N)); +L = prod(1:K) ; % To get the size of the blocks. +cnt = 1+L; +P = perms_loop(K); % Only need to use this once. +PN(1:(1+L-1),:) = WV(P); % The first row. 
+ +for ii = 2:(BC1 - 1); + if logical((inc+lim)-N) % The logical is nec. for class single(?) + stp = inc; % This is where the for loop below stops. + flg = 0; % Used for resetting inc. + else + stp = 1; + flg = 1; + end + + for jj = 1:stp + WV(K + jj - inc) = lim + jj; % Faster than a vector assignment! + end + + PN(cnt:(cnt+L-1),:) = WV(P); % Assign block. + cnt = cnt + L; % Increment base index. + inc = inc*flg + 1; % Increment the counter. + lim = WV(K - inc + 1 ); % lim for next run. +end + +V = (N-K+1):N; % Final vector. +PN(cnt:(cnt+L-1),:) = V(P); % Fill final block. +% The sorting below is NOT necessary. If you prefer this nice +% order, the next two lines can be un-commented. +% [id,id] = sort(PN(:,1)); %#ok This is not necessary! +% PN = PN(id,:); % Return values. + + + + +function P = perms_loop(N) +% Helper function to perms_no_rep. This is basically the same as the +% MATLAB function perms. It has been un-recursed for a runtime of around +% half the recursive version found in perms.m For example: +% +% tic,Tp = perms(1:9);toc +% %Elapsed time is 0.222111 seconds. Allow Ctrl+T+C+R on block +% tic,Tc = combinator(9,9,'p');toc +% %Elapsed time is 0.143219 seconds. +% isequal(Tc,Tp) % Yes + +M = double(N); % Single will give us trouble on indexing. +P = 1; % Initializer. +G = cumprod(1:(M-1)); % Holds the sizes of P. +CN = class(N); + +for n = 2:M + q = P; + m = G(n-1); + P = zeros(n*m,n,CN); + P(1:m, 1) = n; + P(1:m, 2:n) = q; + a = m + 1; + + for ii = n-1:-1:1, + t = q; + t(t == ii) = n; + b = a + m - 1; + P(a:b, 1) = ii; + P(a:b, 2:n) = t; + a = b + 1; + end +end + + + + +function CR = combs_rep(N,K) +% Subfunction multichoose: combinations with replacement. +% cr = @(N,K) prod((N):(N+K-1))/(prod(1:K)); Number of rows. + +M = double(N); % Single will give us trouble on indexing. +WV = ones(1,K,class(N)); % This is the working vector. +mch = prod((M:(M+K-1)) ./ (1:K)); % Pre-allocation. +CR = ones(round(mch),K,class(N)); + +for ii = 2:mch + if WV(K) == N + cnt = K-1; % Work backwards in WV. + + while WV(cnt) == N + cnt = cnt-1; % Work backwards in WV. + end + + WV(cnt:K) = WV(cnt) + 1; % Fill forward. + else + WV(K) = WV(K)+1; % Keep working in this group. + end + + CR(ii,:) = WV; +end + + + + +function CN = combs_no_rep(N,K) +% Subfunction choose: combinations w/o replacement. +% cn = @(N,K) prod(N-K+1:N)/(prod(1:K)); Number of rows. +% Same output as the MATLAB function nchoosek(1:N,K), but often faster for +% larger N. +% For example: +% +% tic,Tn = nchoosek(1:17,8);toc +% %Elapsed time is 0.430216 seconds. Allow Ctrl+T+C+R on block +% tic,Tc = combinator(17,8,'c');toc +% %Elapsed time is 0.024438 seconds. +% isequal(Tc,Tn) % Yes + +if K>N + error(['When no repetitions are allowed, '... + 'K must be less than or equal to N']) +end + +M = double(N); % Single will give us trouble on indexing. + +if K == 1 + CN =(1:N).'; % These are simple cases. + return +elseif K == N + CN = (1:N); + return +elseif K==2 && N>2 % This is an easy case to do quickly. + BC = (M-1)*M / 2; + id1 = cumsum2((M-1):-1:2)+1; + CN = zeros(BC,2,class(N)); + CN(:,2) = 1; + CN(1,:) = [1 2]; + CN(id1,1) = 1; + CN(id1,2) = -((N-3):-1:0); + CN = cumsum2(CN); + return +end + +WV = 1:K; % Working vector. +lim = K; % Sets the limit for working index. +inc = 1; % Controls which element of WV is being worked on. +BC = prod(M-K+1:M) / (prod(1:K)); % Pre-allocation. +CN = zeros(round(BC),K,class(N)); +CN(1,:) = WV; % The first row. + +for ii = 2:(BC - 1); + if logical((inc+lim)-N) % The logical is nec. 
for class single(?) + stp = inc; % This is where the for loop below stops. + flg = 0; % Used for resetting inc. + else + stp = 1; + flg = 1; + end + + for jj = 1:stp + WV(K + jj - inc) = lim + jj; % Faster than a vector assignment. + end + + CN(ii,:) = WV; % Make assignment. + inc = inc*flg + 1; % Increment the counter. + lim = WV(K - inc + 1 ); % lim for next run. +end + +CN(ii+1,:) = (N-K+1):N; + + + + +function A = cumsum2(A) +%CUMSUM2, works with integer classes. +% Duplicates the action of cumsum, but for integer classes. +% If Matlab ever allows cumsum to work for integer classes, we can remove +% this. + +if isfloat(A) + A = cumsum(A); % For single and double, use built-in. + return +else + try + A = cumsumall(A); % User has the MEX-File ready? + catch + warning('Cumsumming by loop. MEX cumsumall.cpp for speed.') %#ok + for ii = 2:size(A,1) + A(ii,:) = A(ii,:) + A(ii-1,:); % User likes it slow. + end + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tenones.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tenones.m new file mode 100644 index 0000000..c5a0195 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tenones.m @@ -0,0 +1,25 @@ +function X = tenones(varargin) +%TENONES Ones tensor. +% +% X = TENONES(SZ) forms a tensor of size SZ with all ones. +% +% TENONES(SZ) is equivalent to TENSOR(ONES(SZ(1),SZ(2),...),SZ). +% +% See also TENSOR, ONES. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if nargin == 1 + sz = varargin{1}; +else + sz = cell2mat(varargin); +end + +if isempty(sz) + X = tensor(); + return; +end + +data = ones([sz 1 1]); +X = tensor(data,sz); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tenrand.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tenrand.m new file mode 100644 index 0000000..f87213f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tenrand.m @@ -0,0 +1,25 @@ +function X = tenrand(varargin) +%TENRAND Uniformly distributed pseudo-random tensor. +% +% X = TENRAND(SZ) forms a tensor of size SZ with pseudo-random +% values drawn from a uniform distribution on the unit interval. +% +% TENRAND(SZ) is equivalent to TENSOR(RAND(SZ(1),SZ(2),...),SZ). +% +% See also TENSOR, SPTENRAND, RAND. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if nargin == 1 + sz = varargin{1}; +else + sz = cell2mat(varargin); +end + +if isempty(sz) + X = tensor; +else + data = rand([sz 1 1]); + X = tensor(data,sz); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tenrandblk.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tenrandblk.m new file mode 100644 index 0000000..6304ae7 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tenrandblk.m @@ -0,0 +1,120 @@ +function G = tenrandblk(bsz, bns, verbose) +%TENRANDBLK Generate nearly block diagonal tensor. +% +% G = TENRANDBLK(BSZ,BNS) creates a tensor G that is block 'diagonal' +% plus noise. The first argument specifies the size of each block as a +% row. The number of rows of BSZ is the number of blocks and the order of +% the tensor is the number of columns. The blocks need not be square. The +% size of G is equal to sum(BSZ,1). The second argument specifies the +% squared norm of each block. The values must be strictly decreasing and +% sum to less than one. The squared norm of the offdiagonal parts of G is +% 1-sum(BNS). +% +% See example usage. +% +% See also CREATE_PROBLEM +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
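+% A small illustrative call (block sizes and norms below are arbitrary):
+%
+%    bsz = [10 8 6; 5 4 3];      % two blocks; G will be 15 x 12 x 9
+%    bns = [0.80 0.15];          % squared block norms: decreasing, sum < 1
+%    G = tenrandblk(bsz, bns);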
+ +%% Check inputs + +% --- Check sizes --- + +% Must be a matrix +if ~ismatrix(bsz) + error('Block sizes must be a matrix'); +end + +if any(bsz <= 0) + error('Block sizes must be positive'); +end + +if any(bsz ~= round(bsz)) + error('Block sizes must be integers'); +end + +% Find the end of each block +bend = cumsum(bsz,1); + +% Extract size: D = # dimenions, L = # levels +[D,L] = size(bsz); + +% Final size +gsz = bend(end,:); + +% --- Check errors --- + +% Check errors are okay +if sum(bns) > 1 + error('sum of all block squared error norms must be <= 1'); +end + +for i = 2:L + if bns(i) > bns(i-1) + error('Squared norms must be strictly decreasing'); + end +end + +if ~exist('verbose','var') + verbose = false; +end + +%% Create tensor + +% Figure out norm of off-block-diagonal +dltnrmsqr = 1 - sum(bns); + +% Create pattern for off-block-diagonal to be modified as we go +dltpattern = ones(gsz); + +% Create tensor to fill in +G = tensor(@zeros,gsz); + +% Create random entries to use +Grnd = tensor(@(sz) sign(randn(sz)) .* (0.1*rand(sz)+0.9), gsz); + +% Loop through and create blocks +for i = 1:L + + % Figure out ith block pattern + blkrange = cell(D,1); + for k = 1:D + if i == 1 + blkrange{k} = 1:bend(i,k); + else + blkrange{k} = bend(i-1,k)+1:bend(i,k); + end + end + + % Create pattern that has ones for the block + pattern = zeros(gsz); + pattern(blkrange{:}) = 1; + + % Zero out block in the off-diagonal pattern + dltpattern(blkrange{:}) = 0; + + % Randomly fill delta-pattern and rescale + block = Grnd .* pattern; + sse = collapse(block.^2); + block = sqrt(bns(i)/sse) .* block; + + % Add to main tensor + G = G + block; + + % Verbose output + if verbose + fprintf('Created block of size %s with norm (%f)^2=%f\n', tt_size2str(bsz(i,:)), norm(block), norm(block)^2); + end +end + +if dltnrmsqr > 0 + % Final pattern + block = Grnd .* dltpattern; + sse = collapse(block.^2); + block = sqrt(dltnrmsqr/sse) .* block; + G = G + block; + if verbose + fprintf('Created tensor of size %s with off-block-diaognal norm (%f)^2=%f\n', tt_size2str(gsz), norm(block), norm(block)^2); + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tenzeros.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tenzeros.m new file mode 100644 index 0000000..edd383a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tenzeros.m @@ -0,0 +1,32 @@ +function X = tenzeros(varargin) +%TENZEROS Create zeros tensor. +% +% X = TENZEROS(SZ) forms a tensor of size SZ with all zeros. +% +% TENZEROS(SZ) is equivalent to TENSOR(ZEROS(SZ(1),SZ(2),...),SZ). +% +% See also TENSOR, ZEROS. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if nargin == 1 + sz = varargin{1}; +else + sz = cell2mat(varargin); +end + +if isempty(sz) + X = tensor; + return; +end + +if nargin == 2 + order = sz; + dim = varargin{1}; + sz = dim * ones(1,order); +end + +data = zeros([sz 1 1]); +X = tensor(data,sz); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/README.md b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/README.md new file mode 100644 index 0000000..ee4594a --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/README.md @@ -0,0 +1,17 @@ +# Tests for Tensor Toolbox for MATLAB + +Name each test file as `Test_Something.m`. 
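+
+For example, a minimal test file following this naming convention might look
+like the sketch below (the class and test names are purely illustrative):
+
+``` matlab
+% Test_Example.m (illustrative skeleton only)
+classdef Test_Example < matlab.unittest.TestCase
+    methods (Test)
+        function SmallRandomTensor(testCase)
+            X = tenrand([4 3 2]);                    % toolbox must be on the path
+            testCase.verifyEqual(size(X), [4 3 2]);  % size returns a row vector
+        end
+    end
+end
+```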
+ +## Running the Tests +``` matlab +r=runtests; % Runs all tests +table(r) % View results +r=runtests(Test_Somthing); % Runs just the tests in that file +r=runtests('Test_NewTTM','ProcedureName','Compare','Verbosity',4); % Particular tests +``` + + +## Creating New Tests +Copy one of the existing tests as a guide and modify to do tests relvant for the +m-files being created. See the MATLAB documentation on `Class-Based Unit Tests` +for more information. diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_DenseSparseConvert.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_DenseSparseConvert.m new file mode 100644 index 0000000..25e5f32 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_DenseSparseConvert.m @@ -0,0 +1,40 @@ +% Testing conversions between tensor and sptensor +classdef Test_DenseSparseConvert < matlab.unittest.TestCase + methods (Test) + + function Empty(testCase) + x = sptensor; + y = tensor; + testCase.verifyEqual(x, sptensor(y)); + testCase.verifyEqual(y, tensor(x)); + end + + function Zero(testCase) + x = sptensor([5 4 3]); + y = tensor(@zeros, [5 4 3]); + testCase.verifyEqual(x, sptensor(y)); + testCase.verifyEqual(y, tensor(x)); + end + + function ThreeWay(testCase) + x = sptenrand([4 3 2], 0.4); + y = tensor(x); + testCase.verifyEqual(x, sptensor(y)); + + y = tenrand([4 3 2]); + x = sptensor(y); + testCase.verifyEqual(y, tensor(x)); + end + + function OneWay(testCase) + x = sptenrand(10,0.4); + y = tensor(x); + testCase.verifyEqual(x, sptensor(y)); + + y = tenrand([10]); + x = sptensor(y); + testCase.verifyEqual(y, tensor(x)); + end + + end +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_ImportData.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_ImportData.m new file mode 100644 index 0000000..27d530e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_ImportData.m @@ -0,0 +1,12 @@ +% Testing importing tensors using import_data +classdef Test_ImportData < matlab.unittest.TestCase + methods (Test) + + function Full(testCase) + x = import_data('sptensor_small.tns'); + y = sptensor([1 1 1;2 2 2;3 3 3],[1 2 3]',[3 3 3]); + testCase.verifyEqual(full(x), full(y)); + end + + end +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_NewTTM.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_NewTTM.m new file mode 100644 index 0000000..3863058 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_NewTTM.m @@ -0,0 +1,49 @@ +% Testing different versions of tensor/ttm for correctness and efficiency +classdef Test_NewTTM < matlab.unittest.TestCase + + + properties (TestParameter) + combo = struct('small3d', [3 10 50], 'small4d', [4 10 25], 'small5d', [5 5 10], 'large3d',[3 100 250]); + ver = struct('old', 0, 'new', 1); + end + + methods (Test) + function Compare(testCase, combo) + nd = combo(1); + lsz = combo(2); + usz = combo(3); + rsz = usz - lsz; + sz = lsz * ones(1, nd) + randi(rsz, 1, nd); + X = tensor(@randn, sz); + U = cell(nd,1); + for n = 1:nd + U{n} = randn(lsz + randi(rsz), sz(n)); + end + for n = 1:nd + Y1 = ttm(X,U{n},n,[],0); + Y2 = ttm(X,U{n},n,[],1); + testCase.verifyEqual(size(Y1), size(Y2)); + testCase.verifyEqual(Y1.data, Y2.data, 'AbsTol', 1e-12); + end + end + + function Time(testCase, combo, ver) + nd = combo(1); + lsz = combo(2); + usz = combo(3); + sz = usz * ones(1, nd); + X = tensor(@randn, sz); + U = cell(nd,1); + for n = 1:nd + U{n} = 
randn(lsz, sz(n)); + end + for n = 1:nd + newsz = sz; + newsz(n) = lsz; + Y1 = ttm(X,U{n},n,[],ver); + testCase.verifyEqual(size(Y1), newsz); + end + end + + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Symtensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Symtensor.m new file mode 100644 index 0000000..dd6acdd --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Symtensor.m @@ -0,0 +1,251 @@ +% Tests for symtensor class +classdef Test_Symtensor < matlab.unittest.TestCase + + properties (TestParameter) + m = struct( 'three', 3, 'one', 1 ); + n = struct( 'ten', 10, 'three', 3, 'one', 1); + g = struct( 'rand', @rand, 'zeros', @zeros, 'ones', @ones); + + bf = struct('and', @and, 'eq', @eq, 'ge', @ge, 'gt', @gt, 'ldivide', @ldivide, 'le', @le, 'lt', @lt, 'minus', @minus, 'ne', @ne, 'or', @or, 'plus', @plus, 'power', @power, 'rdivide', @rdivide, 'times', @times, 'xor', @xor); + uf = struct('not', @not, 'uminus', @uminus, 'uplus', @uplus); + sfr = struct('mtimes', @mtimes, 'mrdivide', @mrdivide); + sfl = struct('mldivide', @mldivide, 'mtimes', @mtimes); + end + + methods (Test) + + function ConstructBySym(testCase,m,n,g) + % Test construction by symmetrization + % S = SYMTENSOR(X) + % [S,I] = SYMTENSOR(X) + lsz = n * ones(1,m); + T = tensor(feval(g,[lsz 1]),lsz); + [X,I] = symtensor(T); + testCase.verifyClass(X, 'symtensor'); + testCase.verifyEqual(X.m, m); + testCase.verifyEqual(X.n, n); + testCase.verifyEqual(full(X), symmetrize(T)); + testCase.verifyEqual(size(I), [nchoosek(m+n-1,m) m]); + % Also test indices function while we're here... + I2 = indices(X); + testCase.verifyEqual(I, I2); + % Test some other stuff too + testCase.verifyTrue(issymmetric(X)); % Also testing issymmetric + testCase.verifyEqual(ndims(X), m); + testCase.verifyEqual(size(X), n*ones(1,m)); + testCase.verifyEqual(size(X,randi(m)), n); + % Test isequal too + Y = symtensor(X.vals, m, n); + testCase.verifyTrue(isequal(X,Y)); + Y(1) = Y(1) - 1; + testCase.verifyFalse(isequal(X,Y)); + end + + function ConstructByCopy(testCase) + % Test copy constructor + % S = SYMTENSOR(S0) + X = symtensor(@rand,4,3); + Y = X; + testCase.verifyClass(Y, 'symtensor'); + testCase.verifyEqual(X,Y); + end + + function ConstructByVal(testCase, m, n, g) + % Test construct from distinct values + % S = SYMTENSOR(VALS,M,N) + vsz = nchoosek(m+n-1,m); + vals = feval(g,[vsz 1]); + X = symtensor(vals,m,n); + Y = symtensor(vals',m,n); + testCase.verifyClass(X, 'symtensor'); + testCase.verifyClass(Y, 'symtensor'); + testCase.verifyEqual(X,Y); + % Test some other stuff too + testCase.verifyTrue(issymmetric(X)); % Also testing issymmetric + testCase.verifyEqual(ndims(X), m); + testCase.verifyEqual(size(X), n*ones(1,m)); + testCase.verifyEqual(size(X,randi(m)), n); + % Test isequal too + Y = symtensor(X.vals, m, n); + testCase.verifyTrue(isequal(X,Y)); + Y(1) = Y(1) - 1; + testCase.verifyFalse(isequal(X,Y)); + end + + function ConstructByGen(testCase, m, n, g) + X = symtensor(g, m, n); + testCase.verifyClass(X, 'symtensor'); + testCase.verifyEqual(X.m, m); + testCase.verifyEqual(X.n, n); + % Test some other stuff too + testCase.verifyTrue(issymmetric(X)); % Also testing issymmetric + testCase.verifyEqual(ndims(X), m); + testCase.verifyEqual(size(X), n*ones(1,m)); + testCase.verifyEqual(size(X,randi(m)), n); + % Test isequal too + Y = symtensor(X.vals, m, n); + testCase.verifyTrue(isequal(X,Y)); + Y(1) = Y(1) - 1; + testCase.verifyFalse(isequal(X,Y)); + end + + function BinaryFuncs(testCase, 
bf) + mlocal = 4; + nlocal = 3; + X = symtensor(@ones,mlocal,nlocal); % 15 distinct elements + Y = X+1; + Y((1:5)') = 0; + Z1 = feval(bf, X, Y); + Z2 = symtensor(feval(bf, X.vals, Y.vals), mlocal, nlocal); + testCase.verifyEqual(Z1,Z2); + end + + function BinaryFuncsWithScalar(testCase, bf) + mlocal = 4; + nlocal = 3; + X = symtensor(@ones, mlocal, nlocal); % 15 distinct elements + Z1 = feval(bf, X, 5); + Z2 = symtensor(feval(bf, X.vals, 5), mlocal, nlocal); + testCase.verifyEqual(Z1,Z2); + end + + function UnaryFuncs(testCase,uf) + mlocal = 3; + nlocal = 5; + X = symtensor(@rand, mlocal, nlocal); + X((1:5)') = X((1:5)') > .5; + Z1 = feval(uf,X); + Z2 = symtensor(feval(uf,X.vals), mlocal, nlocal); + testCase.verifyEqual(Z1,Z2); + end + + function ScalarFuncsRight(testCase,sfr) + mlocal = 4; + nlocal = 3; + X = symtensor(@rand, mlocal, nlocal); % 15 distinct elements + Z1 = feval(sfr, X, 5); + Z2 = symtensor(feval(sfr, X.vals, 5), mlocal, nlocal); + testCase.verifyEqual(Z1,Z2); + end + + function ScalarFuncsLeft(testCase,sfl) + mlocal = 4; + nlocal = 3; + X = symtensor(@rand, mlocal, nlocal); % 15 distinct elements + Z1 = feval(sfl, 5, X); + Z2 = symtensor(feval(sfl, 5, X.vals), mlocal, nlocal); + testCase.verifyEqual(Z1,Z2); + end + + function CheckFull(testCase) + X=tensor(rand([3,3,3])); + Y=symtensor(X); + F=full(Y); + testCase.verifyEqual(symmetrize(X),F); + Y=symtensor(@ones,3,3); + F=full(Y); + testCase.verifyEqual(F,tensor(ones([3,3,3]))); + end + + function SubsRef(testCase, m, n) + p = nchoosek(m+n-1,m); + X = symtensor(1:p, m, n); + testCase.verifyEqual(X.val, (1:p)'); + testCase.verifyEqual(X.m, m); + testCase.verifyEqual(X.n, n); + %% Linear indexing into val array + q = randi(p); + % Single + testCase.verifyEqual(X(q),q); + % Range + testCase.verifyEqual(X((1:q)'),(1:q)'); + r = (min(2,p):min(4,p))'; + testCase.verifyEqual(X(r),r); + % List + q = randi(p,25,1); + testCase.verifyEqual(X(q),q); + %% Subscripts + % Single + s = randi(n,1,m); + ssrt = sort(s,2); + testCase.verifyEqual(X(s),X(ssrt)); + xsubs = indices(X); + [~,locx] = ismember(ssrt,xsubs,'rows'); + testCase.verifyEqual(X(s),X(locx)); + % List + s = randi(n,5,m); + ssrt = sort(s,2); + testCase.verifyEqual(X(s),X(ssrt)); + xsubs = indices(X); + [~,locx] = ismember(ssrt,xsubs,'rows'); + testCase.verifyEqual(X(s),X(locx)); + end + + function SubsAsgn(testCase, m, n) + p = nchoosek(m+n-1,m); + X = symtensor(1:p, m, n); + % Assignment of all values + X(:) = (p:-1:1)'; + testCase.verifyEqual(X.val, (p:-1:1)'); + X.val = (1:p)'; + testCase.verifyEqual(X.val, (1:p)'); + % Assignment of single entry using linear index + idx = randi(p); + X(idx) = -X(idx); + newvals = (1:p)'; + newvals(idx) = -idx; + testCase.verifyEqual(X.val, newvals); + % Assignment of multiple entries using linear indices + idx = unique(randi(p,10,1)); + newvals = -1*(1:length(idx))'; + X(idx) = newvals; + testCase.verifyEqual(X(idx),newvals); + % Assignment of single entry using subscripts + s = randi(n,1,m); + X(s) = 17; + testCase.verifyEqual(X(s),17); + % Assignment of multiple entries using subscripts + % (Need to be careful here because we can have repeat entries in s + % that are not obvious until sorted due to different subscripts for + % the same unique element.) 
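+            % For instance, with m = 2 the subscripts [1 2] and [2 1] address
+            % the same distinct element of the symtensor, so they only show up
+            % as duplicates after each row of s is sorted.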
+ s = randi(n,5,m); + ssrt = sort(s,2); + [ssrt,locs] = unique(ssrt,'rows'); + s = s(locs,:); + newvals = 10*(1:size(s,1))'; + X(s) = newvals; + testCase.verifyEqual(X(ssrt),newvals); + end + + function TenFun(testCase, m, n, g) + X = symtensor(g, m, n); + Xf = full(X); + fh = @(x) x + 1; + Z = tenfun(fh, X ); + Zf = tenfun(fh, Xf); + testCase.verifyEqual(Z, symtensor(Zf)); + + fh = @eq; + Z = tenfun(fh, X , 1); + Zf = tenfun(fh, Xf, 1); + testCase.verifyEqual(Z, symtensor(Zf)); + + Y = symtensor(g, m, n); + Yf = full(Y); + fh = @plus; + Z = tenfun(fh, X , Y); + Zf = tenfun(fh, Xf, Yf); + testCase.verifyEqual(Z, symtensor(Zf)); + + W = symtensor(g, m, n); + Wf = full(W); + fh = @max; + Z = tenfun(fh, W, X, Y); + Zf = tenfun(fh, Wf, Xf, Yf); + testCase.verifyEqual(Z, symtensor(Zf)); + end + + end +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Tensor.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Tensor.m new file mode 100644 index 0000000..54c6e59 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/Test_Tensor.m @@ -0,0 +1,769 @@ +% Tests for tensor class +classdef Test_Tensor < matlab.unittest.TestCase + + properties (TestParameter) + nd = struct( 'three', 3, 'one', 1 ); + maxdim = struct( 'ten', 10, 'three', 3, 'one', 1 ); + gen = struct( 'rand', @rand, 'zeros', @zeros, 'ones', @ones, 'randn', @randn ); + nx = struct( 'one', 1, 'ten', 10, 'hundred', 100 ); + szs = struct( 'fourthreetwo', [4 3 2], 'threetwoone', [3 2 1], 'threetwo', [3 2], 'three', 3, 'one', 1, 'emtpy', []); + + bf = struct('and', @and, 'eq', @eq, 'ge', @ge, 'gt', @gt, 'ldivide', @ldivide, 'le', @le, 'lt', @lt, 'minus', @minus, 'ne', @ne, 'or', @or, 'plus', @plus, 'power', @power, 'rdivide', @rdivide, 'times', @times, 'xor', @xor); + uf = struct('not', @not, 'uminus', @uminus, 'uplus', @uplus); + sfr = struct('mtimes', @mtimes, 'mrdivide', @mrdivide); + sfl = struct('mldivide', @mldivide, 'mtimes', @mtimes); + + symver = struct('new', 0, 'old', 1); + issymver = struct('new', 0', 'old', 1); + + end + + methods (Test) + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- CONSTRUCTOR TESTS --- + % Testing the constructor using various generators and also the + % (implicit) copy constructor. Also including some other tests that + % are simple: double, ndims, full, isequal, norm, nnz, size. 
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + function Construct(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + % --- Case I: Construct from (Vector) Data --- + s = rng; % Save state of random number generator + Xdata = gen([sz 1]); + X = tensor(Xdata, sz); + % --- Case II: Use Generator Function --- + rng(s); % Reset random number generator to same state as above + Y = tensor(gen,sz); % Should be the same as X + % --- Case III: Copy --- + Z = X; + % --- Checks --- + testCase.verifyClass(X, 'tensor'); + testCase.verifyClass(Y, 'tensor'); + testCase.verifyClass(Z, 'tensor'); + testCase.verifyEqual(X, Y); + testCase.verifyEqual(X, Z); + testCase.verifyEqual(ndims(X), nd); + testCase.verifyEqual(size(X), sz); + for i = 1:nd; + testCase.verifyEqual(size(X,i), sz(i)); + end + % --- More Tests --- + testCase.verifyEqual(double(X), Xdata); % double + testCase.verifyEqual(norm(X), sqrt(sum(Xdata(:).^2)),'RelTol',1e-15); % norm + testCase.verifyEqual(X, full(X)); % full + testCase.verifyTrue(isequal(X,Y)); % isequal + testCase.verifyEqual(nnz(X), nnz(X.data(:))); + end + + function ConstructAlt(testCase,szs) + X = tenrand(szs); + testCase.verifyEqual(size(X),szs); + end + + function ConstructBadSize(testCase) + testCase.verifyError(@()eval('X = tensor(1:12, [4 3 2]);'), ?MException); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- FUNCTION TESTS --- + % Testing binary and unary functions to make sure they work as + % expected. In particular, the binary functions should accept + % scalar inputs. The matrix functions (mtimes, mrdivide, mldivide) + % work with scalars only. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + + function BinaryFuncs(testCase, bf) + sz = [4 3 2 4]; + X = tensor(@ones, sz); + Y = X+1; + Y((1:5)') = 0; + Z1 = feval(bf, X, Y); + Z2 = tensor(feval(bf, X.data, Y.data), sz); + testCase.verifyEqual(Z1,Z2); + end + + + function BinaryFuncErrors(testCase,bf) %check binary functions + X = tensor(rand([4 3 2])); + Y = tensor(rand([3 4 2])); + % Should throw an exception if the sizes don't match + testCase.verifyError(@()bf(X,Y),?MException); + % Should throw an exception if only a single argument + testCase.verifyError(@()bf(X), ?MException); + % Should throw an exception for too many arguments + testCase.verifyError(@()bf(X,X,X), ?MException); + end + + function And(testCase, szs) + X = tenrand(szs); + Y = X.data; + testCase.verifyEqual(double(X&X), double(Y~=0)); + end + + function UnaryFuncs(testCase,uf) + sz = [4 3 2 4]; + X = tensor(@rand, sz); + X((1:5)') = X((1:5)') > .5; + Z1 = feval(uf,X); + Z2 = tensor(feval(uf,X.data), sz); + testCase.verifyEqual(Z1,Z2); + end + + function ScalarFuncsRight(testCase,sfr) + sz = [4 3 2 4]; + X = tensor(@rand, sz); + Z1 = feval(sfr, X, 5); + Z2 = tensor(feval(sfr, X.data, 5), sz); + testCase.verifyEqual(Z1,Z2); + end + + function ScalarFuncsLeft(testCase,sfl) + sz = [7 4 1]; + X = tensor(@rand, sz); + Z1 = feval(sfl, 5, X); + Z2 = tensor(feval(sfl, 5, X.data), sz); + testCase.verifyEqual(Z1,Z2); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- SUBSCRIPTED REFERENCE AND ASSIGNMENT --- + % Testing the various ways of referencing a tensor, including dot, + % subscripts, and linear indices. Tensor support passing an array + % of linear indices or a matrix of subscripts with one per row. + % Tensors also support extraction of a subtensor using ranges. 
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + function SubsRefDot(testCase, nd ,maxdim, gen) + sz = randi(maxdim, 1, nd); + Xdata = gen([sz 1]); + X = tensor(Xdata, sz); + testCase.verifyEqual(X.data, Xdata); + testCase.verifyEqual(X.size, sz); + end + + function SubsRefEntry(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + idx = randi(prod(sz)); + sub = tt_ind2sub(sz, idx); + + szstr = strjoin(arrayfun(@num2str, sz, 'UniformOutput', false),','); + substr = strjoin(arrayfun(@num2str, sub, 'UniformOutput', false),','); + + Xdata = gen([sz 1]); + X = tensor(Xdata, sz); + + dgnstc = sprintf('Failure for size=%s, entry=%s', szstr, substr); + testCase.verifyEqual(Xdata(idx), X(idx), dgnstc); + testCase.verifyEqual( eval(['X(' substr ')']), X(idx), dgnstc); + end + + function SubsRefColons(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + szstr = strjoin(arrayfun(@num2str, sz, 'UniformOutput', false),','); + + Xdata = gen([sz 1]); + X = tensor(Xdata, sz); %#ok + + iscolon = rand(nd,1) > 0.5; + iscolon(randi(nd)) = 1; + + substrs = cell(nd,1); + for k = 1:nd + if iscolon(k) + substrs{k} = ':'; + else + substrs{k} = num2str(randi(sz(k))); + end + end + substr = strjoin(substrs,','); + + Y1 = eval(['X(' substr ')']); + Y2 = eval(['Xdata(' substr ')']); + dgnstc = sprintf('Failure for size=%s, entry=%s', szstr, substr); + testCase.verifyEqual(Y1.data(:), Y2(:), dgnstc); + testCase.verifyEqual(size(Y1), sz(iscolon), dgnstc); + end + + function SubsRefRanges(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + szstr = strjoin(arrayfun(@num2str, sz, 'UniformOutput', false),','); + + Xdata = gen([sz 1]); + X = tensor(Xdata, sz); %#ok + + iscolon = rand(nd,1) > 0.5; + iscolon(randi(nd)) = 1; + + newsz = sz; + substrs = cell(nd,1); + for k = 1:nd + if iscolon(k) + if sz(k) == 1 + substrs{k} = ':'; + else + lower = randi(sz(k)-1); + upper = lower+randi(sz(k)-lower); + substrs{k} = sprintf('%d:%d', lower, upper); + newsz(k) = upper - lower + 1; + end + else + substrs{k} = num2str(randi(sz(k))); + end + end + substr = strjoin(substrs,','); + + Y1 = eval(['X(' substr ')']); + Y2 = eval(['Xdata(' substr ')']); + dgnstc = sprintf('Failure for size=%s, entry=%s', szstr, substr); + testCase.verifyEqual(Y1.data(:), Y2(:), dgnstc); + testCase.verifyEqual(size(Y1), newsz(iscolon), dgnstc); + end + + function SubsRefList(testCase, nd, maxdim, nx) + sz = randi(maxdim, 1, nd); + X = tensor(@rand, sz); + idx = randi(prod(sz),nx,1); % List of elements to extract + sub = tt_ind2sub(sz, idx); + L0 = squeeze(X.data(idx)); + if ~iscolumn(L0) + L0 = L0'; + end + L1 = X(idx, 'extract'); + testCase.verifyEqual(L0,L1); + L2 = X(sub, 'extract'); + testCase.verifyEqual(L0,L2); + if nd > 1 + L3 = X(idx); + testCase.verifyEqual(L0,L3); + L4 = X(sub); + testCase.verifyEqual(L0,L4); + end + end + + + function SubsAsgnElement(testCase, nd, maxdim) + sz = randi(maxdim, 1, nd); + szstr = strjoin(arrayfun(@num2str, sz, 'UniformOutput', false),','); + idx = randi(prod(sz)); + sub = tt_ind2sub(sz, idx); + substr = strjoin(arrayfun(@num2str, sub, 'UniformOutput', false),','); + dstr = sprintf('Failure sz=%s, idx=%d, sub=%s\n', szstr, idx, substr); + + % Using linear index + X = tensor(@zeros, sz); + X(idx) = 1; + testCase.verifyEqual(X(idx), 1, dstr); + if prod(sz) > 1 + rng = [1:idx-1, idx+1:prod(sz)]'; + testCase.verifyEqual(X(rng,'extract'), zeros(prod(sz)-1,1),dstr); + end + + % Repeat using subscript + X = tensor(@zeros, sz); + estr = sprintf('X( %s ) = 1;', substr); + eval(estr); + 
testCase.verifyEqual(X(idx), 1, dstr); + testCase.verifyEqual(nnz(X), 1, dstr); + end + + function SubsAsgnGrowSize(testCase, nd, maxdim) + sub = randi(maxdim, 1, nd); + substr = strjoin(arrayfun(@num2str, sub, 'UniformOutput', false),','); + X = tensor; + estr = sprintf('X( %s ) = 1;', substr); + eval(estr); + testCase.verifyEqual(size(X), sub); + testCase.verifyEqual(X.data(end), 1); + testCase.verifyEqual(nnz(X), 1); + end + + function SubsAsgnGrowOrder(testCase, nd, maxdim) + sz = randi(maxdim, 1, nd); + sub = [tt_ind2sub(sz, randi(prod(sz))) 1]; + newsz = [sz 1]; + idx = sub2ind(newsz, sub); + substr = strjoin(arrayfun(@num2str, sub, 'UniformOutput', false),','); + X = tensor(@zeros,sz); + estr = sprintf('X( %s ) = 1;', substr); + eval(estr); + testCase.verifyEqual(size(X), newsz); + testCase.verifyEqual(X(idx), 1); + testCase.verifyEqual(ndims(X), nd+1); + testCase.verifyEqual(nnz(X), 1); + end + + function SubsAsgnListValue(testCase, nd, maxdim, nx) + sz = randi(maxdim, 1, nd); + n = min(nx, prod(sz)); % Number of values in list + idx = randperm(prod(sz)); + idx = idx(1:n)'; + + X = tensor(@zeros,sz); + X(idx) = 1; + testCase.verifyEqual(X(idx, 'extract'), ones(n,1)); + testCase.verifyEqual(nnz(X), n); + + X = tensor(@zeros, sz); + sub = tt_ind2sub(sz, idx); + X(sub) = 1; + testCase.verifyEqual(X(idx, 'extract'), ones(n,1)); + testCase.verifyEqual(nnz(X), n); + end + + function SubsAsgnListArray(testCase, nd, maxdim, nx) + sz = randi(maxdim, 1, nd); + n = min(nx, prod(sz)); % Number of values in list + idx = randperm(prod(sz)); + idx = idx(1:n)'; + + X = tensor(@zeros,sz); + X(idx) = (1:n)'; + testCase.verifyEqual(X(idx, 'extract'), (1:n)'); + testCase.verifyEqual(nnz(X), n); + + X = tensor(@zeros, sz); + sub = tt_ind2sub(sz, idx); + X(sub) = (1:n)'; + testCase.verifyEqual(X(idx, 'extract'), (1:n)'); + testCase.verifyEqual(nnz(X), n); + end + + function SubsAsgnListArrayToEmpty(testCase, nd, maxdim, nx) + sz = randi(maxdim, 1, nd); + n = min(nx, prod(sz)); % Number of values in list + idx = randperm(prod(sz)); + idx = idx(1:n)'; + + % In the case that n = 1, we run into strange problems because + % there is no way to differentiate between a 'list' and a + % subscript when expanding an emtpy tensor. Maybe this can be + % fixed by recognizing that it's a row rather than a column array? + if (n > 1) + X = tensor; + sub = tt_ind2sub(sz, idx); + X(sub) = (1:n)'; + newsz = max(sub,[],1); + newidx = tt_sub2ind(newsz, sub); + testCase.verifyEqual(X(newidx, 'extract'), (1:n)'); + testCase.verifyEqual(nnz(X), n); + end + end + +% function SubsAsgnListArrayError(testCase) +% % Check for a linear index assignment that tries to expand the +% % tensor. The problem is that it's not clear *how* to expand the +% % tensor, so this should fail. +% X = tensor(@zeros, [2 2 2]); %#ok +% testCase.verifyError(@() eval('X(9) = 1;'), 'TTB:BadIndex'); +% end + + function SubsAsgnRange(testCase, nd, maxdim) + sz = randi(maxdim, 1, nd); + X = tensor(@zeros, sz); %#ok + iscolon = rand(nd,1) > 0.3; % Pick modes for range + iscolon(randi(nd)) = 1; % Make sure there's at least one! 
+ newsz = sz; + substrs = cell(nd,1); + for k = 1:nd + if iscolon(k) + if sz(k) == 1 + substrs{k} = ':'; + else + lower = randi(sz(k)-1); + upper = lower+randi(sz(k)-lower); + substrs{k} = sprintf('%d:%d', lower, upper); + newsz(k) = upper - lower + 1; + end + else + substrs{k} = num2str(randi(sz(k))); + end + end + substr = strjoin(substrs,','); + newsz = newsz(iscolon); + estr = ['X(' substr ') = tensor(@ones,newsz);']; + eval(estr); + testCase.verifyEqual(eval(['X(' substr ')']), tensor(@ones,newsz)); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- FIND --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Find(testCase, szs) + X = tenrand(szs); + subs = find(X > 0.5); + idx = find(X.data > 0.5); + % This next step is a workaround since MATLAB's built-in function + % returns a 0x1 array if X.data is nonempty but all zeros. + if isempty(idx) + idx = []; + end + testCase.verifyEqual(tt_sub2ind(size(X),subs), idx); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- TTM: Tensor Times Matrix --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + function TtmOrder(testCase) + % X x_i Mi x_j Mj = X x_j Mj x_i Mi + nd = 4; + sz = [4 3 2 4]; + X = tensor(@rand, sz); + for i = 1:nd + Mi = rand(5, sz(i)); + for j = (i+1):nd + Mj = rand(6, sz(j)); + T1 = ttm( ttm(X,Mj,j), Mi, i); + T2 = ttm( ttm(X,Mi,i), Mj, j); + testCase.verifyEqual(T1.data, T2.data, 'RelTol', 1e-15); + end + end + end + + function TtmSameMode(testCase) + %X x1 A x1 B = X x1 BA + nd = 4; + sz = [4 3 2 4]; + X = tensor(@rand, sz); + for i = 1:nd + A = rand(5, sz(i)); + B = rand(6, 5); + T1 = ttm( ttm(X,A,i), B, i); + T2 = ttm( X, B*A, i); + testCase.verifyEqual(T1.data, T2.data, 'RelTol', 1e-15); + end + end + + function Ttm(testCase) + X = tensor(rand(5,3,4,2)); + A = rand(4,5); + B = rand(4,3); + C = rand(3,4); + D = rand(3,2); + + Y = ttm(X, A, 1); %<-- computes X times A in mode-1 + testCase.verifyEqual(size(Y), [4 3 4 2]); + Z = A * reshape(X.data, 5, []); + testCase.verifyEqual(Y.data(:), Z(:), 'RelTol', 1e-15); + Y2 = ttm(X, {A,B,C,D}, 1); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + Y2 = ttm(X, A', 1, 't'); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + + Y = ttm(X, {A,B,C,D}, [1 2 3 4]); %<-- 4-way multiply + testCase.verifyEqual(size(Y), [4 4 3 3]); + Y2 = ttm(X, {D,C,B,A}, [4 3 2 1]); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + Y2 = ttm(X, {A,B,C,D}); + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + Y2 = ttm(X, {A',B',C',D'}, 't'); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + + Y = ttm(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4 + testCase.verifyEqual(size(Y), [5 3 3 3]); + Y2 = ttm(X, {A,B,C,D}, [3 4]); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + + Y = ttm(X, {A,B,D}, [1 2 4]); %<-- 3-way multiply + testCase.verifyEqual(size(Y), [4 4 4 3]); + Y2 = ttm(X, {A,B,C,D}, [1 2 4]); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + Y2 = ttm(X, {A,B,D}, -3); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + Y2 = ttm(X, {A,B,C,D}, -3); %<-- same as above + testCase.verifyEqual(Y.data, Y2.data, 'RelTol', 1e-15); + end + + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- Symmetrize and Testing Symmetry --- + 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + function Symmetrize(testCase, nd, maxdim) + n = randi(maxdim); + sz = n * ones(1, nd); + X = tensor(@rand, sz); + S = symmetrize(X); + if nd > 1 && prod(sz) > 1 + testCase.verifyFalse(issymmetric(X)); + end + testCase.verifyTrue(issymmetric(S)); + sz = [4 3 2]; + X = tensor(@rand, sz); + testCase.verifyError(@() symmetrize(X),'TTB:Tensor:BadModes'); + end + + function AllSymmetric(testCase,symver,issymver) + m = 3; + n = 4; + sz = n * ones(1,m); + X = tensor(rand(sz), sz); + Y = symmetrize(X,1:m,symver); + testCase.verifyTrue(issymmetric(Y,1:m,issymver)); + end + + function GroupedSymmetries(testCase,symver,issymver) + X = tensor(rand(4,3,3,4)); + Y0 = symmetrize(X,{ [1 4], [2 3]},symver); + testCase.verifyTrue(issymmetric(Y0,{[1 4], [2,3]},issymver)); + end + + function TestIsSymmetric(testCase,issymver) + % Make sure it doesn't say everything is symmetric! + m = 3; + n = 4; + sz = n * ones(1,m); + X = tensor(rand(sz),sz); + testCase.verifyFalse(issymmetric(X,1:m,issymver)); + + sz = [4 3 2]; + X = tensor(rand(sz),sz); + testCase.verifyFalse(issymmetric(X,1:m,issymver)); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- TenFun --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + function Tenfun(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + X = tensor(gen, sz); + Z = tenfun(@(x) x + 1, X); + testCase.verifyEqual(Z, X+1); + + Z = tenfun(@eq, X, 1); + testCase.verifyEqual(Z, tensor(X.data == 1, sz)); + + Y = tensor(gen, sz); + Z = tenfun(@plus, X, Y); + testCase.verifyEqual(Z, X+Y); + + W = tensor(gen,sz); + Z = tenfun(@max, W, X, Y); + T = max([X.data(:), Y.data(:), Z.data(:)], [], 2); + testCase.verifyEqual(Z, tensor(T, sz)); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- Collapse/SCALE --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function CollapseScale(testCase) + X = tenrand([4 4 4]); + Y = collapse(X, [2,3]); + testCase.verifyEqual(Y.data, sum(reshape(X.data, [4 16]), 2)); + testCase.verifyEqual(size(Y), 4); + Z = scale(X, 1./Y, 1); + testCase.verifyEqual(double(collapse(Z, [2 3])), ones(4,1), 'RelTol', 1e-15); + Y = collapse(X, [1 2], @max); + testCase.verifyEqual(Y.data, max(reshape(X.data,[16 4]))'); + testCase.verifyEqual(size(Y), 4); + Z = scale(X, 1./Y, 3); + testCase.verifyEqual(double(collapse(Z, [1 2], @max)), ones(4,1), 'RelTol', 1e-15); + X = tenones([3,4,5]); + S = 10 * [1:5]'; + Y = scale(X,S,3); + testCase.verifyEqual(double(scale(Y,1./S,3)), X.data, 'RelTol', 1e-15); + S = tensor(10 * [1:5]',5); + Y = scale(X,S,3); + testCase.verifyEqual(double(scale(Y,1./S,3)), X.data, 'RelTol', 1e-15); + S = tensor(1:12,[3 4]); + Y = scale(X,S,[1 2]); + testCase.verifyEqual(double(scale(Y,1./S,[1 2])), X.data, 'RelTol', 1e-15); + S = tensor(1:12,[3 4]); + Y = scale(X,S,-3); + testCase.verifyEqual(double(scale(Y,1./S,-3)), X.data, 'RelTol', 1e-15); + S = tensor(1:60,[3 4 5]); + Y = scale(X,S,1:3); + testCase.verifyEqual(double(scale(Y,1./S,1:3)), X.data, 'RelTol', 1e-15); + end + + function CollapseSum(testCase, szs) + X = tenrand(szs); + Y = collapse(X,2:ndims(X)); + Z = X.data; + for j = 2:ndims(X) + Z = sum(Z,j); + end + testCase.verifyEqual(double(Y),Z,'RelTol',1e-14); + end + + function CollapseMax(testCase, szs) + X = tenrand(szs); + Y = collapse(X,1:ndims(X)-1,@max); + Z = X.data; + for j = 1:ndims(X)-1 + Z = max(Z,[],j); + end + if isempty(X.data) + 
testCase.verifyEqual(double(Y),[],'RelTol',1e-14); + else + zsz = [size(X,ndims(X)) 1]; + testCase.verifyEqual(double(Y),reshape(Z, zsz),'RelTol',1e-14); + end + end + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- Contract --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Contract(testCase) + X = tensor(rand(4,3,2)); + Y = tensor(rand(3,2,4)); + Z1 = ttt(X,Y,1,3); %<-- Normal tensor multiplication + Z2 = contract(ttt(X,Y),1,6); %<-- Outer product + contract + testCase.verifyEqual(norm(Z1-Z2), 0, 'AbsTol', 1e-15); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- End --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function End(testCase) + X = tensor(1:24, [4 3 2]); + testCase.verifyEqual(X(end,1,1), X(4,1,1)); + testCase.verifyEqual(X(end,end,1), X(4,3,1)); + testCase.verifyEqual(X(end,end,end), X(4,3,2)); + testCase.verifyEqual(X(end), X(24)); + testCase.verifyEqual(size(X(end,:,:)),[3 2]); + testCase.verifyEqual(size(X(2:end,:,:)),[3 3 2]); + end + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- InnerProd --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Innerprod(testCase, nd, maxdim, gen) + sz = randi(maxdim, 1, nd); + X = tensor(gen, sz); + Y = tensor(gen, sz); + Z = tensor(gen, sz+1); + testCase.verifyEqual(innerprod(X, Y), dot(X.data(:), Y.data(:)), 'RelTol', 1e-14); + testCase.verifyError(@() innerprod(X, Z), 'TTB:UnequalSize') + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- MTTKRP --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Mttkrp(testCase) + X = tensor(rand(2,3,4)); + A = rand(2,6); + B = rand(3,6); + C = rand(4,6); + Y = mttkrp(X, {A,B,C}, 3); + testCase.verifyEqual(size(Y), [4 6]); + Y2 = mttkrp(X, {A,B,[]}, 3); + testCase.verifyEqual(size(Y2), [4 6]); + Z = double(tenmat(X,3))*khatrirao(B,A); + testCase.verifyEqual(Y, Z, 'RelTol', 1e-14); + testCase.verifyEqual(Y2, Z, 'RelTol', 1e-14); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- NVECS --- + % This breaks in nd = 1 and sometimes if the dimension is 1. + % Perhaps something to fix in a future version. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Nvecs(testCase, nd, maxdim) + if nd > 1 && maxdim > 1 + sz = randi(maxdim, 1, nd); + sz = max(sz,2); + X = tensor(@rand, sz); + n = randi(nd); + r = randi(sz(n)); + U = nvecs(X, n, r); + [U1,~,~] = svds(double(tenmat(X,n)),r); + % Greedy sort + for i=1:size(U1,2) + [~, j] = max(abs(U(:,i)'*U1(:,i:end))); + if (j ~= 1) + % Swap to remaining closest match. + U1(:, [i i+j-1]) = U1(:, [i+j-1 i]); + end + if (U(:,i)'*U1(:,i)<0) + % Fix direction. 
+ U1(:,i) = -U1(:,i); + end + end + testCase.verifyEqual(U, U1, 'RelTol', 1e-8); + end + end + + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- PERMUTE --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Permute(testCase) + X = tensor(rand(3,2,4)); + Y = permute(X, [1 3 2]); + testCase.verifyEqual(size(Y), [3 4 2]); + testCase.verifyEqual(Y.data, permute(X.data, [1 3 2])); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- RESHAPE --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Reshape(testCase) + X = tensor(@rand, [4 3 2]); + Y = reshape(X, [2 3 4]); + testCase.verifyEqual(size(Y), [2 3 4]); + testCase.verifyEqual(X.data(:), Y.data(:)); + end + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- SQUEEZE --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Squeeze(testCase) + X = tensor(@rand, [2 1 3]); + Y = squeeze(X); + testCase.verifyEqual(size(Y), [2 3]); + testCase.verifyClass(Y, 'tensor'); + X = tensor(@rand, [1 1]); + Y = squeeze(X); + testCase.verifyEqual(size(Y), [1 1]); + testCase.verifyClass(Y, 'double'); + end + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % --- TTSV/TTV --- + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + function Ttv(testCase) + X = tensor(@rand, [5,3,4,2]); + A = rand(5,1); + B = rand(3,1); + C = rand(4,1); + D = rand(2,1); + + Y = ttv(X, A, 1); %<-- X times A in mode 1 + testCase.verifyEqual(size(Y), [3 4 2]); + testCase.verifyClass(Y, 'tensor'); + Yalt = ttv(X, {A,B,C,D}, 1); %<-- same as above + testCase.verifyEqual(Y.data, Yalt.data, 'RelTol', 1e-15); + + Y = ttv(X, {A,B,C,D}, [1 2 3 4]); %<-- All-mode multiply + testCase.verifyEqual(size(Y), [1 1]); + testCase.verifyClass(Y, 'double'); + Yalt = ttv(X, {D,C,B,A}, [4 3 2 1]); %<-- same as above + testCase.verifyEqual(Y, Yalt); + Yalt = ttv(X, {A,B,C,D}); %<-- same as above + testCase.verifyEqual(Y, Yalt); + + Y = ttv(X, {C,D}, [3 4]); %<-- X times C in mode-3 & D in mode-4 + testCase.verifyEqual(size(Y), [5 3]); + testCase.verifyClass(Y, 'tensor'); + Yalt = ttv(X, {A,B,C,D}, [3 4]); %<-- same as above + testCase.verifyEqual(double(Y), double(Yalt), 'RelTol', 1e-15); + + Y = ttv(X, {A,B,D}, [1 2 4]); %<-- 3-way multiplication + testCase.verifyEqual(size(Y), 4); + testCase.verifyClass(Y, 'tensor'); + Yalt = ttv(X, {A,B,C,D}, [1 2 4]); %<-- same as above + testCase.verifyEqual(double(Y), double(Yalt), 'RelTol', 1e-15); + Yalt = ttv(X, {A,B,D}, -3); %<-- same as above + testCase.verifyEqual(double(Y), double(Yalt), 'RelTol', 1e-15); + Yalt = ttv(X, {A,B,C,D}, -3); %<-- same as above + testCase.verifyEqual(double(Y), double(Yalt), 'RelTol', 1e-15); + end + + + end + + + +end \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tests/sptensor_small.tns b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/sptensor_small.tns new file mode 100644 index 0000000..4aa99bf --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tests/sptensor_small.tns @@ -0,0 +1,7 @@ +sptensor +3 +3 3 3 +3 +1 1 1 1 +2 2 2 2 +3 3 3 3 diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fg.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fg.m new file mode 100644 index 0000000..3fc5d4e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fg.m @@ -0,0 +1,79 @@ +function [f,G] = tt_cp_fg(Z,A,Znormsqr) +%TT_CP_FG Computes function 
and gradient of the CP function. +% +% [F,G] = TT_CP_FG(Z,A) calculates F = (1/2) ||Z - ktensor(A)||^2 where +% Z is an N-way tensor and A is a ktensor or a cell array with N +% factor matrices. It also calculates the gradient of the CP fit +% function where Z is an N-way tensor and A is a ktensor or a +% cell array with N factor matrices. The result is also a cell +% array with N factor matrices corresponding to the gradients; in +% other words, G{n}(:,r) is the partial derivative of the fit +% function with respect to A{n}(:,r). +% +% [F,G] = TT_CP_FG(Z,A,NORMZSQR) also passes in the pre-computed +% norm of Z, which makes the computations faster. +% +% See also CP_OPT, TT_CP_FUN. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + + +%% Set-up +% if ~isa(Z,'tensor') && ~isa(Z,'sptensor') +% error('Z must be a tensor or a sptensor'); +% end +N = ndims(Z); + +if ~iscell(A) && ~isa(A,'ktensor'); + error('A must be a cell array or ktensor'); +end + +if isa(A,'ktensor') + A = tocell(A); +end +R = size(A{1},2); + +%% Upsilon and Gamma +Upsilon = cell(N,1); +for n = 1:N + Upsilon{n} = A{n}'*A{n}; +end + +Gamma = cell(N,1); +for n = 1:N + Gamma{n} = ones(R,R); + for m = [1:n-1,n+1:N] + Gamma{n} = Gamma{n} .* Upsilon{m}; + end +end + + +%% Calculation + +%F1 +if exist('Znormsqr','var') + f_1 = Znormsqr; +else + f_1 = norm(Z)^2; +end + +%% Calculate gradient and F2 +G = cell(N,1); +U = mttkrp(Z,A,1); +V = A{1} .* U; +f_2 = sum(V(:)); +G{1} = -U + A{1}*Gamma{1}; +for n = 2:N + U = mttkrp(Z,A,n); + G{n} = -U + A{n}*Gamma{n}; +end + +%F3 +W = Gamma{1} .* Upsilon{1}; +f_3 = sum(W(:)); + +%SUM +f = 0.5 * f_1 - f_2 + 0.5 * f_3; + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fun.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fun.m new file mode 100644 index 0000000..cdfdd63 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_fun.m @@ -0,0 +1,21 @@ +function [f,g] = tt_cp_fun(x,Z,Znormsqr) +%TT_CP_FUN Calculate function and gradient for CP fit function. +% +% [F,G] = TT_CP_FUN(X,Z) where X is a vector containing the entries of the +% components of the model and Z is the tensor to be fit. +% +% See also TT_CP_VEC_TO_FAC, TT_FAC_TO_VEC, TT_CP_FG, CP_OPT +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Convert x to a cell array of matrices +A = tt_cp_vec_to_fac(x,Z); + +%% Call cp_fit and cp_gradient using cp_fg +[f,G] = tt_cp_fg(Z,A,Znormsqr); + +%% Convert a cell array to a vector +g = tt_fac_to_vec(G); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_vec_to_fac.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_vec_to_fac.m new file mode 100644 index 0000000..265e2f8 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_cp_vec_to_fac.m @@ -0,0 +1,26 @@ +function A = tt_cp_vec_to_fac(x,Z) +%TT_CP_VEC_TO_FAC Converts a vector to a cell array of factor matrices. +% +% A = TT_CP_VEC_TO_FAC(X,Z) converts the vector X into a cell array +% of factor matrices consistent with the size of the tensor Z. +% +% See also TT_FAC_TO_VEC, TT_CP_FUN, TT_CP_OPT. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
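+
+% Example (informal sketch): for a 4 x 3 x 2 tensor Z and a rank-2 model,
+% X has length 2*(4+3+2) = 18 and the result is A{1} (4x2), A{2} (3x2) and
+% A{3} (2x2); the routine is the inverse of TT_FAC_TO_VEC:
+%   A  = {rand(4,2), rand(3,2), rand(2,2)};
+%   x  = tt_fac_to_vec(A);
+%   A2 = tt_cp_vec_to_fac(x, tenrand([4 3 2]));   % A2{n} matches A{n}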
+ + +%% Set-up +P = length(x); +N = ndims(Z); +sz = size(Z); + +%% Determine R +R = P / sum(sz); + +%% Create A +A = cell(N,1); +for n = 1:N + idx1 = sum(sz(1:n-1))*R + 1; + idx2 = sum(sz(1:n))*R; + A{n} = reshape(x(idx1:idx2),sz(n),R); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_dimscheck.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_dimscheck.m new file mode 100644 index 0000000..e0826c2 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_dimscheck.m @@ -0,0 +1,69 @@ +function [sdims,vidx] = tt_dimscheck(dims,N,M) +%TT_DIMSCHECK Used to preprocess dimensions tensor dimensions. +% +% NEWDIMS = TT_DIMCHECK(DIMS,N) checks that the specified dimensions +% are valid for a tensor of order N. If DIMS is empty, then +% NEWDIMS=1:N. If DIMS is negative, then NEWDIMS is everything +% but the dimensions specified by -DIMS. Finally, NEWDIMS is +% returned in sorted order. +% +% [NEWDIMS,IDX] = TT_DIMCHECK(DIMS,N,M) does all of the above but +% also returns an index for M muliplicands. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +% Fix empty case +if isempty(dims) + dims = 1:N; +end + +% Fix "minus" case +if (max(dims) < 0) + % Check that every member of dims is in 1:N + tf = ismember(-dims,1:N); + if min(tf) == 0 + error('Invalid dimensions specified'); + end + dims = setdiff(1:N, -dims); +end + +% Check that every member of dims is in 1:N +tf = ismember(dims,1:N); +if min(tf) == 0 + error('Invalid dimensions specified'); +end + +% Save the number of dimensions in dims +P = length(dims); + +% Reorder dims from smallest to largest (this matters in particular +% for the vector multiplicand case, where the order affects the +% result) +[sdims,sidx] = sort(dims,'ascend'); + +if (nargout == 2) + % Can't have more multiplicands them dimensions + if (M > N) + error('Cannot have more multiplcands than dimensions'); + end + + % Check that the number of mutliplicands must either be + % full-dimensional (i.e., M==N) or equal to the number of specified + % dimensions (i.e., M==P). + if (M ~= N) && (M ~= P) + error('Invalid number of multiplicands'); + end + + % Check sizes to determine how to index multiplicands + if (P == M) + % Case 1: Number of items in dims and number of multiplicands + % are equal; therefore, index in order of how sdims was sorted. + vidx = sidx; + else + % Case 2: Number of multiplicands is equal to the number of + % dimensions in the tensor; therefore, index multiplicands by + % dimensions specified in dims argument. + vidx = sdims; % index multiplicands by (sorted) dimension + end +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_fac_to_vec.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_fac_to_vec.m new file mode 100644 index 0000000..5787152 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_fac_to_vec.m @@ -0,0 +1,29 @@ +function x = tt_fac_to_vec(A) +%TT_FAC_TO_VEC Converts a set of factor matrices to a vector. +% +% X = TT_FAC_TO_VEC(A) converts a cell array of factor matrices A to a +% vector by vectorizing each matrix and stacking them. +% +% See also TT_CP_VEC_TO_FAC, TT_CP_FUN, CP_OPT. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
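+
+% Layout note (informal): the loop below simply stacks the vectorized
+% factor matrices in order,
+%   x = [A{1}(:); A{2}(:); ... ; A{N}(:)];
+% so length(x) = R*sum(sz), consistent with TT_CP_VEC_TO_FAC.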
+ + +%% Set-up +N = length(A); + +%% Get sizes +sz = zeros(N,1); +for n = 1:N + sz(n) = size(A{n},1); +end +R = size(A{1},2); +P = sum(sz)*R; + +%% Create x +x = zeros(P,1); +for n = 1:N + idx1 = sum(sz(1:n-1))*R + 1; + idx2 = sum(sz(1:n))*R; + x(idx1:idx2) = reshape(A{n},sz(n)*R,1); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg.m new file mode 100644 index 0000000..ea2609c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg.m @@ -0,0 +1,104 @@ +function [F,G] = tt_gcp_fg(M, X, f, g, W, computeF, computeG, vectorG) +%TT_GCP_FG Loss function and gradient for generalized CP. +% +% F = TT_GCP_FG(M,X,f) expects that M is a ktensor and X is a +% dense tensor. The f is a function handle of the form f(x,m) that +% measures the elementwise loss for a data entry x and corresponding +% model entry m. The function should be able to accept vector inputs to +% do evaluations in bulk. +% +% [F,G] = TT_GCP_FG(M,X,f,g) also computes the gradient. Here g is the +% gradient of f and has the same form. The G is returned as a cell array +% where G{k} is a matrix that is the same size as M.u{k} (the k-th factor +% matrix). +% +% [F,G] = TT_GCP_FG(M,X,f,g,W) specifies a weight tensor where W is a tensor +% that is the same size as X. It should have 1's for known values and 0's +% for missing values. The function/gradient is only computed w.r.t. the +% known values. Setting W to [] indicated no missing data. +% +% See also GCP_OPT, TT_GCP_FG_SETUP. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + +%% Hidden options +% +% G = GCP_FG(M,X,f,g,W,false,true) computes only the gradient. +% +% G = GCP_FG(M,X,f,g,W,false,true,true) computes only the gradient and +% converts it to a vector (equivalent to the tovec operation on a +% ktensor). +% +% [F,G] = GCP_FG(M,X,f,g,W,true,true,true) computes boths the function +% and the gradient and converts the gradient to vector form. +% + +%% Parse inputs +if nargin < 8 + + if ~exist('W','var') + W = []; + end + + if ~exist('computeF','var') + computeF = true; + end + + if ~exist('computeG','var') + computeG = (nargout > 1); + end + + if ~exist('vectorG','var') + vectorG = false; + end + +end + +%% Setup +Mfull = full(M); +Mv = Mfull(:); +Xfull = full(X); +Xv = Xfull(:); +F = []; +G = []; + +%% Calculate function value +if computeF + + Fvec = f(Xv, Mv); % F is a vector + + if ~isempty(W) + Fvec = W(:).*Fvec; % be sure to zero out any unknown entries + end + + F = sum(Fvec); + +end + +%% QUIT IF ONLY NEED FUNCTION EVAL +if ~computeG + return; +end + +%% Gradient calculation +Y = g(Xv,Mv); % Result is a vector +Y = tensor(Y,size(X)); +if ~isempty(W) + Y = W.*Y; +end + +%% Gradient wrt U's using MTTKRP sequence. 
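+% Informal note: each G{k} is (up to the bookkeeping done by mttkrps) the
+% MTTKRP of the elementwise-gradient tensor Y with the factor matrices in
+% all modes but k, i.e. roughly G{k} = mttkrp(Y, M.u, k) for each mode k;
+% mttkrps evaluates the whole sequence at once.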
+G = mttkrps(Y,M.u); + +%% Assemble gradient +if vectorG + G = cell2mat(cellfun(@(x) x(:), G, 'UniformOutput', false)); +end + +%% If not computing F, set F (the 1st return arugment) to be the gradient +if ~computeF + F = G; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_est.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_est.m new file mode 100644 index 0000000..ab72111 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_est.m @@ -0,0 +1,148 @@ +function [F, G] = gcp_fg_est(M, fh, gh, subs, xvals, weights, computeF, computeG, vectorG, LambdaCheck, crng) +%GCP_FG_EST Estimate the GCP function and gradient with a subsample +% +% [F,G] = GCP_FG_EST(M, FH, GH, XSUBS, XVALS, WVALS) estimates the GCP +% function and gradient specified by FH and GH for M and X. In this case, +% we have only a portion of X as specified by XSUBS and XVALS along with +% the corresponding sampling weights in WVALS that are used in the estimate. +% +% See also GCP_SGD, GCP_FG, GCP_FG_SETUP. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + +%% Hidden options +% +% Note that there are five hidden options. The first three are similar to +% the hidden options for gcp_fg and the fourth is whether or not to verify +% that M has lambda = [1,1,...,1], which is assumed and so should +% generally be checked unless the user is absolutely sure it's okay. + +%% Parse inputs +if nargin < 11 + + if ~exist('computeF','var') + computeF = true; + end + + if ~exist('computeG','var') + computeG = (nargout > 1); + end + + if ~exist('vectorG','var') + vectorG = false; + end + + if ~exist('LambdaCheck','var') + LambdaCheck = true; + end + + % Specify range for correction/adjustment when nonzeros may be included + % in the "zero" sample. In this case, crng should be the indices + % of nonzero samples, which are the ones that are adjusted. + if ~exist('idx','var') + crng = []; + end + +end + +%% Input checks (keep minimal for timing's sake) + +d = ndims(M); +sz = size(M); +F = []; +G = []; + +if LambdaCheck && ~all(M.lambda == 1) + warning('Fixing M to have all ones for lambda'); + M = normalize(M,1); +end + +%% Compute model values and exploded Zk matrices +[mvals, Zexp] = gcp_fg_est_helper(M.u, subs); + +%% Compute function value +if computeF + Fvec = fh(xvals,mvals); + if ~isempty(crng) + Fvec(crng) = Fvec(crng) - fh(0,mvals(crng)); + end + F = sum( weights .* Fvec ); +end +if ~computeG + return; +end + +%% Compute sample y values +yvals = weights .* gh(xvals, mvals); +if ~isempty(crng) + yvals(crng) = yvals(crng) - weights(crng) .* gh(0, mvals(crng)); +end + +%% Compute function and gradient +G = cell(d,1); +nsamples = size(subs,1); +for k=1:d + % The row of each element is the row index to accumulate in the + % gradient. The column indices are corresponding samples. They are + % in order because they match the vector of samples to be + % multiplied on the right. + S = sparse(subs(:,k), (1:nsamples)', yvals, sz(k), nsamples, nsamples); + G{k} = S * Zexp{k}; +end + +% Convert to single vector +if vectorG + G = cell2mat(cellfun(@(x) x(:), G, 'UniformOutput', false)); +end + +%% If not computing F, set F (the 1st return arugment) to be the gradient +if ~computeF + F = G; +end + +function [mvals, Zexp] = gcp_fg_est_helper(factors, subs) +% GCP_FG_EST_HELPER Model values at sample locations and exploded Zk's. + +% Created by Tamara G. Kolda, Sept. 2018. 
Includes prior work by +% collaborators David Hong and Jed Duersch. + +% Check for empty +if isempty(subs) + mvals = []; + return; +end + +% Process inputs +d = size(subs,2); + +% Create exploded U's from the model factor matrices +Uexp = cell(d,1); +for k = 1:d + Uexp{k} = factors{k}(subs(:,k),:); +end + +% After this pass, +% Zexp{k} = Hadarmard product of Uexp{1} through Uexp{k-1} +% for k = 2,...,d. +Zexp = cell(1,d); +Zexp{2} = Uexp{1}; +for k = 3:d + Zexp{k} = Zexp{k-1} .* Uexp{k-1}; +end + +% After this pass, +% Zexp{k} = Hadamard product of Uexp{1} though Uexp{d}, except Uexp{k} +% for k = 1,...,d. +Zexp{1} = Uexp{d}; +for k=d-1:-1:2 + Zexp{k} = Zexp{k} .* Zexp{1}; + Zexp{1} = Zexp{1} .* Uexp{k}; +end + +% Compute model values at sample locations +mvals = sum(Zexp{d} .* Uexp{d},2); + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_setup.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_setup.m new file mode 100644 index 0000000..fd5abf1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_gcp_fg_setup.m @@ -0,0 +1,150 @@ +function [fh,gh,lowerbnd] = gcp_fg_setup(type,X) +%GCP_FG_SETUP Sets the GCP functions according to specified name. +% +% [F,G,L] = GCP_FG_SETUP(TYPE,X) returns the function and gradient +% function as well as the lower bound for different types of objective +% functions. It also checks that X satisfies the standard constraints for +% that choice. The valid types are: +% +% - 'normal' or 'Gaussian' +% - 'binary' or 'Bernoulli-odds' +% - 'Bernoulli-logit' +% - 'count' or 'Poisson' +% - 'Poisson-log' +% - 'Rayleigh' +% - 'Gamma' +% +% [F,G,L] = GCP_FG_SETUP(TYPE,X) works for types that require a +% parameter, which is specified inside the type string. +% +% - 'negative-binomial (number of failures)' +% - 'Huber (delta threshold)' +% - 'beta-divergence (beta)' +% +% Details of the functions can be found in D. Hong, T. G. Kolda, J. A. Duersch. +% Generalized Canonical Polyadic Tensor Decomposition. arXiv:1808.07452, +% 2018. +% +% Documentation page +% +% See also GCP_OPT, GCP_FG, GCP_FG_EST. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
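+
+% Example (illustrative sketch):
+%   [fh,gh,lb] = tt_gcp_fg_setup('normal');
+%   % here fh(x,m) = (m-x).^2, gh(x,m) = 2*(m-x), lb = -Inf
+%   [fh,gh,lb] = tt_gcp_fg_setup('huber (0.25)');  % type with a parameter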
+ + +tmp = split(type); +type = tmp{1}; +if length(tmp)>1 + param = str2double(tmp{2}(2:end-1)); +end + +switch lower(type) + case {'normal','gaussian'} + fh = @(x,m) (m-x).^2; + gh = @(x,m) 2.*(m-x); + lowerbnd = -Inf; + case {'binary', 'bernoulli-odds'} + if exist('X','var') && ~valid_binary(X) + warning('Using ''%s'' type but tensor X is not binary', type); + end + fh = @(x,m) log(m+1) - x.*log(m + 1e-10); + gh = @(x,m) 1./(m+1) - x./(m + 1e-10); + lowerbnd = 0; + case {'bernoulli-logit'} + if exist('X','var') && ~valid_binary(X) + warning('Using ''%s'' type but tensor X is not binary', type); + end + fh = @(x,m) log(exp(m) + 1) - x .* m; + gh = @(x,m) exp(m)./(exp(m) + 1) - x; + lowerbnd = -Inf; + case {'count','poisson'} + if exist('X','var') && ~valid_natural(X) + warning('Using ''%s'' type but tensor X is not counts', type); + end + fh = @(x,m) m - x.*log(m + 1e-10); + gh = @(x,m) 1 - x./(m + 1e-10); + lowerbnd = 0; + case {'poisson-log'} + if exist('X','var') && ~valid_natural(X) + warning('Using ''%s'' type but tensor X is not counts', type); + end + fh = @(x,m) exp(m) - x.*m; + gh = @(x,m) exp(m) - x; + lowerbnd = -Inf; + case 'rayleigh' + if exist('X','var') && ~valid_nonneg(X) + warning('Using ''%s'' type but tensor X is not nonnegative', type); + end + fh = @(x,m) 2*log(m+1e-10) + (pi/4)*(x./(m+1e-10)).^2; + gh = @(x,m) 2./(m+1e-10) - (pi/2)*x.^2./(m+1e-10).^3; + lowerbnd = 0; + case 'gamma' + if exist('X','var') && ~valid_nonneg(X) + warning('Using ''%s'' type but tensor X is not nonnegative', type); + end + fh = @(x,m) x./(m+1e-10) + log(m+1e-10); + gh = @(x,m) -x./((m+1e-10).^2) + 1./(m+1e-10); + lowerbnd = 0; + case 'huber' + if ~exist('param','var') + error('Need to specify threshold') + end + d = param; + eval(sprintf('fh = @(x,m) (x-m).^2 .* (abs(x-m) < %g) + (%g .* abs(x-m)- %g) .* (abs(x-m) >= %g);',d,2*d,d^2,d)); + eval(sprintf('gh = @(x,m) -2.*(x-m) .* (abs(x-m) < %g) - (%g.*sign(x-m)) .* (abs(x-m) >= %g);',d,2*d,d)); + lowerbnd = -Inf; + case 'negative-binomial' + if exist('X','var') && ~valid_nonneg(X) + warning('Using ''%s'' type but tensor X is not nonnegative', type); + end + if ~exist('param','var') + error('Need to specify number of trials') + end + r = param; + eval(sprintf('fh = @(x,m) (%d+x) .* log(1+m) - x * log(m+1e-10);',r)); + eval(sprintf('gh = @(x,m) (%d)./(1+m) - x./(m+1e-10);',r+1)); + lowerbnd = 0; + case 'beta' + if exist('X','var') && ~valid_nonneg(X) + warning('Using ''%s'' type but tensor X is not nonnegative', type); + end + if ~exist('param','var') + error('Need to specify beta') + end + b = param; +% eval(sprintf('fh = @(x,m) (1/%g) .* (m+1e-10).^%g - (1/(%g-1)) .* x .* (m+1e-10).^(%g-1);',b,b,b,b)); +% eval(sprintf('gh = @(x,m) (m+1e-10).^(%g-1) - x.*(m+1e-10).^(%g-2);',b,b)); + eval(sprintf('fh = @(x,m) (%g) .* (m+1e-10).^(%g) - (%g) .* x .* (m+1e-10).^(%g);',1/b,b,1/(b-1),b-1)); + eval(sprintf('gh = @(x,m) (m+1e-10).^(%g) - x.*(m+1e-10).^(%g);',b-1,b-2)); + lowerbnd = 0; + otherwise + error('Unknown type: %s', type); +end + +function tf = valid_nonneg(X) + +if isa(X,'sptensor') + tf = all(X.vals > 0); +else + tf = all(X(:) > 0); +end + +function tf = valid_binary(X) + +if isa(X,'sptensor') + tf = all(X.vals == 1); +else + tf = isequal(unique(X(:)),[0;1]); +end + +function tf = valid_natural(X) + +if isa(X, 'sptensor') + vals = X.vals; +else + vals = X(:); +end + +tf = all(vals >= 0) && all(vals == round(vals)); \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub.m 
b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub.m new file mode 100644 index 0000000..3b06549 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub.m @@ -0,0 +1,24 @@ +function subs = tt_ind2sub(siz,idx) +%TT_IND2SUB Multiple subscripts from linear indices. +% +% SUBS = TT_IND2SUB(SIZ,INDS) returns that subscripts equivalent +% to the linear indices in INDS for a tensor of size SIZ. +% +% See also TT_SUB2IND, IND2SUB. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if isempty(idx) + subs = []; + return; +end + +k = [1 cumprod(siz(1:end-1))]; +n = length(siz); +idx=idx-1; +for i = n : -1 : 1 + div=floor(idx/k(i)); + subs(:,i) = div+1; + idx=idx-k(i)*div; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub64.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub64.m new file mode 100644 index 0000000..a09f2af --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_ind2sub64.m @@ -0,0 +1,27 @@ +function subs = tt_ind2sub64(siz,idx) +%TT_IND2SUB Multiple subscripts from linear indices. +% +% SUBS = TT_IND2SUB(SIZ,INDS) returns that subscripts equivalent +% to the linear indices in INDS for a tensor of size SIZ. +% +% See also TT_SUB2IND, IND2SUB, STRATIFIED_SAMPLE. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. + + +if isempty(idx) + subs = []; + return; +end + +k = uint64([1 cumprod(siz(1:end-1))]); +n = length(siz); +idx=idx-1; +for i = n : -1 : 1 + div=idivide(idx,k(i),'floor'); + subs(:,i) = div+1; + idx=idx-k(i)*div; +end +subs = double(subs); \ No newline at end of file diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_intvec2str.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_intvec2str.m new file mode 100644 index 0000000..76ea206 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_intvec2str.m @@ -0,0 +1,12 @@ +function s = tt_intvec2str(v) +%TT_INTVEC2STR Print integer vector to a string with brackets. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if isempty(v) + s = sprintf('[]'); + return; +end + +s = ['[ ' sprintf('%d ',v(1:end)) ']']; diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_matrix2cellstr.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_matrix2cellstr.m new file mode 100644 index 0000000..f676e9b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_matrix2cellstr.m @@ -0,0 +1,17 @@ +function S = tt_matrix2cellstr(M) +%TT_MATRIX2CELLSTR Convert a matrix to a cell array of strings. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +fmt = get(0,'FormatSpacing'); +format compact +S = evalc('disp(M)'); +if isempty(S) + S = {''}; + return; +end +set(0,'FormatSpacing',fmt) +S = textscan(S,'%s','delimiter','\n','whitespace',''); +S = S{1}; +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_nonzeros.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_nonzeros.m new file mode 100644 index 0000000..0f90411 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_nonzeros.m @@ -0,0 +1,40 @@ +function [subs,vals] = tt_sample_nonzeros(X, nns, with_replacement) +%TT_SAMPLE_NONZEROS Sample nonzeros from a sparse tensor. +% +% [VALS,SUBS] = TT_SAMPLE_NONZEROS(X,N) finds N random nonzero entries +% (uniformly with replacement) in the sparse tensor X. It returns +% VALS, the values, and SUBS, the corresponding subscripts. Throws an +% error if N > nnz(X). +% +% See also SAMPLE_STRATIFIED. 
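+%
+% Example (sketch):
+%   [subs,vals] = tt_sample_nonzeros(X, 1000);          % with replacement
+%   [subs,vals] = tt_sample_nonzeros(X, 1000, false);   % without replacement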
+% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + +% Created by Tamara G. Kolda, Sept. 2018. + +%% Input checks +if nargin < 3 + with_replacement = true; +end + +%% Setup +nnx = nnz(X); + +%% Select nonzeros +if nns == nnx + nidx = 1:nnx; +elseif with_replacement + nidx = randi(nnx, nns,1); +else + if nns > nnx + error('Tensor does not have enough nonzeros to sample'); + end + nidx = randperm(nnx,nns); +end + +%% Extract subscripts and values +subs = X.subs(nidx,:); +vals = X.vals(nidx); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_semistrat.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_semistrat.m new file mode 100644 index 0000000..6e91e06 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_semistrat.m @@ -0,0 +1,41 @@ +function [subs, xvals, weights] = tt_sample_semistrat(X, nnzs, nzrs) +%TT_SAMPLE_OVERLAPPED Sample nonzero and zero entries from a sparse tensor. +% +% [SUBS,VALS,WGTS] = TT_SAMPLE_OVERLAPPED(X,NNZ,NZR) creates a +% stratifies sample of nonzero and zero entries of the sparse tensor X. +% +% Example +% [subs,vals,wgts] = tt_sample_overlapped(X,1000,1000); +% [f,G] = tt_gcp_fg_est(M,fh,gh,subs,vals,wgts,true,true,true,false,1000); +% +% See also GCP_OPT, TT_GCP_FG_EST, TT_SAMPLE_UNIFORM. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + +%% Setup +d = ndims(X); +sz = size(X); +nelx = prod(sz); % Number of elements in X +nnzx = nnz(X); % Number of Nonzeros in X +nzrx = nelx - nnzx; % Number of Zeros in X +with_replacement = true; + +%% Sample nonzeros +[nonzero_subs, nonzero_xvals] = tt_sample_nonzeros(X,nnzs,with_replacement); +nonzero_weights = (nnzx / nnzs) * ones(nnzs,1); + + +%% Sample 'zeros' +zero_subs = bsxfun(@(a,b)ceil(a.*b), rand(nzrs,d), sz); +zero_xvals = zeros(nzrs,1); +zero_weights = nelx / nzrs * ones(nzrs,1); + +%% Assemble "Sample" +subs = [nonzero_subs; zero_subs]; +xvals = [nonzero_xvals; zero_xvals]; +weights = [nonzero_weights; zero_weights]; + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_stratified.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_stratified.m new file mode 100644 index 0000000..ee990e3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_stratified.m @@ -0,0 +1,48 @@ +function [subs, xvals, weights] = tt_sample_stratified(X, xnzidx, nnzs, nzrs, oversample) +%TT_SAMPLE_STRATIFIED Sample nonzero and zero entries from a sparse tensor. +% +% [SUBS,VALS,WGTS] = TT_SAMPLE_STRATIFIED(X,NZIDX,NNZ,NZR) creates a +% stratifies sample of nonzero and zero entries of the sparse tensor X. +% The NZIDX is the sorted 64-bit linear indices of the nonzeros in X. +% This is required for efficients of the sampling. The values NNZ and NZR +% specify the desired number of nonzero and zero samples, respectively. +% +% Example +% nzidx = tt_sub2ind64(sz,X.subs); +% nzidx = sort(nzidx); +% [subs,vals,wgts] = tt_sample_stratified(X,nzidx,1000,1000); +% [f,G] = tt_gcp_fg_est(M,fh,gh,subs,vals,wgts); +% +% See also GCP_OPT, TT_GCP_FG_EST, TT_SAMPLE_UNIFORM. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. 
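+
+% Informal note on the weights: nonzero samples are weighted by nnz(X)/NNZ
+% and zero samples by (prod(size(X))-nnz(X))/NZR, so a weighted sum of
+% per-sample losses estimates the objective over all entries.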
+ +%% Setup +sz = size(X); +nelx = prod(sz); % Number of elements in X +nnzx = nnz(X); % Number of Nonzeros in X +nzrx = nelx - nnzx; % Number of Zeros in X +with_replacement = true; + +if nargin < 5 + oversample = 1.1; +end + +%% Sample nonzeros +[nonzero_subs, nonzero_xvals] = tt_sample_nonzeros(X,nnzs,with_replacement); +nonzero_weights = (nnzx / nnzs) * ones(nnzs,1); + +%% Sample zeros +zero_subs = tt_sample_zeros(X,xnzidx,nzrs,oversample,with_replacement); +zero_xvals = zeros(nzrs,1); +zero_weights = (nzrx / nzrs) * ones(nzrs,1); + +%% Assemble "Sample" +subs = [nonzero_subs; zero_subs]; +xvals = [nonzero_xvals; zero_xvals]; +weights = [nonzero_weights; zero_weights]; + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_uniform.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_uniform.m new file mode 100644 index 0000000..960b9d3 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_uniform.m @@ -0,0 +1,29 @@ +function [subs,vals,wgts] = tt_sample_uniform(X,nsamp) +%TT_SAMPLE_UNIFORM Uniformly sample indices from a tensor. +% +% [SUBS,VALS,WGTS] = TT_SAMPLE_UNIFORM(X,N) samples N indices uniformly at +% random from X, along with the corresponding values and the weight of +% the sample. This is for use with stochastic optimization in GCP_OPT. +% +% See also GCP_OPT, TT_SAMPLE_STRATIFIED. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. + + +%% Setup +d = ndims(X); +sz = size(X); +tsz = prod(sz); % Number of entries in X + +%% Subscripts +subs = bsxfun(@(a,b)ceil(a.*b), rand(nsamp,d), sz); + +%% Values +vals = X(subs); + +%% Weights +wgts = tsz / nsamp * ones(nsamp,1); + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_zeros.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_zeros.m new file mode 100644 index 0000000..f9fa619 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sample_zeros.m @@ -0,0 +1,101 @@ +function subs = tt_sample_zeros(X,Xnzidx,nsamps,oversample,with_replacement) +%TT_SAMPLE_ZEROS Sample zero entries from a sparse tensor. +% +% SUBS = TT_SAMPLE_ZEROS(X,NZIDX,N) finds N random zero entries (uniformly +% with replacement) in the sparse tensor X, where the zeros are not +% stored explicitly. The NZIDX is the sorted linear indices of the +% nonzeros in X. The return value SUBS is a list of subscripts of zero +% entries. The procedure automatically determines how much it needs to +% oversample to find N nonzero entries and prints a warning if it fails +% to do so. +% +% SUBS = TT_SAMPLE_ZEROS(X,N,OR) is the same as above except that OR > 1 +% specifies the oversample rate. The default is 1.1, but this can be +% increased if the method has trouble getting enough samples which can +% happen when N is relatively large compared ot the number of zeros in X. +% +% See also SAMPLE_STRATIFIED. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% Created by Tamara G. Kolda, Fall 2018. Includes work with +% collaborators David Hong and Jed Duersch. 
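+
+% Example (sketch; callers such as TT_SAMPLE_STRATIFIED pass all five
+% arguments explicitly, which is the safest way to call this routine
+% given the nargin checks below):
+%   nzidx = sort(tt_sub2ind64(size(X), X.subs));
+%   zsubs = tt_sample_zeros(X, nzidx, 500, 1.1, true);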
+ + +%% Setup +d = ndims(X); +sz = size(X); +nelx = prod(sz); % Number of entries in X +nnzx = length(Xnzidx); % Number of Nonzeros in X +nzrx = nelx - nnzx; % Number of Zeros in X + +if nargin < 3 + oversample = 1.1; +elseif oversample < 1.1 + error('Oversampling rate must be >= 1.1'); +end + +if nargin < 4 + with_replacement = true; +end + + +%% Determine the number of samples to generate +% We need to oversample to account for potential duplicates and for +% nonzeros we may pick. + +if ~with_replacement && (nsamps > nzrx) + error('Cannot sample more than the total number of zeros'); +end + +% Save the requested number of zeros +nsamps_requested = nsamps; + +% First, determine the number of samples we need to account for the fact +% that some samples will be nonzeros and so discarded. +ntmp1 = ceil(nsamps * nelx / nzrx); + +% Error check +if ~with_replacement && (ntmp1 >= nelx) + error('Need too many zero samples for this to work'); +end + +% Second, determine number of samples given that some will be duplicates, +% via coupon collector problem. This only matters if sampling with +% replacement. + +if with_replacement + ntmp2 = ntmp1; +else + ntmp2 = ceil(nelx * log(1/(1-(ntmp1/nelx)))); +end + +% Finally, add a margin of safety by oversampling +nsamps = ceil(oversample * ntmp2); + +%% Generate the actual samples, removing duplicates, nonzeros, and excess + +% Subscripts +tmpsubs = bsxfun(@(a,b)ceil(a.*b), rand(nsamps,d), sz); + +if ~with_replacement + tmpsubs = unique(tmpsubs,'rows','stable'); +end + +% Select out just the zeros +tmpidx = tt_sub2ind64(sz, tmpsubs); +iszero = ~builtin('_ismemberhelper',tmpidx,Xnzidx); +tmpsubs = tmpsubs(iszero,:); + +% Trim back to desired number of samples +nsamps = min(size(tmpsubs,1), nsamps_requested); + + +%% Final return values +% Warn if too few entries +if (nsamps < nsamps_requested) + warning('Unable to get the desired number of zero samples, %d (obtained) versus %d (requested)', nsamps, nsamps_requested); +end + +% Allocate and fill subscript-value pairs. +subs = tmpsubs(1:nsamps,:); diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_size2str.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_size2str.m new file mode 100644 index 0000000..ca3793f --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_size2str.m @@ -0,0 +1,17 @@ +function s = tt_size2str(sz) +%TT_SIZE2STR Convert size to a string that can be printed. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if isempty(sz) + s = sprintf('[empty tensor]'); + return; +end + +if numel(sz) == 1 + s = sprintf('%d',sz); +else + s = [sprintf('%d x ',sz(1:end-1)) sprintf('%d', sz(end)) ]; +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sizecheck.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sizecheck.m new file mode 100644 index 0000000..3b2a057 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sizecheck.m @@ -0,0 +1,28 @@ +function ok = tt_sizecheck(siz) +%TT_SIZECHECK Checks that the size is valid. +% +% TT_SIZECHECK(S) throws an error if S is not a valid size array, +% which means that it is a row vector with strictly postitive, +% real-valued, finite integer values. +% +% X = TT_SIZECHECK(S) returns true if S is a valid and false otherwise. +% +% See also TT_SUBSCHECK. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if ndims(siz) == 2 && size(siz,1) == 1 ... + && isreal(siz) ... + && ~any(isnan(siz(:))) && ~any(isinf(siz(:))) ... 
+ && isequal(siz,round(siz)) && all(siz(:) > 0) + ok = true; +elseif isempty(siz) + ok = true; +else + ok = false; +end + +if ~ok && nargout == 0 + error('Size must be a row vector of real positive integers'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind.m new file mode 100644 index 0000000..f090f7c --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind.m @@ -0,0 +1,20 @@ +function idx = tt_sub2ind(siz,subs) +%TT_SUB2IND Converts multidimensional subscripts to linear indices. +% +% INDS = TT_SUB2IND(SIZ,SUBS) returns the linear indices +% equivalent to the subscripts in the array SUBS for a tensor of +% size SIZ. +% +% See also TT_IND2SUB, SUB2IND. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if isempty(subs) + idx = []; + return; +end + +mult = [1 cumprod(siz(1:end-1))]; +idx = (subs - 1) * mult' + 1; + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind64.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind64.m new file mode 100644 index 0000000..3234d6b --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_sub2ind64.m @@ -0,0 +1,22 @@ +function idx = tt_sub2ind64(siz,subs) +%TT_SUB2IND Converts multidimensional subscripts to 64-bit linear indices. +% +% INDS = TT_SUB2IND(SIZ,SUBS) returns the linear indices +% equivalent to the subscripts in the array SUBS for a tensor of +% size SIZ. +% +% See also TT_IND2SUB, SUB2IND. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +if isempty(subs) + idx = []; + return; +end + +if prod(siz) >= 2^64 + error('Maximum linear index exceeds 2^64'); +else + mult = uint64( [1 cumprod(siz(1:end-1))] ); + idx = uint64( sum( bsxfun( @times, uint64(subs-1), mult), 2 ) + 1 ); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subscheck.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subscheck.m new file mode 100644 index 0000000..39e1782 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subscheck.m @@ -0,0 +1,30 @@ +function ok = tt_subscheck(subs) +%TT_SUBSCHECK Checks for valid subscripts. +% +% TT_SUBSCHECK(S) throws an error if S is not a valid subscript +% array, which means that S is a matrix of real-valued, finite, +% positive, integer subscripts. +% +% X = TT_SUBSCHECK(S) returns true if S is a valid and false +% otherwise. +% +% See also TT_SIZECHECK, TT_VALSCHECK. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + +% +% Includes improvements offered by Marcus Brubaker. + +if isempty(subs) + ok = true; +elseif ndims(subs) == 2 && isreal(subs) ... + && all(isfinite(subs(:)) & subs(:) > 0) ... + && isequal(subs,round(subs)) + ok = true; +else + ok = false; +end + +if ~ok && nargout == 0 + error('Subscripts must be a matrix of real positive integers'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subsubsref.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subsubsref.m new file mode 100644 index 0000000..7d9aad1 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_subsubsref.m @@ -0,0 +1,12 @@ +function a = tt_subsubsref(obj,s) +%TT_SUBSUBSREF Helper function for tensor toolbox subsref. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. 
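+
+% Informal note: this helper supports chained references in the toolbox
+% subsref methods. With a single substruct (length(s)==1) the already
+% extracted object is returned as-is; otherwise the remaining indexing
+% s(2:end) is applied to it, e.g. the "(2,3)" part of a call like X.data(2,3).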
+ + +if length(s) == 1 + a = obj; +else + a = subsref(obj, s(2:end)); +end + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tt_valscheck.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_valscheck.m new file mode 100644 index 0000000..a673d6e --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tt_valscheck.m @@ -0,0 +1,22 @@ +function ok = tt_valscheck(vals) +%TT_VALSCHECK Checks for valid values. +% +% TT_VALSCHECK(S) throws an error if S is not a valid values +% array, which means that S is a column array. +% +% X = TT_VALSCHECK(S) returns true if S is a valid and false otherwise. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +if isempty(vals) + ok = true; +elseif ndims(vals) == 2 && size(vals,2) == 1 + ok = true; +else + ok = false; +end + +if ~ok && nargout == 0 + error('Values must be a column array'); +end diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_als.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_als.m new file mode 100644 index 0000000..eded257 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_als.m @@ -0,0 +1,153 @@ +function [T,Uinit] = tucker_als(X,R,varargin) +%TUCKER_ALS Higher-order orthogonal iteration. +% +% T = TUCKER_ALS(X,R) computes the best rank-(R1,R2,..,Rn) +% approximation of tensor X, according to the specified dimensions +% in vector R. The input X can be a tensor, sptensor, ktensor, or +% ttensor. The result returned in T is a ttensor. +% +% T = TUCKER_ALS(X,R,'param',value,...) specifies optional parameters and +% values. Valid parameters and their default values are: +% 'tol' - Tolerance on difference in fit {1.0e-4} +% 'maxiters' - Maximum number of iterations {50} +% 'dimorder' - Order to loop through dimensions {1:ndims(A)} +% 'init' - Initial guess [{'random'}|'nvecs'|cell array] +% 'printitn' - Print fit every n iterations {1} +% +% [T,U0] = TUCKER_ALS(...) also returns the initial guess. +% +% Examples: +% X = sptenrand([5 4 3], 10); +% T = tucker_als(X,2); %<-- best rank(2,2,2) approximation +% T = tucker_als(X,[2 2 1]); %<-- best rank(2,2,1) approximation +% T = tucker_als(X,2,'dimorder',[3 2 1]); +% T = tucker_als(X,2,'dimorder',[3 2 1],'init','nvecs'); +% U0 = {rand(5,2),rand(4,2),[]}; %<-- Initial guess for factors of T +% T = tucker_als(X,2,'dimorder',[3 2 1],'init',U0); +% +% Documentation page for Tucker-ALS +% +% See also HOSVD, TTENSOR, TENSOR, SPTENSOR, KTENSOR. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +% Extract number of dimensions and norm of X. 
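+% Iteration sketch (informal): on each pass and for every mode n, the code
+% below forms Utilde = X x_{-n} {U'}, replaces U{n} by the R(n) leading
+% left singular vectors of the mode-n unfolding of Utilde (via nvecs), and
+% finally takes the core as G = Utilde x_n U{n}'.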
+N = ndims(X); +normX = norm(X); + +%% Set algorithm parameters from input or by using defaults +params = inputParser; +params.addParamValue('tol',1e-4,@isscalar); +params.addParamValue('maxiters',50,@(x) isscalar(x) & x > 0); +params.addParamValue('dimorder',1:N,@(x) isequal(sort(x),1:N)); +params.addParamValue('init', 'random', @(x) (iscell(x) || ismember(x,{'random','nvecs','eigs'}))); +params.addParamValue('printitn',1,@isscalar); +params.parse(varargin{:}); + +%% Copy from params object +fitchangetol = params.Results.tol; +maxiters = params.Results.maxiters; +dimorder = params.Results.dimorder; +init = params.Results.init; +printitn = params.Results.printitn; + +if numel(R) == 1 + R = R * ones(N,1); +end +U = cell(N,1); + +%% Error checking +% Error checking on maxiters +if maxiters < 0 + error('OPTS.maxiters must be positive'); +end + +% Error checking on dimorder +if ~isequal(1:N,sort(dimorder)) + error('OPTS.dimorder must include all elements from 1 to ndims(X)'); +end + +%% Set up and error checking on initial guess for U. +if iscell(init) + Uinit = init; + if numel(Uinit) ~= N + error('OPTS.init does not have %d cells',N); + end + for n = dimorder(2:end); + if ~isequal(size(Uinit{n}),[size(X,n) R(n)]) + error('OPTS.init{%d} is the wrong size',n); + end + end +else + % Observe that we don't need to calculate an initial guess for the + % first index in dimorder because that will be solved for in the first + % inner iteration. + if strcmp(init,'random') + Uinit = cell(N,1); + for n = dimorder(2:end) + Uinit{n} = rand(size(X,n),R(n)); + end + elseif strcmp(init,'nvecs') || strcmp(init,'eigs') + % Compute an orthonormal basis for the dominant + % Rn-dimensional left singular subspace of + % X_(n) (1 <= n <= N). + Uinit = cell(N,1); + for n = dimorder(2:end) + fprintf(' Computing %d leading e-vectors for factor %d.\n', ... + R(n),n); + Uinit{n} = nvecs(X,n,R(n)); + end + else + error('The selected initialization method is not supported'); + end +end + +%% Set up for iterations - initializing U and the fit. +U = Uinit; +fit = 0; + +if printitn > 0 + fprintf('\nTucker Alternating Least-Squares:\n'); +end + +%% Main Loop: Iterate until convergence +for iter = 1:maxiters + + fitold = fit; + + % Iterate over all N modes of the tensor + for n = dimorder(1:end) + Utilde = ttm(X, U, -n, 't'); + + % Maximize norm(Utilde x_n W') wrt W and + % keeping orthonormality of W + U{n} = nvecs(Utilde,n,R(n)); + end + + % Assemble the current approximation + core = ttm(Utilde, U, n, 't'); + + % Compute fit + normresidual = sqrt( normX^2 - norm(core)^2 ); + fit = 1 - (normresidual / normX); %fraction explained by model + fitchange = abs(fitold - fit); + + if mod(iter,printitn)==0 + fprintf(' Iter %2d: fit = %e fitdelta = %7.1e\n', iter, fit, fitchange); + end + + % Check for convergence + if (iter > 1) && (fitchange < fitchangetol) + break; + end + +end + +T = ttensor(core, U); + +end + + diff --git a/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_sym.m b/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_sym.m new file mode 100644 index 0000000..44e0f92 --- /dev/null +++ b/ext/YetAnotherFEcode/external/tensor_toolbox/tucker_sym.m @@ -0,0 +1,145 @@ +function [T,Xinit] = tucker_sym(S,R,varargin) +%TUCKER_SYM Symmetric Tucker approximation. +% +% T = TUCKER_SYM(S,R) computes the best rank-(R,R,...,R) approximation of +% the symmetric tensor S, according to the specified dimension R. The +% result returned in T is a ttensor (with all factors equal), i.e., +% T = G x_1 X x_2 X ... 
x_N X where X is the optimal factor matrix and G +% is the corresponding core. +% +% T = TUCKER_SYM(S,R,'param',value,...) specifies optional parameters and +% values. Valid parameters and their default values are: +% 'tol' - Tolerance on difference in X {1.0e-10} +% 'maxiters' - Maximum number of iterations {1000} +% 'init' - Initial guess [{'random'}|'nvecs'|cell array] +% 'printitn' - Print fit every n iterations {1} +% 'return' - First return argument is T or X [{'ttensor'},'matrix'] +% +% [T,X0] = TUCKER_SYM(...) also returns the initial guess. +% +% See also TUCKER_SYM. +% +% Reference: Phillip A. Regalia, Monotonically Convergent Algorithms for +% Symmetric Tensor Approximation, Linear Algebra and its Applications +% 438(2):875-890, 2013, http://dx.doi.org/10.1016/j.laa.2011.10.033. +% +%MATLAB Tensor Toolbox. Copyright 2018, Sandia Corporation. + + +%% Input checking +if ~issymmetric(S) + error('S must be symmetric'); +end + +if numel(R) ~= 1 + error('R must be a scalar'); +end + +N = ndims(S); +D = size(S,1); + +%% Set algorithm parameters from input or by using defaults +params = inputParser; +params.addParameter('tol',1e-10,@isscalar); +params.addParameter('maxiters',1000,@(x) isscalar(x) & x > 0); +params.addParameter('init', 'random'); +params.addParameter('printitn',1,@isscalar); +params.addParameter('return','ttensor'); +params.parse(varargin{:}); + +%% Copy from params object +tol = params.Results.tol; +maxiters = params.Results.maxiters; +init = params.Results.init; +printitn = params.Results.printitn; + +%% Error checking +% Error checking on maxiters +if maxiters < 0 + error('OPTS.maxiters must be positive'); +end + +%% Set up and error checking on initial guess for U. +if isnumeric(init) + Xinit = init; + if ~isequal(size(Xinit),[D R]) + error('OPTS.init is the wrong size'); + end +else + if strcmp(init,'random') + Xinit = rand(D,R); + elseif strcmp(init,'nvecs') || strcmp(init,'eigs') + % Compute an orthonormal basis for the dominant + % Rn-dimensional left singular subspace of + % X_(n) (1 <= n <= N). + fprintf('Computing %d leading e-vectors.\n',R); + Xinit = nvecs(S,1,R); + else + error('The selected initialization method is not supported'); + end +end + +%% Set up for iterations - we ensure that X is orthogonal +X = Xinit; +[X,~] = qr(X,0); + +% Roughly the same tolerance as is used by pinv +svdtol = D^(N-1) * norm(S) * eps(1.0); + +if printitn > 0 + fprintf('\nSymmetric Tucker:\n'); +end + +%% Main Loop: Iterate until convergence +Xcell = cell(N,1); +for iter = 1:maxiters + + Xold = X; + + % For the remainder tensor + [Xcell{:}] = deal(X); + Rem = ttm(S, Xcell, -1, 't'); + Rem = double(tenmat(Rem, 1)); + + % Form gradient + % NOTE: We use the SVD directly rather than PINV, which could be + % invoked by the line: X = 2*N*Rem*pinv(Rem)*X; + [UU,SS,~] = svd(Rem,0); + ii = find(diag(SS) > svdtol, 1, 'last'); + UU = UU(:,1:ii); + X = 2*N*UU*(UU'*X); + + % Update X + [X,~] = qr(X,0); + + % Check for convergence + fit = norm(X-Xold)/norm(X); + + % Check for convergence + if (fit < tol) + break; + end + + % Print results + if mod(iter,printitn)==0 + fprintf(' Iter %2d: rel. change in X = %e\n', iter, fit); + end + +end + +% Print final result +if (printitn > 0) && (iter < maxiters) + fprintf(' Iter %2d: rel. change in X = %e\n', iter, fit); +end + +% Do they want just the matrix or the full tensor back? 
+if strcmpi(params.Results.return,'matrix') + T = X; +else + [Xcell{:}] = deal(X); + core = ttm(S, Xcell, 1:N, 't'); + T = ttensor(core, Xcell); +end + + + diff --git a/ext/YetAnotherFEcode/src/Element/MechanicalElements/Hex20Element.m b/ext/YetAnotherFEcode/src/Element/MechanicalElements/Hex20Element.m index ce0e8f6..0e670f6 100644 --- a/ext/YetAnotherFEcode/src/Element/MechanicalElements/Hex20Element.m +++ b/ext/YetAnotherFEcode/src/Element/MechanicalElements/Hex20Element.m @@ -178,7 +178,7 @@ xe = x(index,:); end - function f = get.uniformBodyForce(self) + function F = get.uniformBodyForce(self) % _____________________________________________________________ % % F = uniform_body_force(self,direction) @@ -190,6 +190,121 @@ F(3:3:end) = self.vol/20; % uniformly distributed pressure on the structure end + function [T2, globalSubs] = T2(self) + % this function computes the 3-tensor corresponding to the + % quadratic component of the nonlinear internal force in + % global coordinates at the element level. + + % global DOFs associated to the element nodes + index = get_index(self.nodeIDs,self.nDOFPerNode); + + % location of each dimension of tensor in global DOFs + globalSubs = {index, index, index}; + + X = self.quadrature.X; + W = self.quadrature.W; + + C = self.initialization.C; % constitutive law matrix + H = self.initialization.H; % Linear strain matrix: eps_l = H*th + + % Quadratic strain matrix: A = L.th, eps_quad = A*th + L = tenzeros([6,9,9]); + L(1,1,1)=1; L(4,2,1)=1; L(5,3,1)=1; + L(4,1,2)=1; L(2,2,2)=1; L(6,3,2)=1; + L(5,1,3)=1; L(6,2,3)=1; L(3,3,3)=1; + L(1,4,4)=1; L(4,5,4)=1; L(5,6,4)=1; + L(4,4,5)=1; L(2,5,5)=1; L(6,6,5)=1; + L(5,4,6)=1; L(6,5,6)=1; L(3,6,6)=1; + L(1,7,7)=1; L(4,8,7)=1; L(5,9,7)=1; + L(4,7,8)=1; L(2,8,8)=1; L(6,9,8)=1; + L(5,7,9)=1; L(6,8,9)=1; L(3,9,9)=1; + + m = self.nNodes*self.nDOFPerNode; + Q3h = tenzeros([m,m,m]); + for ii = 1:self.quadrature.Ng + for jj = 1:self.quadrature.Ng + for kk = 1:self.quadrature.Ng + g = X(ii); % natural coordinates + h = X(jj); % natural coordinates + r = X(kk); % natural coordinates + we = W(ii)*W(jj)*W(kk); % weights + [G,detJ,~] = self.G_HEX20(g,h,r); + + % G(x,y,z) and detJ from the position of the gauss points + + %construct core part of the tensors for each gauss point + GHC = tensor((C*H*G)'); + TG = tensor(G); %create tensor object out of matrix + LGG = ttt(ttt(L,TG,3,1),TG,2,1); + + Q3h_int = ttt(GHC,LGG,2,1); + Q3h = Q3h + Q3h_int*detJ*we; + + end + end + end + + % build third order tensors using Q3h + Q3ht = permute(Q3h,[3 2 1]); + T2 = Q3h./2 + Q3ht; + end + + function [T3, globalSubs] = T3(self) + % this function computes the 4-tensor corresponding to the + % quadratic component of the nonlinear internal force in + % global coordinates at the element level. 
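+ %
+ % This 4-tensor is contracted with three copies of the element
+ % displacement vector, so it provides the cubic contribution to the
+ % internal force. Illustrative use with the Tensor Toolbox (element
+ % object elem and displacement vector u are assumed):
+ %   [T3e, sub] = elem.T3();
+ %   f3 = ttv(T3e, {u, u, u}, [2 3 4]);   % cubic force contribution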
+ + % global DOFs associated to the element nodes + index = get_index(self.nodeIDs,self.nDOFPerNode); + + % location of each dimension of tensor in global DOFs + globalSubs = cell(4,1); + globalSubs(:) = {index}; + + X = self.quadrature.X; + W = self.quadrature.W; + + C = self.initialization.C; % constitutive law matrix + + % Quadratic strain matrix: A = L.th, eps_quad = A*th + L = tenzeros([6,9,9]); + L(1,1,1)=1; L(4,2,1)=1; L(5,3,1)=1; + L(4,1,2)=1; L(2,2,2)=1; L(6,3,2)=1; + L(5,1,3)=1; L(6,2,3)=1; L(3,3,3)=1; + L(1,4,4)=1; L(4,5,4)=1; L(5,6,4)=1; + L(4,4,5)=1; L(2,5,5)=1; L(6,6,5)=1; + L(5,4,6)=1; L(6,5,6)=1; L(3,6,6)=1; + L(1,7,7)=1; L(4,8,7)=1; L(5,9,7)=1; + L(4,7,8)=1; L(2,8,8)=1; L(6,9,8)=1; + L(5,7,9)=1; L(6,8,9)=1; L(3,9,9)=1; + + m = self.nNodes*self.nDOFPerNode; + T3 = tenzeros([m,m,m,m]); + + for ii = 1:self.quadrature.Ng + for jj = 1:self.quadrature.Ng + for kk = 1:self.quadrature.Ng + g = X(ii); % natural coordinates + h = X(jj); % natural coordinates + r = X(kk); % natural coordinates + we = W(ii)*W(jj)*W(kk); % weights + [G,detJ,~] = self.G_HEX20(g,h,r); + + % G(x,y,z) and detJ from the position of the gauss points + + %construct core part of the tensors for each gauss point + TC = tensor(C); %create tensor object, rename it to distinguish + TG = tensor(G); %create tensor object out of matrix + LGG = ttt(ttt(L,TG,3,1),TG,2,1); + + Q4h_int = ttt(ttt(permute(LGG,[2 1 3]),TC,2,1),LGG,3,1); + T3 = T3 + Q4h_int*detJ*we/2; + + end + end + end + + end % ANCILLARY FUNCTIONS _____________________________________________ function V = get.vol(self) diff --git a/ext/YetAnotherFEcode/src/Element/MechanicalElements/Tet10Element.m b/ext/YetAnotherFEcode/src/Element/MechanicalElements/Tet10Element.m index 4fbe618..4513b33 100644 --- a/ext/YetAnotherFEcode/src/Element/MechanicalElements/Tet10Element.m +++ b/ext/YetAnotherFEcode/src/Element/MechanicalElements/Tet10Element.m @@ -171,7 +171,110 @@ f(3:3:end) = self.vol/10; % uniformly distributed pressure on the structure end + function [T2, globalSubs] = T2(self) + % this function computes the 3-tensor corresponding to the + % quadratic component of the nonlinear internal force in + % global coordinates at the element level. 
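+ %
+ % Illustrative use with the Tensor Toolbox (element object elem and
+ % displacement vector u of length self.nNodes*self.nDOFPerNode are
+ % assumed):
+ %   [T2e, sub] = elem.T2();
+ %   f2 = ttv(T2e, {u, u}, [2 3]);   % quadratic force contribution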
+ + % global DOFs associated to the element nodes + index = get_index(self.nodeIDs,self.nDOFPerNode); + + % location of each dimension of tensor in global DOFs + globalSubs = {index, index, index}; + + X = self.quadrature.X; + W = self.quadrature.W; + + C = self.initialization.C; % constitutive law matrix + H = self.initialization.H; % Linear strain matrix: eps_l = H*th + + % Quadratic strain matrix: A = L.th, eps_quad = A*th + L = tenzeros([6,9,9]); + L(1,1,1)=1; L(4,2,1)=1; L(5,3,1)=1; + L(4,1,2)=1; L(2,2,2)=1; L(6,3,2)=1; + L(5,1,3)=1; L(6,2,3)=1; L(3,3,3)=1; + L(1,4,4)=1; L(4,5,4)=1; L(5,6,4)=1; + L(4,4,5)=1; L(2,5,5)=1; L(6,6,5)=1; + L(5,4,6)=1; L(6,5,6)=1; L(3,6,6)=1; + L(1,7,7)=1; L(4,8,7)=1; L(5,9,7)=1; + L(4,7,8)=1; L(2,8,8)=1; L(6,9,8)=1; + L(5,7,9)=1; L(6,8,9)=1; L(3,9,9)=1; + + m = self.nNodes*self.nDOFPerNode; + Q3h = tenzeros([m,m,m]); + + for ii = 1:length(self.quadrature.W) + g = X(1,ii); + h = X(2,ii); + r = X(3,ii); + we = W(ii); % weights + [G,detJ,~] = G_TET10(self,g,h,r); %get shape function derivative + % G(x,y,z) and detJ from the position of the gauss points + + %construct core part of the tensors for each gauss point + GHC = tensor((C*H*G)'); + TG = tensor(G); %create tensor object out of matrix + LGG = ttt(ttt(L,TG,3,1),TG,2,1); + + Q3h_int = ttt(GHC,LGG,2,1); + Q3h = Q3h + Q3h_int*detJ*we; + end + + % build third order tensors using Q3h + Q3ht = permute(Q3h,[3 2 1]); + T2 = Q3h./2 + Q3ht; + end + function [T3, globalSubs] = T3(self) + % this function computes the 4-tensor corresponding to the + % quadratic component of the nonlinear internal force in + % global coordinates at the element level. + + % global DOFs associated to the element nodes + index = get_index(self.nodeIDs,self.nDOFPerNode); + + % location of each dimension of tensor in global DOFs + globalSubs = cell(4,1); + globalSubs(:) = {index}; + + X = self.quadrature.X; + W = self.quadrature.W; + + C = self.initialization.C; % constitutive law matrix + + % Quadratic strain matrix: A = L.th, eps_quad = A*th + L = tenzeros([6,9,9]); + L(1,1,1)=1; L(4,2,1)=1; L(5,3,1)=1; + L(4,1,2)=1; L(2,2,2)=1; L(6,3,2)=1; + L(5,1,3)=1; L(6,2,3)=1; L(3,3,3)=1; + L(1,4,4)=1; L(4,5,4)=1; L(5,6,4)=1; + L(4,4,5)=1; L(2,5,5)=1; L(6,6,5)=1; + L(5,4,6)=1; L(6,5,6)=1; L(3,6,6)=1; + L(1,7,7)=1; L(4,8,7)=1; L(5,9,7)=1; + L(4,7,8)=1; L(2,8,8)=1; L(6,9,8)=1; + L(5,7,9)=1; L(6,8,9)=1; L(3,9,9)=1; + + m = self.nNodes*self.nDOFPerNode; + T3 = tenzeros([m,m,m,m]); + + for ii = 1:length(self.quadrature.W) + g = X(1,ii); + h = X(2,ii); + r = X(3,ii); + we = W(ii); % weights + [G,detJ,~] = G_TET10(self,g,h,r); %get shape function derivative + % G(x,y,z) and detJ from the position of the gauss points + + %construct core part of the tensors for each gauss point + TC = tensor(C); %create tensor object, rename it to distinguish + TG = tensor(G); %create tensor object out of matrix + LGG = ttt(ttt(L,TG,3,1),TG,2,1); + + Q4h_int = ttt(ttt(permute(LGG,[2 1 3]),TC,2,1),LGG,3,1); + T3 = T3 + Q4h_int*detJ*we/2; + end + + end % ANCILLARY FUNCTIONS _____________________________________________ function V = get.vol(self) diff --git a/ext/YetAnotherFEcode/src/EssentialBoundary.m b/ext/YetAnotherFEcode/src/EssentialBoundary.m index 63e032d..b488713 100644 --- a/ext/YetAnotherFEcode/src/EssentialBoundary.m +++ b/ext/YetAnotherFEcode/src/EssentialBoundary.m @@ -90,3 +90,4 @@ function apply_Dirichlet_BC(self,constrainedNodes,constrainedDOF,value) end end + diff --git a/ext/YetAnotherFEcode/src/Material/MechanicalMaterials/KirchoffMaterial.m 
b/ext/YetAnotherFEcode/src/Material/MechanicalMaterials/KirchoffMaterial.m index 43a7524..c661a7a 100644 --- a/ext/YetAnotherFEcode/src/Material/MechanicalMaterials/KirchoffMaterial.m +++ b/ext/YetAnotherFEcode/src/Material/MechanicalMaterials/KirchoffMaterial.m @@ -12,13 +12,18 @@ function self = KirchoffMaterial(varargin) % call Material Class constructor self@Material(varargin{:}) - - % Define properties specific to this class + end + + function lambda = get.lambda(self) + E = self.YOUNGS_MODULUS; + nu = self.POISSONS_RATIO; + lambda = nu*E / ((1 + nu) * (1 - 2*nu)); + end + + function mu = get.mu(self) E = self.YOUNGS_MODULUS; nu = self.POISSONS_RATIO; - - self.lambda = nu*E / ((1 + nu) * (1 - 2*nu)); - self.mu = E / (2*(1 + nu)); + mu = E / (2*(1 + nu)); end function D = get_stress_strain_matrix_2D(self) diff --git a/ext/YetAnotherFEcode/src/Mesh/Mesh.m b/ext/YetAnotherFEcode/src/Mesh/Mesh.m index 3efdcff..88e5df3 100644 --- a/ext/YetAnotherFEcode/src/Mesh/Mesh.m +++ b/ext/YetAnotherFEcode/src/Mesh/Mesh.m @@ -117,4 +117,4 @@ function reset_boundary(self) end -end \ No newline at end of file +end diff --git a/ext/YetAnotherFEcode/src/PostProcessing/PlotFieldonDeformedMesh.m b/ext/YetAnotherFEcode/src/PostProcessing/PlotFieldonDeformedMesh.m index a2a821d..eb08c73 100644 --- a/ext/YetAnotherFEcode/src/PostProcessing/PlotFieldonDeformedMesh.m +++ b/ext/YetAnotherFEcode/src/PostProcessing/PlotFieldonDeformedMesh.m @@ -90,9 +90,9 @@ defoZ = Z+factor*UZ ; view(3); hold on; - patch(defoX,defoY,defoZ,profile,'EdgeColor',meshcolor,... - 'DisplayName','Deformed Mesh') - rotate3d on ; + h = patch(defoX,defoY,defoZ,profile,'EdgeColor',meshcolor,... + 'DisplayName','Deformed Mesh'); + rotate3d on; elseif dimension == 2 % For 2D plots ux = disp(:,1) ; diff --git a/ext/YetAnotherFEcode/src/static_equilibrium.m b/ext/YetAnotherFEcode/src/static_equilibrium.m index dbac2f3..de5029d 100644 --- a/ext/YetAnotherFEcode/src/static_equilibrium.m +++ b/ext/YetAnotherFEcode/src/static_equilibrium.m @@ -1,14 +1,19 @@ -function [ u_lin, u ] = static_equilibrium( Assembly, Fext, varargin ) +function [ u_lin, u ] = static_equilibrium( Assembly, uInit, Fext, varargin ) % finds the equilibrium configuration of the model subject to Fext load. 
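+%
+% Example (illustrative; Assembly and Fext are assumed to exist):
+%   uInit = zeros(Assembly.Mesh.nDOFs,1);
+%   [u_lin, u] = static_equilibrium(Assembly, uInit, Fext, ...
+%                   'tol', 1e-6, 'maxIter', 200);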
% Detailed explanation goes here -K = Assembly.DATA.K; + +% compute linear displacement +u0 = zeros(Assembly.Mesh.nDOFs,1); +[K,~] = Assembly.tangent_stiffness_and_force(u0); u_lin = Assembly.solve_system(K,Fext); -u0 = Assembly.constrain_vector(u_lin); -[nsteps,tol,method] = parse_inputs(varargin{:}); +% initial displacement +u0 = Assembly.constrain_vector(uInit); + +[nsteps,tol,method,maxIter] = parse_inputs(varargin{:}); switch method case 'fsolve' - options = optimoptions('fsolve','SpecifyObjectiveGradient',true,'MaxIterations',10000); + options = optimoptions('fsolve','SpecifyObjectiveGradient',true,'MaxIterations',maxIter); [ueq] = fsolve(@(u)f(u,Assembly,Fext),u0,options); u = Assembly.unconstrain_vector(ueq); @@ -27,8 +32,15 @@ c = norm(Assembly.constrain_vector(residual))/c0; fprintf('STEP %d, ITERATION %d, RESIDUAL %d \n',j,it,c); if c < tol + disp('Iterations converged') + break + end + + if it>=maxIter + disp('Not converged: maximum number of iterations reached') break end + correction = Assembly.solve_system(K,residual); u = u + correction; it = it + 1; @@ -48,15 +60,17 @@ end -function [nsteps,tol,method] = parse_inputs(varargin) +function [nsteps,tol,method,maxIter] = parse_inputs(varargin) %% parsing inputs defaultnsteps = 100; defaulttol = 1e-6; defaultmethod = 'fsolve'; - +defaultmaxiter = 100; p = inputParser; addParameter(p,'nsteps',defaultnsteps, @(x)validateattributes(x, ... {'numeric'},{'nonempty','integer','positive'}) ); +addParameter(p,'maxIter',defaultmaxiter, @(x)validateattributes(x, ... + {'numeric'},{'nonempty','integer','positive'}) ); addParameter(p,'tol',defaulttol, @(x)validateattributes(x, ... {'numeric'},{'nonempty','positive'}) ); addParameter(p,'method',defaultmethod,@(x)validateattributes(x, ... @@ -66,4 +80,5 @@ nsteps = p.Results.nsteps; tol = p.Results.tol; method = p.Results.method; +maxIter = p.Results.maxIter; end \ No newline at end of file diff --git a/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe new file mode 100644 index 0000000..55185b9 Binary files /dev/null and b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfe differ diff --git a/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs new file mode 100644 index 0000000..fdddf25 Binary files /dev/null and b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.cfs differ diff --git a/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.si b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.si new file mode 100644 index 0000000..dc8df49 Binary files /dev/null and b/ext/tensor_toolbox/doc/html/helpsearch-v3/_1.si differ diff --git a/ext/tensor_toolbox/doc/html/helpsearch-v3/segments_3 b/ext/tensor_toolbox/doc/html/helpsearch-v3/segments_3 new file mode 100644 index 0000000..86faab3 Binary files /dev/null and b/ext/tensor_toolbox/doc/html/helpsearch-v3/segments_3 differ diff --git a/src/@DynamicalSystem/compute_fext.m b/src/@DynamicalSystem/compute_fext.m index a16f540..03a5181 100644 --- a/src/@DynamicalSystem/compute_fext.m +++ b/src/@DynamicalSystem/compute_fext.m @@ -9,6 +9,9 @@ else if obj.Options.HarmonicForce fext = obj.fext.epsilon * real(obj.fext.coeffs * exp(1i * obj.fext.kappas * obj.Omega * t)); + if obj.Options.BaseExcitation + fext = fext*(obj.Omega)^2; + end else fext = obj.fext(t); end diff --git a/src/@DynamicalSystem/evaluate_Fext.m b/src/@DynamicalSystem/evaluate_Fext.m index 385a166..ee66105 100644 --- a/src/@DynamicalSystem/evaluate_Fext.m +++ b/src/@DynamicalSystem/evaluate_Fext.m @@ -7,10 
+7,17 @@ if isempty(obj.Fext) Fext = sparse(obj.N,1); else - Fext = obj.Fext.epsilon * real(obj.Fext.coeffs * exp(1i * obj.Fext.kappas * obj.Omega * t)); + if obj.Options.HarmonicForce + Fext = obj.Fext.epsilon * real(obj.Fext.coeffs * exp(1i * obj.Fext.kappas * obj.Omega * t)); + if obj.Options.BaseExcitation + Fext = Fext*(obj.Omega)^2; + end + else + Fext = obj.Fext(t); + end end case 2 Fext = [obj.compute_fext(t); sparse(obj.n,1)]; -end \ No newline at end of file +end diff --git a/src/@SSM/FRC_cont_ep.m b/src/@SSM/FRC_cont_ep.m index 5f7979e..beb49d5 100644 --- a/src/@SSM/FRC_cont_ep.m +++ b/src/@SSM/FRC_cont_ep.m @@ -83,6 +83,7 @@ ispolar = strcmp(coordinates, 'polar'); fdata.ispolar = ispolar; +fdata.isbaseForce = obj.System.Options.BaseExcitation; if ispolar odefun = @(z,p) ode_2mDSSM_polar(z,p,fdata); else @@ -182,6 +183,9 @@ for j=1:numel(om) % Forced response in Physical Coordinates statej = state(j,:); + if obj.System.Options.BaseExcitation + epsf(j) = epsf(j)*(om(j))^2; + end [Aout, Zout, z_norm, Zic] = compute_full_response_2mD_ReIm(W_0, W1{j}, statej, epsf(j), nt, mFreqs, outdof); % collect output in array @@ -215,6 +219,9 @@ W_1j = W_1; end % Forced response in Physical Coordinates + if obj.System.Options.BaseExcitation + epsf(j) = epsf(j)*(om(j))^2; + end statej = state(j,:); [Aout, Zout, z_norm, Zic] = compute_full_response_2mD_ReIm(W_0, W_1j, statej, epsf(j), nt, mFreqs, outdof); diff --git a/src/@SSM/FRC_level_set.m b/src/@SSM/FRC_level_set.m index 7c9c109..05e00c4 100644 --- a/src/@SSM/FRC_level_set.m +++ b/src/@SSM/FRC_level_set.m @@ -65,6 +65,9 @@ epsilon = par(j); obj.System.fext.epsilon = epsilon; end + if obj.System.Options.BaseExcitation + epsilon = epsilon*Omega^2; + end % compute non-autonomous SSM coefficients [W1, R1] = obj.compute_perturbed_whisker(order); diff --git a/src/@SSM/SSM_isol2ep.m b/src/@SSM/SSM_isol2ep.m index 84f1a48..808ec04 100644 --- a/src/@SSM/SSM_isol2ep.m +++ b/src/@SSM/SSM_isol2ep.m @@ -76,6 +76,7 @@ ispolar = strcmp(obj.FRCOptions.coordinates, 'polar'); fdata.ispolar = ispolar; +fdata.isbaseForce = obj.System.Options.BaseExcitation; if ispolar odefun = @(z,p) ode_2mDSSM_polar(z,p,fdata); else diff --git a/src/@SSM/extract_FRC.m b/src/@SSM/extract_FRC.m index 8ef824c..9ba2914 100644 --- a/src/@SSM/extract_FRC.m +++ b/src/@SSM/extract_FRC.m @@ -83,6 +83,11 @@ % call continuation based method mFreqs = mFreqs(1:2:end)'; runid = ['freqSubint',num2str(i)]; + if j>1 % take lowest order solution as initial guess + sol_jminus1 = ep_read_solution('', [runid,'.ep'], 1); + obj.FRCOptions.p0 = sol_jminus1.p; + obj.FRCOptions.z0 = sol_jminus1.x; + end FRC{i} = obj.FRC_cont_ep(runid,resModes,order,mFreqs,parName,parSubRange); plotStyle = 'lines'; case 'continuation po' diff --git a/src/@SSM/private/FRC_reduced_to_full.m b/src/@SSM/private/FRC_reduced_to_full.m index b3b9b9c..0885690 100644 --- a/src/@SSM/private/FRC_reduced_to_full.m +++ b/src/@SSM/private/FRC_reduced_to_full.m @@ -70,6 +70,9 @@ end end % Forced response in Physical Coordinates + if obj.System.Options.BaseExcitation + epsf(j) = epsf(j)*(om(j))^2; + end %% ep toolbox if isep state = FRC.z(j,:); diff --git a/src/@SSM/private/ode_2mDSSM_cartesian.m b/src/@SSM/private/ode_2mDSSM_cartesian.m index 1b0a618..0b1ab3c 100644 --- a/src/@SSM/private/ode_2mDSSM_cartesian.m +++ b/src/@SSM/private/ode_2mDSSM_cartesian.m @@ -57,6 +57,7 @@ id = iNonauto(i); r = rNonauto(i); r = epsf*r; + if data.isbaseForce; r = r.*om.^2; end rRe = real(r); rIm = imag(r); yRe(id,:) = yRe(id,:)+rRe; diff --git 
a/src/@SSM/private/ode_2mDSSM_polar.m b/src/@SSM/private/ode_2mDSSM_polar.m index f2bda25..563b4dd 100644 --- a/src/@SSM/private/ode_2mDSSM_polar.m +++ b/src/@SSM/private/ode_2mDSSM_polar.m @@ -57,6 +57,7 @@ id = iNonauto(i); r = rNonauto(i); r = epsf*r; + if data.isbaseForce; r = r.*om.^2; end rRe = real(r); rIm = imag(r); yrho(id,:) = yrho(id,:)+rRe.*cos(th(id,:))+rIm.*sin(th(id,:)); diff --git a/src/@SSM/private/plot_frc_full.m b/src/@SSM/private/plot_frc_full.m index 63e1c6c..512b872 100644 --- a/src/@SSM/private/plot_frc_full.m +++ b/src/@SSM/private/plot_frc_full.m @@ -115,4 +115,4 @@ function plot_frc_full(Par,Znorm,outdof,Aout,stab,order,ParName,varargin) end legend boxoff; end -end \ No newline at end of file +end diff --git a/src/DSOptions.m b/src/DSOptions.m index 2270fca..c42e993 100644 --- a/src/DSOptions.m +++ b/src/DSOptions.m @@ -8,6 +8,7 @@ RayleighDamping = true; % damping matrix of second-order system HarmonicForce = true; % external forcing lambdaThreshold = 1e16; % Threshold for stiff eigenmodes (will be removed) + BaseExcitation = false; % harmonic forcing in the form \epsilon\Omega^2 f^{ext}(\Omega t) end methods function set.notation(obj,notation) diff --git a/src/misc/auto_red_dyn.m b/src/misc/auto_red_dyn.m new file mode 100644 index 0000000..ab40bb8 --- /dev/null +++ b/src/misc/auto_red_dyn.m @@ -0,0 +1,43 @@ +function y = auto_red_dyn(z, data) +% auto_red_dyn This function presents vectorized implementation of vector field +% of reduced dynamics of autonomous SSMs, namely, \dot{x}=R(x). The x +% vector here is a mixture of both real and complex-conjugate pair +% variables. + +assert(~isempty(data), 'Structure data is empty'); +% extract data fields +lamd = data.lamd; +beta = data.beta; +kappa = data.kappa; + +y = lamd.*z; + +% nonlinear part +nTerms = size(kappa,1); +for i=1:nTerms + coeffs = beta(:,i); + if max(abs(coeffs))>1e-8 + terms = z_power_ka(z, kappa(i,:)); + y = y+coeffs*terms; + end +end + +end + + +function y = z_power_ka(z, ka) +% Z_POWER_KA This function computes complex monomilal z^ka, where +% z=[z1,...,zm] and ka=[ka1,...,kam] and z^ka=z1^ka1*...*zm^kam. Here we +% support vectorized version of z^ka. Specifically, z could be a m-by-n +% matrix, where each row corresponds the component sampled at different +% locations. However, we assume ka to be a vector + +m = size(z,1); +% assert(m==numel(ka), 'The dimension of z and ka is not matched'); + +y = 1; +for i=1:m + y = y.*z(i,:).^ka(i); +end + +end \ No newline at end of file diff --git a/src/misc/plot_2D_auto_SSM.m b/src/misc/plot_2D_auto_SSM.m new file mode 100644 index 0000000..416ddce --- /dev/null +++ b/src/misc/plot_2D_auto_SSM.m @@ -0,0 +1,41 @@ +function plot_2D_auto_SSM(W,rhosamp,plotdofs,varargin) +% PLOT_2D_AUTO_SSM This function returns the plot of 2 dimensional +% autonomous SSM. We first generate the grids of parameterization +% coordinates based on rhosamp X thetasamp(0:2*pi/128:2*pi). Then we map +% these grids to the full system based on the expansion W of SSM at +% plotdofs. Note that plotdofs should have 3 dofs. 
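+%
+% Example (illustrative; W is an autonomous SSM expansion obtained from
+% the compute_whisker routine):
+%   rhosamp = linspace(0, 0.5, 50);
+%   plot_2D_auto_SSM(W, rhosamp, [1 3 5]);                             % default labels
+%   plot_2D_auto_SSM(W, rhosamp, [1 3 5], {'$u_1$','$u_3$','$u_5$'});  % custom labels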
+ +assert(numel(plotdofs)==3,'the number of dofs is not three'); +% generate grids +[RHO,THETA] = meshgrid(rhosamp,0:2*pi/128:2*pi); +zdof1 = zeros(size(RHO)); +zdof2 = zeros(size(RHO)); +zdof3 = zeros(size(RHO)); +for k=1:129 + pk = RHO(k,:).*exp(1i*THETA(k,:)); + zk = reduced_to_full_traj([],[pk;conj(pk)],W); + zk = zk(plotdofs,:); + zdof1(k,:) = zk(1,:); + zdof2(k,:) = zk(2,:); + zdof3(k,:) = zk(3,:); +end + +figure; hold on +h = surf(zdof1,zdof2,zdof3,'FaceColor', 0.9*[1 1 1], 'FaceAlpha', 1.0, ... + 'LineStyle', '-', 'EdgeColor', 0.6*[1 1 1], ... + 'LineWidth', 0.5); +view([1,1,1]) +grid on +set(gca,'LineWidth',1.2); +set(gca,'FontSize',14); +if isempty(varargin) + xlabel(['$z_{\mathrm{',num2str(plotdofs(1)),'}}$'],'interpreter','latex','FontSize',16); + ylabel(['$z_{\mathrm{',num2str(plotdofs(2)),'}}$'],'interpreter','latex','FontSize',16); + zlabel(['$z_{\mathrm{',num2str(plotdofs(3)),'}}$'],'interpreter','latex','FontSize',16); +else + xlabel(varargin{1}{1},'interpreter','latex','FontSize',16); + ylabel(varargin{1}{2},'interpreter','latex','FontSize',16); + zlabel(varargin{1}{3},'interpreter','latex','FontSize',16); +end + +end diff --git a/src/misc/plot_FRC.m b/src/misc/plot_FRC.m index 519dae3..a5d0df2 100644 --- a/src/misc/plot_FRC.m +++ b/src/misc/plot_FRC.m @@ -82,10 +82,10 @@ function plot_FRC(FRC,outdof,order,ParName,plotStyle,figs,color) plot(Par(stab),Aout(stab,k),'o','Color', color,'MarkerSize',10,'DisplayName',strcat('SSM-$$\mathcal{O}(',num2str(order),')$$ - stable')); plot(Par(~stab),Aout(~stab,k),'s','Color', color,'MarkerSize',10,'DisplayName',strcat('SSM-$$\mathcal{O}(',num2str(order),')$$ - unstable')); add_labels('$\Omega$',strcat('$||z_{',num2str(outdof(k)),'}||_{\infty}$')) + lgd = legend(); + set(lgd,'Interpreter','latex','Location','best'); + xlabel('$\Omega$','Interpreter','latex'); end - lgd = legend(); - set(lgd,'Interpreter','latex','Location','best'); - xlabel('$\Omega$','Interpreter','latex'); end end diff --git a/src/misc/proj2SSM.m b/src/misc/proj2SSM.m new file mode 100644 index 0000000..3ffe061 --- /dev/null +++ b/src/misc/proj2SSM.m @@ -0,0 +1,48 @@ +function varargout = proj2SSM(z0,projMethod,varargin) +% PROJ2SSM This function returns the projection of point z0 on an +% autonomous SSM. The information of SSM is given in the varargin arguments +% and we support both linear and nonlinear projection methods. +% Specifically, for projMethod=nonlinear +% we solve an optimization problem argmin_q ||z0-W(q)||, +% where the map W gives the expansion of SSM. 
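+%                     e.g., [x,fval,exitflag] = proj2SSM(z0,'nonlinear',W_0,autData,pV)
+%                     with W_0, autData, pV as described in the argument
+%                     parsing below (illustrative call);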
+% for projMethod=linear +% we find q from z0=Vq, which is solved using the +% orthonormality of V and W(left eigenvectors) with respect +% to B matrix + +switch projMethod + case 'nonlinear' + W_0 = varargin{1}; % expansion of SSM + autData = varargin{2}; % data for mapping between real and complex coordinates + pV = varargin{3}; % initial guess of optimization + + fprintf('Call fminunc to find the projection of point on SSM\n'); + + options = optimoptions('fminunc','Display','notify'); + options = optimoptions(options,'StepTolerance',1e-10); + options = optimoptions(options, 'MaxFunctionEvaluations', 1e5); + + [z,fval,exitflag] = fminunc(@(u) squaDist2pointSSM(z0,u,W_0,autData), pV, options); + + % convert real representation to complex one + x_real = z(autData.realx,:); + x_comp = z(autData.compx(1:2:end-1),:)+1i*z(autData.compx(2:2:end),:); + x = zeros(autData.dim, 1); + x(autData.realx,:) = x_real; + x(autData.compx(1:2:end-1),:) = x_comp; + x(autData.compx(2:2:end),:) = conj(x_comp); + + varargout{1} = x; % projected point on SSM (coordinates in modal domain) + varargout{2} = fval; % squared distance to projected point + varargout{3} = exitflag; % flag for convergence of optimization routine + + case 'linear' + Wm = varargin{1}; % left eigenvectors correspond to master spectral subspace + B = varargin{2}; + q = Wm'*B*z0; + varargout{1} = q; + + otherwise + error('Please select projection method to SSM from: {linear, nonlinear}'); +end +end \ No newline at end of file diff --git a/src/misc/reduced_dynamics_symbolic.m b/src/misc/reduced_dynamics_symbolic.m index cc1c769..e510e26 100644 --- a/src/misc/reduced_dynamics_symbolic.m +++ b/src/misc/reduced_dynamics_symbolic.m @@ -16,8 +16,8 @@ sympref('FloatingPointOutput',false); lamdRe = real(lamdMaster); lamdIm = imag(lamdMaster); -lamdRe = lamdRe(1:2:end-1); -lamdIm = lamdIm(1:2:end-1); +lamdRe = lamdRe(1:2:end); +lamdIm = lamdIm(1:2:end); order = numel(R0); m = numel(lamdRe); beta = cell(m,1); % coefficients - each cell corresponds to one mode diff --git a/src/misc/reduced_to_full.m b/src/misc/reduced_to_full.m new file mode 100644 index 0000000..60c9b2b --- /dev/null +++ b/src/misc/reduced_to_full.m @@ -0,0 +1,18 @@ +function [z] = reduced_to_full(p,W0,W1,epsilon) + +nt = size(p,2); + +if isempty(W1) || epsilon == 0 + N = size(W0{1}.coeffs ,1); + z = zeros(N,1); +else + W10 = W1{1}; + phi = linspace(0,2*pi,nt); % assuming single periodic frequency + z = epsilon * real( W10.coeffs * exp(1i * W10.kappas * phi)); +end + +for j = 1:length(W0) + z = z + real(expand_multiindex(W0{j},p)); +end + +end \ No newline at end of file diff --git a/src/misc/transient_traj_on_auto_ssm.m b/src/misc/transient_traj_on_auto_ssm.m new file mode 100644 index 0000000..97d80f3 --- /dev/null +++ b/src/misc/transient_traj_on_auto_ssm.m @@ -0,0 +1,52 @@ +function traj = transient_traj_on_auto_ssm(DS, modes, W_0, R_0, tf, nsteps, outdof, z0, varargin) +% TRANSIENT_TRAJ_ON_AUTO_SSM This function returns transient trajectory on +% autonomous SSM. Given an initial condition z0 in full system, we first +% project it on SSM (based on linear projection) and then perform time +% integration using ode45 for t\in[0,tf]. Trajectory at outdof is avaliable +% in the output. The inputs W_0 and R_0 can be obtained from compute_whisker +% routine. DS is the dynamical system class and modes represent master +% modes. 
+% +% varargin = s0 (initial condition in reduced coordinates) + +%% project z0 to reduced manifold +if numel(varargin)==0 + qinit = proj2SSM(z0,'linear',DS.spectrum.W(:,modes),DS.B); +else + qinit = varargin{1}; +end + +%% forward simulation in reduced dynamics +% extract coefficients and exponents +beta = []; +kappa = []; +for k = 2:numel(R_0) + R = R_0{k}; + betak = R.coeffs; + if ~isempty(betak) + kappak = R.ind; + % assemble terms + beta = [beta betak]; + kappa = [kappa; kappak]; + end +end +autData.lamd = DS.spectrum.Lambda(modes); +autData.beta = beta; +autData.kappa = kappa; + +% Construct ode45-compatible vector field +odefun = @(t,z) auto_red_dyn(z,autData); +% forward simulation of reduced dynamics +tsamp = linspace(0,tf,nsteps+1); +options = odeset('RelTol',1e-8,'AbsTol',1e-10); +[t,y] = ode45(@(t,y) odefun(t,y), tsamp, qinit, options); +% mapping it back to physical domain +state = transpose(y); +z = reduced_to_full_traj([],state,W_0); +zout = z(outdof,:); + +traj.time = t; +traj.red = y; +traj.phy = zout'; + +end \ No newline at end of file
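%% Usage sketch: transient trajectory on the autonomous SSM
% Illustrative only: it assumes a DynamicalSystem DS, an SSM object S,
% master modes resonant_modes, an expansion order, and outdof are already
% defined, that compute_whisker returns the autonomous expansion
% [W_0, R_0], and that DS.spectrum.V holds the right eigenvectors.

[W_0, R_0] = S.compute_whisker(order);     % autonomous SSM expansion
z0 = 1e-3*real(DS.spectrum.V(:,1));        % small initial condition in the full phase space
tf = 100; nsteps = 5000;
traj = transient_traj_on_auto_ssm(DS, resonant_modes, W_0, R_0, ...
    tf, nsteps, outdof, z0);
plot(traj.time, traj.phy(:,1));            % physical response at outdof(1)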