Skip to content

Commit

Permalink
Signed-off-by: wc253 <chenweibupt@gmail.com>
Browse files Browse the repository at this point in the history
  • Loading branch information
wc253 committed Dec 24, 2020
1 parent f1407b3 commit 6eb22f5
Show file tree
Hide file tree
Showing 20 changed files with 1,220 additions and 0 deletions.
132 changes: 132 additions & 0 deletions lib/LTDL_utilize/LTDL.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
function [Z_group,Da,De,errList,loss_list] = LTDL(Da,De,nclusters,All_group,R,lamda1,lamda2,par)
%LTDL  Optimization algorithm of the proposed Low-rank Tensor Dictionary
%   Learning (LTDL) model, solved with an ADMM-style alternating scheme.
%
%   Per iteration, for each tensor group k it updates:
%     C^(k) - sparse auxiliary variable (l1 soft-thresholding),
%     T^(k) - low-multilinear-rank approximation of the reconstruction
%             tmprod(Z,{Da,De},[1,2]) via truncated HOSVD,
%     Z^(k) - coefficient tensor (closed-form least-squares step in the
%             mode-3 unfolding),
%     Y^(k) - dual variable for the constraint C = Z,
%   and then refits the dictionaries Da, De via l2ls_learn_basis_dual.
%
%   Inputs:
%     Da, De    - initial spatial / spectral dictionaries
%     nclusters - number of tensor groups
%     All_group - 1 x nclusters cell array of third-order data tensors
%     R         - multilinear ranks, one column per group
%     lamda1    - sparsity (l1) regularization weight
%     lamda2    - low-rank fitting weight
%     par       - struct with fields epsilon, max_iter, rho, nu
%   Outputs:
%     Z_group   - learned coefficient tensors, one per group
%     Da, De    - learned dictionaries
%     errList   - relative-change history (only populated when the
%                 commented-out monitoring section below is enabled)
%     loss_list - objective history (same caveat)
%
%   NOTE(review): relies on tens2mat/mat2tens/tmprod (Tensorlab-style
%   helpers) and project helpers mysoft, hosvd1, l2ls_learn_basis_dual.
YC_group = cell(1,nclusters);      % dual variables Y^(k)
T_group = cell(1,nclusters);       % low-rank approximations T^(k)
epsilon = par.epsilon;             % tolerance (convergence check is commented out below)
max_iternum = par.max_iter;
rho = par.rho;                     % ADMM penalty parameter
nu = par.nu;                       % per-iteration growth factor for rho
errList = zeros(max_iternum, 1);
sizX = zeros(nclusters,3);
sizDa = size(Da,2);                % number of spatial atoms
sizDe = size(De,2);                % number of spectral atoms
loss_list = [];
%% initialize Z by least squares: Z3 = X3*D'*pinv(D*D')
Z_group = cell(1,nclusters);
D = (kron(De,Da))';                % mode-3 unfolded joint dictionary
invD = pinv(D*D');
lastZD = cell(1,nclusters);
for kk = 1:nclusters
    X = All_group{kk};
    sizX(kk,:) = size(X);
    X3 = tens2mat(X,3);
    Z3 = (X3*D')*invD;
    Z = mat2tens(Z3,[sizDa sizDe sizX(kk,3)],3);
    Z_group{kk} = Z;
    YC_group{kk} = zeros([sizDa sizDe sizX(kk,3)]);
    lastZD{kk} = tmprod(Z_group{kk},{Da,De},[1,2]);
end
fprintf('iter: ')                  % fixed: original literal contained mojibake ('iter£º ')
for k = 1:max_iternum
    %fprintf('Inter:%f \n',k);
    ZD = cell(1,nclusters);        % used only by the commented monitoring code below
    D = (kron(De,Da))';
    d = size(D);
    % svd of the symmetric Gram matrices yields their eigendecompositions;
    % with kU/kS below this gives the inverse of ((2+2*lamda2)*D*D' + rho*I)
    % needed for the Z update, without forming it explicitly.
    [Uspa,Sspa,~] = svd(Da'*Da);
    [Uspe,Sspe,~] = svd(De'*De);
    kU=kron(Uspe,Uspa);
    kS=kron(sum(Sspe),sum(Sspa));  % sum() collapses diagonal matrices to eigenvalue row vectors
    invDI = kU*diag(1./((2+2*lamda2)*kS+rho*ones(1,d(1))))*kU';
    for kk = 1:nclusters
        Z = Z_group{kk};
        X = All_group{kk};
        YC_kk = YC_group{kk};
        %% Update C
        C_kk = mysoft(Z-YC_kk./rho,lamda1/rho,1); %l1 norm soft-thresholding

        %% Update T (hosvd or hooi)
        % T = double(hosvd(tensor(tmprod(Z,{Da,De},[1,2])),1e-3,'ranks',(R(:,kk))'));
        T = hosvd1(tmprod(Z,{Da,De},[1,2]),(R(:,kk))');
        T_group{kk} = T;
        % Tu = tucker_als(tensor(tmprod(Z,{Da,De},[1,2])),(R(:,kk))','tol',1e-1,'printitn',0); %hooi
        % T = tmprod(double(Tu.core),Tu.U,[1,2,3]);
        % T_group{kk} = T;

        %% Update Z (closed-form solve in the mode-3 unfolding)
        dimsZ = [sizDa sizDe sizX(kk,3)];
        s = (tens2mat(2*X+2*lamda2*T,3))*D';
        Z3 = (s+tens2mat(rho*C_kk+YC_kk,3))*invDI;
        Z = mat2tens(Z3, dimsZ, 3);
        Z_group{kk} = Z;

        %% Update Y (dual ascent on the constraint C = Z)
        YC_group{kk} = YC_kk+rho*(C_kk-Z);
    end
    clear D kU s Z3 Z T Tu;
    %% Update Da and De (mode-1 / mode-2 dictionary subproblems)
    X = [];
    A = [];
    for i = 1:nclusters
        XX = (tens2mat(All_group{i}+lamda2*T_group{i},1))/(1+lamda2);
        X = [X,XX];
        AA = tens2mat(tmprod(Z_group{i},{De},[2]),1);
        A = [A,AA];
    end
    Da = l2ls_learn_basis_dual(X, A, 1, Da);
    % Da = I_clearDictionary(Da,A,X);
    X = [];
    A = [];
    for i = 1:nclusters
        XX = (tens2mat(All_group{i}+lamda2*T_group{i},2))/(1+lamda2);
        X = [X,XX];
        AA = tens2mat(tmprod(Z_group{i},{Da},[1]),2);
        A = [A,AA];
    end
    De = l2ls_learn_basis_dual(X, A, 1, De);
    % De = I_clearDictionary(De,A,X);
    clear XX X AA A

    rho = rho*nu;                  % increase the ADMM penalty each iteration

    %% show result with iteration (optional monitoring, disabled)
    % I = displayDictionaryElementsAsImage(Da, 8, floor(sizDa/8),par.block_sz(1),par.block_sz(2));
    % imshow(I)
    % loss = 0;
    % for i = 1:nclusters
    %     ZD{i} = tmprod(Z_group{i},{Da,De},[1,2]);
    %     errList(k) = errList(k) + frob(lastZD{i}-ZD{i})/frob(lastZD{i});
    %     loss_k = frob(ZD{i}-All_group{i})^2+lamda1*sum(abs(Z_group{i}(:)))+lamda2*frob(ZD{i}-T_group{i})^2;
    %     loss = loss + loss_k;
    % end
    % loss_list(k) = loss;
    % errList(k) = errList(k)/nclusters;
    % lastZD = ZD;
    % disp([sprintf('Ier: %.1f error=%.4f loss=%.2f',k,errList(k),loss)]);
    % if errList(k) < epsilon
    %     break
    % end
    fprintf('\b\b\b\b\b%5i',k);    % overwrite the previously printed counter
end
fprintf('\n');
end

% function Dictionary = I_clearDictionary(Dictionary,CoefMatrix,Data)
% % delete the correlated columns (atoms) in the dictionary and replace them with the data
% T2 = 0.99;
% T1 = 3;
% K=size(Dictionary,2);
% Er=sum((Data-Dictionary*CoefMatrix).^2,1); %remove identical atoms
% G=Dictionary'*Dictionary;
% G = G-diag(diag(G));
% max(G(:))
% for jj=1:1:K
% if max(G(jj,:))>T2 || length(find(abs(CoefMatrix(jj,:))>1e-7))<=T1
% [val,pos]=max(Er);
% Er(pos(1))=0;
% Dictionary(:,jj)=Data(:,pos(1))/norm(Data(:,pos(1)));
% G=Dictionary'*Dictionary; G = G-diag(diag(G));
% end
% end
% end
60 changes: 60 additions & 0 deletions lib/LTDL_utilize/LTDL_denoising.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
function [deno_msi] = LTDL_denoising(noisy_msi,sigma,par)
%LTDL_DENOISING  Denoise a multispectral image (MSI) with the LTDL model.
%
%   Inputs:
%     noisy_msi - noisy MSI, third dimension is spectral (msi_sz(3) bands)
%     sigma     - noise standard deviation
%     par       - parameter struct from Parset_LTDL
%   Output:
%     deno_msi  - denoised MSI, same size as noisy_msi
%
%   Each outer pass extracts overlapping block slices, clusters them with
%   kmeans++, learns dictionaries/coefficients per cluster via LTDL, and
%   reassembles the image. Duplicated grouping code of the original
%   if/else branches has been merged; behavior is unchanged.
msi_sz = size(noisy_msi);
deno_msi = noisy_msi;
% Dictionary sizes: widths are set by the redundancy ratios par.fatratio_D.
sizDa = [prod(par.block_sz),floor(par.fatratio_D(1)*prod(par.block_sz))];
sizDe = [msi_sz(3),floor(par.fatratio_D(2)*msi_sz(3))];
for ii = 1:par.numDenoise
    % Iterative regularization: feed back a fraction of the removed noise.
    nomcur_msi = deno_msi + par.delta*(noisy_msi - deno_msi);
    %% form all tensor groups
    noimsi_slices = extract_slices(nomcur_msi,par,msi_sz);
    X = reshape(noimsi_slices, prod(par.block_sz)*msi_sz(3), size(noimsi_slices, 3))';
    mean_blocks = 100;                 % target average number of blocks per cluster
    nclusters = ceil(prod(par.block_num)/(mean_blocks));
    fkmeans_opt.careful = 1;
    [idx,nclusters,~,~] = myfkmeans(X, nclusters, fkmeans_opt); % kmeans++
    clear X
    %% initialization of dictionaries: random with unit-norm columns
    Da = randn(sizDa);
    De = randn(sizDe);
    Da = Da*diag(1./sqrt(sum(Da.*Da)));
    De = De*diag(1./sqrt(sum(De.*De)));

    % Noise level driving the regularization weights: the given sigma on
    % the first pass, re-estimated from the residual afterwards.
    if(ii == 1)
        origi_noimsi_slices = noimsi_slices; % keep first-pass slices for rank estimation
        sigma_est = sigma;
    else
        A = (noisy_msi - deno_msi).^2;
        sigma_est = sqrt(abs(sigma^2- mean(A(:)))); % estimate noise degree
        % par.max_iter = max(par.max_iter-10, 30);
    end
    lamdaS = par.cS*sigma_est; % sparsity weight
    lamdaR = par.cR*sigma_est; % low-rank weight

    % Group slices by cluster label; first-pass slices are regrouped with
    % the current labels for rank estimation below.
    Nblocks = zeros(1,nclusters);
    All_group = cell(1,nclusters);
    origi_All_group = cell(1,nclusters);
    for k = 1:nclusters
        members = (idx==k);
        Nblocks(k) = nnz(members);
        All_group{k} = noimsi_slices(:, :, members);
        origi_All_group{k} = origi_noimsi_slices(:, :, members);
    end

    %% training Da,De and Z^(k) for all tensor groups
    R = setR(origi_All_group,nclusters);
    [Z_group,Da,De,~,~] = LTDL(Da,De,nclusters,All_group,R,lamdaS,lamdaR,par);
    %% reconstruct denoised MSI
    clean_slices = zeros(prod(par.block_sz),msi_sz(3),prod(par.block_num));
    for k = 1:nclusters
        clean_slices(:,:,idx==k) = tmprod(Z_group{k},{Da,De},[1 2]);
    end
    deno_msi = joint_slices(clean_slices,par,msi_sz);
    clear All_group idx clean_slices Z_group
end
end
107 changes: 107 additions & 0 deletions lib/LTDL_utilize/LTDL_syth.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
function [Z_group,Da,De,loss_list,Acc] = LTDL_syth(Da,De,nclusters,All_group,R,lamda1,lamda2,par,D1,D2)
%
% the optimization algorithm of proposed Low-rank Tensor Dictionary Learning (LTDL)
% - synthetic-experiment variant: additionally measures, each iteration,
% how many atoms of the learned dictionary kron(De,Da) match the
% ground-truth dictionary kron(D2,D1), and tracks the objective value.
%
% Inputs:
%   Da, De    - initial spatial / spectral dictionaries
%   nclusters - number of tensor groups
%   All_group - 1 x nclusters cell array of third-order data tensors
%   R         - multilinear ranks, one column per group
%   lamda1    - sparsity (l1) weight
%   lamda2    - low-rank fitting weight
%   par       - struct with fields epsilon, max_iter, rho, nu
%   D1, D2    - ground-truth dictionaries for atom-recovery accuracy
% Outputs:
%   Z_group   - learned coefficient tensors
%   Da, De    - learned dictionaries
%   loss_list - objective value per iteration
%   Acc       - matched-atom count per iteration
%
% NOTE(review): relies on tens2mat/mat2tens/tmprod (Tensorlab-style
% helpers) and project helpers mysoft, hosvd1, l2ls_learn_basis_dual,
% matched_atoms, frob.
C_group = cell(1,nclusters);      % sparse auxiliary variables C^(k)
YC_group = cell(1,nclusters);     % dual variables Y^(k)
T_group = cell(1,nclusters);      % low-rank approximations T^(k)
epsilon = par.epsilon;            % convergence tolerance on relative change
max_iternum = par.max_iter;
rho = par.rho;                    % ADMM penalty parameter
nu = par.nu;                      % per-iteration growth factor for rho
loss_list = zeros(1,max_iternum);
Acc = zeros(1,max_iternum);
sizX = zeros(nclusters,3);
sizDa = size(Da,2);               % number of spatial atoms
sizDe = size(De,2);               % number of spectral atoms
%% initialize Z by least squares: Z3 = X3*D'*pinv(D*D')
Z_group = cell(1,nclusters);
D = (kron(De,Da))';               % mode-3 unfolded joint dictionary
invD = pinv(D*D');
lastZD = cell(1,nclusters);
for kk = 1:nclusters
    X = All_group{kk};
    sizX(kk,:) = size(X);
    X3 = tens2mat(X,3);
    Z3 = (X3*D')*invD;
    Z = mat2tens(Z3,[sizDa sizDe sizX(kk,3)],3);
    Z_group{kk} = Z;
    YC_group{kk} = zeros([sizDa sizDe sizX(kk,3)]);
    lastZD{kk} = tmprod(Z,{Da,De},[1,2]);
end
for k = 1:max_iternum
    %fprintf('Inter:%f \n',k);
    ZD = cell(1,nclusters);       % reconstructions, used for convergence/loss tracking
    D = (kron(De,Da))';
    d = size(D);
    % svd of the symmetric Gram matrices yields their eigendecompositions;
    % invDI below applies the inverse of ((2+2*lamda2)*D*D' + rho*I)
    % without forming it explicitly.
    [Uspa,Sspa,~] = svd(Da'*Da);
    [Uspe,Sspe,~] = svd(De'*De);
    kU=kron(Uspe,Uspa);
    kS=kron(sum(Sspe),sum(Sspa)); % sum() collapses diagonal matrices to eigenvalue row vectors
    invDI = kU*diag(1./((2+2*lamda2)*kS+rho*ones(1,d(1))))*kU';
    for kk = 1:nclusters
        Z = Z_group{kk};
        X = All_group{kk};
        %% Update C
        C_group{kk} = mysoft(Z-YC_group{kk}./rho,lamda1/rho,1); %l1 norm soft-thresholding

        %% Update T (hosvd or hooi)
        % T = double(hosvd(tensor(tmprod(Z,{Da,De},[1,2])),1e-3,'ranks',(R(:,kk))'));
        T = hosvd1(tmprod(Z,{Da,De},[1,2]),(R(:,kk))');
        T_group{kk} = T;
        % Tu = tucker_als(tensor(tmprod(Z,{Da,De},[1,2])),(R(:,kk))','tol',1e-1,'printitn',0); %hooi
        % T = tmprod(double(Tu.core),Tu.U,[1,2,3]);
        % T_group{kk} = T;

        %% Update Z (closed-form solve in the mode-3 unfolding)
        dimsZ = [sizDa sizDe sizX(kk,3)];
        s = (tens2mat(2*X+2*lamda2*T,3))*D';
        Z3 = (s+tens2mat(rho*C_group{kk}+YC_group{kk},3))*invDI;
        Z = mat2tens(Z3, dimsZ, 3);
        Z_group{kk} = Z;

        %% Update Y (dual ascent on the constraint C = Z)
        YC_group{kk} = YC_group{kk}+rho*(C_group{kk}-Z_group{kk});

    end
    clear D kU s Z3 Z T Tu;
    %% Update Da and De (mode-1 / mode-2 dictionary subproblems)
    X = [];
    A = [];
    for i = 1:nclusters
        XX = (tens2mat(All_group{i}+lamda2*T_group{i},1))/(1+lamda2);
        X = [X,XX];
        AA = tens2mat(tmprod(Z_group{i},{De},[2]),1);
        A = [A,AA];
    end
    Da = l2ls_learn_basis_dual(X, A, 1, Da);
    X = [];
    A = [];
    for i = 1:nclusters
        XX = (tens2mat(All_group{i}+lamda2*T_group{i},2))/(1+lamda2);
        X = [X,XX];
        AA = tens2mat(tmprod(Z_group{i},{Da},[1]),2);
        A = [A,AA];
    end
    De = l2ls_learn_basis_dual(X, A, 1, De);
    clear XX X AA A

    rho = rho*nu;                 % increase the ADMM penalty each iteration
    %% Calculate the accuracy of matched atoms
    Acc(k) = matched_atoms(kron(De,Da),kron(D2,D1),0.01);
    loss = 0;
    err = 0;
    % Objective value and relative change of the reconstructions.
    for i = 1:nclusters
        ZD{i} = tmprod(Z_group{i},{Da,De},[1,2]);
        err = err + frob(lastZD{i}-ZD{i})/frob(lastZD{i});
        loss_k = frob(ZD{i}-All_group{i})^2+lamda1*sum(abs(Z_group{i}(:)))+lamda2*frob(ZD{i}-T_group{i})^2;
        loss = loss + loss_k;
    end
    loss_list(k) = loss;
    lastZD = ZD;
    disp([sprintf('Ier: %.0f objective value=%.2f success recovery atoms=%.0f',k,loss_list(k),Acc(k))]);
    % Stop once the average relative change falls below the tolerance.
    if err/nclusters < epsilon
        break
    end
end
end
18 changes: 18 additions & 0 deletions lib/LTDL_utilize/Parset_LTDL.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
function par = Parset_LTDL(msi_sz)
%PARSET_LTDL  Build the default parameter struct for LTDL denoising.
%
%   Input:
%     msi_sz - size vector of the MSI; msi_sz(1:2) are the spatial dims
%   Output:
%     par    - parameter struct consumed by LTDL_denoising / LTDL
%
%   If you want to test the LTDL on cropped MSIs, please tune the
%   redundancy ratio of dictionaries between 1.2-1.5.
par.fatratio_D = [1.5,1.5]; % redundancy ratio of the two dictionaries
par.nu = 1.05;              % growth factor for the ADMM penalty rho
par.cS = 2;                 % coefficient of the sparsity weight
par.cR = 1000;              % coefficient of the low-rank weight (1000)
par.delta = 0.2;            % noise feed-back fraction between passes
par.numDenoise = 2;         % number of outer denoising passes
par.block_sz = [7 7];       % spatial block size
par.overlap_sz = [5 5];     % spatial overlap between adjacent blocks
% Block-grid geometry derived from the image size.
step = par.block_sz - par.overlap_sz;  % stride between adjacent blocks
span = msi_sz(1:2) - par.overlap_sz;   % coverable spatial extent
par.block_num = ceil(span./step);      % number of blocks per dimension
par.remnum = rem(span,step);           % leftover pixels per dimension
% parameters for the optimization algorithm
par.epsilon = 1e-4;         % convergence tolerance
par.max_iter = 30;          % maximum LTDL iterations
par.rho = 1;                % initial ADMM penalty
end
45 changes: 45 additions & 0 deletions lib/LTDL_utilize/calc_curve.m
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% name: calc_curve.m
%
% This is sub-routine of SCORE algorithm to calculate modified eigenvalue estimator by using HOSVD core tensor, and calculate evaluation values of MDL(BIC).
%
% This code was implemented by T. Yokota
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

function [mdl l2 rho v2] = calc_curve(H1,rho,ab);
% Sub-routine of the SCORE algorithm: computes a modified eigenvalue
% estimator from the matricized HOSVD core tensor H1 and evaluates the
% MDL (or BIC) model-selection criterion for each candidate rank.
%
% Inputs:
%   H1  - p x L matricized core tensor
%   rho - fraction used to pick the truncation level L2 = round(rho*L)
%   ab  - 'bic' selects the BIC penalty; any other value uses the MDL form
% Outputs:
%   mdl - (p-1) x 1 criterion values, one per candidate rank k
%   l2  - modified eigenvalue estimates, sorted in descending order
%   rho - returned unchanged
%   v2  - mean of the trailing (tail) column-energy estimates

[p L] = size(H1);

% The candidate rank cannot exceed the smaller dimension.
if L < p
p = L;
end

mu = sum(H1.^2,1)/p;        % mean energy per column
[mu IDD]= sort(mu,'descend');
lm = sum(H1.^2,2)/L;        % NOTE(review): computed but never used below

L2 = max(1,round(rho*L)); %
v2 = mean(mu(L2:end));      % tail mean of the sorted column energies

% Modified eigenvalue estimator: per row, sum the L2 largest squared
% entries. (IDD from the earlier sort is overwritten here.)
[HS IDD] = sort((H1.^2)','descend');
l2 = sort(sum(HS(1:L2,:),1),'descend');
L2 = L;                     % NOTE(review): L2 repurposed as the sample count in the penalty term
%HS = zeros(p,L2); %
%for i = 1:L2
% HS(:,i) = H1(:,IDD(i));
%end

%l2 = sort(sum(HS.^2,2),'descend')/L2; %

% For each candidate rank k: goodness-of-fit term on the assumed-noise
% eigenvalues plus a penalty on the k*(2p-k) free parameters.
for k = 1:(p-1)
v = mean(l2(k+1:p));        % mean of the eigenvalues treated as noise

if strcmp(ab,'bic')
mdl(k,1) = -sum(log(l2(k+1:p))) + (p-k)*log(v) + k*(2*p-k)*log(L2)/L2/2;
else
mdl(k,1) = -sum(log(l2(k+1:p))) + (p-k)*log(v) + k*(2*p-k)/L2;
end

end


Loading

0 comments on commit 6eb22f5

Please sign in to comment.