diffusion_utils.py

import math
import torch
import numpy as np


def make_beta_schedule(schedule="linear", num_timesteps=1000, start=1e-5, end=1e-2):
    """Build the noise variance schedule beta_1, ..., beta_T."""
    if schedule == "linear":
        betas = torch.linspace(start, end, num_timesteps)
    elif schedule == "const":
        betas = end * torch.ones(num_timesteps)
    elif schedule == "quad":
        # quadratic schedule: linear in sqrt(beta)
        betas = torch.linspace(start ** 0.5, end ** 0.5, num_timesteps) ** 2
    elif schedule == "jsd":
        # 1/T, 1/(T-1), ..., 1
        betas = 1.0 / torch.linspace(num_timesteps, 1, num_timesteps)
    elif schedule == "sigmoid":
        betas = torch.linspace(-6, 6, num_timesteps)
        betas = torch.sigmoid(betas) * (end - start) + start
    elif schedule == "cosine" or schedule == "cosine_reverse":
        max_beta = 0.999
        cosine_s = 0.008
        betas = torch.tensor([
            min(1 - (math.cos(((i + 1) / num_timesteps + cosine_s) / (1 + cosine_s) * math.pi / 2) ** 2)
                / (math.cos((i / num_timesteps + cosine_s) / (1 + cosine_s) * math.pi / 2) ** 2),
                max_beta)
            for i in range(num_timesteps)
        ])
        if schedule == "cosine_reverse":
            betas = betas.flip(0)  # starts at max_beta then decreases fast
    elif schedule == "cosine_anneal":
        betas = torch.tensor([
            start + 0.5 * (end - start) * (1 - math.cos(t / (num_timesteps - 1) * math.pi))
            for t in range(num_timesteps)
        ])
    else:
        raise NotImplementedError(f"unknown beta schedule: {schedule}")
    return betas
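

# --- Illustrative helper (not in the original file) ---------------------------
# A minimal sketch of how a beta schedule is typically turned into the
# `alphas`, `alphas_bar_sqrt`, and `one_minus_alphas_bar_sqrt` tensors that the
# sampling functions below expect. The helper name `compute_alpha_terms` is
# ours, chosen for this example.
def compute_alpha_terms(betas):
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)                 # \bar{alpha}_t
    alphas_bar_sqrt = torch.sqrt(alphas_cumprod)                  # sqrt(\bar{alpha}_t)
    one_minus_alphas_bar_sqrt = torch.sqrt(1.0 - alphas_cumprod)  # sqrt(1 - \bar{alpha}_t)
    return alphas, alphas_bar_sqrt, one_minus_alphas_bar_sqrt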


def extract(values, t, x):
    """Gather the schedule entries for timesteps t and reshape them so they
    broadcast against a batch shaped like x."""
    shape = x.shape
    out = torch.gather(values, 0, t.to(values.device))
    reshape = [t.shape[0]] + [1] * (len(shape) - 1)
    return out.reshape(*reshape)
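

# Illustrative check (ours, not the repo's): `extract` picks one schedule entry
# per batch element and reshapes it to broadcast against the batch.
def _extract_demo():
    betas = make_beta_schedule("linear", num_timesteps=10)
    t = torch.tensor([0, 4, 9])   # one timestep index per batch element
    x = torch.zeros(3, 2)         # batch of 3 two-dimensional targets
    coeff = extract(betas, t, x)  # shape (3, 1), broadcastable with x
    assert coeff.shape == (3, 1)
    return coeff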


# Forward functions
def q_sample(y, y_0_hat, alphas_bar_sqrt, one_minus_alphas_bar_sqrt, t, noise=None):
    """
    y_0_hat: prediction of the pre-trained guidance model; can be extended to
    represent any prior mean setting at timestep T.
    """
    if noise is None:
        noise = torch.randn_like(y).to(y.device)
    sqrt_alpha_bar_t = extract(alphas_bar_sqrt, t, y)
    sqrt_one_minus_alpha_bar_t = extract(one_minus_alphas_bar_sqrt, t, y)
    # q(y_t | y_0, x)
    y_t = sqrt_alpha_bar_t * y + (1 - sqrt_alpha_bar_t) * y_0_hat + sqrt_one_minus_alpha_bar_t * noise
    return y_t
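

# Illustrative usage (ours): diffuse a batch of scalar targets to random
# timesteps. The guidance prediction y_0_hat would normally come from the
# pre-trained f_phi(x); zeros serve as a stand-in here. Relies on the
# `compute_alpha_terms` helper sketched above.
def _q_sample_demo(num_timesteps=1000):
    betas = make_beta_schedule("linear", num_timesteps=num_timesteps)
    _, alphas_bar_sqrt, one_minus_alphas_bar_sqrt = compute_alpha_terms(betas)
    y_0 = torch.randn(8, 1)
    y_0_hat = torch.zeros_like(y_0)
    t = torch.randint(low=0, high=num_timesteps, size=(8,))
    return q_sample(y_0, y_0_hat, alphas_bar_sqrt, one_minus_alphas_bar_sqrt, t)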


# Reverse function -- sample y_{t-1} given y_t
def p_sample(model, x, y, y_0_hat, y_T_mean, t, alphas, one_minus_alphas_bar_sqrt):
    """
    Reverse diffusion process sampling -- one time step.

    y: sampled y at time step t, y_t.
    y_0_hat: prediction of the pre-trained guidance model.
    y_T_mean: mean of the prior distribution at timestep T.

    We use y_T_mean (rather than y_0_hat) in the forward-process posterior mean
    computation to emphasize that the guidance model prediction y_0_hat = f_phi(x)
    is part of the input to the eps_theta network, while in the paper we also
    choose to set the prior mean at timestep T to y_T_mean = f_phi(x).
    """
    device = next(model.parameters()).device
    z = torch.randn_like(y)  # if t > 1 else torch.zeros_like(y)
    t = torch.tensor([t]).to(device)
    alpha_t = extract(alphas, t, y)
    sqrt_one_minus_alpha_bar_t = extract(one_minus_alphas_bar_sqrt, t, y)
    sqrt_one_minus_alpha_bar_t_m_1 = extract(one_minus_alphas_bar_sqrt, t - 1, y)
    sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
    sqrt_alpha_bar_t_m_1 = (1 - sqrt_one_minus_alpha_bar_t_m_1.square()).sqrt()
    # y_{t-1} posterior mean component coefficients
    gamma_0 = (1 - alpha_t) * sqrt_alpha_bar_t_m_1 / (sqrt_one_minus_alpha_bar_t.square())
    gamma_1 = (sqrt_one_minus_alpha_bar_t_m_1.square()) * (alpha_t.sqrt()) / (sqrt_one_minus_alpha_bar_t.square())
    gamma_2 = 1 + (sqrt_alpha_bar_t - 1) * (alpha_t.sqrt() + sqrt_alpha_bar_t_m_1) / (
        sqrt_one_minus_alpha_bar_t.square())
    eps_theta = model(x, y, y_0_hat, t).to(device).detach()
    # y_0 reparameterization
    y_0_reparam = 1 / sqrt_alpha_bar_t * (
            y - (1 - sqrt_alpha_bar_t) * y_T_mean - eps_theta * sqrt_one_minus_alpha_bar_t)
    # posterior mean
    y_t_m_1_hat = gamma_0 * y_0_reparam + gamma_1 * y + gamma_2 * y_T_mean
    # posterior variance
    beta_t_hat = (sqrt_one_minus_alpha_bar_t_m_1.square()) / (sqrt_one_minus_alpha_bar_t.square()) * (1 - alpha_t)
    y_t_m_1 = y_t_m_1_hat.to(device) + beta_t_hat.sqrt().to(device) * z.to(device)
    return y_t_m_1


# Reverse function -- sample y_0 given y_1
def p_sample_t_1to0(model, x, y, y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt):
    device = next(model.parameters()).device
    t = torch.tensor([0]).to(device)  # tensor index 0 corresponds to diffusion timestep t = 1
    sqrt_one_minus_alpha_bar_t = extract(one_minus_alphas_bar_sqrt, t, y)
    sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
    eps_theta = model(x, y, y_0_hat, t).to(device).detach()
    # y_0 reparameterization
    y_0_reparam = 1 / sqrt_alpha_bar_t * (
            y - (1 - sqrt_alpha_bar_t) * y_T_mean - eps_theta * sqrt_one_minus_alpha_bar_t)
    y_t_m_1 = y_0_reparam.to(device)
    return y_t_m_1


def p_sample_loop(model, x, y_0_hat, y_T_mean, n_steps, alphas, one_minus_alphas_bar_sqrt):
    """Run the full reverse chain, returning the list [y_T, y_{T-1}, ..., y_1, y_0]."""
    device = next(model.parameters()).device
    z = torch.randn_like(y_T_mean).to(device)
    cur_y = z + y_T_mean  # sample y_T from the prior N(y_T_mean, I)
    y_p_seq = [cur_y]
    for t in reversed(range(1, n_steps)):  # t from T to 2
        y_t = cur_y
        cur_y = p_sample(model, x, y_t, y_0_hat, y_T_mean, t, alphas, one_minus_alphas_bar_sqrt)  # y_{t-1}
        y_p_seq.append(cur_y)
    assert len(y_p_seq) == n_steps
    y_0 = p_sample_t_1to0(model, x, y_p_seq[-1], y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt)
    y_p_seq.append(y_0)
    return y_p_seq
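

# --- Illustrative end-to-end sketch (ours, not the repo's) --------------------
# `ToyEpsModel` is a hypothetical stand-in for the trained eps_theta network;
# it only mirrors the model(x, y, y_0_hat, t) call signature used above.
class ToyEpsModel(torch.nn.Module):
    def __init__(self, x_dim=4, y_dim=1):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(x_dim + 2 * y_dim + 1, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, y_dim),
        )

    def forward(self, x, y, y_0_hat, t):
        t_feat = t.float().reshape(1, 1).expand(y.shape[0], 1)  # broadcast the scalar timestep
        return self.net(torch.cat([x, y, y_0_hat, t_feat], dim=-1))


def _p_sample_loop_demo(n_steps=50):
    # Hypothetical usage: n_steps plays the role of T; the schedule terms come
    # from the `compute_alpha_terms` helper sketched earlier.
    betas = make_beta_schedule("linear", num_timesteps=n_steps)
    alphas, _, one_minus_alphas_bar_sqrt = compute_alpha_terms(betas)
    model = ToyEpsModel(x_dim=4, y_dim=1)
    x = torch.randn(8, 4)
    y_0_hat = torch.zeros(8, 1)  # stand-in for f_phi(x)
    y_T_mean = y_0_hat           # the paper sets the prior mean at T to f_phi(x)
    y_p_seq = p_sample_loop(model, x, y_0_hat, y_T_mean, n_steps,
                            alphas, one_minus_alphas_bar_sqrt)
    return y_p_seq[-1]           # approximate sample from p(y_0 | x)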


# Evaluation with KLD
def kld(y1, y2, grid=(-20, 20), num_grid=400):
    """Histogram-based KL divergence KL(p_y1 || p_y2) on a fixed grid."""
    # detach/cpu so GPU tensors and tensors requiring grad are also accepted
    y1, y2 = y1.detach().cpu().numpy().flatten(), y2.detach().cpu().numpy().flatten()
    p_y1, _ = np.histogram(y1, bins=num_grid, range=[grid[0], grid[1]], density=True)
    p_y1 += 1e-7  # avoid log(0) and division by zero
    p_y2, _ = np.histogram(y2, bins=num_grid, range=[grid[0], grid[1]], density=True)
    p_y2 += 1e-7
    return (p_y1 * np.log(p_y1 / p_y2)).sum()
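

# Illustrative sanity check (ours): the histogram KLD should be near zero for
# two samples from the same distribution and grow as the distributions diverge.
def _kld_demo():
    y_ref = torch.randn(10000)
    y_close = torch.randn(10000)
    y_far = torch.randn(10000) + 3.0
    return kld(y_ref, y_close), kld(y_ref, y_far)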