import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.functional as F
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
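# Defaults for the monotonic rational-quadratic spline (RQS) transforms defined below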
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class MaskedLinear(nn.Module):
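    """Linear layer whose weight is multiplied by a fixed binary mask so that each
    output (group) only depends on a prefix of the inputs, giving an autoregressive
    dependency structure; with diagonal_zeros=True the dependence is strict."""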
def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True):
super(MaskedLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
        mask = torch.from_numpy(self.build_mask())
        # fixed (non-trainable) mask, registered as a buffer so it follows the module across devices
        self.register_buffer("mask", mask)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_in % n_out == 0 or n_out % n_in == 0
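        # Block lower-triangular mask: output (group) i may only depend on input
        # (group) j <= i, or j < i when diagonal_zeros=True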
mask = np.ones((n_in, n_out), dtype=np.float32)
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i + 1 :, i * k : (i + 1) * k] = 0
if self.diagonal_zeros:
mask[i : i + 1, i * k : (i + 1) * k] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[(i + 1) * k :, i : i + 1] = 0
if self.diagonal_zeros:
                    mask[i * k : (i + 1) * k, i : i + 1] = 0
return mask
def forward(self, x):
output = x.mm(self.mask * self.weight)
if self.bias is not None:
return output.add(self.bias.expand_as(output))
else:
return output
    def __repr__(self):
        return (
            f"{self.__class__.__name__} ({self.in_features} -> {self.out_features}, "
            f"diagonal_zeros={self.diagonal_zeros}, bias={self.bias is not None})"
        )
class MaskedConv2d(nn.Module):
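    """2-D convolution whose kernel is multiplied by a fixed binary mask: taps that
    precede the centre tap in row-major order are zeroed, and at the centre tap the
    input channels are masked autoregressively (strictly so when diagonal_zeros=True)."""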
def __init__(
self,
in_features,
out_features,
size_kernel=(3, 3),
diagonal_zeros=False,
bias=True,
):
super(MaskedConv2d, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.size_kernel = size_kernel
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(
torch.FloatTensor(out_features, in_features, *self.size_kernel)
)
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
        mask = torch.from_numpy(self.build_mask())
        # fixed (non-trainable) mask, registered as a buffer so it follows the module across devices
        self.register_buffer("mask", mask)
self.reset_parameters()
def reset_parameters(self):
        nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out)
# Build autoregressive mask
l = (self.size_kernel[0] - 1) // 2
m = (self.size_kernel[1] - 1) // 2
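        # (l, m) is the centre tap of the kernel: every tap that precedes the centre
        # in row-major order is zeroed, and the centre tap itself gets a channel-wise
        # autoregressive mask below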
mask = np.ones(
(n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32
)
mask[:, :, :l, :] = 0
mask[:, :, l, :m] = 0
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i * k : (i + 1) * k, i + 1 :, l, m] = 0
if self.diagonal_zeros:
mask[i * k : (i + 1) * k, i : i + 1, l, m] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[i : i + 1, (i + 1) * k :, l, m] = 0
if self.diagonal_zeros:
                    mask[i : i + 1, i * k : (i + 1) * k, l, m] = 0
return mask
def forward(self, x):
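        # NOTE: padding=(1, 1) preserves the spatial size only for the default 3x3 kernel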
output = F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))
return output
    def __repr__(self):
        return (
            f"{self.__class__.__name__} ({self.in_features} -> {self.out_features}, "
            f"diagonal_zeros={self.diagonal_zeros}, bias={self.bias is not None}, "
            f"size_kernel={self.size_kernel})"
        )
class CNN_Flow_Layer(nn.Module):
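    """One layer of a dilated 1-D convolutional flow: a dilated Conv1d followed by a
    leaky-ReLU nonlinearity, an optional learned per-dimension rescaling (constrained
    so the transformation stays invertible), and an optional additive skip connection.
    forward() returns the transformed tensor together with the log-determinant of the
    Jacobian."""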
def __init__(
self, dim, kernel_size, dilation, test_mode=0, rescale=True, skip=True
):
super(CNN_Flow_Layer, self).__init__()
self.dim = dim
self.kernel_size = kernel_size
self.dilation = dilation
self.test_mode = test_mode
self.rescale = rescale
self.skip = skip
self.usecuda = True
        if self.rescale:
            # the last layer of the flow needs to account for the scale of the target variable
            self.lmbd = nn.Parameter(torch.FloatTensor(self.dim).normal_())
self.conv1d = nn.Conv1d(1, 1, kernel_size, dilation=dilation)
def forward(self, x):
# pad zero to the right
padded_x = F.pad(x, (0, (self.kernel_size - 1) * self.dilation))
        conv1d = self.conv1d(padded_x.unsqueeze(1)).squeeze()  # (bs, width) after squeezing the channel dim
w = self.conv1d.weight.squeeze()
        # make sure u[i] * w[0] >= -1 so that x + h(conv(x)) stays invertible (h' lies in [0, 1])
neg_slope = 1e-2
activation = F.leaky_relu(conv1d, negative_slope=neg_slope)
activation_gradient = (activation >= 0).float() + (
activation < 0
).float() * neg_slope
        if self.rescale:
            # for 0 <= h'(x) <= 1, ensure scale * w[0] > -1 (keeps the skip-connected layer invertible);
            # self.lmbd only exists when rescale=True, so the scale is computed inside this branch
            scale = (
                (w[0] == 0).float() * self.lmbd
                + (w[0] > 0).float() * (-1.0 / w[0] + F.softplus(self.lmbd))
                + (w[0] < 0).float() * (-1.0 / w[0] - F.softplus(self.lmbd))
            )
            if self.test_mode:
                activation = activation.unsqueeze(dim=0)
                activation_gradient = activation_gradient.unsqueeze(dim=0)
            output = activation.mm(torch.diag(scale))
            activation_gradient = activation_gradient.mm(torch.diag(scale))
        else:
            output = activation
if self.skip:
output = output + x
logdet = torch.log(torch.abs(activation_gradient * w[0] + 1)).sum(1)
else:
logdet = torch.log(torch.abs(activation_gradient * w[0])).sum(1)
return output, logdet
class Dilation_Block(nn.Module):
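    """Stack of CNN_Flow_Layer modules with dilations 1, 2, 4, ..., doubling until the
    dilation exceeds dim; forward() applies them in sequence and sums their
    log-determinants."""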
def __init__(self, dim, kernel_size, test_mode=0):
super(Dilation_Block, self).__init__()
self.block = nn.ModuleList()
i = 0
while 2**i <= dim:
conv1d = CNN_Flow_Layer(
dim, kernel_size, dilation=2**i, test_mode=test_mode
)
self.block.append(conv1d)
i += 1
def forward(self, x):
logdetSum = 0
output = x
for i in range(len(self.block)):
output, logdet = self.block[i](output)
logdetSum += logdet
return output, logdetSum
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
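        # adj is typically a sparse, pre-normalized adjacency matrix; torch.spmm performs the sparse-dense product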
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
    def __repr__(self):
        return f"{self.__class__.__name__} ({self.in_features} -> {self.out_features})"
class FCNN(nn.Module):
"""
Simple fully connected neural network.
"""
def __init__(self, in_dim, out_dim, hidden_dim):
super().__init__()
self.network = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, hidden_dim),
nn.Tanh(),
nn.Linear(hidden_dim, out_dim),
)
def forward(self, x):
return self.network(x)
def searchsorted(bin_locations, inputs, eps=1e-6):
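    # Returns, for each input, the index of the bin it falls into, i.e. the number
    # of bin edges <= input minus one; eps nudges the last edge so values on the
    # upper boundary still land in the final bin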
bin_locations[..., -1] += eps
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
def unconstrained_RQS(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
tail_bound=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
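    """Rational-quadratic spline with linear tails: inputs inside
    [-tail_bound, tail_bound] are transformed by the monotonic RQS below, while
    inputs outside that interval are passed through unchanged with zero
    log-determinant (cf. Durkan et al., "Neural Spline Flows", 2019)."""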
inside_intvl_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
outside_interval_mask = ~inside_intvl_mask
outputs = torch.zeros_like(inputs)
logabsdet = torch.zeros_like(inputs)
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
constant = np.log(np.exp(1 - min_derivative) - 1)
unnormalized_derivatives[..., 0] = constant
unnormalized_derivatives[..., -1] = constant
outputs[outside_interval_mask] = inputs[outside_interval_mask]
logabsdet[outside_interval_mask] = 0
outputs[inside_intvl_mask], logabsdet[inside_intvl_mask] = RQS(
inputs=inputs[inside_intvl_mask],
unnormalized_widths=unnormalized_widths[inside_intvl_mask, :],
unnormalized_heights=unnormalized_heights[inside_intvl_mask, :],
unnormalized_derivatives=unnormalized_derivatives[inside_intvl_mask, :],
inverse=inverse,
left=-tail_bound,
right=tail_bound,
bottom=-tail_bound,
top=tail_bound,
min_bin_width=min_bin_width,
min_bin_height=min_bin_height,
min_derivative=min_derivative,
)
return outputs, logabsdet
def RQS(
inputs,
unnormalized_widths,
unnormalized_heights,
unnormalized_derivatives,
inverse=False,
left=0.0,
right=1.0,
bottom=0.0,
top=1.0,
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
min_derivative=DEFAULT_MIN_DERIVATIVE,
):
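    """Monotonic rational-quadratic spline on [left, right] x [bottom, top].

    Bin widths and heights are obtained from softmaxes of the unnormalized inputs,
    knot derivatives from a softplus, and the spline (or its inverse when
    inverse=True) is evaluated bin-wise; returns the transformed values together
    with the log absolute determinant of the Jacobian."""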
# if torch.min(inputs) < left or torch.max(inputs) > right:
# raise ValueError("Input outside domain")
num_bins = unnormalized_widths.shape[-1]
if min_bin_width * num_bins > 1.0:
raise ValueError("Minimal bin width too large for the number of bins")
if min_bin_height * num_bins > 1.0:
raise ValueError("Minimal bin height too large for the number of bins")
widths = F.softmax(unnormalized_widths, dim=-1)
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
cumwidths = torch.cumsum(widths, dim=-1)
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
cumwidths = (right - left) * cumwidths + left
cumwidths[..., 0] = left
cumwidths[..., -1] = right
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
heights = F.softmax(unnormalized_heights, dim=-1)
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
cumheights = torch.cumsum(heights, dim=-1)
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
cumheights = (top - bottom) * cumheights + bottom
cumheights[..., 0] = bottom
cumheights[..., -1] = top
heights = cumheights[..., 1:] - cumheights[..., :-1]
if inverse:
bin_idx = searchsorted(cumheights, inputs)[..., None]
else:
bin_idx = searchsorted(cumwidths, inputs)[..., None]
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
delta = heights / widths
input_delta = delta.gather(-1, bin_idx)[..., 0]
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)
input_derivatives_plus_one = input_derivatives_plus_one[..., 0]
input_heights = heights.gather(-1, bin_idx)[..., 0]
if inverse:
a = (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
) + input_heights * (input_delta - input_derivatives)
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
input_derivatives + input_derivatives_plus_one - 2 * input_delta
)
c = -input_delta * (inputs - input_cumheights)
discriminant = b.pow(2) - 4 * a * c
assert (discriminant >= 0).all()
root = (2 * c) / (-b - torch.sqrt(discriminant))
outputs = root * input_bin_widths + input_cumwidths
theta_one_minus_theta = root * (1 - root)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta
)
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * root.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - root).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
return outputs, -logabsdet
else:
theta = (inputs - input_cumwidths) / input_bin_widths
theta_one_minus_theta = theta * (1 - theta)
numerator = input_heights * (
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
)
denominator = input_delta + (
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
* theta_one_minus_theta
)
outputs = input_cumheights + numerator / denominator
derivative_numerator = input_delta.pow(2) * (
input_derivatives_plus_one * theta.pow(2)
+ 2 * input_delta * theta_one_minus_theta
+ input_derivatives * (1 - theta).pow(2)
)
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
return outputs, logabsdet
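

if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only; shapes and seeds chosen arbitrarily):
    # run a MaskedLinear layer and check that the unconstrained RQS transform inverts
    # itself up to numerical error.
    torch.manual_seed(0)

    layer = MaskedLinear(4, 8)
    y = layer(torch.randn(2, 4))
    print("MaskedLinear output shape:", tuple(y.shape))

    num_bins = 8
    x = torch.rand(16, 3) * 2 - 1  # points inside the [-1, 1] spline interval
    widths = torch.randn(16, 3, num_bins)
    heights = torch.randn(16, 3, num_bins)
    derivs = torch.randn(16, 3, num_bins - 1)  # K - 1 interior knot derivatives

    z, logdet = unconstrained_RQS(x, widths, heights, derivs, inverse=False)
    x_rec, logdet_inv = unconstrained_RQS(z, widths, heights, derivs, inverse=True)
    print("max |x - x_rec|:", (x - x_rec).abs().max().item())
    print("max |logdet + logdet_inv|:", (logdet + logdet_inv).abs().max().item())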