In [2]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson
import torch
import torch.nn as nn
In [3]:
class Utils:
    # Convert a string parameter into a torch activation function
    @staticmethod
    def get_activation_function(activation_function_str, negative_slope):
        if activation_function_str == "Lrelu":
            activation = torch.nn.LeakyReLU(negative_slope=negative_slope)
        elif activation_function_str == "relu":
            activation = torch.nn.ReLU()
        elif activation_function_str == "sigmoid":
            activation = torch.nn.Sigmoid()
        else:
            raise ValueError("Activation function not recognized: {}".format(activation_function_str))
        return activation

    # Convert a string parameter into the final activation applied to the output layer
    @staticmethod
    def get_final_activation_function(final_activation):
        if final_activation == "Sigmoid":
            final_activation = lambda x: torch.sigmoid(x)
        elif final_activation == "Clamping":
            final_activation = lambda x: torch.clamp(x, min=0., max=1.)
        elif final_activation == "Linear":
            final_activation = lambda x: x
        else:
            raise ValueError("final_activation must be Sigmoid, Clamping or Linear")
        return final_activation
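As a quick sanity check of these helpers (hypothetical inputs, not part of the original run):
In [ ]:
# Hypothetical sanity check: exercise both helpers on a small tensor.
act = Utils.get_activation_function("Lrelu", negative_slope=0.1)
final = Utils.get_final_activation_function("Clamping")
x = torch.linspace(-2., 2., 5)
print(act(x))    # LeakyReLU: negative values scaled by 0.1
print(final(x))  # values clamped into [0, 1]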
In [4]:
class Conditional_Poisson_Generator(nn.Module):
    def __init__(self,
                 noise_size: int = 1,
                 min_mu: float = 1.,
                 max_mu: float = 1.,
                 hidden_dim: int = 100,
                 activation_function: str = "Lrelu",
                 final_activation: str = "Sigmoid",
                 label_size: int = 10,
                 negative_slope: float = 0.1,
                 nb_hidden_layers_G: int = 1,
                 eps: float = 0.,
                 **kwargs):
        super(Conditional_Poisson_Generator, self).__init__()
        self.min_mu = min_mu
        self.max_mu = max_mu
        self.input_dim = noise_size
        self.nb_hidden_layers = nb_hidden_layers_G
        self.hidden_dim = hidden_dim
        self.activation = Utils.get_activation_function(activation_function, negative_slope)
        # Input layer for the uniform noise vector
        self.input_noise = nn.Linear(self.input_dim, self.hidden_dim)
        self.label_size = label_size
        # Embedding layer for the (scalar) conditioning label mu
        self.input_label = nn.Linear(1, self.label_size)
        self.fcs = nn.ModuleList()
        self.eps = eps
        for i in range(self.nb_hidden_layers):
            if i == 0:
                # First hidden layer takes the concatenated noise and label features
                self.fcs.append(nn.Linear(self.hidden_dim + self.label_size, self.hidden_dim))
            else:
                self.fcs.append(nn.Linear(self.hidden_dim, self.hidden_dim))
        self.output = nn.Linear(self.hidden_dim, 1)
        self.final_activation = Utils.get_final_activation_function(final_activation)

    def forward(self, x, mu):
        # Normalize mu into [0, 1] using min_mu and max_mu
        if self.max_mu == self.min_mu:
            mu = torch.ones_like(mu)
        else:
            mu = (mu - self.min_mu) / (self.max_mu - self.min_mu)
        return self.feed_forward(x, mu)

    def feed_forward(self, x, mu):
        x = self.activation(self.input_noise(x))
        mu = self.activation(self.input_label(mu))
        x = torch.cat((x, mu), 1)
        for i in range(self.nb_hidden_layers):
            x = self.activation(self.fcs[i](x))
        x = self.final_activation(self.output(x))
        return x + self.eps
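A minimal smoke test of the generator's shapes, assuming a batch of 8 and the parameter values used below (hypothetical, untrained weights):
In [ ]:
# Hypothetical smoke test with untrained weights.
G = Conditional_Poisson_Generator(noise_size=4, min_mu=0.3, max_mu=30,
                                  label_size=16, final_activation="Linear")
z = torch.rand(8, 4)             # uniform noise, shape (batch, noise_size)
mu = torch.full((8, 1), 10.0)    # conditioning label, shape (batch, 1)
print(G(z, mu).shape)            # torch.Size([8, 1])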
In [9]:
params = {}
# tested_mus are the mu values on which we run tests (KS divergence)
params["tested_mus"] = [2, 5, 10, 15]
# Set min_mu == max_mu to train on a single value of mu; otherwise training covers the interval and parametrizes G and D
params["min_mu"] = 0.3
params["max_mu"] = 30
# alpha: poisson.ppf(alpha, max_mu) is the network's maximum return value.
# Because network outputs are bounded in [0, 1], an output of 1 must correspond to a concrete draw, namely poisson.ppf(alpha, max_mu)
params["alpha"] = 0.999
# Network parameters
# Number of uniform draws fed to G
params["noise_size"] = 4
# Embedding size for the mu input when parametrized (leaving it at 10 is already satisfactory)
params["label_size"] = 16
# "Lrelu", "relu" or "sigmoid" (default Lrelu with negative slope 0.1)
params["activation_function"] = "Lrelu"
params["negative_slope"] = 0.1
# Final activation for the Generator: Sigmoid, Linear or Clamping
params["final_activation"] = "Linear"
# Hidden layer size
params["hidden_dim"] = 100
# Number of hidden layers for both D and G after the input layer (default 1): the total number of hidden layers is nb_hidden_layers + 1
params["nb_hidden_layers_G"] = 1
params["nb_hidden_layers_D"] = 1
# Path parameters
# load_dir_model is the path of the checkpoint to load
params["load_dir_model"] = "checkpoints"
# CUDA (might not work, to be tested)
params["cuda"] = False
# If cuda = True, pick the GPU number:
%env CUDA_VISIBLE_DEVICES = 0
env: CUDA_VISIBLE_DEVICES=0
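To make the alpha comment concrete, the scale factor implied by these settings can be computed directly (a sketch; the exact value is whatever scipy returns):
In [ ]:
# Scale factor M implied by alpha and max_mu: a network output of 1 maps to this draw.
M = poisson.ppf(params["alpha"], params["max_mu"])
print(M)  # roughly 48 for alpha=0.999 and max_mu=30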
Load the best model (the path should look like "models_poisson/poisson_mu-0.1-20_finalactivation-linear/checkpoints"). poisson_generator must be built with the same features (hyperparameters) as the saved model.
In [10]:
poisson_checkpoint = torch.load(params["load_dir_model"], map_location='cpu')
poisson_generator = Conditional_Poisson_Generator(**params).to('cpu')
poisson_generator.load_state_dict(poisson_checkpoint['generator_state_dict'])
print(poisson_checkpoint['epoch'])
116
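The checkpoint is assumed to be a dict holding at least 'generator_state_dict' and 'epoch'; a matching save call (hypothetical, mirroring the keys read above) would look like:
In [ ]:
# Hypothetical save side, mirroring the keys read above.
torch.save({"epoch": 116,
            "generator_state_dict": poisson_generator.state_dict()},
           params["load_dir_model"])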
Call the best model. max_mu depends on the training run (use the same value), and M is equal to poisson.ppf(0.999, max_mu) (see training).
In [11]:
def Poisson_generator(mu, size, max_mu, **kwargs):
    # M maps the network's [0, 1] output back to the Poisson draw scale (see training)
    M = poisson.ppf(0.999, max_mu)
    # The generator was only trained up to max_mu, so cap the conditioning value
    if mu > max_mu:
        _mu = max_mu
    else:
        _mu = mu
    uniform = torch.rand(size, 4)  # noise; the second dim must equal noise_size
    mus = torch.ones(size, 1) * _mu
    simulated_poisson_draws = poisson_generator(uniform, mus).detach().numpy() * M
    # For mu beyond max_mu, rescale the draws by moment matching (Poisson mean = variance = mu)
    if mu > _mu:
        simulated_poisson_draws = [(e - _mu) / np.sqrt(_mu) * np.sqrt(mu) + mu for e in simulated_poisson_draws]
    simulated_poisson_draws = np.array([int(np.round(e)) for e in simulated_poisson_draws])
    return simulated_poisson_draws
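The mu > max_mu branch relies on moment matching: a Poisson(mu) has mean and variance both equal to mu, so draws from Poisson(max_mu) are standardized and rescaled. A quick numeric check with scipy draws (hypothetical values, independent of the GAN):
In [ ]:
# Standardize Poisson(30) draws, then rescale to target mean/variance 50.
base = poisson.rvs(30, size=100000).astype(float)
rescaled = (base - 30) / np.sqrt(30) * np.sqrt(50) + 50
print(rescaled.mean(), rescaled.var())  # both should be close to 50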
Plot histograms for each value in tested_mus, comparing GAN draws against scipy draws.
In [12]:
for MU in params["tested_mus"]:
    print(MU)
    true_poisson = poisson.rvs(MU, size=100000)
    GAN_poisson = Poisson_generator(MU, 100000, **params)
    plt.hist(GAN_poisson, bins=200, alpha=0.75, color='red', density=True, label="Simulated distribution")
    plt.hist(true_poisson, bins=200, alpha=0.75, density=True, color='blue', label="True distribution")
    plt.legend()
    plt.show()
2
5
10
15
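The comments above mention KS divergence; a minimal sketch of such a test using scipy's ks_2samp (not shown in the original notebook; note the KS test on discrete data is only approximate):
In [ ]:
from scipy.stats import ks_2samp

# Two-sample KS statistic between GAN draws and reference scipy draws.
for MU in params["tested_mus"]:
    gan = Poisson_generator(MU, 10000, **params)
    ref = poisson.rvs(MU, size=10000)
    stat, pval = ks_2samp(gan, ref)
    print("mu={}: KS={:.4f}, p={:.3f}".format(MU, stat, pval))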
In [ ]: