# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# from https://github.com/toshas/torch_truncnorm
import math
from numbers import Number

import torch
from torch.distributions import constraints, Distribution
from torch.distributions.utils import broadcast_all

# Constants for the standard normal pdf/cdf and entropy.
CONST_SQRT_2 = math.sqrt(2)
CONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)
CONST_INV_SQRT_2 = 1 / math.sqrt(2)
CONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)
CONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)


class TruncatedStandardNormal(Distribution):
    """Truncated Standard Normal distribution.

    Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """

    arg_constraints = {
        "a": constraints.real,
        "b": constraints.real,
    }
    has_rsample = True
    eps = 1e-6

    def __init__(self, a, b, validate_args=None, device=None):
        self.a, self.b = broadcast_all(a, b)
        self.a = self.a.to(device)
        self.b = self.b.to(device)
        if isinstance(a, Number) and isinstance(b, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.a.size()
        super(TruncatedStandardNormal, self).__init__(
            batch_shape, validate_args=validate_args
        )
        if self.a.dtype != self.b.dtype:
            raise ValueError("Truncation bounds types are different")
        if any((self.a >= self.b).view(-1).tolist()):
            raise ValueError("Incorrect truncation range")
        eps = self.eps
        self._dtype_min_gt_0 = eps
        self._dtype_max_lt_1 = 1 - eps
        self._little_phi_a = self._little_phi(self.a)
        self._little_phi_b = self._little_phi(self.b)
        self._big_phi_a = self._big_phi(self.a)
        self._big_phi_b = self._big_phi(self.b)
        # Z = Phi(b) - Phi(a): probability mass of [a, b] under the standard
        # normal, clamped away from 0 and 1 for numerical stability.
        self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)
        self._log_Z = self._Z.log()
        little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)
        little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)
        self._lpbb_m_lpaa_d_Z = (
            self._little_phi_b * little_phi_coeff_b
            - self._little_phi_a * little_phi_coeff_a
        ) / self._Z
        self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z
        self._variance = (
            1
            - self._lpbb_m_lpaa_d_Z
            - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2
        )
        self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z

    @constraints.dependent_property
    def support(self):
        return constraints.interval(self.a, self.b)

    @property
    def mean(self):
        return self._mean

    @property
    def deterministic_sample(self):
        return self.mean

    @property
    def variance(self):
        return self._variance

    def entropy(self):
        return self._entropy

    @property
    def auc(self):
        return self._Z

    @staticmethod
    def _little_phi(x):
        # Standard normal pdf phi(x).
        return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI

    def _big_phi(self, x):
        # Standard normal cdf Phi(x), clamped away from 0 and 1 so that
        # the inverse cdf stays finite.
        phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())
        return phi.clamp(self.eps, 1 - self.eps)

    @staticmethod
    def _inv_big_phi(x):
        # Inverse standard normal cdf (probit function).
        return CONST_SQRT_2 * (2 * x - 1).erfinv()

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)

    def icdf(self, value):
        y = self._big_phi_a + value * self._Z
        y = y.clamp(self.eps, 1 - self.eps)
        return self._inv_big_phi(y)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5

    def rsample(self, sample_shape=None):
        if sample_shape is None:
            sample_shape = torch.Size([])
        shape = self._extended_shape(sample_shape)
        p = torch.empty(shape, device=self.a.device).uniform_(
            self._dtype_min_gt_0, self._dtype_max_lt_1
        )
        return self.icdf(p)
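

# Both distributions sample by inverse-transform: a uniform draw
# u ~ Uniform(eps, 1 - eps) is mapped through icdf as
# x = Phi^{-1}(Phi(a) + u * Z) with Z = Phi(b) - Phi(a), so samples stay
# inside [a, b] while remaining differentiable through erfinv
# (pathwise gradients).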


class TruncatedNormal(TruncatedStandardNormal):
    """Truncated Normal distribution.

    https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """

    has_rsample = True

    def __init__(self, loc, scale, a, b, validate_args=None, device=None):
        scale = scale.clamp_min(self.eps)
        self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)
        a = a.to(device)
        b = b.to(device)
        self._non_std_a = a
        self._non_std_b = b
        # Standardize the truncation bounds before delegating to the parent.
        a = (a - self.loc) / self.scale
        b = (b - self.loc) / self.scale
        super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)
        self._log_scale = self.scale.log()
        # Rescale the standardized moments computed by the parent class.
        self._mean = self._mean * self.scale + self.loc
        self._variance = self._variance * self.scale**2
        self._entropy += self._log_scale

    def _to_std_rv(self, value):
        return (value - self.loc) / self.scale

    def _from_std_rv(self, value):
        return value * self.scale + self.loc

    def cdf(self, value):
        return super(TruncatedNormal, self).cdf(self._to_std_rv(value))

    def icdf(self, value):
        sample = self._from_std_rv(super().icdf(value))

        # Clamp the data to [a, b] but keep gradients: the clipped values are
        # written into sample.data, so autograd still sees the unclipped
        # computation path.
        sample_clip = torch.stack(
            [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0
        ).max(0)[0]
        sample_clip = torch.stack(
            [sample_clip, self._non_std_b.detach().expand_as(sample)], 0
        ).min(0)[0]
        sample.data.copy_(sample_clip)
        return sample

    def log_prob(self, value):
        value = self._to_std_rv(value)
        return super(TruncatedNormal, self).log_prob(value) - self._log_scale
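

# Minimal usage sketch (the values and shapes below are illustrative
# assumptions, not part of the original module): build a TruncatedNormal over
# [-1, 2], draw reparameterized samples, and evaluate their log-density.
if __name__ == "__main__":
    loc = torch.zeros(3)
    scale = torch.ones(3)
    dist = TruncatedNormal(loc, scale, a=-1.0, b=2.0)

    samples = dist.rsample(torch.Size([5]))  # shape: (5, 3), within [-1, 2]
    assert ((samples >= -1.0) & (samples <= 2.0)).all()

    log_p = dist.log_prob(samples)  # shape: (5, 3)
    print(samples.shape, log_p.shape, dist.mean, dist.variance)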