# layers.py (forked from NVIDIAGameWorks/kaolin-wisp)

# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.

import torch
import torch.nn as nn
import torch.nn.functional as F


def normalize_frobenius(x):
    """Normalizes the matrix according to the Frobenius norm.

    Args:
        x (torch.FloatTensor): A matrix.

    Returns:
        (torch.FloatTensor): A normalized matrix.
    """
    norm = torch.sqrt((torch.abs(x)**2).sum())
    return x / norm
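
# A minimal sketch (not part of the original file) showing the invariant:
# the returned matrix always has Frobenius norm 1.
#
#   >>> w = torch.full((2, 2), 3.0)                  # Frobenius norm = sqrt(4 * 9) = 6
#   >>> normalize_frobenius(w)                       # every entry becomes 3/6 = 0.5
#   >>> (normalize_frobenius(w) ** 2).sum().sqrt()   # tensor(1.)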


def normalize_L_1(x):
    """Normalizes the matrix according to the induced L1 norm.

    Columns whose absolute sum exceeds 1 are rescaled so that the induced
    L1 norm (the maximum absolute column sum) is at most 1; columns already
    within that bound are left unchanged.

    Args:
        x (torch.FloatTensor): A matrix.

    Returns:
        (torch.FloatTensor): A normalized matrix.
    """
    abscolsum = torch.sum(torch.abs(x), dim=0)
    # Per-column scale: 1/abscolsum where the column sum exceeds 1, otherwise 1.
    abscolsum = torch.min(torch.stack([1.0 / abscolsum, torch.ones_like(abscolsum)], dim=0), dim=0)[0]
    return x * abscolsum[None, :]
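
# A minimal sketch (not part of the original file): columns whose absolute
# sum exceeds 1 get rescaled to sum exactly 1; columns already within the
# bound pass through unchanged.
#
#   >>> w = torch.tensor([[2.0, 0.25],
#   ...                   [2.0, 0.25]])      # column abs sums: 4.0, 0.5
#   >>> normalize_L_1(w).abs().sum(dim=0)    # tensor([1.0, 0.5])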


def normalize_L_inf(x):
    """Normalizes the matrix according to the induced L-infinity norm.

    Rows whose absolute sum exceeds 1 are rescaled so that the induced
    L-infinity norm (the maximum absolute row sum) is at most 1; rows
    already within that bound are left unchanged.

    Args:
        x (torch.FloatTensor): A matrix.

    Returns:
        (torch.FloatTensor): A normalized matrix.
    """
    absrowsum = torch.sum(torch.abs(x), dim=1)
    # Per-row scale: 1/absrowsum where the row sum exceeds 1, otherwise 1.
    absrowsum = torch.min(torch.stack([1.0 / absrowsum, torch.ones_like(absrowsum)], dim=0), dim=0)[0]
    return x * absrowsum[:, None]
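
# A minimal sketch (not part of the original file): the row-wise analogue of
# normalize_L_1, capping the maximum absolute row sum at 1.
#
#   >>> w = torch.tensor([[2.0, 2.0],
#   ...                   [0.25, 0.25]])       # row abs sums: 4.0, 0.5
#   >>> normalize_L_inf(w).abs().sum(dim=1)    # tensor([1.0, 0.5])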


class FrobeniusLinear(nn.Module):
    """A standard Linear layer which applies a Frobenius normalization in the forward pass.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.linear = nn.Linear(*args, **kwargs)

    def forward(self, x):
        # Re-normalize the weight on every forward pass; the bias is left untouched.
        weight = normalize_frobenius(self.linear.weight)
        return F.linear(x, weight, self.linear.bias)


class L_1_Linear(nn.Module):
    """A standard Linear layer which applies an L1 normalization in the forward pass.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.linear = nn.Linear(*args, **kwargs)

    def forward(self, x):
        weight = normalize_L_1(self.linear.weight)
        return F.linear(x, weight, self.linear.bias)


class L_inf_Linear(nn.Module):
    """A standard Linear layer which applies an L-infinity normalization in the forward pass.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.linear = nn.Linear(*args, **kwargs)

    def forward(self, x):
        weight = normalize_L_inf(self.linear.weight)
        return F.linear(x, weight, self.linear.bias)
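
# A minimal usage sketch (not part of the original file): the three wrappers
# above are drop-in replacements for nn.Linear, re-normalizing the weight on
# every forward pass while the stored parameters stay unconstrained.
#
#   >>> layer = L_inf_Linear(16, 32)        # same signature as nn.Linear
#   >>> layer(torch.randn(8, 16)).shape     # torch.Size([8, 32])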


def spectral_norm_(*args, **kwargs):
    """Initializes a Linear layer wrapped with spectral normalization.
    """
    return nn.utils.spectral_norm(nn.Linear(*args, **kwargs))
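
# A short note with a usage sketch (not part of the original file): unlike the
# wrappers above, nn.utils.spectral_norm registers a hook on an ordinary
# nn.Linear that divides the weight by its largest singular value (estimated
# by power iteration) before each forward pass.
#
#   >>> layer = spectral_norm_(16, 32)
#   >>> layer(torch.randn(8, 16)).shape     # torch.Size([8, 32])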


def get_layer_class(layer_type):
    """Convenience function to return the layer class from a text name.

    Args:
        layer_type (str): Text name for the layer.

    Returns:
        (nn.Module): The layer class (or factory) to be used for the decoder.
    """
    if layer_type == 'none':
        return nn.Linear
    elif layer_type == 'spectral_norm':
        return spectral_norm_
    elif layer_type == 'frobenius_norm':
        return FrobeniusLinear
    elif layer_type == 'l_1_norm':
        return L_1_Linear
    elif layer_type == 'l_inf_norm':
        return L_inf_Linear
    else:
        raise ValueError(f'layer type {layer_type} does not exist')
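

# A small smoke test (not part of the original file); run `python layers.py`
# to exercise each supported layer type end to end.
if __name__ == '__main__':
    x = torch.randn(8, 16)
    for layer_type in ['none', 'spectral_norm', 'frobenius_norm', 'l_1_norm', 'l_inf_norm']:
        layer = get_layer_class(layer_type)(16, 32)
        assert layer(x).shape == (8, 32)
        print(f'{layer_type}: OK')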