# borrowed from tensorflow_compression/python/ops/math_ops.py
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
__all__ = [
"upper_bound",
"lower_bound",
]
@tf.RegisterGradient("IdentityFirstOfTwoInputs")
def _identity_first_of_two_inputs_grad(op, grad):
"""Gradient for `lower_bound` or `upper_bound` if `gradient == 'identity'`.
Args:
op: The op for which to calculate a gradient.
grad: Gradient with respect to the output of the op.
Returns:
Gradient with respect to the inputs of the op.
"""
del op # unused
return [grad, None]
@tf.RegisterGradient("UpperBound")
def _upper_bound_grad(op, grad):
"""Gradient for `upper_bound` if `gradient == 'identity_if_towards'`.
Args:
op: The op for which to calculate a gradient.
grad: Gradient with respect to the output of the op.
Returns:
Gradient with respect to the inputs of the op.
"""
inputs, bound = op.inputs
pass_through_if = tf.logical_or(inputs <= bound, grad > 0)
return [tf.cast(pass_through_if, grad.dtype) * grad, None]
@tf.RegisterGradient("LowerBound")
def _lower_bound_grad(op, grad):
"""Gradient for `lower_bound` if `gradient == 'identity_if_towards'`.
Args:
op: The op for which to calculate a gradient.
grad: Gradient with respect to the output of the op.
Returns:
Gradient with respect to the inputs of the op.
"""
inputs, bound = op.inputs
pass_through_if = tf.logical_or(inputs >= bound, grad < 0)
return [tf.cast(pass_through_if, grad.dtype) * grad, None]


def upper_bound(inputs, bound, gradient="identity_if_towards", name=None):
  """Same as `tf.minimum`, but with helpful gradient for `inputs > bound`.

  This function behaves just like `tf.minimum`, but the behavior of the
  gradient with respect to `inputs` for input values that hit the bound
  depends on `gradient`:

  If set to `'disconnected'`, the returned gradient is zero for values that hit
  the bound. This is identical to the behavior of `tf.minimum`.

  If set to `'identity'`, the gradient is unconditionally replaced with the
  identity function (i.e., pretending this function does not exist).

  If set to `'identity_if_towards'`, the gradient is replaced with the identity
  function, but only if applying gradient descent would push the values of
  `inputs` towards the bound. For gradient values that push away from the
  bound, the returned gradient is still zero.

  Note: In the latter two cases, no gradient is returned for `bound`.
  Also, the implementation of `gradient == 'identity_if_towards'` currently
  assumes that the shape of `inputs` is the same as the shape of the output.
  It won't work reliably for all possible broadcasting scenarios.

  Args:
    inputs: Input tensor.
    bound: Upper bound for the input tensor.
    gradient: 'disconnected', 'identity', or 'identity_if_towards' (default).
    name: Name for this op.

  Returns:
    `tf.minimum(inputs, bound)`

  Raises:
    ValueError: for invalid value of `gradient`.
  """
  try:
    gradient = {
        "identity_if_towards": "UpperBound",
        "identity": "IdentityFirstOfTwoInputs",
        "disconnected": None,
    }[gradient]
  except KeyError:
    raise ValueError("Invalid value for `gradient`: '{}'.".format(gradient))

  with tf.name_scope(name, "UpperBound", [inputs, bound]) as scope:
    inputs = tf.convert_to_tensor(inputs, name="inputs")
    bound = tf.convert_to_tensor(bound, name="bound", dtype=inputs.dtype)

    if gradient:
      with tf.get_default_graph().gradient_override_map({"Minimum": gradient}):
        return tf.minimum(inputs, bound, name=scope)
    else:
      return tf.minimum(inputs, bound, name=scope)
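

# Illustrative sketch (not part of the borrowed library code): a hypothetical
# helper showing how the three `gradient` modes of `upper_bound` differ. With
# `inputs = [1., 3.]` and `bound = 2.`, the second element exceeds the bound.
# An upstream gradient of -1 would push that element further above the bound
# under gradient descent, so 'identity_if_towards' blocks it (matching
# 'disconnected'), whereas 'identity' passes it through unchanged. Assumes
# TF1-style graph mode (e.g. after `tf.disable_v2_behavior()` under TF2).
def _upper_bound_grad_demo():
  inputs = tf.constant([1.0, 3.0])
  bound = tf.constant(2.0)
  grads = {}
  for mode in ("disconnected", "identity", "identity_if_towards"):
    outputs = upper_bound(inputs, bound, gradient=mode)
    # Use an upstream gradient of -1, which pushes values away from the bound.
    grads[mode] = tf.gradients(
        outputs, inputs, grad_ys=-tf.ones_like(inputs))[0]
  with tf.Session() as sess:
    # Expected: disconnected -> [-1., 0.], identity -> [-1., -1.],
    # identity_if_towards -> [-1., 0.] for this away-pushing gradient.
    return sess.run(grads)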


def lower_bound(inputs, bound, gradient="identity_if_towards", name=None):
  """Same as `tf.maximum`, but with helpful gradient for `inputs < bound`.

  This function behaves just like `tf.maximum`, but the behavior of the
  gradient with respect to `inputs` for input values that hit the bound
  depends on `gradient`:

  If set to `'disconnected'`, the returned gradient is zero for values that hit
  the bound. This is identical to the behavior of `tf.maximum`.

  If set to `'identity'`, the gradient is unconditionally replaced with the
  identity function (i.e., pretending this function does not exist).

  If set to `'identity_if_towards'`, the gradient is replaced with the identity
  function, but only if applying gradient descent would push the values of
  `inputs` towards the bound. For gradient values that push away from the
  bound, the returned gradient is still zero.

  Note: In the latter two cases, no gradient is returned for `bound`.
  Also, the implementation of `gradient == 'identity_if_towards'` currently
  assumes that the shape of `inputs` is the same as the shape of the output.
  It won't work reliably for all possible broadcasting scenarios.

  Args:
    inputs: Input tensor.
    bound: Lower bound for the input tensor.
    gradient: 'disconnected', 'identity', or 'identity_if_towards' (default).
    name: Name for this op.

  Returns:
    `tf.maximum(inputs, bound)`

  Raises:
    ValueError: for invalid value of `gradient`.
  """
  try:
    gradient = {
        "identity_if_towards": "LowerBound",
        "identity": "IdentityFirstOfTwoInputs",
        "disconnected": None,
    }[gradient]
  except KeyError:
    raise ValueError("Invalid value for `gradient`: '{}'.".format(gradient))

  with tf.name_scope(name, "LowerBound", [inputs, bound]) as scope:
    inputs = tf.convert_to_tensor(inputs, name="inputs")
    bound = tf.convert_to_tensor(bound, name="bound", dtype=inputs.dtype)

    if gradient:
      with tf.get_default_graph().gradient_override_map({"Maximum": gradient}):
        return tf.maximum(inputs, bound, name=scope)
    else:
      return tf.maximum(inputs, bound, name=scope)
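

# A companion sketch for `lower_bound` (again a hypothetical demo, not part of
# the original file): with `inputs = [0., 3.]` and `bound = 1.`, the first
# element falls below the bound. The default all-ones upstream gradient would
# push it further below the bound under gradient descent, so
# 'identity_if_towards' blocks it, while 'identity' passes it through.
def _lower_bound_grad_demo():
  inputs = tf.constant([0.0, 3.0])
  bound = tf.constant(1.0)
  grads = {}
  for mode in ("disconnected", "identity", "identity_if_towards"):
    outputs = lower_bound(inputs, bound, gradient=mode)
    grads[mode] = tf.gradients(outputs, inputs)[0]
  with tf.Session() as sess:
    # Expected: disconnected -> [0., 1.], identity -> [1., 1.],
    # identity_if_towards -> [0., 1.] for the default all-ones gradient.
    return sess.run(grads)


if __name__ == "__main__":
  tf.disable_v2_behavior()  # the demo sketches assume TF1 graph mode
  print("upper_bound gradients:", _upper_bound_grad_demo())
  print("lower_bound gradients:", _lower_bound_grad_demo())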