Added DeconvNet, Guided-/ExcitationBackprop
Added new composites for:
- DeconvNet
- GuidedBackprop
- ExcitationBackprop
chr5tphr committed Jun 24, 2021
1 parent d3e1705 commit 0d23871
Showing 1 changed file with 52 additions and 1 deletion.
zennit/composites.py
@@ -20,7 +20,7 @@

from .core import Composite
from .layer import Sum
-from .rules import Gamma, Epsilon, ZBox, ZPlus, AlphaBeta, Flat, Pass, Norm
+from .rules import Gamma, Epsilon, ZBox, ZPlus, AlphaBeta, Flat, Pass, Norm, ReLUDeconvNet, ReLUGuidedBackprop
from .types import Convolution, Linear, AvgPool, Activation


@@ -230,3 +230,54 @@ def __init__(self, canonizers=None):
(Linear, Flat())
]
super().__init__(layer_map, first_map, canonizers=canonizers)


@register_composite('deconvnet')
class DeconvNet(LayerMapComposite):
'''An explicit composite modifying the gradients of all ReLUs according to DeconvNet [1].
References
----------
.. [1] M. D. Zeiler and R. Fergus, “Visualizing and understanding convolutional networks,” in European conference
on computer vision. Springer, 2014, pp. 818–833.
'''
def __init__(self, canonizers=None):
layer_map = [
(torch.nn.ReLU, ReLUDeconvNet()),
]
super().__init__(layer_map, canonizers=canonizers)


@register_composite('guided_backprop')
class GuidedBackprop(LayerMapComposite):
'''An explicit composite modifying the gradients of all ReLUs according to GuidedBackprop [1].
References
----------
.. [1] J. T. Springenberg, A. Dosovitskiy, T. Brox, and M. A. Riedmiller, “Striving for simplicity: The all
convolutional net,” in Proceedings of the International Conference on Learning Representations (ICLR), 2015.
'''
def __init__(self, canonizers=None):
layer_map = [
(torch.nn.ReLU, ReLUGuidedBackprop()),
]
super().__init__(layer_map, canonizers=canonizers)


@register_composite('excitation_backprop')
class ExcitationBackprop(LayerMapComposite):
'''An explicit composite implementing ExcitationBackprop [1].
References
----------
.. [1] J. Zhang, S. A. Bargal, Z. Lin, J. Brandt, X. Shen, and S. Sclaroff, “Top-down neural attention by
excitation backprop,” International Journal of Computer Vision, vol. 126, no. 10, pp. 1084–1102, 2018.
'''
def __init__(self, canonizers=None):
layer_map = [
(Sum, Norm()),
(AvgPool, Norm()),
(Linear, ZPlus()),
]
super().__init__(layer_map, canonizers=canonizers)
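
For orientation, here is a minimal usage sketch of one of the new composites (not part of this commit). It assumes zennit's Composite.context API and uses a pretrained torchvision VGG16 purely as an example model; the variable names are illustrative.

import torch
from torchvision.models import vgg16
from zennit.composites import GuidedBackprop

# example setup: any ReLU-based classifier works; vgg16 is only an assumption here
model = vgg16(pretrained=True).eval()
input = torch.randn(1, 3, 224, 224, requires_grad=True)

composite = GuidedBackprop()
# Composite.context temporarily registers the rule hooks
# (here: ReLUGuidedBackprop on all torch.nn.ReLU modules, as mapped above)
with composite.context(model) as modified_model:
    output = modified_model(input)
    # backpropagate the score of class 0; the modified gradient is the attribution
    output[:, 0].sum().backward()
attribution = input.grad

Since each class is added via register_composite, it should also be retrievable by its string key (e.g. 'guided_backprop') from the module's composite registry, following the pattern of the composites already defined in zennit/composites.py.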
