
Commit

update to 7.0.1
hycis committed Jun 30, 2021
1 parent bbaf5c3 commit b2bc469
Showing 17 changed files with 7 additions and 479 deletions.
36 changes: 0 additions & 36 deletions examples/charcnn_text_classifier.py
@@ -1,36 +1,18 @@

import tensorflow as tf
<<<<<<< HEAD
import tensorgraph as tg
from tensorgraph.layers import Reshape, Embedding, Conv2D, RELU, Linear, Flatten, ReduceSum, Softmax
from nltk.tokenize import RegexpTokenizer
from nlpbox import CharNumberEncoder, CatNumberEncoder
from tensorgraph.utils import valid, split_df, make_one_hot
from tensorgraph.cost import entropy, accuracy
=======
import tensorgraphx as tg
from tensorgraphx.layers import Reshape, Embedding, Conv2D, RELU, Linear, Flatten, ReduceSum, Softmax
from nltk.tokenize import RegexpTokenizer
from nlpbox import CharNumberEncoder, CatNumberEncoder
from tensorgraphx.utils import valid, split_df, make_one_hot
from tensorgraphx.cost import entropy, accuracy
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
import pandas
import numpy as np

# character CNN
def model(word_len, sent_len, nclass):
unicode_size = 1000
ch_embed_dim = 20

<<<<<<< HEAD
=======
h, w = valid(ch_embed_dim, word_len, stride=(1,1), kernel_size=(ch_embed_dim,5))
h, w = valid(h, w, stride=(1,1), kernel_size=(1,5))
h, w = valid(h, w, stride=(1,2), kernel_size=(1,5))
conv_out_dim = int(h * w * 60)

>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
X_ph = tf.placeholder('int32', [None, sent_len, word_len])
input_sn = tg.StartNode(input_vars=[X_ph])
charcnn_hn = tg.HiddenNode(prev=[input_sn],
@@ -39,7 +21,6 @@ def model(word_len, sent_len, nclass):
encode_dim=ch_embed_dim,
zero_pad=True),
Reshape(shape=(-1, ch_embed_dim, word_len, 1)),
<<<<<<< HEAD
Conv2D(num_filters=20, padding='VALID',
kernel_size=(ch_embed_dim,5), stride=(1,1)),
RELU(),
@@ -51,19 +32,6 @@ def model(word_len, sent_len, nclass):
RELU(),
Flatten(),
Linear(nclass),
=======
Conv2D(input_channels=1, num_filters=20, padding='VALID',
kernel_size=(ch_embed_dim,5), stride=(1,1)),
RELU(),
Conv2D(input_channels=20, num_filters=40, padding='VALID',
kernel_size=(1,5), stride=(1,1)),
RELU(),
Conv2D(input_channels=40, num_filters=60, padding='VALID',
kernel_size=(1,5), stride=(1,2)),
RELU(),
Flatten(),
Linear(conv_out_dim, nclass),
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
Reshape((-1, sent_len, nclass)),
ReduceSum(1),
Softmax()
@@ -101,11 +69,7 @@ def tweets(word_len, sent_len, train_valid_ratio=[5,1]):


def train():
<<<<<<< HEAD
from tensorgraph.trainobject import train as mytrain
=======
from tensorgraphx.trainobject import train as mytrain
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
with tf.Session() as sess:
word_len = 20
sent_len = 50
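Note on the resolution in this file: the kept (HEAD) branch uses the tensorgraph API, whose layers infer their input dimensions at graph-build time, so the deleted tensorgraphx lines that hand-computed conv_out_dim through valid() are no longer needed. Below is a minimal sketch of that bookkeeping; valid_out is a hypothetical stand-in implementing the standard VALID-convolution size formula, not the library's valid() helper, and the sizes come from model() and train() above.

def valid_out(in_size, kernel, stride):
    # VALID convolution: out = floor((in - kernel) / stride) + 1
    return (in_size - kernel) // stride + 1

word_len, ch_embed_dim = 20, 20   # values set in train() and model()

# Mirrors the three deleted valid() calls:
h, w = valid_out(ch_embed_dim, ch_embed_dim, 1), valid_out(word_len, 5, 1)
h, w = valid_out(h, 1, 1), valid_out(w, 5, 1)
h, w = valid_out(h, 1, 1), valid_out(w, 5, 2)
conv_out_dim = int(h * w * 60)    # 60 filters in the final Conv2D

print(h, w, conv_out_dim)         # -> 1 4 240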
110 changes: 0 additions & 110 deletions examples/cifar10_allcnn.py
@@ -7,7 +6,6 @@

from __future__ import division, print_function, absolute_import

<<<<<<< HEAD
from tensorgraph.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
Softmax, Flatten, Linear, AvgPooling, \
Lambda, BatchNormalization, IdentityBlock, \
@@ -17,25 +16,13 @@
import tensorflow as tf
from tensorgraph.cost import entropy, accuracy, mse
from tensorgraph.dataset import Mnist, Cifar10
=======
from tensorgraphx.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
Softmax, Flatten, Linear, AvgPooling, \
Lambda, BatchNormalization, IdentityBlock, \
TransitionLayer, DenseNet
from tensorgraphx.utils import same, valid, same_nd, valid_nd
import tensorgraphx as tg
import tensorflow as tf
from tensorgraphx.cost import entropy, accuracy, mse
from tensorgraphx.dataset import Mnist, Cifar10
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
from tensorflow.python.framework import ops
import numpy as np


def model(nclass, h, w, c):
with tf.name_scope('Cifar10AllCNN'):
seq = tg.Sequential()
<<<<<<< HEAD
seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
seq.add(BatchNormalization())
@@ -73,54 +60,6 @@ def model(nclass, h, w, c):
seq.add(BatchNormalization())

seq.add(AvgPooling(poolsize=(8, 8), stride=(1,1), padding='VALID'))
=======
seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
seq.add(BatchNormalization(input_shape=[h,w,96]))

seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
seq.add(Dropout(0.5))

seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
seq.add(BatchNormalization(input_shape=[h,w,96]))

seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
seq.add(Dropout(0.5))

seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
seq.add(BatchNormalization(input_shape=[h,w,192]))

seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
seq.add(Dropout(0.5))

seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
seq.add(BatchNormalization(input_shape=[h,w,192]))

seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
seq.add(Dropout(0.5))

seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
seq.add(RELU())
h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
seq.add(BatchNormalization(input_shape=[h,w,nclass]))

seq.add(AvgPooling(poolsize=(h, w), stride=(1,1), padding='VALID'))
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
seq.add(Flatten())
seq.add(Softmax())
return seq
@@ -210,11 +149,7 @@ def train():


def train_with_trainobject():
<<<<<<< HEAD
from tensorgraph.trainobject import train as mytrain
=======
from tensorgraphx.trainobject import train as mytrain
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config) as sess:
@@ -243,11 +178,7 @@ def train_with_trainobject():


def train_with_VGG():
<<<<<<< HEAD
from tensorgraph.trainobject import train as mytrain
=======
from tensorgraphx.trainobject import train as mytrain
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config) as sess:
@@ -256,21 +187,12 @@ def train_with_VGG():
_, nclass = y_train.shape
print('X max', np.max(X_train))
print('X min', np.min(X_train))
<<<<<<< HEAD
from tensorgraph.layers import VGG19
seq = tg.Sequential()
layer = VGG19()
seq.add(layer)
seq.add(Flatten())
seq.add(Linear(nclass))
=======
from tensorgraphx.layers import VGG19
seq = tg.Sequential()
layer = VGG19(input_channels=c, input_shape=(h,w))
seq.add(layer)
seq.add(Flatten())
seq.add(Linear(512,nclass))
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
seq.add(Softmax())
X_ph = tf.placeholder('float32', [None, h, w, c])
y_ph = tf.placeholder('float32', [None, nclass])
@@ -292,11 +214,7 @@ def train_with_VGG():


def train_with_Resnet():
<<<<<<< HEAD
from tensorgraph.trainobject import train as mytrain
=======
from tensorgraphx.trainobject import train as mytrain
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config) as sess:
@@ -306,28 +224,12 @@ def train_with_Resnet():
print('X max', np.max(X_train))
print('X min', np.min(X_train))
seq = tg.Sequential()
<<<<<<< HEAD
seq.add(IdentityBlock(nlayers=4, filters=[32, 64]))
seq.add(TransitionLayer(16))
seq.add(IdentityBlock(nlayers=4, filters=[64, 128]))
seq.add(TransitionLayer(16))
seq.add(Flatten())
seq.add(Linear(nclass))
=======
id1 = IdentityBlock(input_channels=c, input_shape=(h,w), nlayers=4, filters=[32, 64])
seq.add(id1)
trans1 = TransitionLayer(input_channels=id1.output_channels, input_shape=id1.output_shape)
seq.add(trans1)

id2 = IdentityBlock(input_channels=trans1.output_channels, input_shape=trans1.output_shape,
nlayers=4, filters=[64, 128])
seq.add(id2)
trans2 = TransitionLayer(input_channels=id2.output_channels, input_shape=id2.output_shape)
seq.add(trans2)
seq.add(Flatten())
ldim = trans2.output_channels * np.prod(trans2.output_shape)
seq.add(Linear(ldim,nclass))
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
seq.add(Softmax())

X_ph = tf.placeholder('float32', [None, h, w, c])
@@ -350,11 +252,7 @@ def train_with_Resnet():


def train_with_Densenet():
<<<<<<< HEAD
from tensorgraph.trainobject import train as mytrain
=======
from tensorgraphx.trainobject import train as mytrain
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config = config) as sess:
@@ -364,17 +262,9 @@ def train_with_Densenet():
print('X max', np.max(X_train))
print('X min', np.min(X_train))
seq = tg.Sequential()
<<<<<<< HEAD
seq.add(DenseNet(ndense=3, growth_rate=4, nlayer1blk=4))
seq.add(Flatten())
seq.add(Linear(nclass))
=======
dense = DenseNet(input_channels=c, input_shape=(h,w), ndense=3, growth_rate=4, nlayer1blk=4)
seq.add(dense)
seq.add(Flatten())
ldim = dense.output_channels
seq.add(Linear(ldim,nclass))
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
seq.add(Softmax())

X_ph = tf.placeholder('float32', [None, h, w, c])
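The same pattern runs through this file: the deleted tensorgraphx branch threaded h and w through same() after every convolution so that BatchNormalization and AvgPooling could be given explicit shapes, while the kept branch hard-codes AvgPooling(poolsize=(8, 8)). That constant is easy to verify by hand, since for TensorFlow 'SAME' padding the output size depends only on the stride: out = ceil(in / stride). A short sketch (same_out is a hypothetical stand-in, not the library's same() helper):

import math

def same_out(in_size, stride):
    # 'SAME' padding: out = ceil(in / stride), independent of kernel size
    return math.ceil(in_size / stride)

h = w = 32                              # CIFAR-10 images are 32x32
# stride-(1,1) convs leave the size unchanged; only the two
# stride-(2,2) convs in the network shrink it:
h, w = same_out(h, 2), same_out(w, 2)   # -> 16x16
h, w = same_out(h, 2), same_out(w, 2)   # -> 8x8
print(h, w)                             # -> 8 8, matching AvgPooling(poolsize=(8, 8))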
19 changes: 0 additions & 19 deletions examples/example.py
@@ -1,15 +1,9 @@

import tensorflow as tf
import numpy as np
<<<<<<< HEAD
from tensorgraph import Graph, StartNode, HiddenNode, EndNode
from tensorgraph.layers import Linear, RELU, Concat, Mean, Sum
from tensorgraph import ProgressBar, SequentialIterator
=======
from tensorgraphx import Graph, StartNode, HiddenNode, EndNode
from tensorgraphx.layers import Linear, RELU, Concat, Mean, Sum
from tensorgraphx import ProgressBar, SequentialIterator
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5


def model():
@@ -25,21 +19,12 @@ def model():

h1 = HiddenNode(prev=[start1, start2],
input_merge_mode=Concat(),
<<<<<<< HEAD
layers=[Linear(y2_dim), RELU()])
h2 = HiddenNode(prev=[start2],
layers=[Linear(y2_dim), RELU()])
h3 = HiddenNode(prev=[h1, h2],
input_merge_mode=Sum(),
layers=[Linear(y1_dim), RELU()])
=======
layers=[Linear(y1_dim+y2_dim, y2_dim), RELU()])
h2 = HiddenNode(prev=[start2],
layers=[Linear(y2_dim, y2_dim), RELU()])
h3 = HiddenNode(prev=[h1, h2],
input_merge_mode=Sum(),
layers=[Linear(y2_dim, y1_dim), RELU()])
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
e1 = EndNode(prev=[h3])
e2 = EndNode(prev=[h2])

@@ -74,11 +59,7 @@ def train():
n_exp += len(y1_batch)
pbar.update(n_exp)
print('end')
<<<<<<< HEAD
# saver.save(sess, 'test.tf')
=======
saver.save(sess, 'test.tf')
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5


if __name__ == '__main__':
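For orientation, the resolved (HEAD) version of examples/example.py wires a two-input DAG. The sketch below condenses the kept lines; the placeholder shapes and the y1_dim/y2_dim values are assumptions, since their definitions sit in collapsed parts of the diff:

import tensorflow as tf
from tensorgraph import StartNode, HiddenNode, EndNode
from tensorgraph.layers import Linear, RELU, Concat, Sum

y1_dim, y2_dim = 10, 20                      # assumed sizes
y1_ph = tf.placeholder('float32', [None, y1_dim])
y2_ph = tf.placeholder('float32', [None, y2_dim])

start1 = StartNode(input_vars=[y1_ph])
start2 = StartNode(input_vars=[y2_ph])
# Concat makes h1's input (y1_dim + y2_dim) wide; with shape inference only
# the output width is declared, hence Linear(y2_dim) rather than the deleted
# branch's Linear(y1_dim + y2_dim, y2_dim).
h1 = HiddenNode(prev=[start1, start2], input_merge_mode=Concat(),
                layers=[Linear(y2_dim), RELU()])
h2 = HiddenNode(prev=[start2], layers=[Linear(y2_dim), RELU()])
h3 = HiddenNode(prev=[h1, h2], input_merge_mode=Sum(),
                layers=[Linear(y1_dim), RELU()])
e1 = EndNode(prev=[h3])
e2 = EndNode(prev=[h2])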
18 changes: 1 addition & 17 deletions examples/hierachical_softmax.py
@@ -1,5 +1,4 @@

<<<<<<< HEAD
from tensorgraph.node import StartNode, HiddenNode, EndNode
import tensorflow as tf
from tensorgraph.layers.linear import Linear
@@ -8,16 +7,7 @@
from tensorgraph.graph import Graph
import numpy as np
from tensorgraph.data_iterator import SequentialIterator
=======
from tensorgraphx.node import StartNode, HiddenNode, EndNode
import tensorflow as tf
from tensorgraphx.layers.linear import Linear
from tensorgraphx.layers.activation import RELU, Softmax
from tensorgraphx.layers.merge import Concat, Mean, Sum
from tensorgraphx.graph import Graph
import numpy as np
from tensorgraphx.data_iterator import SequentialIterator
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5


## params
x_dim = 50
@@ -35,15 +25,9 @@
# define the graph model structure
start = StartNode(input_vars=[x_ph])

<<<<<<< HEAD
h1 = HiddenNode(prev=[start], layers=[Linear(component_dim), Softmax()])
h2 = HiddenNode(prev=[h1], layers=[Linear(component_dim), Softmax()])
h3 = HiddenNode(prev=[h2], layers=[Linear(component_dim), Softmax()])
=======
h1 = HiddenNode(prev=[start], layers=[Linear(x_dim, component_dim), Softmax()])
h2 = HiddenNode(prev=[h1], layers=[Linear(component_dim, component_dim), Softmax()])
h3 = HiddenNode(prev=[h2], layers=[Linear(component_dim, component_dim), Softmax()])
>>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5


e1 = EndNode(prev=[h1], input_merge_mode=Sum())
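The resolved graph above chains three Linear + Softmax levels, each feeding the next, with an EndNode tapped per level. The numpy sketch below reproduces that cascade outside the library to show the data flow; all sizes except x_dim are assumptions, and softmax here is the standard definition rather than tensorgraph's layer:

import numpy as np

def softmax(z):
    z = z - z.max(axis=-1, keepdims=True)     # subtract max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 50))        # batch of 4, x_dim = 50 as in the file
W1 = rng.normal(size=(50, 100))     # component_dim = 100 (assumed)
W2 = rng.normal(size=(100, 100))
W3 = rng.normal(size=(100, 100))

h1 = softmax(x @ W1)                # level 1 (tapped by EndNode e1)
h2 = softmax(h1 @ W2)               # level 2 consumes level 1's output
h3 = softmax(h2 @ W3)               # level 3
assert np.allclose(h3.sum(axis=1), 1.0)   # each level outputs a distribution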
(Diffs for the remaining 13 changed files are not shown.)

