diff --git a/examples/charcnn_text_classifier.py b/examples/charcnn_text_classifier.py
index 91b5037..49855a9 100644
--- a/examples/charcnn_text_classifier.py
+++ b/examples/charcnn_text_classifier.py
@@ -1,20 +1,11 @@
 import tensorflow as tf
-<<<<<<< HEAD
 import tensorgraph as tg
 from tensorgraph.layers import Reshape, Embedding, Conv2D, RELU, Linear, Flatten, ReduceSum, Softmax
 from nltk.tokenize import RegexpTokenizer
 from nlpbox import CharNumberEncoder, CatNumberEncoder
 from tensorgraph.utils import valid, split_df, make_one_hot
 from tensorgraph.cost import entropy, accuracy
-=======
-import tensorgraphx as tg
-from tensorgraphx.layers import Reshape, Embedding, Conv2D, RELU, Linear, Flatten, ReduceSum, Softmax
-from nltk.tokenize import RegexpTokenizer
-from nlpbox import CharNumberEncoder, CatNumberEncoder
-from tensorgraphx.utils import valid, split_df, make_one_hot
-from tensorgraphx.cost import entropy, accuracy
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import pandas
 import numpy as np
 
 
@@ -22,15 +13,6 @@ def model(word_len, sent_len, nclass):
     unicode_size = 1000
     ch_embed_dim = 20
 
-
-<<<<<<< HEAD
-=======
-    h, w = valid(ch_embed_dim, word_len, stride=(1,1), kernel_size=(ch_embed_dim,5))
-    h, w = valid(h, w, stride=(1,1), kernel_size=(1,5))
-    h, w = valid(h, w, stride=(1,2), kernel_size=(1,5))
-    conv_out_dim = int(h * w * 60)
-
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     X_ph = tf.placeholder('int32', [None, sent_len, word_len])
     input_sn = tg.StartNode(input_vars=[X_ph])
     charcnn_hn = tg.HiddenNode(prev=[input_sn],
@@ -39,7 +21,6 @@ def model(word_len, sent_len, nclass):
                                          encode_dim=ch_embed_dim,
                                          zero_pad=True),
                                Reshape(shape=(-1, ch_embed_dim, word_len, 1)),
-<<<<<<< HEAD
                                Conv2D(num_filters=20, padding='VALID',
                                       kernel_size=(ch_embed_dim,5), stride=(1,1)),
                                RELU(),
@@ -51,19 +32,6 @@ def model(word_len, sent_len, nclass):
                                RELU(),
                                Flatten(),
                                Linear(nclass),
-=======
-                               Conv2D(input_channels=1, num_filters=20, padding='VALID',
-                                      kernel_size=(ch_embed_dim,5), stride=(1,1)),
-                               RELU(),
-                               Conv2D(input_channels=20, num_filters=40, padding='VALID',
-                                      kernel_size=(1,5), stride=(1,1)),
-                               RELU(),
-                               Conv2D(input_channels=40, num_filters=60, padding='VALID',
-                                      kernel_size=(1,5), stride=(1,2)),
-                               RELU(),
-                               Flatten(),
-                               Linear(conv_out_dim, nclass),
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
                                Reshape((-1, sent_len, nclass)),
                                ReduceSum(1),
                                Softmax()
@@ -101,11 +69,7 @@ def tweets(word_len, sent_len, train_valid_ratio=[5,1]):
 
 
 def train():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     with tf.Session() as sess:
         word_len = 20
         sent_len = 50
diff --git a/examples/cifar10_allcnn.py b/examples/cifar10_allcnn.py
index 92df4d3..8fb3661 100644
--- a/examples/cifar10_allcnn.py
+++ b/examples/cifar10_allcnn.py
@@ -7,7 +7,6 @@
 from __future__ import division, print_function, absolute_import
 
-<<<<<<< HEAD
 from tensorgraph.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
                                Softmax, Flatten, Linear, AvgPooling, \
                                Lambda, BatchNormalization, IdentityBlock, \
                                TransitionLayer, DenseNet
 from tensorgraph.utils import same, valid, same_nd, valid_nd
 import tensorgraph as tg
@@ -17,17 +16,6 @@
 import tensorflow as tf
 from tensorgraph.cost import entropy, accuracy, mse
 from tensorgraph.dataset import Mnist, Cifar10
-=======
-from tensorgraphx.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
-                                Softmax, Flatten, Linear, AvgPooling, \
-                                Lambda, BatchNormalization, IdentityBlock, \
-                                TransitionLayer, DenseNet
-from tensorgraphx.utils import same, valid, same_nd, valid_nd
-import tensorgraphx as tg
-import tensorflow as tf
-from tensorgraphx.cost import entropy, accuracy, mse
-from tensorgraphx.dataset import Mnist, Cifar10
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 from tensorflow.python.framework import ops
 import numpy as np
 
@@ -35,7 +23,6 @@ def model(nclass, h, w, c):
 
     with tf.name_scope('Cifar10AllCNN'):
         seq = tg.Sequential()
-<<<<<<< HEAD
         seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
         seq.add(RELU())
         seq.add(BatchNormalization())
@@ -73,54 +60,6 @@ def model(nclass, h, w, c):
         seq.add(BatchNormalization())
 
         seq.add(AvgPooling(poolsize=(8, 8), stride=(1,1), padding='VALID'))
-=======
-        seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,96]))
-
-        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(Dropout(0.5))
-
-        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,96]))
-
-        seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(Dropout(0.5))
-
-        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,192]))
-
-        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3))
-        seq.add(Dropout(0.5))
-
-        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,192]))
-
-        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
-        seq.add(Dropout(0.5))
-
-        seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
-        seq.add(RELU())
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1))
-        seq.add(BatchNormalization(input_shape=[h,w,nclass]))
-
-        seq.add(AvgPooling(poolsize=(h, w), stride=(1,1), padding='VALID'))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         seq.add(Flatten())
         seq.add(Softmax())
         return seq
@@ -210,11 +149,7 @@ def train():
 
 
 def train_with_trainobject():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     with tf.Session(config = config) as sess:
@@ -243,11 +178,7 @@ def train_with_trainobject():
 
 
 def train_with_VGG():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     with tf.Session(config = config) as sess:
@@ -256,21 +187,12 @@ def train_with_VGG():
         _, nclass = y_train.shape
         print('X max', np.max(X_train))
         print('X min', np.min(X_train))
-<<<<<<< HEAD
         from tensorgraph.layers import VGG19
         seq = tg.Sequential()
         layer = VGG19()
         seq.add(layer)
         seq.add(Flatten())
         seq.add(Linear(nclass))
-=======
-        from tensorgraphx.layers import VGG19
-        seq = tg.Sequential()
-        layer = VGG19(input_channels=c, input_shape=(h,w))
-        seq.add(layer)
-        seq.add(Flatten())
-        seq.add(Linear(512,nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         seq.add(Softmax())
         X_ph = tf.placeholder('float32', [None, h, w, c])
         y_ph = tf.placeholder('float32', [None, nclass])
@@ -292,11 +214,7 @@ def train_with_VGG():
 
 
 def train_with_Resnet():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     with tf.Session(config = config) as sess:
@@ -306,28 +224,12 @@ def train_with_Resnet():
         print('X max', np.max(X_train))
         print('X min', np.min(X_train))
         seq = tg.Sequential()
-<<<<<<< HEAD
         seq.add(IdentityBlock(nlayers=4, filters=[32, 64]))
         seq.add(TransitionLayer(16))
         seq.add(IdentityBlock(nlayers=4, filters=[64, 128]))
         seq.add(TransitionLayer(16))
         seq.add(Flatten())
         seq.add(Linear(nclass))
-=======
-        id1 = IdentityBlock(input_channels=c, input_shape=(h,w), nlayers=4, filters=[32, 64])
-        seq.add(id1)
-        trans1 = TransitionLayer(input_channels=id1.output_channels, input_shape=id1.output_shape)
-        seq.add(trans1)
-
-        id2 = IdentityBlock(input_channels=trans1.output_channels, input_shape=trans1.output_shape,
-                            nlayers=4, filters=[64, 128])
-        seq.add(id2)
-        trans2 = TransitionLayer(input_channels=id2.output_channels, input_shape=id2.output_shape)
-        seq.add(trans2)
-        seq.add(Flatten())
-        ldim = trans2.output_channels * np.prod(trans2.output_shape)
-        seq.add(Linear(ldim,nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         seq.add(Softmax())
 
         X_ph = tf.placeholder('float32', [None, h, w, c])
@@ -350,11 +252,7 @@ def train_with_Resnet():
 
 
 def train_with_Densenet():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     config = tf.ConfigProto()
     config.gpu_options.allow_growth = True
     with tf.Session(config = config) as sess:
@@ -364,17 +262,9 @@ def train_with_Densenet():
         print('X max', np.max(X_train))
         print('X min', np.min(X_train))
         seq = tg.Sequential()
-<<<<<<< HEAD
         seq.add(DenseNet(ndense=3, growth_rate=4, nlayer1blk=4))
         seq.add(Flatten())
         seq.add(Linear(nclass))
-=======
-        dense = DenseNet(input_channels=c, input_shape=(h,w), ndense=3, growth_rate=4, nlayer1blk=4)
-        seq.add(dense)
-        seq.add(Flatten())
-        ldim = dense.output_channels
-        seq.add(Linear(ldim,nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         seq.add(Softmax())
 
         X_ph = tf.placeholder('float32', [None, h, w, c])
diff --git a/examples/example.py b/examples/example.py
index 8def5d2..0da7658 100644
--- a/examples/example.py
+++ b/examples/example.py
@@ -1,15 +1,9 @@
 import tensorflow as tf
 import numpy as np
-<<<<<<< HEAD
 from tensorgraph import Graph, StartNode, HiddenNode, EndNode
 from tensorgraph.layers import Linear, RELU, Concat, Mean, Sum
 from tensorgraph import ProgressBar, SequentialIterator
-=======
-from tensorgraphx import Graph, StartNode, HiddenNode, EndNode
-from tensorgraphx.layers import Linear, RELU, Concat, Mean, Sum
-from tensorgraphx import ProgressBar, SequentialIterator
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 
 def model():
@@ -25,21 +19,12 @@ def model():
 
     h1 = HiddenNode(prev=[start1, start2],
                     input_merge_mode=Concat(),
-<<<<<<< HEAD
                     layers=[Linear(y2_dim), RELU()])
     h2 = HiddenNode(prev=[start2],
                     layers=[Linear(y2_dim), RELU()])
     h3 = HiddenNode(prev=[h1, h2],
                     input_merge_mode=Sum(),
                     layers=[Linear(y1_dim), RELU()])
-=======
-                    layers=[Linear(y1_dim+y2_dim, y2_dim), RELU()])
-    h2 = HiddenNode(prev=[start2],
-                    layers=[Linear(y2_dim, y2_dim), RELU()])
-    h3 = HiddenNode(prev=[h1, h2],
-                    input_merge_mode=Sum(),
-                    layers=[Linear(y2_dim, y1_dim), RELU()])
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     e1 = EndNode(prev=[h3])
     e2 = EndNode(prev=[h2])
@@ -74,11 +59,7 @@ def train():
             n_exp += len(y1_batch)
             pbar.update(n_exp)
         print('end')
-<<<<<<< HEAD
         # saver.save(sess, 'test.tf')
-=======
-        saver.save(sess, 'test.tf')
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 
 if __name__ == '__main__':
diff --git a/examples/hierachical_softmax.py b/examples/hierachical_softmax.py
index 7275629..52aed3c 100644
--- a/examples/hierachical_softmax.py
+++ b/examples/hierachical_softmax.py
@@ -1,5 +1,4 @@
-<<<<<<< HEAD
 from tensorgraph.node import StartNode, HiddenNode, EndNode
 import tensorflow as tf
 from tensorgraph.layers.linear import Linear
@@ -8,16 +7,7 @@
 from tensorgraph.layers.activation import RELU, Softmax
 from tensorgraph.layers.merge import Concat, Mean, Sum
 from tensorgraph.graph import Graph
 import numpy as np
 from tensorgraph.data_iterator import SequentialIterator
-=======
-from tensorgraphx.node import StartNode, HiddenNode, EndNode
-import tensorflow as tf
-from tensorgraphx.layers.linear import Linear
-from tensorgraphx.layers.activation import RELU, Softmax
-from tensorgraphx.layers.merge import Concat, Mean, Sum
-from tensorgraphx.graph import Graph
-import numpy as np
-from tensorgraphx.data_iterator import SequentialIterator
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
+
 
 ## params
 x_dim = 50
@@ -35,15 +25,9 @@
 
 # define the graph model structure
 start = StartNode(input_vars=[x_ph])
-<<<<<<< HEAD
 h1 = HiddenNode(prev=[start], layers=[Linear(component_dim), Softmax()])
 h2 = HiddenNode(prev=[h1], layers=[Linear(component_dim), Softmax()])
 h3 = HiddenNode(prev=[h2], layers=[Linear(component_dim), Softmax()])
-=======
-h1 = HiddenNode(prev=[start], layers=[Linear(x_dim, component_dim), Softmax()])
-h2 = HiddenNode(prev=[h1], layers=[Linear(component_dim, component_dim), Softmax()])
-h3 = HiddenNode(prev=[h2], layers=[Linear(component_dim, component_dim), Softmax()])
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 e1 = EndNode(prev=[h1], input_merge_mode=Sum())
diff --git a/examples/mnist_cnn.py b/examples/mnist_cnn.py
index 2abb9ad..05ff14c 100644
--- a/examples/mnist_cnn.py
+++ b/examples/mnist_cnn.py
@@ -11,7 +11,6 @@
 
 from __future__ import division, print_function, absolute_import
 
-<<<<<<< HEAD
 from tensorgraph.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
                                Softmax, Flatten, Linear, BatchNormalization
 from tensorgraph.utils import same
@@ -19,21 +18,11 @@
 import tensorgraph as tg
 import tensorflow as tf
 from tensorgraph.cost import entropy, accuracy
 from tensorgraph.dataset import Mnist
-=======
-from tensorgraphx.layers import Conv2D, RELU, MaxPooling, LRN, Tanh, Dropout, \
-                                Softmax, Flatten, Linear, BatchNormalization
-from tensorgraphx.utils import same
-import tensorgraphx as tg
-import tensorflow as tf
-from tensorgraphx.cost import entropy, accuracy
-from tensorgraphx.dataset import Mnist
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 from tensorflow.python.framework import ops
 
 def model():
     with tf.name_scope('MnistCNN'):
         seq = tg.Sequential()
-<<<<<<< HEAD
         seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
         seq.add(BatchNormalization())
         seq.add(RELU())
@@ -57,35 +46,6 @@ def model():
         seq.add(Tanh())
         seq.add(Dropout(0.8))
         seq.add(Linear(10))
-=======
-        seq.add(Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        h, w = same(in_height=28, in_width=28, stride=(1,1), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,32]))
-        seq.add(RELU())
-
-        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
-        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(2,2))
-        seq.add(LRN())
-
-        seq.add(Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-        h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3))
-        seq.add(BatchNormalization(input_shape=[h,w,64]))
-        seq.add(RELU())
-
-        seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME'))
-        h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(2,2))
-        seq.add(LRN())
-        seq.add(Flatten())
-        seq.add(Linear(int(h*w*64), 128))
-        seq.add(BatchNormalization(input_shape=[128]))
-        seq.add(Tanh())
-        seq.add(Dropout(0.8))
-        seq.add(Linear(128, 256))
-        seq.add(BatchNormalization(input_shape=[256]))
-        seq.add(Tanh())
-        seq.add(Dropout(0.8))
-        seq.add(Linear(256, 10))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         seq.add(Softmax())
     return seq
@@ -171,11 +131,7 @@ def train():
 
 
 def train_with_trainobject():
-<<<<<<< HEAD
     from tensorgraph.trainobject import train as mytrain
-=======
-    from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     with tf.Session() as sess:
         seq = model()
         X_train, y_train, X_test, y_test = Mnist(flatten=False, onehot=True, binary=True, datadir='.')
diff --git a/examples/multi_gpus_horovod.py b/examples/multi_gpus_horovod.py
index 8dbb022..ea88435 100644
--- a/examples/multi_gpus_horovod.py
+++ b/examples/multi_gpus_horovod.py
@@ -1,9 +1,5 @@
-<<<<<<< HEAD
 import tensorgraph as tg
-=======
-import tensorgraphx as tg
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 import tensorflow as tf
 import horovod.tensorflow as hvd
diff --git a/setup.py b/setup.py
index a8a56c5..9e4a51a 100644
--- a/setup.py
+++ b/setup.py
@@ -7,17 +7,17 @@
     name='tensorgraph',
     version=__version__,
     author='Joe Wu',
+    author_email='hiceen@gmail.com',
     url='https://github.com/hycis/TensorGraph',
     download_url = 'https://github.com/hycis/TensorGraph/tarball/{}'.format(__version__),
     license='Apache 2.0, see LICENCE',
     description='A high level tensorflow library for building deep learning models',
     long_description=open('README.md').read(),
     packages=find_packages(),
-    zip_safe=False,
-    include_package_data=True,
     install_requires=['numpy>=1.7.1',
                       'six>=1.9.0',
                       'scikit-learn>=0.17',
-                      'pandas>=0.17',
-                      'scipy>=0.17'],
+                      'pandas>=0.17'],
+    include_package_data=True,
+    zip_safe=False
 )
diff --git a/tensorgraph/__init__.py b/tensorgraph/__init__.py
index 67e61dc..f53e844 100644
--- a/tensorgraph/__init__.py
+++ b/tensorgraph/__init__.py
@@ -1,5 +1,5 @@
 
-__version__ = "7.0"
+__version__ = "7.0.1"
 
 from .stopper import EarlyStopper
 from .sequential import Sequential
diff --git a/test/cost_test.py b/test/cost_test.py
index e0ac6ec..ee3e398 100644
--- a/test/cost_test.py
+++ b/test/cost_test.py
@@ -1,14 +1,7 @@
-<<<<<<< HEAD
 import tensorgraph as tg
 import tensorflow as tf
 import numpy as np
 from tensorgraph.utils import make_one_hot
-=======
-import tensorgraphx as tg
-import tensorflow as tf
-import numpy as np
-from tensorgraphx.utils import make_one_hot
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 from sklearn.metrics import f1_score
 
 def test_binary_f1():
diff --git a/test/data_iterator_test.py b/test/data_iterator_test.py
index 0110f18..076ba62 100644
--- a/test/data_iterator_test.py
+++ b/test/data_iterator_test.py
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
 import tensorgraph as tg
-=======
-import tensorgraphx as tg
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 import time
diff --git a/test/layer_backbones_test.py b/test/layer_backbones_test.py
index 886b4c1..af3c497 100644
--- a/test/layer_backbones_test.py
+++ b/test/layer_backbones_test.py
@@ -1,18 +1,9 @@
-<<<<<<< HEAD
 import tensorgraph as tg
 from tensorgraph.layers.backbones import *
 from tensorgraph.layers import Softmax, Flatten, Linear, MaxPooling, BaseModel, Concat, Select, NoChange
 import tensorflow as tf
 import os
 from tensorgraph.trainobject import train as mytrain
-=======
-import tensorgraphx as tg
-from tensorgraphx.layers.backbones import *
-from tensorgraphx.layers import Softmax, Flatten, Linear, MaxPooling
-import tensorflow as tf
-import os
-from tensorgraphx.trainobject import train as mytrain
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
@@ -31,11 +22,8 @@ def train(seq):
     optimizer = tf.train.AdamOptimizer(0.0001)
     test_accu_sb = tg.cost.accuracy(y_ph, y_test_sb)
     with tf.Session() as sess:
-<<<<<<< HEAD
         this_dir = os.path.dirname(os.path.realpath(__file__))
         writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph)
-=======
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         mytrain(session=sess, feed_dict={X_ph:X_train, y_ph:y_train},
                 train_cost_sb=train_cost_sb,
@@ -44,139 +32,67 @@ def train(seq):
                 epoch_look_back=5, max_epoch=1, percent_decrease=0,
                 train_valid_ratio=[5,1], batchsize=1, randomize_split=False)
-<<<<<<< HEAD
         writer.close()
-=======
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 
 def test_VGG16():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(VGG16())
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    vgg = VGG16(input_channels=c, input_shape=(h, w))
-    print('output channels:', vgg.output_channels)
-    print('output shape:', vgg.output_shape)
-    out_dim = np.prod(vgg.output_shape) * vgg.output_channels
-    seq.add(vgg)
-    seq.add(Flatten())
-    seq.add(Linear(int(out_dim), nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
 def test_VGG19():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(VGG19())
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    vgg = VGG19(input_channels=c, input_shape=(h, w))
-    print('output channels:', vgg.output_channels)
-    print('output shape:', vgg.output_shape)
-    out_dim = np.prod(vgg.output_shape) * vgg.output_channels
-    seq.add(vgg)
-    seq.add(Flatten())
-    seq.add(Linear(int(out_dim), nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
 def test_ResNetSmall():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(ResNetSmall(config=[1,1]))
     seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    model = ResNetSmall(input_channels=c, input_shape=(h, w), config=[1,1])
-    model = ResNetBase(input_channels=c, input_shape=(h, w), config=[1,1,1,1])
-    print('output channels:', model.output_channels)
-    print('output shape:', model.output_shape)
-    seq.add(model)
-    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
-    outshape = valid_nd(model.output_shape, kernel_size=model.output_shape, stride=(1,1))
-    print(outshape)
-    out_dim = model.output_channels
-    seq.add(Flatten())
-    seq.add(Linear(int(out_dim), nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
 def test_ResNetBase():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(ResNetBase(config=[1,1,1,1]))
     seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    model = ResNetBase(input_channels=c, input_shape=(h, w), config=[1,1,1,1])
-    print('output channels:', model.output_channels)
-    print('output shape:', model.output_shape)
-    seq.add(model)
-    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
-    outshape = valid_nd(model.output_shape, kernel_size=model.output_shape, stride=(1,1))
-    print(outshape)
-    out_dim = model.output_channels
-    seq.add(Flatten())
-    seq.add(Linear(int(out_dim), nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
 def test_DenseNet():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    model = DenseNet(input_channels=c, input_shape=(h, w), ndense=1, growth_rate=1, nlayer1blk=1)
-    print('output channels:', model.output_channels)
-    print('output shape:', model.output_shape)
-    seq.add(model)
-    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
-    seq.add(Flatten())
-    seq.add(Linear(model.output_channels, nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
 def test_UNet():
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(UNet(input_shape=(h, w)))
     seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
     seq.add(Flatten())
     seq.add(Linear(this_dim=nclass))
-=======
-    model = UNet(input_channels=c, input_shape=(h, w))
-    print('output channels:', model.output_channels)
-    print('output shape:', model.output_shape)
-    out_dim = np.prod(model.output_shape) * model.output_channels
-    seq.add(model)
-    seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID'))
-    seq.add(Flatten())
-    seq.add(Linear(model.output_channels, nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Softmax())
     train(seq)
 
 
-<<<<<<< HEAD
 class XModel(BaseModel):
 
     @BaseModel.init_name_scope
@@ -227,19 +143,3 @@ def test_BaseModel():
     # test_UNet()
     # print('..UNet running test done')
     test_BaseModel()
-=======
-if __name__ == '__main__':
-    print('runtime test')
-    test_VGG16()
-    print('..VGG16 running test done')
-    test_VGG19()
-    print('..VGG19 running test done')
-    test_ResNetSmall()
-    print('..ResNetSmall running test done')
-    test_ResNetBase()
-    print('..ResNetBase running test done')
-    test_DenseNet()
-    print('..DenseNet running test done')
-    test_UNet()
-    print('..UNet running test done')
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
diff --git a/test/layer_conv_test.py b/test/layer_conv_test.py
index 70cae0b..9552725 100644
--- a/test/layer_conv_test.py
+++ b/test/layer_conv_test.py
@@ -1,6 +1,5 @@
 import tensorflow as tf
-<<<<<<< HEAD
 import tensorgraph as tg
 from tensorgraph.layers import Depthwise_Conv2D, Atrous_Conv2D, Conv2D, Conv3D
 import numpy as np
@@ -9,16 +8,6 @@ def test_Depthwise_Conv2D():
 
     seq = tg.Sequential()
     seq.add(Depthwise_Conv2D(num_filters=2, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
-=======
-import tensorgraphx as tg
-from tensorgraphx.layers import Depthwise_Conv2D, Atrous_Conv2D
-import numpy as np
-
-def test_Depthwise_Conv2d():
-
-    seq = tg.Sequential()
-    seq.add(Depthwise_Conv2D(input_channels=5, num_filters=2, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
     X_ph = tf.placeholder('float32', [None, 100, 100, 5])
@@ -30,7 +19,6 @@ def test_Depthwise_Conv2d():
     print(out.shape)
 
 
-<<<<<<< HEAD
 def test_Conv2D():
 
     seq = tg.Sequential()
@@ -50,12 +38,6 @@ def test_Atrous_Conv2D():
 
     seq = tg.Sequential()
     seq.add(Atrous_Conv2D(num_filters=2, kernel_size=(3, 3), rate=3, padding='SAME'))
-=======
-def test_Atrous_Conv2d():
-
-    seq = tg.Sequential()
-    seq.add(Atrous_Conv2D(input_channels=5, num_filters=2, kernel_size=(3, 3), rate=3, padding='SAME'))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
     h, w, c = 100, 300, 5
     X_ph = tf.placeholder('float32', [None, h, w, c])
@@ -67,19 +49,10 @@ def test_Atrous_Conv2d():
         out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32, h, w, c)})
         print(out.shape)
         assert out.shape[1] == h and out.shape[2] == w
-<<<<<<< HEAD
     seq = tg.Sequential()
     r = 2
     k = 5
     seq.add(Atrous_Conv2D(num_filters=2, kernel_size=(k, k), rate=r, padding='VALID'))
-=======
-
-
-    seq = tg.Sequential()
-    r = 2
-    k = 5
-    seq.add(Atrous_Conv2D(input_channels=5, num_filters=2, kernel_size=(k, k), rate=r, padding='VALID'))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
     h, w, c = 100, 300, 5
     X_ph = tf.placeholder('float32', [None, h, w, c])
@@ -93,7 +66,6 @@ def test_Atrous_Conv2d():
     assert out.shape[1] == h - 2*int((k+(k-1)*(r-1))/2), out.shape[2] == w - 2*int((w+(w-1)*(r-1))/2)
 
 
-<<<<<<< HEAD
 def test_Conv3D():
     seq = tg.Sequential()
     seq.add(Conv3D(num_filters=2, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME'))
@@ -112,9 +84,3 @@ def test_Conv3D():
     test_Depthwise_Conv2D()
     test_Atrous_Conv2D()
     test_Conv3D()
-=======
-
-if __name__ == '__main__':
-    # test_Depthwise_Conv2d()
-    test_Atrous_Conv2d()
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
diff --git a/test/layer_merge_test.py b/test/layer_merge_test.py
index 4435e39..22e9246 100644
--- a/test/layer_merge_test.py
+++ b/test/layer_merge_test.py
@@ -1,12 +1,7 @@
 import tensorflow as tf
-<<<<<<< HEAD
 import tensorgraph as tg
 from tensorgraph.layers import SequenceMask, MaskSoftmax, SelectedMaskSoftmax
-=======
-import tensorgraphx as tg
-from tensorgraphx.layers import SequenceMask, MaskSoftmax, SelectedMaskSoftmax
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 
 def test_SequenceMask():
diff --git a/test/layer_misc_test.py b/test/layer_misc_test.py
index cbed51c..64789df 100644
--- a/test/layer_misc_test.py
+++ b/test/layer_misc_test.py
@@ -1,13 +1,7 @@
-<<<<<<< HEAD
 import tensorgraph as tg
 import tensorflow as tf
 from tensorgraph.layers import OneHot
-=======
-import tensorgraphx as tg
-import tensorflow as tf
-from tensorgraphx.layers import OneHot
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 
 def test_OneHot():
@@ -24,5 +18,5 @@ def test_OneHot():
         print(sess.run(y2, feed_dict={X2:np.random.random_integers(0, 2, [5,6,7,8])}).shape)
 
 if __name__ == '__main__':
-
+    test_OneHot()
diff --git a/test/layer_noise_test.py b/test/layer_noise_test.py
index c976d76..dedb659 100644
--- a/test/layer_noise_test.py
+++ b/test/layer_noise_test.py
@@ -1,23 +1,13 @@
 import tensorflow as tf
-<<<<<<< HEAD
 import tensorgraph as tg
 from tensorgraph.layers import Linear, Dropout
-=======
-import tensorgraphx as tg
-from tensorgraphx.layers import Linear, Dropout
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 
 
 def test_Dropout():
     X_ph = tf.placeholder('float32', [None, 32])
-
     seq = tg.Sequential()
-<<<<<<< HEAD
     seq.add(Linear(20))
-=======
-    seq.add(Linear(32, 20))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     seq.add(Dropout(0.2, noise_shape=[-1, 20]))
diff --git a/test/tensorgraph_test.py b/test/tensorgraph_test.py
index aa7e14f..688739c 100644
--- a/test/tensorgraph_test.py
+++ b/test/tensorgraph_test.py
@@ -1,18 +1,10 @@
 from tensorflow.python.layers.normalization import BatchNormalization as TFBatchNorm
-<<<<<<< HEAD
 from tensorgraph.layers import Conv2D, BatchNormalization, RELU, Linear, Flatten, \
                                BaseModel, Sum
 import tensorflow as tf
 import numpy as np
 import tensorgraph as tg
-=======
-from tensorgraphx.layers import Conv2D, BatchNormalization, RELU, Linear, Flatten, \
-                                BaseModel, Sum
-import tensorflow as tf
-import numpy as np
-import tensorgraphx as tg
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 from tensorflow.python.framework import ops
 import os
@@ -21,7 +13,6 @@ class CBR(BaseModel):
 
     def __init__(self, h, w, c):
         layers1 = []
-<<<<<<< HEAD
         layers1.append(Conv2D(num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME'))
         layers1.append(BatchNormalization())
         layers1.append(RELU())
@@ -29,15 +20,6 @@ def __init__(self, h, w, c):
         layers2 = []
         layers2.append(Conv2D(num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME'))
         layers2.append(BatchNormalization())
-=======
-        layers1.append(Conv2D(input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME'))
-        layers1.append(BatchNormalization(input_shape=[h,w,1]))
-        layers1.append(RELU())
-
-        layers2 = []
-        layers2.append(Conv2D(input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME'))
-        layers2.append(BatchNormalization(input_shape=[h,w,1]))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         layers2.append(RELU())
 
         self.startnode = tg.StartNode(input_vars=[None])
@@ -54,11 +36,7 @@ def __init__(self, h, w, c, nclass):
         layers = []
         layers.append(CBR(h,w,c))
         layers.append(Flatten())
-<<<<<<< HEAD
         layers.append(Linear(nclass))
-=======
-        layers.append(Linear(1*h*w, nclass))
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         self.startnode = tg.StartNode(input_vars=[None])
         hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
@@ -78,17 +56,10 @@ def conv_layer(state_below, input_channels, num_filters, kernel_size, stride, pa
     return tf.nn.bias_add(conv_out, _b)
 
 
-<<<<<<< HEAD
 def batchnorm(state_below, input_shape, scope=None, training=True):
     bn = TFBatchNorm(name=scope)
     bn.build(input_shape=[None] + list(input_shape))
     return bn.apply(state_below, training=training)
-=======
-def batchnorm(state_below, input_shape):
-    bn = TFBatchNorm()
-    bn.build(input_shape=[None] + list(input_shape))
-    return bn.apply(state_below, training=True)
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
@@ -112,7 +83,6 @@ def data(n_exp, h, w, c, nclass, batch_size):
     return dict(nr_train)
 
 
-<<<<<<< HEAD
 def TFModel(state_below, h, w, c, nclass, scope=None, training=True):
     state_below1 = conv_layer(state_below, input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME')
     state_below1 = batchnorm(state_below1, input_shape=[h,w,1], scope=scope, training=training)
@@ -120,15 +90,6 @@ def TFModel(state_below, h, w, c, nclass, scope=None, training=True):
 
     state_below2 = conv_layer(state_below, input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME')
     state_below2 = batchnorm(state_below2, input_shape=[h,w,1], scope=scope, training=training)
-=======
-def TFModel(state_below, h, w, c, nclass):
-    state_below1 = conv_layer(state_below, input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME')
-    state_below1 = batchnorm(state_below1, input_shape=[h,w,1])
-    state_below1 = tf.nn.relu(state_below1)
-
-    state_below2 = conv_layer(state_below, input_channels=c, num_filters=1, kernel_size=(2,2), stride=(1,1), padding='SAME')
-    state_below2 = batchnorm(state_below2, input_shape=[h,w,1])
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     state_below2 = tf.nn.relu(state_below2)
 
     state_below = state_below1 + state_below2
@@ -150,11 +111,7 @@ def train(n_exp, h, w, c, nclass, batch_size=100, tgmodel=True):
     y_ph = tf.placeholder('float32', [None, nclass])
 
     if tgmodel:
-<<<<<<< HEAD
         # tensorgraph model
-=======
-        # tensorgraphx model
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
         print('..using graph model')
         seq = TGModel(h, w, c, nclass)
         y_train_sb = seq.train_fprop(X_ph)
@@ -200,16 +157,11 @@ def train(n_exp, h, w, c, nclass, batch_size=100, tgmodel=True):
             print('epoch {}, train loss {}'.format(epoch, ttl_train_loss))
 
 
-<<<<<<< HEAD
 def compare_total_nodes(train_mode=True):
-=======
-def test_compare_total_nodes():
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     h, w, c, nclass = 20, 20, 5, 2
     X_ph = tf.placeholder('float32', [None, h, w, c])
     with tf.name_scope('tgmodel'):
         seq = TGModel(h, w, c, nclass)
-<<<<<<< HEAD
         if train_mode:
             seq.train_fprop(X_ph)
         else:
@@ -236,18 +188,6 @@ def test_compare_total_nodes():
     compare_total_nodes(train_mode=True)
     compare_total_nodes(train_mode=False)
 
-=======
-    y_train_sb = seq.train_fprop(X_ph)
-    num_tg_nodes = [x for x in tf.get_default_graph().get_operations() if x.name.startswith('tgmodel/')]
-    print('num tg nodes:', len(num_tg_nodes))
-    with tf.name_scope('tfmodel'):
-        y_train_sb = TFModel(X_ph, h, w, c, nclass)
-    num_tf_nodes = [x for x in tf.get_default_graph().get_operations() if x.name.startswith('tfmodel/')]
-    print('num tf nodes:', len(num_tf_nodes))
-    assert len(num_tg_nodes) == len(num_tf_nodes)
-
-
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 def test_models():
     train(n_exp=10, h=20, w=20, c=5, nclass=2, batch_size=1, tgmodel=False)
     train(n_exp=10, h=20, w=20, c=5, nclass=2, batch_size=1, tgmodel=True)
@@ -255,10 +195,6 @@ def test_models():
 
 
 if __name__ == '__main__':
-<<<<<<< HEAD
     # test_models()
     # print('train mode')
-=======
-    test_models()
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
     test_compare_total_nodes()
diff --git a/test/utils_test.py b/test/utils_test.py
index 30f5cd9..4721393 100644
--- a/test/utils_test.py
+++ b/test/utils_test.py
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
 from tensorgraph.utils import MakeTFRecords,MakeTFRecords_tfdata
-=======
-from tensorgraphx.utils import MakeTFRecords
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 import numpy as np
 import tensorflow as tf
@@ -18,10 +14,7 @@ def test_make_tfrecords():
             print(record.shape)
         print('\n')
 
-<<<<<<< HEAD
-=======
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 def test_fetch_queue_tfrecords():
     tfrecords = MakeTFRecords()
     tfrecords_filename = './arr.tfrecords'
@@ -43,7 +36,6 @@ def test_fetch_queue_tfrecords():
         coord.request_stop()
         coord.join(threads)
 
-<<<<<<< HEAD
 def test_make_tfrecords_tfdata():
     tfrecords = MakeTFRecords_tfdata()
     data_records = {'X':np.random.rand(100,50,30), 'y':np.random.rand(100,10),'name':['a']*20+['b']*20+['c']*20+['d']*40}
@@ -77,14 +69,9 @@ def test_fetch_queue_tfrecords_tfdata():
                 print (key, arrs[index].shape)
             print('\n')
 
-=======
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5
 
 if __name__ == '__main__':
     test_make_tfrecords()
     test_fetch_queue_tfrecords()
-<<<<<<< HEAD
     test_make_tfrecords_tfdata()
     test_fetch_queue_tfrecords_tfdata()
-=======
->>>>>>> e55a706e1467da7b7c54b6d04055aba847f5a2b5