Release 0.0.7
seliverstov authored Sep 1, 2018 · merge commit 4cc9778 (2 parents: 4855db9 + 5d76d90)
Showing 149 changed files with 5,700 additions and 1,797 deletions.
README.md: 2 changes (0 additions, 2 deletions)
@@ -44,8 +44,6 @@ print(HelloBot(['Hello!', 'Boo...', 'Bye.']))

[Intent/Sentence Classification](http://docs.deeppavlov.ai/en/latest/components/classifiers.html) | [Sentence Similarity/Ranking](http://docs.deeppavlov.ai/en/latest/components/neural_ranking.html)

[Goal(Task)-oriented Bot](http://docs.deeppavlov.ai/en/latest/components/go_bot.html) | [Seq2seq Goal-Oriented bot](http://docs.deeppavlov.ai/en/latest/components/seq2seq_go_bot.html)

[Question Answering over Text (SQuAD)](http://docs.deeppavlov.ai/en/latest/components/squad.html)

[Morphological tagging](http://docs.deeppavlov.ai/en/latest/components/morphotagger.html) | [Automatic Spelling Correction](http://docs.deeppavlov.ai/en/latest/components/spelling_correction.html)
deeppavlov/__init__.py: 2 changes (1 addition, 1 deletion)
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-__version__ = '0.0.6.6'
+__version__ = '0.0.7'
__author__ = 'Neural Networks and Deep Learning lab, MIPT'
__description__ = 'An open source library for building end-to-end dialog systems and training chatbots.'
__keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot']
The remaining files shown are classifier pipeline configs, starting with the insults_kaggle config; each receives the same refactoring.
@@ -37,20 +37,24 @@
"name": "dirty_comments_preprocessor"
},
{
"in": "x_prep",
"out": "x_tok",
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": "x_tok",
"out": "x_emb",
"id": "my_embedder",
"name": "fasttext",
"save_path": "embeddings/wordpunct_tok_reddit_comments_2017_11_300.bin",
"load_path": "embeddings/wordpunct_tok_reddit_comments_2017_11_300.bin",
"dim": 300
},
{
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": [
"x_prep"
"x_emb"
],
"in_y": [
"y"
@@ -61,8 +65,9 @@
        ],
        "main": true,
        "name": "keras_classification_model",
-       "save_path": "sentiment/insults_kaggle_v0",
-       "load_path": "sentiment/insults_kaggle_v0",
+       "save_path": "classifiers/insults_kaggle_v0",
+       "load_path": "classifiers/insults_kaggle_v0",
+       "embedding_size": "#my_embedder.dim",
        "classes": "#classes_vocab.keys()",
        "kernel_sizes_cnn": [
          1,
@@ -81,9 +86,7 @@
"coef_reg_den": 1e-2,
"dropout_rate": 0.5,
"dense_size": 100,
"model_name": "cnn_model",
"embedder": "#my_embedder",
"tokenizer": "#my_tokenizer"
"model_name": "cnn_model"
}
],
"out": [
@@ -117,7 +120,7 @@
    },
    "download": [
      "http://files.deeppavlov.ai/deeppavlov_data/vocabs.tar.gz",
-     "http://files.deeppavlov.ai/deeppavlov_data/sentiment.tar.gz",
+     "http://files.deeppavlov.ai/deeppavlov_data/classifiers.tar.gz",
      "http://files.deeppavlov.ai/datasets/insults_data.tar.gz",
      {
        "url": "http://files.deeppavlov.ai/embeddings/reddit_fastText/wordpunct_tok_reddit_comments_2017_11_300.bin",
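Taken together, the hunks above rewire the classifier pipeline of the insults_kaggle config. Previously the fastText embedder and the NLTK tokenizer sat in the pipe without "in"/"out" slots and were handed to keras_classification_model by reference ("embedder": "#my_embedder", "tokenizer": "#my_tokenizer"); now they run as ordinary pipeline steps, so the model consumes precomputed embeddings ("x_emb") and only reads the embedding dimensionality through "#my_embedder.dim". Below is a minimal sketch of the new wiring; since JSON allows no comments, note here that the chainer-level keys ("in", "in_y", "pipe", "out"), the preprocessor's "in"/"out" slots, and the trimmed parameter set are assumptions based on DeepPavlov configs of this period rather than lines shown in this diff:

  {
    "chainer": {
      "in": ["x"],
      "in_y": ["y"],
      "pipe": [
        {
          "in": "x",
          "out": "x_prep",
          "name": "dirty_comments_preprocessor"
        },
        {
          "in": "x_prep",
          "out": "x_tok",
          "id": "my_tokenizer",
          "name": "nltk_tokenizer",
          "tokenizer": "wordpunct_tokenize"
        },
        {
          "in": "x_tok",
          "out": "x_emb",
          "id": "my_embedder",
          "name": "fasttext",
          "load_path": "embeddings/wordpunct_tok_reddit_comments_2017_11_300.bin",
          "dim": 300
        },
        {
          "in": ["x_emb"],
          "in_y": ["y"],
          "out": ["y_predicted"],
          "main": true,
          "name": "keras_classification_model",
          "save_path": "classifiers/insults_kaggle_v0",
          "load_path": "classifiers/insults_kaggle_v0",
          "embedding_size": "#my_embedder.dim",
          "classes": "#classes_vocab.keys()",
          "model_name": "cnn_model"
        }
      ],
      "out": ["y_predicted"]
    }
  }

The "#id.attribute" syntax ("#my_embedder.dim", "#classes_vocab.keys()") lets one component read an attribute of another when the pipeline is built, which keeps embedding_size in sync with whatever embedder is plugged in. The next file applies the same refactoring to the DSTC 2 intents config.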
@@ -5,21 +5,7 @@
    },
    "dataset_iterator": {
      "name": "dstc2_intents_iterator",
-     "seed": 42,
-     "fields_to_merge": [
-       "train",
-       "valid"
-     ],
-     "merged_field": "train",
-     "field_to_split": "train",
-     "split_fields": [
-       "train",
-       "valid"
-     ],
-     "split_proportions": [
-       0.9,
-       0.1
-     ]
+     "seed": 42
    },
    "chainer": {
      "in": [
@@ -36,24 +22,28 @@
"y"
],
"level": "token",
"save_path": "vocabs/classes.dict",
"load_path": "vocabs/classes.dict"
"save_path": "vocabs/dstc2_classes.dict",
"load_path": "vocabs/dstc2_classes.dict"
},
{
"in": "x",
"out": "x_tok",
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": "x_tok",
"out": "x_emb",
"id": "my_embedder",
"name": "fasttext",
"save_path": "embeddings/dstc2_fastText_model.bin",
"load_path": "embeddings/dstc2_fastText_model.bin",
"dim": 100
},
{
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": [
"x"
"x_emb"
],
"in_y": [
"y"
@@ -64,8 +54,9 @@
        ],
        "main": true,
        "name": "keras_classification_model",
-       "save_path": "intents/intents_dstc2_v4",
-       "load_path": "intents/intents_dstc2_v4",
+       "save_path": "classifiers/intents_dstc2_v4",
+       "load_path": "classifiers/intents_dstc2_v4",
+       "embedding_size": "#my_embedder.dim",
        "classes": "#classes_vocab.keys()",
        "kernel_sizes_cnn": [
          1,
@@ -83,9 +74,7 @@
"coef_reg_den": 1e-4,
"dropout_rate": 0.5,
"dense_size": 100,
"model_name": "cnn_model",
"embedder": "#my_embedder",
"tokenizer": "#my_tokenizer"
"model_name": "cnn_model"
}
],
"out": [
@@ -116,7 +105,7 @@
"server_utils": "KerasIntentModel"
},
"download": [
"http://files.deeppavlov.ai/deeppavlov_data/intents.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/classifiers.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/vocabs.tar.gz",
{
"url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/dstc2_fastText_model.bin",
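The download-list change here is part of a renaming that runs through all of these configs: trained models move from intents/... and sentiment/... into a single classifiers/ directory, and the pretrained archive switches from intents.tar.gz (or sentiment.tar.gz) to classifiers.tar.gz to match. Entries in "download" mix bare URLs with objects that also name a target subdirectory; a sketch of the resulting metadata section follows, where the "labels" grouping, the empty "requirements" list, and the "subdir" key are assumptions drawn from similar configs of this period, since the view cuts off before showing them:

  "metadata": {
    "requirements": [],
    "labels": {
      "telegram_utils": "IntentModel",
      "server_utils": "KerasIntentModel"
    },
    "download": [
      "http://files.deeppavlov.ai/deeppavlov_data/classifiers.tar.gz",
      "http://files.deeppavlov.ai/deeppavlov_data/vocabs.tar.gz",
      {
        "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/dstc2_fastText_model.bin",
        "subdir": "embeddings"
      }
    ]
  }

Keeping every classifier under one classifiers/ prefix means a single downloaded archive can serve the insults, DSTC 2, and SNIPS models alike. The next file makes the same changes to a second DSTC 2 intents config built on the 300-dimensional wiki.en.bin fastText embeddings.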
@@ -5,21 +5,7 @@
    },
    "dataset_iterator": {
      "name": "dstc2_intents_iterator",
-     "seed": 42,
-     "fields_to_merge": [
-       "train",
-       "valid"
-     ],
-     "merged_field": "train",
-     "field_to_split": "train",
-     "split_fields": [
-       "train",
-       "valid"
-     ],
-     "split_proportions": [
-       0.9,
-       0.1
-     ]
+     "seed": 42
    },
    "chainer": {
      "in": [
@@ -36,24 +22,28 @@
"y"
],
"level": "token",
"save_path": "vocabs/classes.dict",
"load_path": "vocabs/classes.dict"
"save_path": "vocabs/dstc2_classes.dict",
"load_path": "vocabs/dstc2_classes.dict"
},
{
"in": "x",
"out": "x_tok",
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": "x_tok",
"out": "x_emb",
"id": "my_embedder",
"name": "fasttext",
"save_path": "embeddings/wiki.en.bin",
"load_path": "embeddings/wiki.en.bin",
"dim": 300
},
{
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": [
"x"
"x_emb"
],
"in_y": [
"y"
@@ -64,8 +54,9 @@
        ],
        "main": true,
        "name": "keras_classification_model",
-       "save_path": "intents/intents_dstc2_v5",
-       "load_path": "intents/intents_dstc2_v5",
+       "save_path": "classifiers/intents_dstc2_v5",
+       "load_path": "classifiers/intents_dstc2_v5",
+       "embedding_size": "#my_embedder.dim",
        "classes": "#classes_vocab.keys()",
        "kernel_sizes_cnn": [
          1,
@@ -83,9 +74,7 @@
"coef_reg_den": 1e-4,
"dropout_rate": 0.5,
"dense_size": 100,
"model_name": "cnn_model",
"embedder": "#my_embedder",
"tokenizer": "#my_tokenizer"
"model_name": "cnn_model"
}
],
"out": [
@@ -104,7 +93,9 @@
"validation_patience": 5,
"val_every_n_epochs": 5,
"log_every_n_batches": 100,
"show_examples": false
"show_examples": false,
"validate_best": true,
"test_best": true
},
"metadata": {
"requirements": [
@@ -115,7 +106,7 @@
"telegram_utils": "IntentModel"
},
"download": [
"http://files.deeppavlov.ai/deeppavlov_data/intents.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/classifiers.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/vocabs.tar.gz",
{
"url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin",
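Beyond the pipeline rewiring, this config drops the hand-rolled merge-and-resplit of the DSTC 2 data: the fields_to_merge / merged_field / field_to_split / split_fields / split_proportions block disappears, leaving dstc2_intents_iterator with only a seed, so the iterator is evidently expected to produce the train/valid splits itself. The train section also gains "validate_best" and "test_best", which score the best saved model on the validation and test sets once training finishes. A sketch of the resulting train block follows; epochs, batch_size, and metrics fall outside the shown hunks and are illustrative assumptions:

  "train": {
    "epochs": 1000,
    "batch_size": 64,
    "metrics": ["sets_accuracy"],
    "validation_patience": 5,
    "val_every_n_epochs": 5,
    "log_every_n_batches": 100,
    "show_examples": false,
    "validate_best": true,
    "test_best": true
  }

The last file shown applies the same pipeline refactoring to the SNIPS intents config.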
@@ -44,20 +44,24 @@
"load_path": "vocabs/snips_classes.dict"
},
{
"in": "x",
"out": "x_tok",
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": "x_tok",
"out": "x_emb",
"id": "my_embedder",
"name": "fasttext",
"save_path": "embeddings/dstc2_fastText_model.bin",
"load_path": "embeddings/dstc2_fastText_model.bin",
"dim": 100
},
{
"id": "my_tokenizer",
"name": "nltk_tokenizer",
"tokenizer": "wordpunct_tokenize"
},
{
"in": [
"x"
"x_emb"
],
"in_y": [
"y"
@@ -68,8 +72,9 @@
        ],
        "main": true,
        "name": "keras_classification_model",
-       "save_path": "intents/intents_snips_v4",
-       "load_path": "intents/intents_snips_v4",
+       "save_path": "classifiers/intents_snips_v4",
+       "load_path": "classifiers/intents_snips_v4",
+       "embedding_size": "#my_embedder.dim",
        "classes": "#classes_vocab.keys()",
        "kernel_sizes_cnn": [
          1,
@@ -87,9 +92,7 @@
"coef_reg_den": 1e-4,
"dropout_rate": 0.5,
"dense_size": 100,
"model_name": "cnn_model",
"embedder": "#my_embedder",
"tokenizer": "#my_tokenizer"
"model_name": "cnn_model"
}
],
"out": [
@@ -122,7 +125,7 @@
"server_utils": "KerasIntentModel"
},
"download": [
"http://files.deeppavlov.ai/deeppavlov_data/intents.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/classifiers.tar.gz",
"http://files.deeppavlov.ai/deeppavlov_data/vocabs.tar.gz",
{
"url": "http://files.deeppavlov.ai/datasets/snips_intents/train.csv",
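A further consistent change is that class vocabularies now carry dataset-specific file names: the shared vocabs/classes.dict becomes vocabs/dstc2_classes.dict in the DSTC 2 configs, matching the vocabs/snips_classes.dict seen above, so models trained on different datasets no longer overwrite one another's class dictionaries in the common vocabs/ directory. For reference, a sketch of such a vocabulary component; the "name": "default_vocab" and "fit_on" keys are assumptions from configs of this period, since the hunks only show the tail of the component:

  {
    "id": "classes_vocab",
    "name": "default_vocab",
    "fit_on": ["y"],
    "level": "token",
    "save_path": "vocabs/dstc2_classes.dict",
    "load_path": "vocabs/dstc2_classes.dict"
  }

Declaring a component with an "id" like this is what makes references such as "#classes_vocab.keys()" in the classifier possible.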