diff --git a/deeppavlov/_meta.py b/deeppavlov/_meta.py index 50272a6d6e..0a28c79be8 100644 --- a/deeppavlov/_meta.py +++ b/deeppavlov/_meta.py @@ -1,4 +1,4 @@ -__version__ = '0.15.0' +__version__ = '0.16.0' __author__ = 'Neural Networks and Deep Learning lab, MIPT' __description__ = 'An open source library for building end-to-end dialog systems and training chatbots.' __keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot'] diff --git a/deeppavlov/configs/classifiers/glue/glue_mnli_roberta.json b/deeppavlov/configs/classifiers/glue/glue_mnli_roberta.json new file mode 100644 index 0000000000..44dc66dbae --- /dev/null +++ b/deeppavlov/configs/classifiers/glue/glue_mnli_roberta.json @@ -0,0 +1,96 @@ +{ + "dataset_reader": { + "class_name": "huggingface_dataset_reader", + "path": "glue", + "name": "mnli", + "train": "train", + "valid": "validation_matched", + "test": "test_matched" + }, + "dataset_iterator": { + "class_name": "huggingface_dataset_iterator", + "features": ["hypothesis", "premise"], + "label": "label", + "seed": 42 + }, + "chainer": { + "in": ["hypothesis", "premise"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{BASE_MODEL}", + "do_lower_case": false, + "max_seq_length": 128, + "in": ["hypothesis", "premise"], + "out": ["bert_features"] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": ["y"], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": ["y"], + "out": ["y_ids"] + }, + { + "in": ["y_ids"], + "out": ["y_onehot"], + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{BASE_MODEL}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 1e-05 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 2.0, + "in": ["bert_features"], + "in_y": ["y_ids"], + "out": ["y_pred_probas"] + }, + { + "in": ["y_pred_probas"], + "out": ["y_pred_ids"], + "class_name": "proba2labels", + "max_proba": true + }, + { + "in": ["y_pred_ids"], + "out": ["y_pred_labels"], + "ref": "classes_vocab" + } + ], + "out": ["y_pred_labels"] + }, + "train": { + "batch_size": 4, + "metrics": ["accuracy"], + "validation_patience": 10, + "val_every_n_batches": 250, + "log_every_n_batches": 250, + "show_examples": false, + "evaluation_targets": ["train", "valid"], + "class_name": "torch_trainer", + "tensorboard_log_dir": "{MODEL_PATH}/", + "pytest_max_batches": 2 + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "BASE_MODEL": "roberta-large", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/glue_mnli_{BASE_MODEL}" + } + } +} diff --git a/deeppavlov/configs/classifiers/glue/glue_rte_roberta_mnli.json b/deeppavlov/configs/classifiers/glue/glue_rte_roberta_mnli.json new file mode 100644 index 0000000000..34c7054ced --- /dev/null +++ b/deeppavlov/configs/classifiers/glue/glue_rte_roberta_mnli.json @@ -0,0 +1,96 @@ +{ + "dataset_reader": { + "class_name": "huggingface_dataset_reader", + "path": "glue", + "name": "rte", + "train": "train", + "valid": "validation", + "test": "test" + }, + "dataset_iterator": { + "class_name": "huggingface_dataset_iterator", + "features": ["sentence1", "sentence2"], + "label": "label", + "seed": 
42 + }, + "chainer": { + "in": ["sentence1", "sentence2"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{BASE_MODEL}", + "do_lower_case": false, + "max_seq_length": 256, + "in": ["sentence1", "sentence2"], + "out": ["bert_features"] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": ["y"], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": ["y"], + "out": ["y_ids"] + }, + { + "in": ["y_ids"], + "out": ["y_onehot"], + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{BASE_MODEL}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 1e-06 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 2.0, + "in": ["bert_features"], + "in_y": ["y_ids"], + "out": ["y_pred_probas"] + }, + { + "in": ["y_pred_probas"], + "out": ["y_pred_ids"], + "class_name": "proba2labels", + "max_proba": true + }, + { + "in": ["y_pred_ids"], + "out": ["y_pred_labels"], + "ref": "classes_vocab" + } + ], + "out": ["y_pred_labels"] + }, + "train": { + "batch_size": 4, + "metrics": ["accuracy"], + "epochs": 2, + "val_every_n_epochs": 1, + "log_every_n_epochs": 1, + "show_examples": false, + "evaluation_targets": ["train", "valid"], + "class_name": "torch_trainer", + "tensorboard_log_dir": "{MODEL_PATH}/", + "pytest_max_batches": 2 + }, + "metadata": { + "variables": { + "BASE_MODEL": "roberta-large-mnli", + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/glue_rte_{BASE_MODEL}" + } + } +} diff --git a/deeppavlov/configs/classifiers/intents_dstc2.json b/deeppavlov/configs/classifiers/intents_dstc2.json index 9290978dcf..828c01d634 100644 --- a/deeppavlov/configs/classifiers/intents_dstc2.json +++ b/deeppavlov/configs/classifiers/intents_dstc2.json @@ -87,7 +87,7 @@ "in": "y_pred_probas", "out": "y_pred_ids", "class_name": "proba2labels", - "confident_threshold": 0.5 + "confidence_threshold": 0.5 }, { "in": "y_pred_ids", diff --git a/deeppavlov/configs/classifiers/intents_dstc2_bert.json b/deeppavlov/configs/classifiers/intents_dstc2_bert.json index 4992fac32c..0143446b53 100644 --- a/deeppavlov/configs/classifiers/intents_dstc2_bert.json +++ b/deeppavlov/configs/classifiers/intents_dstc2_bert.json @@ -59,7 +59,7 @@ "in": "y_pred_probas", "out": "y_pred_ids", "class_name": "proba2labels", - "confident_threshold": 0.5 + "confidence_threshold": 0.5 }, { "in": "y_pred_ids", diff --git a/deeppavlov/configs/classifiers/intents_dstc2_big.json b/deeppavlov/configs/classifiers/intents_dstc2_big.json index a6f9d47d5e..d6a458dcab 100644 --- a/deeppavlov/configs/classifiers/intents_dstc2_big.json +++ b/deeppavlov/configs/classifiers/intents_dstc2_big.json @@ -87,7 +87,7 @@ "in": "y_pred_probas", "out": "y_pred_ids", "class_name": "proba2labels", - "confident_threshold": 0.5 + "confidence_threshold": 0.5 }, { "in": "y_pred_ids", diff --git a/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_2L.json b/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_2L.json new file mode 100644 index 0000000000..d0a2eee508 --- /dev/null +++ b/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_2L.json @@ -0,0 
+1,93 @@ + { + "dataset_reader": { + "class_name": "paraphraser_reader", + "data_path": "{DOWNLOADS_PATH}/paraphraser_data", + "do_lower_case": false + }, + "dataset_iterator": { + "class_name": "siamese_iterator", + "seed": 243, + "len_valid": 500 + }, + "chainer": { + "in": ["text_a", "text_b"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 64, + "in": ["text_a", "text_b"], + "out": ["bert_features"] + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": 2, + "return_probas": false, + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.11, + "hidden_keep_prob": 1.0, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 1.89e-05 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "y" + ], + "out": [ + "predictions" + ] + } + ], + "out": ["predictions"] + }, + "train": { + "epochs": 100, + "batch_size": 64, + "metrics": [ + "f1", + "accuracy" + ], + "validation_patience": 7, + "val_every_n_batches": 50, + "log_every_n_batches": 50, + "evaluation_targets": [ + "train", + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-tiny-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/paraphraser_convers_distilrubert_2L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/classifiers/paraphraser_convers_distilrubert_2L.tar.gz", + "subdir": "{MODELS_PATH}" + }, + { + "url": "http://files.deeppavlov.ai/datasets/paraphraser.zip", + "subdir": "{DOWNLOADS_PATH}/paraphraser_data" + }, + { + "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", + "subdir": "{DOWNLOADS_PATH}/paraphraser_data" + } + ] + } +} diff --git a/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_6L.json b/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_6L.json new file mode 100644 index 0000000000..c3f479ca07 --- /dev/null +++ b/deeppavlov/configs/classifiers/paraphraser_convers_distilrubert_6L.json @@ -0,0 +1,93 @@ +{ + "dataset_reader": { + "class_name": "paraphraser_reader", + "data_path": "{DOWNLOADS_PATH}/paraphraser_data", + "do_lower_case": false + }, + "dataset_iterator": { + "class_name": "siamese_iterator", + "seed": 243, + "len_valid": 500 + }, + "chainer": { + "in": ["text_a", "text_b"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 64, + "in": ["text_a", "text_b"], + "out": ["bert_features"] + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": 2, + "return_probas": false, + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.0, + "hidden_keep_prob": 0.67, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 7.22e-05 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "y" + ], + "out": [ + "predictions" + ] + } + ], + "out": ["predictions"] + }, + "train": { + "epochs": 100, + "batch_size": 64, + "metrics": [ + "f1", + 
"accuracy" + ], + "validation_patience": 7, + "val_every_n_batches": 50, + "log_every_n_batches": 50, + "evaluation_targets": [ + "train", + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-base-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/paraphraser_convers_distilrubert_6L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/classifiers/paraphraser_convers_distilrubert_6L.tar.gz", + "subdir": "{MODELS_PATH}" + }, + { + "url": "http://files.deeppavlov.ai/datasets/paraphraser.zip", + "subdir": "{DOWNLOADS_PATH}/paraphraser_data" + }, + { + "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", + "subdir": "{DOWNLOADS_PATH}/paraphraser_data" + } + ] + } +} diff --git a/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_2L.json b/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_2L.json new file mode 100644 index 0000000000..8042987116 --- /dev/null +++ b/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_2L.json @@ -0,0 +1,145 @@ +{ + "dataset_reader": { + "class_name": "basic_classification_reader", + "x": "text", + "y": "label", + "data_path": "{DOWNLOADS_PATH}/rusentiment/", + "train": "rusentiment_random_posts.csv", + "test": "rusentiment_test.csv" + }, + "dataset_iterator": { + "class_name": "basic_classification_iterator", + "seed": 42, + "split_seed": 23, + "field_to_split": "train", + "split_fields": [ + "train", + "valid" + ], + "split_proportions": [ + 0.9, + 0.1 + ] + }, + "chainer": { + "in": [ + "x" + ], + "in_y": [ + "y" + ], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": true, + "max_seq_length": 64, + "in": [ + "x" + ], + "out": [ + "bert_features" + ] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": [ + "y" + ], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": "y", + "out": "y_ids" + }, + { + "in": "y_ids", + "out": "y_onehot", + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.78, + "hidden_keep_prob": 0.89, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 7.22e-05 + }, + "learning_rate_drop_patience": 5, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "y_ids" + ], + "out": [ + "y_pred_probas" + ] + }, + { + "in": "y_pred_probas", + "out": "y_pred_ids", + "class_name": "proba2labels", + "max_proba": true + }, + { + "in": "y_pred_ids", + "out": "y_pred_labels", + "ref": "classes_vocab" + } + ], + "out": [ + "y_pred_labels" + ] + }, + "train": { + "epochs": 100, + "batch_size": 64, + "metrics": [ + "f1_weighted", + "f1_macro", + "accuracy", + { + "name": "roc_auc", + "inputs": [ + "y_onehot", + "y_pred_probas" + ] + } + ], + "validation_patience": 5, + "val_every_n_epochs": 1, + "log_every_n_epochs": 1, + "show_examples": false, + "evaluation_targets": [ + "train", + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, 
+ "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-tiny-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/rusentiment_convers_distilrubert_2L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/classifiers/rusentiment_convers_distilrubert_2L.tar.gz", + "subdir": "{MODELS_PATH}/classifiers/" + } + ] + } +} diff --git a/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_6L.json b/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_6L.json new file mode 100644 index 0000000000..c02f44938a --- /dev/null +++ b/deeppavlov/configs/classifiers/rusentiment_convers_distilrubert_6L.json @@ -0,0 +1,145 @@ +{ + "dataset_reader": { + "class_name": "basic_classification_reader", + "x": "text", + "y": "label", + "data_path": "{DOWNLOADS_PATH}/rusentiment/", + "train": "rusentiment_random_posts.csv", + "test": "rusentiment_test.csv" + }, + "dataset_iterator": { + "class_name": "basic_classification_iterator", + "seed": 42, + "split_seed": 23, + "field_to_split": "train", + "split_fields": [ + "train", + "valid" + ], + "split_proportions": [ + 0.9, + 0.1 + ] + }, + "chainer": { + "in": [ + "x" + ], + "in_y": [ + "y" + ], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": true, + "max_seq_length": 64, + "in": [ + "x" + ], + "out": [ + "bert_features" + ] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": [ + "y" + ], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": "y", + "out": "y_ids" + }, + { + "in": "y_ids", + "out": "y_onehot", + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.78, + "hidden_keep_prob": 0, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 4.56e-05 + }, + "learning_rate_drop_patience": 5, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "y_ids" + ], + "out": [ + "y_pred_probas" + ] + }, + { + "in": "y_pred_probas", + "out": "y_pred_ids", + "class_name": "proba2labels", + "max_proba": true + }, + { + "in": "y_pred_ids", + "out": "y_pred_labels", + "ref": "classes_vocab" + } + ], + "out": [ + "y_pred_labels" + ] + }, + "train": { + "epochs": 100, + "batch_size": 64, + "metrics": [ + "f1_weighted", + "f1_macro", + "accuracy", + { + "name": "roc_auc", + "inputs": [ + "y_onehot", + "y_pred_probas" + ] + } + ], + "validation_patience": 5, + "val_every_n_epochs": 1, + "log_every_n_epochs": 1, + "show_examples": false, + "evaluation_targets": [ + "train", + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-base-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/rusentiment_convers_distilrubert_6L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/classifiers/rusentiment_convers_distilrubert_6L.tar.gz", + "subdir": 
"{MODELS_PATH}/classifiers/" + } + ] + } +} diff --git a/deeppavlov/configs/classifiers/superglue/superglue_boolq_roberta_mnli.json b/deeppavlov/configs/classifiers/superglue/superglue_boolq_roberta_mnli.json new file mode 100644 index 0000000000..c68261480b --- /dev/null +++ b/deeppavlov/configs/classifiers/superglue/superglue_boolq_roberta_mnli.json @@ -0,0 +1,107 @@ +{ + "dataset_reader": { + "class_name": "huggingface_dataset_reader", + "path": "super_glue", + "name": "boolq", + "train": "train", + "valid": "validation", + "test": "test" + }, + "dataset_iterator": { + "class_name": "huggingface_dataset_iterator", + "features": ["question", "passage"], + "label": "label", + "seed": 42 + }, + "chainer": { + "in": ["question", "passage"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_preprocessor", + "vocab_file": "{BASE_MODEL}", + "do_lower_case": false, + "max_seq_length": 256, + "in": ["question", "passage"], + "out": ["bert_features"] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": ["y"], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": ["y"], + "out": ["y_ids"] + }, + { + "in": ["y_ids"], + "out": ["y_onehot"], + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_classifier", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{BASE_MODEL}", + "is_binary": "{BINARY_CLASSIFICATION}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2e-05, + "weight_decay": 0.1 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 2.0, + "in": ["bert_features"], + "in_y": ["y_ids"], + "out": ["y_pred_probas"] + }, + { + "in": ["y_pred_probas"], + "out": ["y_pred_ids"], + "class_name": "proba2labels", + "is_binary": "{BINARY_CLASSIFICATION}", + "confidence_threshold": 0.5 + }, + { + "in": ["y_pred_ids"], + "out": ["y_pred_labels"], + "ref": "classes_vocab" + } + ], + "out": ["y_pred_labels"] + }, + "train": { + "batch_size": 24, + "metrics": ["accuracy"], + "validation_patience": 10, + "val_every_n_epochs": 1, + "log_every_n_epochs": 1, + "show_examples": false, + "evaluation_targets": ["train", "valid"], + "class_name": "torch_trainer", + "tensorboard_log_dir": "{MODEL_PATH}/", + "pytest_max_batches": 2, + "pytest_batch_size": 2 + }, + "metadata": { + "variables": { + "BASE_MODEL": "roberta-large-mnli", + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/superglue_boolq_roberta_mnli", + "BINARY_CLASSIFICATION": true + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/superglue/superglue_boolq_roberta_mnli.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/classifiers/superglue/superglue_copa_roberta.json b/deeppavlov/configs/classifiers/superglue/superglue_copa_roberta.json new file mode 100644 index 0000000000..1a9fda443d --- /dev/null +++ b/deeppavlov/configs/classifiers/superglue/superglue_copa_roberta.json @@ -0,0 +1,97 @@ +{ + "dataset_reader": { + "class_name": "huggingface_dataset_reader", + "path": "super_glue", + "name": "copa", + "train": "train", + "valid": "validation", + "test": "test" + }, + "dataset_iterator": { + "class_name": "huggingface_dataset_iterator", + "features": ["contexts", "choices"], + "label": "label", + "seed": 42 + 
}, + "chainer": { + "in": ["contexts_list", "choices_list"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_multiplechoice_preprocessor", + "vocab_file": "{BASE_MODEL}", + "do_lower_case": false, + "max_seq_length": 64, + "in": ["contexts_list", "choices_list"], + "out": ["bert_features"] + }, + { + "id": "classes_vocab", + "class_name": "simple_vocab", + "fit_on": ["y"], + "save_path": "{MODEL_PATH}/classes.dict", + "load_path": "{MODEL_PATH}/classes.dict", + "in": ["y"], + "out": ["y_ids"] + }, + { + "in": ["y_ids"], + "out": ["y_onehot"], + "class_name": "one_hotter", + "depth": "#classes_vocab.len", + "single_vector": true + }, + { + "class_name": "torch_transformers_multiplechoice", + "n_classes": "#classes_vocab.len", + "return_probas": true, + "pretrained_bert": "{BASE_MODEL}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2e-05 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 2.0, + "in": ["bert_features"], + "in_y": ["y_ids"], + "out": ["y_pred_probas"] + }, + { + "in": ["y_pred_probas"], + "out": ["y_pred_ids"], + "class_name": "proba2labels", + "max_proba": true + }, + { + "in": ["y_pred_ids"], + "out": ["y_pred_labels"], + "ref": "classes_vocab" + } + ], + "out": ["y_pred_labels"] + }, + "train": { + "batch_size": 16, + "metrics": ["accuracy"], + "validation_patience": 10, + "val_every_n_epochs": 1, + "log_every_n_epochs": 1, + "show_examples": false, + "evaluation_targets": ["train", "valid"], + "class_name": "torch_trainer", + "tensorboard_log_dir": "{MODEL_PATH}/", + "pytest_max_batches": 2, + "pytest_batch_size": 2 + }, + "metadata": { + "variables": { + "BASE_MODEL": "roberta-large", + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/classifiers/superglue_copa_{BASE_MODEL}" + } + } +} diff --git a/deeppavlov/configs/ner/ner_conll2003_torch_bert.json b/deeppavlov/configs/ner/ner_conll2003_torch_bert.json index c7df510000..21d338dcff 100644 --- a/deeppavlov/configs/ner/ner_conll2003_torch_bert.json +++ b/deeppavlov/configs/ner/ner_conll2003_torch_bert.json @@ -64,16 +64,16 @@ ], "optimizer": "AdamW", "optimizer_parameters": { - "lr": 2e-5, - "weight_decay": 1e-6, + "lr": 2e-05, + "weight_decay": 1e-06, "betas": [ 0.9, 0.999 ], - "eps": 1e-6 + "eps": 1e-06 }, "clip_norm": 1.0, - "min_learning_rate": 1e-7, + "min_learning_rate": 1e-07, "learning_rate_drop_patience": 30, "learning_rate_drop_div": 1.5, "load_before_drop": true, @@ -142,12 +142,12 @@ "ROOT_PATH": "~/.deeppavlov", "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", "MODELS_PATH": "{ROOT_PATH}/models", - "TRANSFORMER": "bert-base-uncased", - "MODEL_PATH": "{MODELS_PATH}/ner_conll2003_torch_bert" + "TRANSFORMER": "bert-base-cased", + "MODEL_PATH": "{MODELS_PATH}/ner_conll2003_torch_bert/{TRANSFORMER}" }, "download": [ { - "url": "http://files.deeppavlov.ai/v1/ner/ner_conll2003_torch_bert.tar.gz", + "url": "http://files.deeppavlov.ai/0.16/ner/ner_conll2003_torch_bert.tar.gz", "subdir": "{MODELS_PATH}" } ] diff --git a/deeppavlov/configs/ner/ner_ontonotes_bert_mult_torch.json b/deeppavlov/configs/ner/ner_ontonotes_bert_mult_torch.json new file mode 100644 index 0000000000..ddced871ac --- /dev/null +++ b/deeppavlov/configs/ner/ner_ontonotes_bert_mult_torch.json @@ -0,0 +1,106 @@ +{ + "dataset_reader": { + "class_name": "conll2003_reader", + "data_path": "{DOWNLOADS_PATH}/ontonotes/", + "dataset_name": 
"ontonotes", + "provide_pos": false + }, + "dataset_iterator": { + "class_name": "data_learning_iterator" + }, + "chainer": { + "in": ["x"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_ner_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 512, + "max_subword_length": 15, + "token_masking_prob": 0.0, + "in": ["x"], + "out": ["x_tokens", "x_subword_tokens", "x_subword_tok_ids", "startofword_markers", "attention_mask"] + }, + { + "id": "tag_vocab", + "class_name": "simple_vocab", + "unk_token": ["O"], + "pad_with_zeros": true, + "save_path": "{MODEL_PATH}/tag.dict", + "load_path": "{MODEL_PATH}/tag.dict", + "fit_on": ["y"], + "in": ["y"], + "out": ["y_ind"] + }, + { + "class_name": "torch_transformers_sequence_tagger", + "n_tags": "#tag_vocab.len", + "pretrained_bert": "{TRANSFORMER}", + "attention_probs_keep_prob": 0.5, + "return_probas": false, + "encoder_layer_ids": [-1], + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2e-05, + "weight_decay": 1e-06, + "betas": [0.9, 0.999], + "eps": 1e-06 + }, + "clip_norm": 1.0, + "min_learning_rate": 1e-07, + "learning_rate_drop_patience": 30, + "learning_rate_drop_div": 1.5, + "load_before_drop": true, + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "in": ["x_subword_tok_ids", "attention_mask", "startofword_markers"], + "in_y": ["y_ind"], + "out": ["y_pred_ind"] + }, + { + "ref": "tag_vocab", + "in": ["y_pred_ind"], + "out": ["y_pred"] + } + ], + "out": ["x_tokens", "y_pred"] + }, + "train": { + "epochs": 30, + "batch_size": 10, + "metrics": [ + { + "name": "ner_f1", + "inputs": ["y", "y_pred"] + }, + { + "name": "ner_token_f1", + "inputs": ["y", "y_pred"] + } + ], + "validation_patience": 100, + "val_every_n_batches": 20, + "log_every_n_batches": 20, + "show_examples": false, + "pytest_max_batches": 2, + "pytest_batch_size": 8, + "evaluation_targets": ["valid", "test"], + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "TRANSFORMER": "bert-base-multilingual-cased", + "MODEL_PATH": "{MODELS_PATH}/ner_ontonotes_bert_mult_torch/{TRANSFORMER}" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/ner/ner_ontonotes_bert_mult_torch.tar.gz", + "subdir": "{ROOT_PATH}/models" + } + ] + } +} diff --git a/deeppavlov/configs/ner/ner_ontonotes_bert_torch.json b/deeppavlov/configs/ner/ner_ontonotes_bert_torch.json new file mode 100644 index 0000000000..ce9360ee80 --- /dev/null +++ b/deeppavlov/configs/ner/ner_ontonotes_bert_torch.json @@ -0,0 +1,106 @@ +{ + "dataset_reader": { + "class_name": "conll2003_reader", + "data_path": "{DOWNLOADS_PATH}/ontonotes/", + "dataset_name": "ontonotes", + "provide_pos": false + }, + "dataset_iterator": { + "class_name": "data_learning_iterator" + }, + "chainer": { + "in": ["x"], + "in_y": ["y"], + "pipe": [ + { + "class_name": "torch_transformers_ner_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 512, + "max_subword_length": 15, + "token_masking_prob": 0.0, + "in": ["x"], + "out": ["x_tokens", "x_subword_tokens", "x_subword_tok_ids", "startofword_markers", "attention_mask"] + }, + { + "id": "tag_vocab", + "class_name": "simple_vocab", + "unk_token": ["O"], + "pad_with_zeros": true, + "save_path": "{MODEL_PATH}/tag.dict", + "load_path": "{MODEL_PATH}/tag.dict", + "fit_on": ["y"], + "in": ["y"], + "out": ["y_ind"] + }, + { + 
"class_name": "torch_transformers_sequence_tagger", + "n_tags": "#tag_vocab.len", + "pretrained_bert": "{TRANSFORMER}", + "attention_probs_keep_prob": 0.5, + "return_probas": false, + "encoder_layer_ids": [-1], + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2e-05, + "weight_decay": 1e-06, + "betas": [0.9, 0.999], + "eps": 1e-06 + }, + "clip_norm": 1.0, + "min_learning_rate": 1e-07, + "learning_rate_drop_patience": 30, + "learning_rate_drop_div": 1.5, + "load_before_drop": true, + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "in": ["x_subword_tok_ids", "attention_mask", "startofword_markers"], + "in_y": ["y_ind"], + "out": ["y_pred_ind"] + }, + { + "ref": "tag_vocab", + "in": ["y_pred_ind"], + "out": ["y_pred"] + } + ], + "out": ["x_tokens", "y_pred"] + }, + "train": { + "epochs": 30, + "batch_size": 10, + "metrics": [ + { + "name": "ner_f1", + "inputs": ["y", "y_pred"] + }, + { + "name": "ner_token_f1", + "inputs": ["y", "y_pred"] + } + ], + "validation_patience": 100, + "val_every_n_batches": 20, + "log_every_n_batches": 20, + "show_examples": false, + "pytest_max_batches": 2, + "pytest_batch_size": 8, + "evaluation_targets": ["valid", "test"], + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "TRANSFORMER": "bert-base-cased", + "MODEL_PATH": "{MODELS_PATH}/ner_ontonotes_bert_torch/{TRANSFORMER}" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/ner/ner_ontonotes_bert_torch.tar.gz", + "subdir": "{ROOT_PATH}/models" + } + ] + } +} \ No newline at end of file diff --git a/deeppavlov/configs/ner/ner_rus_bert_torch.json b/deeppavlov/configs/ner/ner_rus_bert_torch.json index 0c8e5c568b..8a4c51ff5f 100644 --- a/deeppavlov/configs/ner/ner_rus_bert_torch.json +++ b/deeppavlov/configs/ner/ner_rus_bert_torch.json @@ -64,16 +64,16 @@ ], "optimizer": "AdamW", "optimizer_parameters": { - "lr": 2e-5, - "weight_decay": 1e-6, + "lr": 2e-05, + "weight_decay": 1e-06, "betas": [ 0.9, 0.999 ], - "eps": 1e-6 + "eps": 1e-06 }, "clip_norm": 1.0, - "min_learning_rate": 1e-7, + "min_learning_rate": 1e-07, "learning_rate_drop_patience": 30, "learning_rate_drop_div": 1.5, "load_before_drop": true, @@ -147,7 +147,7 @@ }, "download": [ { - "url": "http://files.deeppavlov.ai/v1/ner/ner_rus_bert_torch.tar.gz", + "url": "http://files.deeppavlov.ai/0.16/ner/ner_rus_bert_torch.tar.gz", "subdir": "{MODELS_PATH}" } ] diff --git a/deeppavlov/configs/ner/ner_rus_convers_distilrubert_2L.json b/deeppavlov/configs/ner/ner_rus_convers_distilrubert_2L.json new file mode 100644 index 0000000000..15c931c1eb --- /dev/null +++ b/deeppavlov/configs/ner/ner_rus_convers_distilrubert_2L.json @@ -0,0 +1,155 @@ + { + "dataset_reader": { + "class_name": "conll2003_reader", + "data_path": "{DOWNLOADS_PATH}/total_rus/", + "dataset_name": "collection_rus", + "provide_pos": false + }, + "dataset_iterator": { + "class_name": "data_learning_iterator" + }, + "chainer": { + "in": [ + "x" + ], + "in_y": [ + "y" + ], + "pipe": [ + { + "class_name": "torch_transformers_ner_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 512, + "max_subword_length": 15, + "token_masking_prob": 0.0, + "in": [ + "x" + ], + "out": [ + "x_tokens", + "x_subword_tokens", + "x_subword_tok_ids", + "startofword_markers", + "attention_mask" + ] + }, + { + "id": "tag_vocab", + "class_name": "simple_vocab", + "unk_token": [ + "O" + ], + 
"pad_with_zeros": true, + "save_path": "{MODEL_PATH}/tag.dict", + "load_path": "{MODEL_PATH}/tag.dict", + "fit_on": [ + "y" + ], + "in": [ + "y" + ], + "out": [ + "y_ind" + ] + }, + { + "class_name": "torch_transformers_sequence_tagger", + "n_tags": "#tag_vocab.len", + "pretrained_bert": "{TRANSFORMER}", + "attention_probs_keep_prob": 0.11, + "hidden_keep_prob": 0.11, + "return_probas": false, + "encoder_layer_ids": [ + -1 + ], + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 5.45e-05, + "weight_decay": 1e-06, + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-06 + }, + "clip_norm": 1.0, + "min_learning_rate": 1e-07, + "learning_rate_drop_patience": 30, + "learning_rate_drop_div": 1.5, + "load_before_drop": true, + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "in": [ + "x_subword_tok_ids", + "attention_mask", + "startofword_markers" + ], + "in_y": [ + "y_ind" + ], + "out": [ + "y_pred_ind" + ] + }, + { + "ref": "tag_vocab", + "in": [ + "y_pred_ind" + ], + "out": [ + "y_pred" + ] + } + ], + "out": [ + "x_tokens", + "y_pred" + ] + }, + "train": { + "epochs": 30, + "batch_size": 10, + "metrics": [ + { + "name": "ner_f1", + "inputs": [ + "y", + "y_pred" + ] + }, + { + "name": "ner_token_f1", + "inputs": [ + "y", + "y_pred" + ] + } + ], + "validation_patience": 100, + "val_every_n_batches": 20, + "log_every_n_batches": 20, + "show_examples": false, + "evaluation_targets": [ + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/ner_rus_conversational_distilrubert_2L", + "TRANSFORMER": "DeepPavlov/distilrubert-tiny-cased-conversational" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/ner/ner_rus_conversational_distilrubert_2L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/ner/ner_rus_convers_distilrubert_6L.json b/deeppavlov/configs/ner/ner_rus_convers_distilrubert_6L.json new file mode 100644 index 0000000000..b2534426a6 --- /dev/null +++ b/deeppavlov/configs/ner/ner_rus_convers_distilrubert_6L.json @@ -0,0 +1,155 @@ + { + "dataset_reader": { + "class_name": "conll2003_reader", + "data_path": "{DOWNLOADS_PATH}/total_rus/", + "dataset_name": "collection_rus", + "provide_pos": false + }, + "dataset_iterator": { + "class_name": "data_learning_iterator" + }, + "chainer": { + "in": [ + "x" + ], + "in_y": [ + "y" + ], + "pipe": [ + { + "class_name": "torch_transformers_ner_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": false, + "max_seq_length": 512, + "max_subword_length": 15, + "token_masking_prob": 0.0, + "in": [ + "x" + ], + "out": [ + "x_tokens", + "x_subword_tokens", + "x_subword_tok_ids", + "startofword_markers", + "attention_mask" + ] + }, + { + "id": "tag_vocab", + "class_name": "simple_vocab", + "unk_token": [ + "O" + ], + "pad_with_zeros": true, + "save_path": "{MODEL_PATH}/tag.dict", + "load_path": "{MODEL_PATH}/tag.dict", + "fit_on": [ + "y" + ], + "in": [ + "y" + ], + "out": [ + "y_ind" + ] + }, + { + "class_name": "torch_transformers_sequence_tagger", + "n_tags": "#tag_vocab.len", + "pretrained_bert": "{TRANSFORMER}", + "attention_probs_keep_prob": 0.44, + "hidden_keep_prob": 0.89, + "return_probas": false, + "encoder_layer_ids": [ + -1 + ], + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2.78e-05, + "weight_decay": 1e-06, + "betas": [ + 0.9, 
+ 0.999 + ], + "eps": 1e-06 + }, + "clip_norm": 1.0, + "min_learning_rate": 1e-07, + "learning_rate_drop_patience": 30, + "learning_rate_drop_div": 1.5, + "load_before_drop": true, + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "in": [ + "x_subword_tok_ids", + "attention_mask", + "startofword_markers" + ], + "in_y": [ + "y_ind" + ], + "out": [ + "y_pred_ind" + ] + }, + { + "ref": "tag_vocab", + "in": [ + "y_pred_ind" + ], + "out": [ + "y_pred" + ] + } + ], + "out": [ + "x_tokens", + "y_pred" + ] + }, + "train": { + "epochs": 30, + "batch_size": 10, + "metrics": [ + { + "name": "ner_f1", + "inputs": [ + "y", + "y_pred" + ] + }, + { + "name": "ner_token_f1", + "inputs": [ + "y", + "y_pred" + ] + } + ], + "validation_patience": 100, + "val_every_n_batches": 20, + "log_every_n_batches": 20, + "show_examples": false, + "evaluation_targets": [ + "valid", + "test" + ], + "tensorboard_log_dir": "{MODEL_PATH}/", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/ner_rus_conversational_distilrubert_6L", + "TRANSFORMER": "DeepPavlov/distilrubert-base-cased-conversational" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/ner/ner_rus_conversational_distilrubert_6L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/ranking/paraphrase_ident_elmo_interact.json b/deeppavlov/configs/ranking/paraphrase_ident_elmo_interact.json deleted file mode 100644 index 458064f0a7..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_elmo_interact.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "dataset_reader": { - "class_name": "paraphraser_reader", - "data_path": "{DOWNLOADS_PATH}/paraphraser_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "elmo_embedder", - "elmo_output_names": [ - "elmo" - ], - "mini_batch_size": 8, - "spec": "{DOWNLOADS_PATH}/embeddings/elmo_news_wmt11-16-simple_reduce_para_pre_fine_tuned_ep1" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 0.001, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_model/model_weights.h5", - "load_path": 
"{MODELS_PATH}/paraphraser_elmo_ft_pre_1_model/model_weights.h5" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "ranking": false, - "attention": true, - "batch_size": "#model.batch_size", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 1, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": ["f1", "acc", "log_loss"], - "metrics": ["f1", "acc", "log_loss"], - "validation_patience": 1, - "val_every_n_epochs": 1, - "log_every_n_batches": 24, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/paraphraser_elmo_ft_pre_1_model.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/elmo_news_wmt11-16-simple_reduce_para_pretrain_fine_tuned_ep1.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings/elmo_news_wmt11-16-simple_reduce_para_pre_fine_tuned_ep1" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_elmo.json b/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_elmo.json deleted file mode 100644 index 6916a169d9..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_elmo.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "dataset_reader": { - "class_name": "paraphraser_reader", - "data_path": "{DOWNLOADS_PATH}/paraphraser_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "pytest_num_samples": 384, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "elmo_embedder", - "elmo_output_names": [ - "elmo" - ], - "mini_batch_size": 8, - "spec": "{DOWNLOADS_PATH}/embeddings/elmo_news_wmt11-16-simple_reduce_para_pre_fine_tuned_ep1" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - 
"batch_size": 256, - "save_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_model/model_weights.h5", - "load_path": "{MODELS_PATH}/paraphraser_elmo_ft_pre_1_model/model_weights.h5", - "preprocess": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 1, - "batch_size": 192, - "pytest_max_batches": 2, - "train_metrics": ["f1", "acc", "log_loss"], - "metrics": ["f1", "acc", "log_loss"], - "validation_patience": 1, - "val_every_n_epochs": 1, - "log_every_n_batches": 24, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/elmo_news_wmt11-16-simple_reduce_para_pretrain_fine_tuned_ep1.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings/elmo_news_wmt11-16-simple_reduce_para_pre_fine_tuned_ep1" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_pretrain.json b/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_pretrain.json deleted file mode 100644 index cc084628ae..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_pretrain.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "dataset_reader": { - "class_name": "paraphraser_pretrain_reader", - "data_path": "{DOWNLOADS_PATH}/paraphraser_pretrain_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_pretrain_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/paraphraser_pretrain_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_pretrain_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/paraphraser_pretrain_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/ft_native_300_ru_wiki_lenta_lower_case.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/pretrained_model/pretrained_weights.h5", - "load_path": "{MODELS_PATH}/pretrained_model/pretrained_weights.h5", - "preprocess": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 10, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": 
["f1", "acc", "log_loss"], - "metrics": ["f1", "acc", "log_loss"], - "validation_patience": 2, - "val_every_n_epochs": 1, - "log_every_n_batches": 12, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_pretrain_train.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_pretrain_data" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_pretrain_val.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_pretrain_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/ft_native_300_ru_wiki_lenta_lower_case.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_tune.json b/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_tune.json deleted file mode 100644 index 398c343936..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_paraphraser_tune.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "dataset_reader": { - "class_name": "paraphraser_reader", - "data_path": "{DOWNLOADS_PATH}/paraphraser_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/paraphraser_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/paraphraser_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/ft_native_300_ru_wiki_lenta_lower_case.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/paraphraser_model/model_weights.h5", - "load_path": "{MODELS_PATH}/pretrained_model/pretrained_weights.h5", - "preprocess": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 1, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": ["f1", "acc", "log_loss"], - "metrics": ["f1", "acc", "log_loss"], - "validation_patience": 1, - "val_every_n_epochs": 1, - "log_every_n_batches": 12, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": 
"http://files.deeppavlov.ai/datasets/paraphraser.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/ft_native_300_ru_wiki_lenta_lower_case.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/paraphrase_ident_paraphraser_pretrained.tar.gz", - "subdir": "{MODELS_PATH}/pretrained_model" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_qqp.json b/deeppavlov/configs/ranking/paraphrase_ident_qqp.json deleted file mode 100644 index 5a6f5bb4f5..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_qqp.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "dataset_reader": { - "class_name": "qqp_reader", - "data_path": "{DOWNLOADS_PATH}/qqp_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "len_valid": 20000, - "len_test": 20000, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "load_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "preprocess": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "metric_optimization": "maximize", - "train_metrics": ["acc", "log_loss", "f1"], - "metrics": ["acc", "log_loss", "f1"], - "validation_patience": 2, - "val_every_n_epochs": 1, - "log_every_n_batches": 525, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/quora_question_pairs.zip", - "subdir": "{DOWNLOADS_PATH}/qqp_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm.json 
b/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm.json deleted file mode 100644 index 53c1f01f97..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "dataset_reader": { - "class_name": "qqp_reader", - "data_path": "{DOWNLOADS_PATH}/qqp_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "len_valid": 20000, - "len_test": 20000, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs_bilstm/sent.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs_bilstm/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs_bilstm/tok.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs_bilstm/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/qqp_model_bilstm/model_weights.h5", - "load_path": "{MODELS_PATH}/qqp_model_bilstm/model_weights.h5", - "preprocess": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "metric_optimization": "minimize", - "train_metrics": ["log_loss", "acc", "f1"], - "metrics": ["log_loss", "acc", "f1"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 525, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/quora_question_pairs.zip", - "subdir": "{DOWNLOADS_PATH}/qqp_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm_interact.json b/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm_interact.json deleted file mode 100644 index de19ee9901..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_qqp_bilstm_interact.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "dataset_reader": { - "class_name": "qqp_reader", - "data_path": "{DOWNLOADS_PATH}/qqp_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "len_valid": 20000, - "len_test": 20000, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 
28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "load_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "preprocess": "#preproc.__call__" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "ranking": false, - "attention": true, - "batch_size": "#model.batch_size", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": ["log_loss", "acc", "f1"], - "metrics": ["log_loss", "acc", "f1"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 525, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/paraphrase_ident_qqp_bilstm.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/quora_question_pairs.zip", - "subdir": "{DOWNLOADS_PATH}/qqp_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/paraphrase_ident_qqp_interact.json b/deeppavlov/configs/ranking/paraphrase_ident_qqp_interact.json deleted file mode 100644 index 626e3bb548..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_qqp_interact.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "dataset_reader": { - "class_name": "qqp_reader", - "data_path": "{DOWNLOADS_PATH}/qqp_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "len_valid": 20000, - "len_test": 20000, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/qqp_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": 
"{MODELS_PATH}/qqp_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/qqp_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", - "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "load_path": "{MODELS_PATH}/qqp_model/model_weights.h5", - "preprocess": "#preproc.__call__" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "ranking": false, - "attention": true, - "batch_size": "#model.batch_size", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": ["log_loss", "acc", "f1"], - "metrics": ["log_loss", "acc", "f1"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 525, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/paraphrase_ident_qqp_27112020.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/quora_question_pairs.zip", - "subdir": "{DOWNLOADS_PATH}/qqp_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/ranking/paraphrase_ident_tune_interact.json b/deeppavlov/configs/ranking/paraphrase_ident_tune_interact.json deleted file mode 100644 index 63b287f74e..0000000000 --- a/deeppavlov/configs/ranking/paraphrase_ident_tune_interact.json +++ /dev/null @@ -1,121 +0,0 @@ -{ - "dataset_reader": { - "class_name": "paraphraser_reader", - "data_path": "{DOWNLOADS_PATH}/paraphraser_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "max_sequence_length": 28, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/paraphraser_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/paraphraser_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/paraphraser_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/ft_native_300_ru_wiki_lenta_lower_case.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", 
- "class_name": "mpm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "attention": true, - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 200, - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/paraphraser_model/model_weights.h5", - "load_path": "{MODELS_PATH}/paraphraser_model/model_weights.h5", - "preprocess": "#preproc.__call__" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "ranking": false, - "attention": true, - "batch_size": "#model.batch_size", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 20, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": ["f1", "acc", "log_loss"], - "metrics": ["f1", "acc", "log_loss"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 12, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/paraphrase_ident_paraphraser_tuned.tar.gz", - "subdir": "{MODELS_PATH}/paraphraser_model" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/datasets/paraphraser_gold.zip", - "subdir": "{DOWNLOADS_PATH}/paraphraser_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/ft_native_300_ru_wiki_lenta_lower_case.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/ranking_insurance.json b/deeppavlov/configs/ranking/ranking_insurance.json deleted file mode 100644 index bdb6572365..0000000000 --- a/deeppavlov/configs/ranking/ranking_insurance.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "dataset_reader": { - "class_name": "insurance_reader", - "data_path": "{DOWNLOADS_PATH}/insurance_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "num_ranking_samples": 500, - "pytest_num_ranking_samples": 2, - "max_sequence_length": 200, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/insurance_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/insurance_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "split_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/insurance_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/insurance_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": 
"#preproc.use_matrix", - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "reccurent": "bilstm", - "max_pooling": true, - "shared_weights": true, - "hidden_dim": 300, - "learning_rate": 1e-3, - "triplet_loss": true, - "hard_triplets": false, - "margin": 0.1, - "batch_size": 256, - "save_path": "{MODELS_PATH}/insurance_model/model_weights.h5", - "load_path": "{MODELS_PATH}/insurance_model/model_weights.h5", - "preprocess": "#preproc.__call__", - "interact_pred_num": 3 - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": ["r@1_insQA", "rank_response"], - "validation_patience": 5, - "val_every_n_epochs": 5, - "log_every_n_batches": 24, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/insuranceQA-master.zip", - "subdir": "{DOWNLOADS_PATH}/insurance_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/ranking_insurance_interact.json b/deeppavlov/configs/ranking/ranking_insurance_interact.json deleted file mode 100644 index 53a9fc8839..0000000000 --- a/deeppavlov/configs/ranking/ranking_insurance_interact.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "dataset_reader": { - "class_name": "insurance_reader", - "data_path": "{DOWNLOADS_PATH}/insurance_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": false, - "num_ranking_samples": 500, - "pytest_num_ranking_samples": 2, - "max_sequence_length": 200, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/insurance_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/insurance_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "split_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/insurance_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/insurance_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "max_sequence_length": "#preproc.max_sequence_length", - "emb_matrix": "#embeddings.emb_mat", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "reccurent": "bilstm", - "max_pooling": true, - "shared_weights": true, - "hidden_dim": 300, - "learning_rate": 1e-3, - "triplet_loss": true, - "hard_triplets": false, - "margin": 0.1, - "batch_size": 256, - "save_path": "{MODELS_PATH}/insurance_model/model_weights.h5", - "load_path": "{MODELS_PATH}/insurance_model/model_weights.h5", - "preprocess": "#preproc.__call__", - "pytest_interact_pred_num": 3 - }, - 
{ - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "batch_size": "#model.batch_size", - "interact_pred_num": 3, - "responses": "#siam_sent_vocab", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": ["r@1_insQA", "rank_response"], - "validation_patience": 5, - "val_every_n_epochs": 5, - "log_every_n_batches": 24, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/insurance_ranking.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/insuranceQA-master.zip", - "subdir": "{DOWNLOADS_PATH}/insurance_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam.json b/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam.json deleted file mode 100644 index fab0f536f1..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam.json +++ /dev/null @@ -1,131 +0,0 @@ -{ - "info": "The config is for training of DAM on Ubuntu Dialogue Corpus v1 using prepared Word2vec embeddings", - "dataset_reader": { - "class_name": "ubuntu_v1_mt_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v1_data", - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "padding": "pre" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "shuffle": true, - "seed": 42 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "class_name": "split_tokenizer", - "id": "tok_1" - }, - { - "class_name": "simple_vocab", - "special_tokens": ["", ""], - "unk_token": "", - "fit_on": ["x"], - "id": "vocab_1", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/vocabs/int_tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/vocabs/int_tok.dict" - }, - { - "id": "word2vec_embedder", - "class_name": "glove", - "dim": 200, - "load_path": "{DOWNLOADS_PATH}/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt" - }, - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/preproc/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/preproc/tok.dict", - "num_ranking_samples": 10, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": 50, - "embedding_dim": 200, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "tokenizer": { - "ref": "tok_1", - "notes": "use defined tokenizer" - }, - "vocab": { - "ref": "vocab_1", - "notes": "use vocab built for tokenized data" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#word2vec_embedder", - "vocab": "#vocab_1" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "dam_nn", - "stack_num": 5, - "is_positional": true, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": "#preproc.max_sequence_length", - "embedding_dim": "#word2vec_embedder.dim", - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "batch_size": 100, - "seed": 42, - "decay_steps": 2000, - 
"save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/model_dam/model", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/model_dam/model" - } - ], - "out": [ - "y_predicted" - ] - }, - "train": { - "class_name": "nn_trainer", - "epochs": 8, - "batch_size": 100, - "shuffle": true, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": [ - "r@1", - "r@2", - "r@5", - "rank_response" - ], - "validation_patience": 3, - "val_every_n_epochs": 1, - "log_every_n_batches": 100, - "evaluation_targets": [ - "valid", - "test" - ], - "tensorboard_log_dir": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam/logs_dam/" - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models", - "NUM_CONTEXT_TURNS": 10 - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v1_mt_word2vec_dam.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/ubuntu_v1_data.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v1_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam_transformer.json b/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam_transformer.json deleted file mode 100644 index 429eb5d32d..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_dam_transformer.json +++ /dev/null @@ -1,134 +0,0 @@ -{ - "info": "The config is for training or evaluation of DAM_USE-T on Ubuntu Dialogue Corpus v1 using prepared Word2vec embeddings", - "dataset_reader": { - "class_name": "ubuntu_v1_mt_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v1_data", - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "padding": "pre" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "shuffle": true, - "seed": 42 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "class_name": "split_tokenizer", - "id": "tok_1" - }, - { - "class_name": "simple_vocab", - "special_tokens": ["", ""], - "unk_token": "", - "fit_on": ["x"], - "id": "vocab_1", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/vocabs/int_tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/vocabs/int_tok.dict" - }, - { - "id": "word2vec_embedder", - "class_name": "glove", - "dim": 200, - "load_path": "{DOWNLOADS_PATH}/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt" - }, - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/preproc/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/preproc/tok.dict", - "num_ranking_samples": 10, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": 50, - "embedding_dim": 200, - "add_raw_text": true, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "tokenizer": { - "ref": "tok_1", - "notes": "use defined tokenizer" - }, - "vocab": { - "ref": "vocab_1", - "notes": "use vocab built for tokenized data" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#word2vec_embedder", - "vocab": "#vocab_1" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "dam_nn_use_transformer", - "stack_num": 5, - "is_positional": true, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": "#preproc.max_sequence_length", - 
"embedding_dim": "#word2vec_embedder.dim", - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "batch_size": 100, - "seed": 42, - "decay_steps": 2000, - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/model_dam/model", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/model_dam/model" - } - ], - "out": [ - "y_predicted" - ] - }, - "train": { - "class_name": "nn_trainer", - "epochs": 8, - "batch_size": 100, - "shuffle": true, - "pytest_max_batches": 2, - "train_metrics": [], - "validate_best": true, - "test_best": true, - "metrics": [ - "r@1", - "r@2", - "r@5", - "rank_response" - ], - "validation_patience": 1, - "val_every_n_epochs": 1, - "log_every_n_batches": 100, - "evaluation_targets": [ - "valid", - "test" - ], - "tensorboard_log_dir": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/logs_dam/" - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models", - "NUM_CONTEXT_TURNS": 10 - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v1_mt_word2vec_dam_transformer.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/ubuntu_v1_data.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v1_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_smn.json b/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_smn.json deleted file mode 100644 index d2f44feafb..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v1_mt_word2vec_smn.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "info": "The config is for training of SMN on Ubuntu Dialogue Corpus v1 using prepared Word2vec embeddings", - "dataset_reader": { - "class_name": "ubuntu_v1_mt_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v1_data", - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "padding": "pre" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "shuffle": true, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "class_name": "split_tokenizer", - "id": "tok_1" - }, - { - "class_name": "simple_vocab", - "special_tokens": ["", ""], - "unk_token": "", - "fit_on": ["x"], - "id": "vocab_1", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/vocabs/int_tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/vocabs/int_tok.dict" - }, - { - "id": "word2vec_embedder", - "class_name": "glove", - "dim": 200, - "load_path": "{DOWNLOADS_PATH}/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt" - }, - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/preproc/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/preproc/tok.dict", - "num_ranking_samples": 10, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": 50, - "embedding_dim": 200, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "tokenizer": { - "ref": "tok_1", - "notes": "use defined tokenizer" - }, - "vocab": { - "ref": "vocab_1", - "notes": "use vocab built for tokenized data" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#word2vec_embedder", - "vocab": "#vocab_1" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "smn_nn", - "num_context_turns": 
"{NUM_CONTEXT_TURNS}", - "max_sequence_length": "#preproc.max_sequence_length", - "embedding_dim": "#word2vec_embedder.dim", - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "batch_size": 500, - "seed": 65, - "save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/model_smn/model", - "load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/model_smn/model" - } - ], - "out": [ - "y_predicted" - ] - }, - "train": { - "class_name": "nn_trainer", - "epochs": 8, - "batch_size": 500, - "shuffle": true, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": [ - "r@1", - "r@2", - "r@5", - "rank_response" - ], - "validation_patience": 3, - "val_every_n_epochs": 1, - "log_every_n_batches": 100, - "evaluation_targets": [ - "valid", - "test" - ], - "tensorboard_log_dir": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_smn/logs_smn/" - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models", - "NUM_CONTEXT_TURNS": 10 - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v1_mt_word2vec_smn.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/ubuntu_v1_data.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v1_data" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v2.json b/deeppavlov/configs/ranking/ranking_ubuntu_v2.json deleted file mode 100644 index c71543371e..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v2.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "dataset_reader": { - "class_name": "ubuntu_v2_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v2_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": true, - "num_ranking_samples": 10, - "max_sequence_length": 50, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/ubuntu_v2_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/ubuntu_v2_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "max_sequence_length": "#preproc.max_sequence_length", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 300, - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "interact_pred_num": 3, - "save_path": "{MODELS_PATH}/ubuntu_v2_model/model_weights.h5", - "load_path": "{MODELS_PATH}/ubuntu_v2_model/model_weights.h5" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - 
"train_metrics": [], - "metrics": ["r@1", "rank_response"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 1000, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/datasets/ubuntu_v2_data.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v2_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v2_interact.json b/deeppavlov/configs/ranking/ranking_ubuntu_v2_interact.json deleted file mode 100644 index 1d7be11e17..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v2_interact.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "dataset_reader": { - "class_name": "ubuntu_v2_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v2_data" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "use_matrix": true, - "num_ranking_samples": 10, - "max_sequence_length": 50, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "sent_vocab": { - "id": "siam_sent_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/ubuntu_v2_vocabs/sent.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_vocabs/sent.dict" - }, - "tokenizer": { - "class_name": "nltk_tokenizer" - }, - "vocab": { - "id": "siam_vocab", - "class_name": "simple_vocab", - "save_path": "{MODELS_PATH}/ubuntu_v2_vocabs/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_vocabs/tok.dict" - }, - "embedder": { - "id": "siam_embedder", - "class_name": "fasttext", - "load_path": "{DOWNLOADS_PATH}/embeddings/wiki.en.bin" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#siam_embedder", - "vocab": "#siam_vocab" - }, - { - "id": "model", - "class_name": "bilstm_nn", - "len_vocab": "#siam_vocab.len", - "use_matrix": "#preproc.use_matrix", - "max_sequence_length": "#preproc.max_sequence_length", - "embedding_dim": "#siam_embedder.dim", - "seed": 243, - "hidden_dim": 300, - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "triplet_loss": false, - "batch_size": 256, - "save_path": "{MODELS_PATH}/ubuntu_v2_model/model_weights.h5", - "load_path": "{MODELS_PATH}/ubuntu_v2_model/model_weights.h5" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "siamese_predictor", - "model": "#model", - "batch_size": "#model.batch_size", - "interact_pred_num": 3, - "responses": "#siam_sent_vocab", - "preproc_func": "#preproc.__call__" - } - ], - "out": ["y_predicted"] - }, - "train": { - "epochs": 200, - "batch_size": 256, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": ["r@1", "rank_response"], - "validation_patience": 10, - "val_every_n_epochs": 1, - "log_every_n_batches": 1000, - "class_name": "nn_trainer", - "evaluation_targets": [ - "valid", - "test" - ] - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models" - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v2_ranking.tar.gz", - "subdir": "{MODELS_PATH}" - }, - { - "url": 
"http://files.deeppavlov.ai/datasets/ubuntu_v2_data.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v2_data" - }, - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/embeddings/wiki.en.bin", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} \ No newline at end of file diff --git a/deeppavlov/configs/ranking/ranking_ubuntu_v2_mt_word2vec_dam.json b/deeppavlov/configs/ranking/ranking_ubuntu_v2_mt_word2vec_dam.json deleted file mode 100644 index 24390093f4..0000000000 --- a/deeppavlov/configs/ranking/ranking_ubuntu_v2_mt_word2vec_dam.json +++ /dev/null @@ -1,131 +0,0 @@ -{ - "info": "The config is for training or evaluation of DAM on Ubuntu Dialogue Corpus v2 using prepared Word2vec embeddings", - "dataset_reader": { - "class_name": "ubuntu_v2_mt_reader", - "data_path": "{DOWNLOADS_PATH}/ubuntu_v2_data_clean", - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "padding": "pre" - }, - "dataset_iterator": { - "class_name": "siamese_iterator", - "shuffle": true, - "seed": 243 - }, - "chainer": { - "in": ["x"], - "in_y": ["y"], - "pipe": [ - { - "class_name": "split_tokenizer", - "id": "tok_1" - }, - { - "class_name": "simple_vocab", - "special_tokens": ["", ""], - "unk_token": "", - "fit_on": ["x"], - "id": "vocab_1", - "save_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/vocabs/int_tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/vocabs/int_tok.dict" - }, - { - "id": "word2vec_embedder", - "class_name": "glove", - "dim": 200, - "load_path": "{DOWNLOADS_PATH}/embeddings/v2_ubuntu_w2v_vectors.txt" - }, - { - "id": "preproc", - "class_name": "siamese_preprocessor", - "save_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/preproc/tok.dict", - "load_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/preproc/tok.dict", - "num_ranking_samples": 10, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": 50, - "embedding_dim": 200, - "fit_on": ["x"], - "in": ["x"], - "out": ["x_proc"], - "tokenizer": { - "ref": "tok_1", - "notes": "use defined tokenizer" - }, - "vocab": { - "ref": "vocab_1", - "notes": "use vocab built for tokenized data" - } - }, - { - "id": "embeddings", - "class_name": "emb_mat_assembler", - "embedder": "#word2vec_embedder", - "vocab": "#vocab_1" - }, - { - "in": ["x_proc"], - "in_y": ["y"], - "out": ["y_predicted"], - "class_name": "dam_nn", - "stack_num": 5, - "filters2_conv3d": 32, - "num_context_turns": "{NUM_CONTEXT_TURNS}", - "max_sequence_length": "#preproc.max_sequence_length", - "embedding_dim": "#word2vec_embedder.dim", - "emb_matrix": "#embeddings.emb_mat", - "learning_rate": 1e-3, - "batch_size": 100, - "seed": 65, - "decay_steps": 2000, - "save_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/model_dam/model", - "load_path": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/model_dam/model" - } - ], - "out": [ - "y_predicted" - ] - }, - "train": { - "class_name": "nn_trainer", - "epochs": 8, - "batch_size": 100, - "shuffle": true, - "pytest_max_batches": 2, - "train_metrics": [], - "metrics": [ - "r@1", - "r@2", - "r@5", - "rank_response" - ], - "validation_patience": 1, - "val_every_n_epochs": 1, - "log_every_n_batches": 100, - "evaluation_targets": [ - "valid", - "test" - ], - "tensorboard_log_dir": "{MODELS_PATH}/ubuntu_v2_mt_word2vec_dam/logs_dam/" - }, - "metadata": { - "variables": { - "ROOT_PATH": "~/.deeppavlov", - "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models", - "NUM_CONTEXT_TURNS": 10 - }, - "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v2_mt_word2vec_dam.tar.gz", - 
"subdir": "{MODELS_PATH}" - }, - { - "url": "http://files.deeppavlov.ai/datasets/ubuntu_v2_data_clean.tar.gz", - "subdir": "{DOWNLOADS_PATH}/ubuntu_v2_data_clean" - }, - { - "url": "http://files.deeppavlov.ai/embeddings/v2_ubuntu_w2v_vectors.txt.tar.gz", - "subdir": "{DOWNLOADS_PATH}/embeddings" - } - ] - } -} diff --git a/deeppavlov/configs/spelling_correction/brillmoore_kartaslov_ru_custom_vocab.json b/deeppavlov/configs/spelling_correction/brillmoore_kartaslov_ru_custom_vocab.json index d35561b812..46694d2205 100644 --- a/deeppavlov/configs/spelling_correction/brillmoore_kartaslov_ru_custom_vocab.json +++ b/deeppavlov/configs/spelling_correction/brillmoore_kartaslov_ru_custom_vocab.json @@ -45,7 +45,7 @@ "load_path": "{MODELS_PATH}/error_model/error_model_ru.tsv" }, { - "class_name": "kenlm_electorr", + "class_name": "kenlm_elector", "in": ["tokens_candidates"], "out": ["y_predicted_tokens"], "load_path": "{DOWNLOADS_PATH}/language_models/ru_wiyalen_no_punkt.arpa.binary" diff --git a/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L.json b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L.json new file mode 100644 index 0000000000..f278ad9627 --- /dev/null +++ b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L.json @@ -0,0 +1,173 @@ +{ + "dataset_reader": { + "class_name": "squad_dataset_reader", + "dataset": "SberSQuADClean", + "url": "http://files.deeppavlov.ai/datasets/sber_squad_clean-v1.1.tar.gz", + "data_path": "{DOWNLOADS_PATH}/squad_ru_clean/" + }, + "dataset_iterator": { + "class_name": "squad_iterator", + "seed": 1337, + "shuffle": true + }, + "chainer": { + "in": [ + "context_raw", + "question_raw" + ], + "in_y": [ + "ans_raw", + "ans_raw_start" + ], + "pipe": [ + { + "class_name": "torch_squad_transformers_preprocessor", + "add_token_type_ids": true, + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{lowercase}", + "max_seq_length": 384, + "return_tokens": true, + "in": [ + "question_raw", + "context_raw" + ], + "out": [ + "bert_features", + "subtokens" + ] + }, + { + "class_name": "squad_bert_mapping", + "do_lower_case": "{lowercase}", + "in": [ + "context_raw", + "bert_features", + "subtokens" + ], + "out": [ + "subtok2chars", + "char2subtoks" + ] + }, + { + "class_name": "squad_bert_ans_preprocessor", + "do_lower_case": "{lowercase}", + "in": [ + "ans_raw", + "ans_raw_start", + "char2subtoks" + ], + "out": [ + "ans", + "ans_start", + "ans_end" + ] + }, + { + "class_name": "torch_transformers_squad", + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.11, + "hidden_keep_prob": 0.33, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 9e-05 + }, + "learning_rate_drop_patience": 2, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "ans_start", + "ans_end" + ], + "out": [ + "ans_start_predicted", + "ans_end_predicted", + "logits" + ] + }, + { + "class_name": "squad_bert_ans_postprocessor", + "in": [ + "ans_start_predicted", + "ans_end_predicted", + "context_raw", + "bert_features", + "subtok2chars", + "subtokens" + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "ans_end_predicted" + ] + } + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "logits" + ] + }, + "train": { + "show_examples": false, + "evaluation_targets": [ + "valid" + ], + "log_every_n_batches": 250, + "val_every_n_batches": 500, + "batch_size": 10, + "validation_patience": 10, + "metrics": [ + { + "name": "squad_v2_f1", + "inputs": 
[ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v2_em", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v1_f1", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v1_em", + "inputs": [ + "ans", + "ans_predicted" + ] + } + ], + "tensorboard_log_dir": "{MODEL_PATH}/logs", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "lowercase": false, + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-tiny-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_ru_convers_distilrubert_2L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_ru_convers_distilrubert_2L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L_infer.json b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L_infer.json new file mode 100644 index 0000000000..9202d83ba8 --- /dev/null +++ b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_2L_infer.json @@ -0,0 +1,76 @@ +{ + "dataset_reader": { + "class_name": "squad_dataset_reader", + "dataset": "SberSQuADClean", + "url": "http://files.deeppavlov.ai/datasets/sber_squad_clean-v1.1.tar.gz", + "data_path": "{DOWNLOADS_PATH}/squad_ru_clean/" + }, + "dataset_iterator": { + "class_name": "squad_iterator", + "seed": 1337, + "shuffle": true + }, + "chainer": { + "in": ["context_raw", "question_raw"], + "in_y": ["ans_raw", "ans_raw_start"], + "pipe": [ + { + "class_name": "torch_transformers_squad_infer", + "lang": "ru", + "batch_size": 128, + "squad_model_config": "{CONFIGS_PATH}/squad/squad_ru_convers_distilrubert_2L.json", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{lowercase}", + "max_seq_length": 256, + "in": ["context_raw", "question_raw"], + "out": ["ans_predicted", "ans_start_predicted", "logits"] + } + ], + "out": ["ans_predicted", "ans_start_predicted", "logits"] + }, + "train": { + "show_examples": false, + "evaluation_targets": [ + "valid" + ], + "log_every_n_batches": 250, + "val_every_n_batches": 500, + "batch_size": 10, + "validation_patience": 10, + "metrics": [ + { + "name": "squad_v2_f1", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v2_em", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v1_f1", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v1_em", + "inputs": ["ans_raw", "ans_predicted"] + } + ] + }, + "metadata": { + "variables": { + "lowercase": false, + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-tiny-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_ru_convers_distilrubert_2L", + "CONFIGS_PATH": "{DEEPPAVLOV_PATH}/configs" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_ru_convers_distilrubert_2L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L.json b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L.json new file mode 100644 index 0000000000..8ca10a28f7 --- /dev/null +++ b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L.json @@ -0,0 +1,173 @@ +{ + "dataset_reader": { + "class_name": "squad_dataset_reader", + "dataset": "SberSQuADClean", + "url": "http://files.deeppavlov.ai/datasets/sber_squad_clean-v1.1.tar.gz", + "data_path": 
"{DOWNLOADS_PATH}/squad_ru_clean/" + }, + "dataset_iterator": { + "class_name": "squad_iterator", + "seed": 1337, + "shuffle": true + }, + "chainer": { + "in": [ + "context_raw", + "question_raw" + ], + "in_y": [ + "ans_raw", + "ans_raw_start" + ], + "pipe": [ + { + "class_name": "torch_squad_transformers_preprocessor", + "add_token_type_ids": true, + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{lowercase}", + "max_seq_length": 384, + "return_tokens": true, + "in": [ + "question_raw", + "context_raw" + ], + "out": [ + "bert_features", + "subtokens" + ] + }, + { + "class_name": "squad_bert_mapping", + "do_lower_case": "{lowercase}", + "in": [ + "context_raw", + "bert_features", + "subtokens" + ], + "out": [ + "subtok2chars", + "char2subtoks" + ] + }, + { + "class_name": "squad_bert_ans_preprocessor", + "do_lower_case": "{lowercase}", + "in": [ + "ans_raw", + "ans_raw_start", + "char2subtoks" + ], + "out": [ + "ans", + "ans_start", + "ans_end" + ] + }, + { + "class_name": "torch_transformers_squad", + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "attention_probs_keep_prob": 0.0, + "hidden_keep_prob": 0.33, + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 3.67e-5 + }, + "learning_rate_drop_patience": 2, + "learning_rate_drop_div": 1.5, + "in": [ + "bert_features" + ], + "in_y": [ + "ans_start", + "ans_end" + ], + "out": [ + "ans_start_predicted", + "ans_end_predicted", + "logits" + ] + }, + { + "class_name": "squad_bert_ans_postprocessor", + "in": [ + "ans_start_predicted", + "ans_end_predicted", + "context_raw", + "bert_features", + "subtok2chars", + "subtokens" + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "ans_end_predicted" + ] + } + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "logits" + ] + }, + "train": { + "show_examples": false, + "evaluation_targets": [ + "valid" + ], + "log_every_n_batches": 250, + "val_every_n_batches": 500, + "batch_size": 10, + "validation_patience": 10, + "metrics": [ + { + "name": "squad_v2_f1", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v2_em", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v1_f1", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v1_em", + "inputs": [ + "ans", + "ans_predicted" + ] + } + ], + "tensorboard_log_dir": "{MODEL_PATH}/logs", + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "lowercase": false, + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-base-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_ru_convers_distilrubert_6L" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_ru_convers_distilrubert_6L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L_infer.json b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L_infer.json new file mode 100644 index 0000000000..5c6171311c --- /dev/null +++ b/deeppavlov/configs/squad/squad_ru_convers_distilrubert_6L_infer.json @@ -0,0 +1,76 @@ +{ + "dataset_reader": { + "class_name": "squad_dataset_reader", + "dataset": "SberSQuADClean", + "url": "http://files.deeppavlov.ai/datasets/sber_squad_clean-v1.1.tar.gz", + "data_path": "{DOWNLOADS_PATH}/squad_ru_clean/" + }, + "dataset_iterator": { + "class_name": "squad_iterator", + "seed": 1337, + "shuffle": true + }, + 
"chainer": { + "in": ["context_raw", "question_raw"], + "in_y": ["ans_raw", "ans_raw_start"], + "pipe": [ + { + "class_name": "torch_transformers_squad_infer", + "lang": "ru", + "batch_size": 128, + "squad_model_config": "{CONFIGS_PATH}/squad/squad_ru_convers_distilrubert_6L.json", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{lowercase}", + "max_seq_length": 256, + "in": ["context_raw", "question_raw"], + "out": ["ans_predicted", "ans_start_predicted", "logits"] + } + ], + "out": ["ans_predicted", "ans_start_predicted", "logits"] + }, + "train": { + "show_examples": false, + "evaluation_targets": [ + "valid" + ], + "log_every_n_batches": 250, + "val_every_n_batches": 500, + "batch_size": 10, + "validation_patience": 10, + "metrics": [ + { + "name": "squad_v2_f1", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v2_em", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v1_f1", + "inputs": ["ans_raw", "ans_predicted"] + }, + { + "name": "squad_v1_em", + "inputs": ["ans_raw", "ans_predicted"] + } + ] + }, + "metadata": { + "variables": { + "lowercase": false, + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "TRANSFORMER": "DeepPavlov/distilrubert-base-cased-conversational", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_ru_convers_distilrubert_6L", + "CONFIGS_PATH": "{DEEPPAVLOV_PATH}/configs" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_ru_convers_distilrubert_6L.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/squad/squad_ru_torch_bert.json b/deeppavlov/configs/squad/squad_ru_torch_bert.json new file mode 100644 index 0000000000..029777626e --- /dev/null +++ b/deeppavlov/configs/squad/squad_ru_torch_bert.json @@ -0,0 +1,175 @@ +{ + "dataset_reader": { + "class_name": "squad_dataset_reader", + "dataset": "SberSQuADClean", + "url": "http://files.deeppavlov.ai/datasets/sber_squad_clean-v1.1.tar.gz", + "data_path": "{DOWNLOADS_PATH}/squad_ru_clean/" + }, + "dataset_iterator": { + "class_name": "squad_iterator", + "seed": 1337, + "shuffle": true + }, + "chainer": { + "in": [ + "context_raw", + "question_raw" + ], + "in_y": [ + "ans_raw", + "ans_raw_start" + ], + "pipe": [ + { + "class_name": "torch_squad_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{LOWERCASE}", + "max_seq_length": 384, + "return_tokens": true, + "in": [ + "question_raw", + "context_raw" + ], + "out": [ + "bert_features", + "subtokens" + ] + }, + { + "class_name": "squad_bert_mapping", + "do_lower_case": "{LOWERCASE}", + "in": [ + "context_raw", + "bert_features", + "subtokens" + ], + "out": [ + "subtok2chars", + "char2subtoks" + ] + }, + { + "class_name": "squad_bert_ans_preprocessor", + "do_lower_case": "{LOWERCASE}", + "in": [ + "ans_raw", + "ans_raw_start", + "char2subtoks" + ], + "out": [ + "ans", + "ans_start", + "ans_end" + ] + }, + { + "class_name": "torch_transformers_squad", + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", + "optimizer": "AdamW", + "optimizer_parameters": { + "lr": 2e-05, + "weight_decay": 0.01, + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-06 + }, + "learning_rate_drop_patience": 3, + "learning_rate_drop_div": 2.0, + "in": [ + "bert_features" + ], + "in_y": [ + "ans_start", + "ans_end" + ], + "out": [ + "ans_start_predicted", + "ans_end_predicted", + "logits" + ] + }, + { + "class_name": "squad_bert_ans_postprocessor", + "in": [ + 
"ans_start_predicted", + "ans_end_predicted", + "context_raw", + "bert_features", + "subtok2chars", + "subtokens" + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "ans_end_predicted" + ] + } + ], + "out": [ + "ans_predicted", + "ans_start_predicted", + "logits" + ] + }, + "train": { + "show_examples": false, + "evaluation_targets": [ + "valid" + ], + "log_every_n_batches": 250, + "val_every_n_batches": 500, + "batch_size": 10, + "validation_patience": 10, + "metrics": [ + { + "name": "squad_v1_f1", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v1_em", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v2_f1", + "inputs": [ + "ans", + "ans_predicted" + ] + }, + { + "name": "squad_v2_em", + "inputs": [ + "ans", + "ans_predicted" + ] + } + ], + "class_name": "torch_trainer" + }, + "metadata": { + "variables": { + "LOWERCASE": false, + "TRANSFORMER": "DeepPavlov/rubert-base-cased", + "ROOT_PATH": "~/.deeppavlov", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_ru_torch_bert/{TRANSFORMER}" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/v1/squad/squad_ru_torch_bert.tar.gz", + "subdir": "{MODELS_PATH}" + } + ] + } +} diff --git a/deeppavlov/configs/squad/squad_torch_bert.json b/deeppavlov/configs/squad/squad_torch_bert.json index 64d75f00ee..32b104c96a 100644 --- a/deeppavlov/configs/squad/squad_torch_bert.json +++ b/deeppavlov/configs/squad/squad_torch_bert.json @@ -19,9 +19,9 @@ ], "pipe": [ { - "class_name": "torch_transformers_preprocessor", - "vocab_file": "bert-base-cased", - "do_lower_case": false, + "class_name": "torch_squad_transformers_preprocessor", + "vocab_file": "{TRANSFORMER}", + "do_lower_case": "{LOWERCASE}", "max_seq_length": 384, "return_tokens": true, "in": [ @@ -35,7 +35,7 @@ }, { "class_name": "squad_bert_mapping", - "do_lower_case": false, + "do_lower_case": "{LOWERCASE}", "in": [ "context_raw", "bert_features", @@ -48,7 +48,7 @@ }, { "class_name": "squad_bert_ans_preprocessor", - "do_lower_case": false, + "do_lower_case": "{LOWERCASE}", "in": [ "ans_raw", "ans_raw_start", @@ -61,19 +61,19 @@ ] }, { - "class_name": "torch_squad_bert_model", - "pretrained_bert": "bert-base-cased", - "save_path": "{MODELS_PATH}/model", - "load_path": "{MODELS_PATH}/model", + "class_name": "torch_transformers_squad", + "pretrained_bert": "{TRANSFORMER}", + "save_path": "{MODEL_PATH}/model", + "load_path": "{MODEL_PATH}/model", "optimizer": "AdamW", "optimizer_parameters": { - "lr": 2e-5, - "weight_decay": 1e-2, + "lr": 2e-05, + "weight_decay": 0.01, "betas": [ 0.9, 0.999 ], - "eps": 1e-6 + "eps": 1e-06 }, "learning_rate_drop_patience": 2, "learning_rate_drop_div": 2.0, @@ -158,16 +158,18 @@ }, "metadata": { "variables": { + "LOWERCASE": true, + "TRANSFORMER": "bert-base-uncased", "ROOT_PATH": "~/.deeppavlov", "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models/squad_torch_bert" + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/squad_torch_bert/{TRANSFORMER}" }, "download": [ { - "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_torch_bert_v0.tar.gz", + "url": "http://files.deeppavlov.ai/v1/squad/squad_torch_bert.tar.gz", "subdir": "{ROOT_PATH}/models" } ] } } - diff --git a/deeppavlov/configs/squad/squad_torch_bert_infer.json b/deeppavlov/configs/squad/squad_torch_bert_infer.json index 6a3b054844..62398a515e 100644 --- a/deeppavlov/configs/squad/squad_torch_bert_infer.json +++ 
b/deeppavlov/configs/squad/squad_torch_bert_infer.json @@ -13,12 +13,12 @@ "in_y": ["ans_raw", "ans_raw_start"], "pipe": [ { - "class_name": "torch_squad_bert_infer", + "class_name": "torch_transformers_squad_infer", "batch_size": 10, "squad_model_config": "{CONFIGS_PATH}/squad/squad_torch_bert.json", "vocab_file": "bert-base-cased", "do_lower_case": false, - "max_seq_length": 512, + "max_seq_length": 384, "in": ["context_raw", "question_raw"], "out": ["ans_predicted", "ans_start_predicted", "logits"] } @@ -57,15 +57,12 @@ "metadata": { "variables": { "ROOT_PATH": "~/.deeppavlov", + "TRANSFORMER": "bert-base-cased", "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", - "MODELS_PATH": "{ROOT_PATH}/models/squad_torch_bert", + "MODELS_PATH": "{ROOT_PATH}/models/squad_torch_bert/{TRANSFORMER}", "CONFIGS_PATH": "{DEEPPAVLOV_PATH}/configs" }, "download": [ - { - "url": "http://files.deeppavlov.ai/deeppavlov_data/squad_torch_bert_v0.tar.gz", - "subdir": "{ROOT_PATH}/models" - } ] } } diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 30f493b085..57eb2b92dc 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -30,7 +30,6 @@ "conll2003_reader": "deeppavlov.dataset_readers.conll2003_reader:Conll2003DatasetReader", "convert_ids2tags": "deeppavlov.models.preprocessors.ner_preprocessor:ConvertIds2Tags", "cos_sim_classifier": "deeppavlov.models.classifiers.cos_sim_classifier:CosineSimilarityClassifier", - "dam_nn": "deeppavlov.models.ranking.deep_attention_matching_network:DAMNetwork", "dam_nn_use_transformer": "deeppavlov.models.ranking.deep_attention_matching_network_use_transformer:DAMNetworkUSETransformer", "data_fitting_iterator": "deeppavlov.core.data.data_fitting_iterator:DataFittingIterator", "data_learning_iterator": "deeppavlov.core.data.data_learning_iterator:DataLearningIterator", @@ -68,7 +67,6 @@ "hybrid_ner_model": "deeppavlov.models.ner.NER_model:HybridNerModel", "imdb_reader": "deeppavlov.dataset_readers.imdb_reader:ImdbReader", "input_splitter": "deeppavlov.models.multitask_bert.multitask_bert:InputSplitter", - "insurance_reader": "deeppavlov.dataset_readers.insurance_reader:InsuranceReader", "jieba_tokenizer": "deeppavlov.models.tokenizers.jieba_tokenizer:JiebaTokenizer", "joint_tagger_parser": "deeppavlov.models.syntax_parser.joint:JointTaggerParser", "kbqa_entity_linker": "deeppavlov.models.kbqa.kbqa_entity_linking:KBEntityLinker", @@ -110,15 +108,12 @@ "nn_trainer": "deeppavlov.core.trainers.nn_trainer:NNTrainer", "odqa_reader": "deeppavlov.dataset_readers.odqa_reader:ODQADataReader", "one_hotter": "deeppavlov.models.preprocessors.one_hotter:OneHotter", - "ontonotes_reader": "deeppavlov.dataset_readers.ontonotes_reader:OntonotesReader", "params_search": "deeppavlov.core.common.params_search:ParamsSearch", - "paraphraser_pretrain_reader": "deeppavlov.dataset_readers.paraphraser_pretrain_reader:ParaphraserPretrainReader", "paraphraser_reader": "deeppavlov.dataset_readers.paraphraser_reader:ParaphraserReader", "pop_ranker": "deeppavlov.models.doc_retrieval.pop_ranker:PopRanker", "proba2labels": "deeppavlov.models.classifiers.proba2labels:Proba2Labels", "pymorphy_russian_lemmatizer": "deeppavlov.models.preprocessors.russian_lemmatizer:PymorphyRussianLemmatizer", "pymorphy_vectorizer": "deeppavlov.models.vectorizers.word_vectorizer:PymorphyVectorizer", - "qqp_reader": "deeppavlov.dataset_readers.quora_question_pairs_reader:QuoraQuestionPairsReader", "query_generator": 
"deeppavlov.models.kbqa.query_generator:QueryGenerator", "query_generator_online": "deeppavlov.models.kbqa.query_generator_online:QueryGeneratorOnline", "question_sign_checker": "deeppavlov.models.kbqa.entity_detection_parser:QuestionSignChecker", @@ -178,12 +173,15 @@ "tfidf_ranker": "deeppavlov.models.doc_retrieval.tfidf_ranker:TfidfRanker", "tfidf_weighted": "deeppavlov.models.embedders.tfidf_weighted_embedder:TfidfWeightedEmbedder", "top1_elector": "deeppavlov.models.spelling_correction.electors.top1_elector:TopOneElector", + "torch_squad_transformers_preprocessor": "deeppavlov.models.preprocessors.torch_transformers_preprocessor:TorchSquadTransformersPreprocessor", "torch_transformers_ner_preprocessor": "deeppavlov.models.preprocessors.torch_transformers_preprocessor:TorchTransformersNerPreprocessor", + "torch_transformers_multiplechoice_preprocessor": "deeppavlov.models.preprocessors.torch_transformers_preprocessor:TorchTransformersMultiplechoicePreprocessor", + "torch_transformers_multiplechoice": "deeppavlov.models.torch_bert.torch_transformers_multiplechoice:TorchTransformersMultiplechoiceModel", "torch_bert_ranker": "deeppavlov.models.torch_bert.torch_bert_ranker:TorchBertRankerModel", "torch_bert_ranker_preprocessor": "deeppavlov.models.preprocessors.torch_transformers_preprocessor:TorchBertRankerPreprocessor", "torch_transformers_sequence_tagger": "deeppavlov.models.torch_bert.torch_transformers_sequence_tagger:TorchTransformersSequenceTagger", - "torch_squad_bert_infer": "deeppavlov.models.torch_bert.torch_bert_squad:TorchBertSQuADInferModel", - "torch_squad_bert_model": "deeppavlov.models.torch_bert.torch_bert_squad:TorchBertSQuADModel", + "torch_transformers_squad_infer": "deeppavlov.models.torch_bert.torch_transformers_squad:TorchTransformersSquadInfer", + "torch_transformers_squad": "deeppavlov.models.torch_bert.torch_transformers_squad:TorchTransformersSquad", "torch_text_classification_model": "deeppavlov.models.classifiers.torch_classification_model:TorchTextClassificationModel", "torch_trainer": "deeppavlov.core.trainers.torch_trainer:TorchTrainer", "torch_transformers_classifier": "deeppavlov.models.torch_bert.torch_transformers_classifier:TorchTransformersClassifierModel", @@ -197,11 +195,8 @@ "typos_iterator": "deeppavlov.dataset_iterators.typos_iterator:TyposDatasetIterator", "typos_kartaslov_reader": "deeppavlov.dataset_readers.typos_reader:TyposKartaslov", "typos_wikipedia_reader": "deeppavlov.dataset_readers.typos_reader:TyposWikipedia", - "ubuntu_dstc7_mt_reader": "deeppavlov.dataset_readers.ubuntu_dstc7_mt_reader:UbuntuDSTC7MTReader", - "ubuntu_v1_mt_reader": "deeppavlov.dataset_readers.ubuntu_v1_mt_reader:UbuntuV1MTReader", "ubuntu_v2_mt_reader": "deeppavlov.dataset_readers.ubuntu_v2_mt_reader:UbuntuV2MTReader", "ubuntu_v2_reader": "deeppavlov.dataset_readers.ubuntu_v2_reader:UbuntuV2Reader", - "udpipe_parser": "deeppavlov.models.kbqa.tree_to_sparql:UdpipeParser", "wiki_parser": "deeppavlov.models.kbqa.wiki_parser:WikiParser", "wiki_parser_online": "deeppavlov.models.kbqa.wiki_parser_online:WikiParserOnline", "wiki_sqlite_vocab": "deeppavlov.vocabs.wiki_sqlite:WikiSQLiteVocab", diff --git a/deeppavlov/core/common/requirements_registry.json b/deeppavlov/core/common/requirements_registry.json index 9afb597ab0..1131d45ef7 100644 --- a/deeppavlov/core/common/requirements_registry.json +++ b/deeppavlov/core/common/requirements_registry.json @@ -57,9 +57,6 @@ "chu_liu_edmonds_transformer": [ "{DEEPPAVLOV_PATH}/requirements/syntax_parser.txt" ], - "dam_nn": [ - 
"{DEEPPAVLOV_PATH}/requirements/tf.txt" - ], "dam_nn_use_transformer": [ "{DEEPPAVLOV_PATH}/requirements/tf.txt", "{DEEPPAVLOV_PATH}/requirements/tf-hub.txt" @@ -218,7 +215,7 @@ "{DEEPPAVLOV_PATH}/requirements/datasets.txt" ], "tree_to_sparql": [ - "{DEEPPAVLOV_PATH}/requirements/udpipe.txt" + "{DEEPPAVLOV_PATH}/requirements/udapi.txt" ], "torch_squad_bert_model": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", @@ -228,10 +225,22 @@ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", "{DEEPPAVLOV_PATH}/requirements/transformers.txt" ], - "torch_bert_ranker": [ + "torch_squad_transformers_preprocessor": [ + "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", + "{DEEPPAVLOV_PATH}/requirements/transformers.txt" + ], + "torch_transformers_multiplechoice_preprocessor": [ + "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", + "{DEEPPAVLOV_PATH}/requirements/transformers.txt" + ], + "torch_transformers_multiplechoice": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", "{DEEPPAVLOV_PATH}/requirements/transformers.txt" ], + "torch_bert_ranker": [ + "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", + "{DEEPPAVLOV_PATH}/requirements/transformers28.txt" + ], "torch_transformers_classifier": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", "{DEEPPAVLOV_PATH}/requirements/transformers.txt" @@ -241,7 +250,7 @@ "{DEEPPAVLOV_PATH}/requirements/transformers.txt" ], "ru_adj_to_noun": [ - "{DEEPPAVLOV_PATH}/requirements/udpipe.txt" + "{DEEPPAVLOV_PATH}/requirements/udapi.txt" ], "transformers_bert_embedder": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", @@ -253,7 +262,7 @@ ], "torch_bert_ranker_preprocessor": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", - "{DEEPPAVLOV_PATH}/requirements/transformers.txt" + "{DEEPPAVLOV_PATH}/requirements/transformers28.txt" ], "transformers_bert_preprocessor": [ "{DEEPPAVLOV_PATH}/requirements/transformers.txt" @@ -277,15 +286,12 @@ "{DEEPPAVLOV_PATH}/requirements/pytorch14.txt", "{DEEPPAVLOV_PATH}/requirements/nemo.txt", "{DEEPPAVLOV_PATH}/requirements/nemo-asr.txt", - "{DEEPPAVLOV_PATH}/requirements/transformers.txt", + "{DEEPPAVLOV_PATH}/requirements/transformers28.txt", "{DEEPPAVLOV_PATH}/requirements/nemo-tts.txt" ], "spelling_error_model": [ "{DEEPPAVLOV_PATH}/requirements/lxml.txt" ], - "udpipe_parser": [ - "{DEEPPAVLOV_PATH}/requirements/udpipe.txt" - ], "torchtext_classification_data_reader": [ "{DEEPPAVLOV_PATH}/requirements/torchtext.txt" ], diff --git a/deeppavlov/dataset_iterators/huggingface_dataset_iterator.py b/deeppavlov/dataset_iterators/huggingface_dataset_iterator.py index dbd35e127a..75a509fa17 100644 --- a/deeppavlov/dataset_iterators/huggingface_dataset_iterator.py +++ b/deeppavlov/dataset_iterators/huggingface_dataset_iterator.py @@ -39,8 +39,9 @@ def preprocess(self, use_label_name: Use actual label name instead of its index (0, 1, ...). Defaults to True. Returns: - List[Tuple[Any, Any]]: list of pairs of extrated features and labels + List[Tuple[Any, Any]]: list of pairs of extracted features and labels """ + dataset = [] for example in data: if isinstance(features, str): diff --git a/deeppavlov/dataset_readers/huggingface_dataset_reader.py b/deeppavlov/dataset_readers/huggingface_dataset_reader.py index 0a66ff5873..56b90e8c14 100644 --- a/deeppavlov/dataset_readers/huggingface_dataset_reader.py +++ b/deeppavlov/dataset_readers/huggingface_dataset_reader.py @@ -13,7 +13,8 @@ # limitations under the License. 
-from typing import Dict, Optional +import re +from typing import Dict, Optional, List from datasets import load_dataset, Dataset from overrides import overrides @@ -28,8 +29,14 @@ class HuggingFaceDatasetReader(DatasetReader): """ @overrides - def read(self, data_path: str, path: str, name: Optional[str] = None, train: str = 'train', - valid: Optional[str] = None, test: Optional[str] = None, **kwargs) -> Dict[str, Dataset]: + def read(self, + data_path: str, + path: str, + name: Optional[str] = None, + train: str = 'train', + valid: Optional[str] = None, + test: Optional[str] = None, + **kwargs) -> Dict[str, Dataset]: """Wraps datasets.load_dataset method Args: @@ -49,4 +56,43 @@ def read(self, data_path: str, path: str, name: Optional[str] = None, train: str # filter unused splits split_mapping = {el: split_mapping[el] for el in split_mapping if split_mapping[el]} dataset = load_dataset(path=path, name=name, split=list(split_mapping.values()), **kwargs) + if path == "super_glue" and name == "copa": + dataset = [dataset_split.map(preprocess_copa, batched=True) for dataset_split in dataset] + elif path == "super_glue" and name == "boolq": + dataset = load_dataset(path=path, name=name, split=interleave_splits(list(split_mapping.values())), **kwargs) + dataset = [dataset_split.map(preprocess_boolq, batched=True) for dataset_split in dataset] return dict(zip(split_mapping.keys(), dataset)) + + +def interleave_splits(splits: List[str]) -> List[str]: + return [f"{splits[0]}+{splits[1]}[:50%]", f"{splits[1]}[-50%:]", splits[2]] + + +def preprocess_copa(examples: Dataset) -> Dict[str, List[List[str]]]: + question_dict = { + "cause": "What was the cause of this?", + "effect": "What happened as a result?", + } + + num_choices = 2 + + questions = [question_dict[question] for question in examples["question"]] + premises = examples["premise"] + + contexts = [f"{premise} {question}" for premise, question in zip(premises, questions)] + contexts = [[context] * num_choices for context in contexts] + + choices = [[choice1, choice2] for choice1, choice2 in zip(examples["choice1"], examples["choice2"])] + + return {"contexts": contexts, + "choices": choices} + + +def preprocess_boolq(examples: Dataset) -> Dict[str, List[str]]: + + def remove_passage_title(passage: str) -> str: + return re.sub(r"^.+-- ", "", passage) + + passages = [remove_passage_title(passage) for passage in examples["passage"]] + + return {"passage": passages} diff --git a/deeppavlov/dataset_readers/insurance_reader.py b/deeppavlov/dataset_readers/insurance_reader.py deleted file mode 100644 index 54c1c5b38a..0000000000 --- a/deeppavlov/dataset_readers/insurance_reader.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2017 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from pathlib import Path -from typing import Dict, List, Tuple - -from deeppavlov.core.commands.utils import expand_path -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader - - -@register('insurance_reader') -class InsuranceReader(DatasetReader): - """The class to read the InsuranceQA V1 dataset from files. - - Please, see https://github.com/shuzi/insuranceQA. - """ - - def read(self, data_path: str, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]: - """Read the InsuranceQA V1 dataset from files. - - Args: - data_path: A path to a folder with dataset files. - """ - - data_path = expand_path(data_path) - dataset = {'train': None, 'valid': None, 'test': None} - train_fname = data_path / 'insuranceQA-master/V1/question.train.token_idx.label' - valid_fname = data_path / 'insuranceQA-master/V1/question.dev.label.token_idx.pool' - test_fname = data_path / 'insuranceQA-master/V1/question.test1.label.token_idx.pool' - int2tok_fname = data_path / 'insuranceQA-master/V1/vocabulary' - response2ints_fname = data_path / 'insuranceQA-master/V1/answers.label.token_idx' - self.int2tok_vocab = self._build_int2tok_vocab(int2tok_fname) - self.idxs2cont_vocab = self._build_context2toks_vocab(train_fname, valid_fname, test_fname) - self.response2str_vocab = self._build_response2str_vocab(response2ints_fname) - dataset["valid"] = self._preprocess_data_valid_test(valid_fname) - dataset["train"] = self._preprocess_data_train(train_fname) - dataset["test"] = self._preprocess_data_valid_test(test_fname) - - return dataset - - def _build_context2toks_vocab(self, train_f: Path, val_f: Path, test_f: Path) -> Dict[int, str]: - contexts = [] - with open(train_f, 'r') as f: - data = f.readlines() - for eli in data: - eli = eli[:-1] - c, _ = eli.split('\t') - contexts.append(c) - with open(val_f, 'r') as f: - data = f.readlines() - for eli in data: - eli = eli[:-1] - _, c, _ = eli.split('\t') - contexts.append(c) - with open(test_f, 'r') as f: - data = f.readlines() - for eli in data: - eli = eli[:-1] - _, c, _ = eli.split('\t') - contexts.append(c) - idxs2cont_vocab = {el[1]: el[0] for el in enumerate(contexts)} - return idxs2cont_vocab - - def _build_int2tok_vocab(self, fname: Path) -> Dict[int, str]: - with open(fname, 'r') as f: - data = f.readlines() - int2tok_vocab = {int(el.split('\t')[0].split('_')[1]): el.split('\t')[1][:-1] for el in data} - return int2tok_vocab - - def _build_response2str_vocab(self, fname: Path) -> Dict[int, str]: - with open(fname, 'r') as f: - data = f.readlines() - response2idxs_vocab = {int(el.split('\t')[0]) - 1: - (el.split('\t')[1][:-1]).split(' ') for el in data} - response2str_vocab = {el[0]: ' '.join([self.int2tok_vocab[int(x.split('_')[1])] - for x in el[1]]) for el in response2idxs_vocab.items()} - return response2str_vocab - - def _preprocess_data_train(self, fname: Path) -> List[Tuple[List[str], int]]: - positive_responses_pool = [] - contexts = [] - responses = [] - labels = [] - with open(fname, 'r') as f: - data = f.readlines() - for k, eli in enumerate(data): - eli = eli[:-1] - q, pa = eli.split('\t') - q_tok = ' '.join([self.int2tok_vocab[int(el.split('_')[1])] for el in q.split()]) - pa_list = [int(el) - 1 for el in pa.split(' ')] - pa_list_tok = [self.response2str_vocab[el] for el in pa_list] - for elj in pa_list_tok: - contexts.append(q_tok) - responses.append(elj) - positive_responses_pool.append(pa_list_tok) - labels.append(k) - train_data = list(zip(contexts, responses)) - train_data = 
list(zip(train_data, labels)) - return train_data - - def _preprocess_data_valid_test(self, fname: Path) -> List[Tuple[List[str], int]]: - pos_responses_pool = [] - neg_responses_pool = [] - contexts = [] - pos_responses = [] - with open(fname, 'r') as f: - data = f.readlines() - for eli in data: - eli = eli[:-1] - pa, q, na = eli.split('\t') - q_tok = ' '.join([self.int2tok_vocab[int(el.split('_')[1])] for el in q.split()]) - pa_list = [int(el) - 1 for el in pa.split(' ')] - pa_list_tok = [self.response2str_vocab[el] for el in pa_list] - nas = [int(el) - 1 for el in na.split(' ')] - nas_tok = [self.response2str_vocab[el] for el in nas] - for elj in pa_list_tok: - contexts.append(q_tok) - pos_responses.append(elj) - pos_responses_pool.append(pa_list_tok) - neg_responses_pool.append(nas_tok) - data = [[el[0]] + el[1] for el in zip(contexts, neg_responses_pool)] - data = [(el[0], len(el[1])) for el in zip(data, pos_responses_pool)] - return data diff --git a/deeppavlov/dataset_readers/ontonotes_reader.py b/deeppavlov/dataset_readers/ontonotes_reader.py deleted file mode 100644 index 6a174aea8c..0000000000 --- a/deeppavlov/dataset_readers/ontonotes_reader.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2017 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pickle -from logging import getLogger -from pathlib import Path - -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader -from deeppavlov.core.data.utils import download - -log = getLogger(__name__) - - -@register('ontonotes_reader') -class OntonotesReader(DatasetReader): - """Class to read training datasets in OntoNotes format""" - URL = 'http://files.deeppavlov.ai/datasets/ontonotes_senna.pckl' - - def __init__(self): - log.warning('ontonotes_reader is deprecated and will be removed in future versions.' 
- ' Please, use conll2003_reader with `"dataset_name": "ontonotes"` instead') - - def read(self, data_path, file_name: str = 'ontonotes_senna.pckl', provide_senna_pos=False, - provide_senna_ner=False): - path = Path(data_path).resolve() / file_name - if not path.exists(): - download(str(path), self.URL) - with open(path, 'rb') as f: - dataset = pickle.load(f) - - dataset_filtered = {} - for key, data in dataset.items(): - dataset_filtered[key] = [] - for (toks, pos, ner), tags in data: - if not provide_senna_pos and not provide_senna_ner: - dataset_filtered[key].append((toks, tags)) - else: - x = [toks] - if provide_senna_pos: - x.append(pos) - if provide_senna_ner: - x.append(ner) - dataset_filtered[key].append((x, tags)) - - return dataset_filtered diff --git a/deeppavlov/dataset_readers/paraphraser_pretrain_reader.py b/deeppavlov/dataset_readers/paraphraser_pretrain_reader.py deleted file mode 100644 index 1ad5d68adf..0000000000 --- a/deeppavlov/dataset_readers/paraphraser_pretrain_reader.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2017 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -from typing import Dict, List, Tuple - -from deeppavlov.core.commands.utils import expand_path -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader - - -@register("paraphraser_pretrain_reader") -class ParaphraserPretrainReader(DatasetReader): - """The class to read the pretraining dataset for the paraphrase identification task from files.""" - - def read(self, - data_path: str, - seed: int = None, *args, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]: - """Read the pretraining dataset for the paraphrase identification task from files. - - Args: - data_path: A path to a folder with dataset files. - seed: Random seed. - """ - - data_path = expand_path(data_path) - train_fname = data_path / 'paraphraser_pretrain_train.json' - test_fname = data_path / 'paraphraser_pretrain_val.json' - train_data = self.build_data(train_fname) - test_data = self.build_data(test_fname) - dataset = {"train": train_data, "valid": test_data, "test": test_data} - return dataset - - def int_class(self, str_y): - if str_y == '-1': - return 0 - else: - return 1 - - def build_data(self, name): - with open(name) as f: - data = json.load(f) - return [([doc['text_1'], doc['text_2']], self.int_class(doc['class'])) for doc in data] diff --git a/deeppavlov/dataset_readers/quora_question_pairs_reader.py b/deeppavlov/dataset_readers/quora_question_pairs_reader.py deleted file mode 100644 index b9d2c3a8e6..0000000000 --- a/deeppavlov/dataset_readers/quora_question_pairs_reader.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2017 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import csv -from typing import Dict, List, Tuple - -from deeppavlov.core.commands.utils import expand_path -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader - - -@register('qqp_reader') -class QuoraQuestionPairsReader(DatasetReader): - """The class to read the Quora Question Pairs dataset from files. - - Please, see https://www.kaggle.com/c/quora-question-pairs/data. - - Args: - data_path: A path to a folder with dataset files. - seed: Random seed. - """ - - def read(self, data_path: str, - seed: int = None, *args, **kwargs) -> Dict[str, List[Tuple[Tuple[str, str], int]]]: - data_path = expand_path(data_path) - fname = data_path / 'train.csv' - contexts = [] - responses = [] - labels = [] - with open(fname, 'r') as f: - reader = csv.reader(f) - next(reader) - for el in reader: - contexts.append(el[-3].replace('\n', '').lower()) - responses.append(el[-2].replace('\n', '').lower()) - labels.append(int(el[-1])) - data = list(zip(contexts, responses)) - data = list(zip(data, labels)) - data = {"train": data, - "valid": [], - "test": []} - return data diff --git a/deeppavlov/dataset_readers/ubuntu_dstc7_mt_reader.py b/deeppavlov/dataset_readers/ubuntu_dstc7_mt_reader.py deleted file mode 100644 index d7539ae171..0000000000 --- a/deeppavlov/dataset_readers/ubuntu_dstc7_mt_reader.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2018 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -from pathlib import Path -from typing import List, Tuple, Dict - -import numpy as np - -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader - - -@register('ubuntu_dstc7_mt_reader') -class UbuntuDSTC7MTReader(DatasetReader): - """ - DatasetReader for Ubuntu Dialogue Corpus Dataset (version 3), prepared for DSTC 7 competition Track 1 Subtrack 1. - - https://github.com/IBM/dstc7-noesis - - Args: - data_path (str): A path to a folder with dataset json files. - num_context_turns (int): A maximum number of dialogue ``context`` turns. 
- num_responses (int): A number of responses for each context; default is equal to all 100 responses, - it can be reduced to 10 (1 true response + 9 random wrong responses) to adapt with succeeding pipeline - padding (str): "post" or "pre" context sentences padding - """ - - def read(self, - data_path: str, - num_context_turns: int = 10, - num_responses: int = 100, - padding: str = "post", - seed: int = 42, - *args, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]: - - self.num_turns = num_context_turns - self.padding = padding - self.num_responses = num_responses - self.np_random = np.random.RandomState(seed) - - dataset = {} - dataset["train"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_train_subtask_1.json', "train") - dataset["valid"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_dev_subtask_1.json', "valid") - dataset["test"] = self._create_dialog_iter(Path(data_path) / 'ubuntu_test_subtask_1.json', "test") - return dataset - - def _create_dialog_iter(self, filename, mode="train"): - """ - Read input json file with test data and transform it to the following format: - [ - ( [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N], label ), - ( [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N], label ), - ... - ] - - where - * [context_utt_1, ..., context_utt_10, response_utt_1, ..., response_utt_N] - list that consists of - ``num_context_turn`` utterances, followed by ``num_responses`` responses. - Where - * label - label of the sample - - Args: - filename (Path): filename to read - mode (str): which dataset to return. Can be "train", "valid" or "test" - - Returns: - list of contexts and responses with their labels. More details about the format are provided above - """ - data = [] - with open(filename, encoding='utf-8') as f: - json_data = json.load(f) - for entry in json_data: - - dialog = entry - utterances = [] # all the context sentences - for msg in dialog['messages-so-far']: - utterances.append(msg['utterance']) - - true_response = "" # true response sentence - if mode != "test": - true_response = dialog['options-for-correct-answers'][0]['utterance'] - - fake_responses = [] # rest (wrong) responses - target_id = "" - if mode != "test": - correct_answer = dialog['options-for-correct-answers'][0] - target_id = correct_answer['candidate-id'] - for i, utterance in enumerate(dialog['options-for-next']): - if utterance['candidate-id'] != target_id: - fake_responses.append(utterance['utterance']) - - # aligned list of context utterances - expanded_context = self._expand_context(utterances, padding=self.padding) - - if mode == 'train': - data.append((expanded_context + [true_response], 1)) - data.append( - (expanded_context + list(self.np_random.choice(fake_responses, size=1)), 0)) # random 1 from 99 - - elif mode == 'valid': - # NOTE: labels are useless here... - data.append((expanded_context + [true_response] + list( - self.np_random.choice(fake_responses, self.num_responses - 1)), 0)) - - elif mode == 'test': - data.append((expanded_context + fake_responses, 0)) - - return data - - def _expand_context(self, context: List[str], padding: str) -> List[str]: - """ - Align context length by using pre/post padding of empty sentences up to ``self.num_turns`` sentences - or by reducing the number of context sentences to ``self.num_turns`` sentences. 
- - Args: - context (List[str]): list of raw context sentences - padding (str): "post" or "pre" context sentences padding - - Returns: - List[str]: list of ``self.num_turns`` context sentences - """ - if padding == "post": - sent_list = context - res = sent_list + (self.num_turns - len(sent_list)) * \ - [''] if len(sent_list) < self.num_turns else sent_list[:self.num_turns] - return res - elif padding == "pre": - sent_list = context[-(self.num_turns + 1):-1] - if len(sent_list) <= self.num_turns: - tmp = sent_list[:] - sent_list = [''] * (self.num_turns - len(sent_list)) - sent_list.extend(tmp) - return sent_list diff --git a/deeppavlov/dataset_readers/ubuntu_v1_mt_reader.py b/deeppavlov/dataset_readers/ubuntu_v1_mt_reader.py deleted file mode 100644 index 6761cee749..0000000000 --- a/deeppavlov/dataset_readers/ubuntu_v1_mt_reader.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2018 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path -from typing import List, Tuple, Union, Dict - -from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader - - -@register('ubuntu_v1_mt_reader') -class UbuntuV1MTReader(DatasetReader): - """The class to read the Ubuntu V1 dataset from csv files taking into account multi-turn dialogue ``context``. - - Please, see https://github.com/rkadlec/ubuntu-ranking-dataset-creator. - - Args: - data_path: A path to a folder with dataset csv files. - num_context_turns: A maximum number of dialogue ``context`` turns. 
- padding: "post" or "pre" context sentences padding - """ - - def read(self, data_path: str, - num_context_turns: int = 1, - padding: str = "post", - *args, **kwargs) -> Dict[str, List[Tuple[List[str], int]]]: - self.num_turns = num_context_turns - self.padding = padding - dataset = {'train': None, 'valid': None, 'test': None} - train_fname = Path(data_path) / 'train.txt' - valid_fname = Path(data_path) / 'valid.txt' - test_fname = Path(data_path) / 'test.txt' - self.sen2int_vocab = {} - self.classes_vocab_train = {} - self.classes_vocab_valid = {} - self.classes_vocab_test = {} - dataset["train"] = self.preprocess_data_train(train_fname) - dataset["valid"] = self.preprocess_data_validation(valid_fname) - dataset["test"] = self.preprocess_data_validation(test_fname) - return dataset - - def preprocess_data_train(self, train_fname: Union[Path, str]) -> List[Tuple[List[str], int]]: - contexts = [] - responses = [] - labels = [] - with open(train_fname, encoding='utf-8') as f: - for line in f: - line = line.replace('_', '') - parts = line.strip().split('\t') - - label = int(parts[0]) - context = parts[1:-1] - response = parts[-1] - - contexts.append(self._expand_context(context, padding=self.padding)) - responses.append(response) - labels.append(label) - data = [el[0] + [el[1]] for el in zip(contexts, responses)] - data = list(zip(data, labels)) - return data - - def preprocess_data_validation(self, fname: Union[Path, str]) -> List[Tuple[List[str], int]]: - contexts = [] - responses = [] - with open(fname, encoding='utf-8') as f: - responses_buf = [] - for line in f: - line = line.replace('_', '') - parts = line.strip().split('\t') - - label = int(parts[0]) # labels are not used - context = parts[1:-1] - responses_buf.append(parts[-1]) # add the next response - - if len(responses_buf) % 10 == 0: # add context and 10 response candidates - contexts.append(self._expand_context(context, padding=self.padding)) - responses.append(responses_buf) - responses_buf = [] - - data = [el[0] + el[1] for el in zip(contexts, responses)] - data = [(el, 1) for el in data] # NOTE: labels are useless here actually... - return data - - def _expand_context(self, context: List[str], padding: str) -> List[str]: - """ - Align context length by using pre/post padding of empty sentences up to ``self.num_turns`` sentences - or by reducing the number of context sentences to ``self.num_turns`` sentences. 
- - Args: - context (List[str]): list of raw context sentences - padding (str): "post" or "pre" context sentences padding - - Returns: - List[str]: list of ``self.num_turns`` context sentences - """ - if padding == "post": - sent_list = context - res = sent_list + (self.num_turns - len(sent_list)) * \ - [''] if len(sent_list) < self.num_turns else sent_list[:self.num_turns] - return res - elif padding == "pre": - # context[-self.num_turns:] because there is no empty strings in `context` - sent_list = context[-self.num_turns:] - if len(sent_list) <= self.num_turns: - tmp = sent_list[:] - sent_list = [''] * (self.num_turns - len(sent_list)) - sent_list.extend(tmp) - return sent_list diff --git a/deeppavlov/models/classifiers/proba2labels.py b/deeppavlov/models/classifiers/proba2labels.py index 29ab96ebc5..3fa485b32d 100644 --- a/deeppavlov/models/classifiers/proba2labels.py +++ b/deeppavlov/models/classifiers/proba2labels.py @@ -33,28 +33,34 @@ class Proba2Labels(Component): Args: max_proba: whether to choose label with maximal probability - confident_threshold: boundary probability value for sample to belong with the class (best use for multi-label) + confidence_threshold: boundary probability value for sample to belong with the class (best use for multi-label) top_n: how many top labels with the highest probabilities to return Attributes: max_proba: whether to choose label with maximal probability - confident_threshold: boundary probability value for sample to belong with the class (best use for multi-label) + confidence_threshold: boundary probability value for sample to belong with the class (best use for multi-label) top_n: how many top labels with the highest probabilities to return """ def __init__(self, max_proba: bool = None, - confident_threshold: float = None, + confidence_threshold: float = None, top_n: int = None, + is_binary: bool = False, **kwargs) -> None: """ Initialize class with given parameters""" self.max_proba = max_proba - self.confident_threshold = confident_threshold + self.confidence_threshold = confidence_threshold self.top_n = top_n + self.is_binary = is_binary - def __call__(self, data: Union[np.ndarray, List[List[float]], List[List[int]]], - *args, **kwargs) -> Union[List[List[int]], List[int]]: + def __call__(self, + data: Union[np.ndarray, + List[List[float]], + List[List[int]]], + *args, + **kwargs) -> Union[List[List[int]], List[int]]: """ Process probabilities to labels @@ -64,14 +70,17 @@ def __call__(self, data: Union[np.ndarray, List[List[float]], List[List[int]]], Returns: list of labels (only label classification) or list of lists of labels (multi-label classification) """ - if self.confident_threshold: - return [list(np.where(np.array(d) > self.confident_threshold)[0]) - for d in data] + if self.confidence_threshold: + if self.is_binary: + return [int(el > self.confidence_threshold) for el in data] + else: + return [list(np.where(np.array(d) > self.confidence_threshold)[0]) + for d in data] elif self.max_proba: return [np.argmax(d) for d in data] elif self.top_n: return [np.argsort(d)[::-1][:self.top_n] for d in data] else: raise ConfigError("Proba2Labels requires one of three arguments: bool `max_proba` or " - "float `confident_threshold` for multi-label classification or" + "float `confidence_threshold` for multi-label classification or" "integer `top_n` for choosing several labels with the highest probabilities") diff --git a/deeppavlov/models/classifiers/utils.py b/deeppavlov/models/classifiers/utils.py index 17944707fe..f49de72534 100644 --- 
a/deeppavlov/models/classifiers/utils.py +++ b/deeppavlov/models/classifiers/utils.py @@ -49,15 +49,15 @@ def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [li return y -def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]: +def proba2labels(proba: [list, np.ndarray], confidence_threshold: float, classes: [list, np.ndarray]) -> List[List]: """ Convert vectors of probabilities to labels using confident threshold - (if probability to belong with the class is bigger than confident_threshold, sample belongs with the class; + (if probability to belong with the class is bigger than confidence_threshold, sample belongs with the class; if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability) Args: proba: list of samples where each sample is a vector of probabilities to belong with given classes - confident_threshold (float): boundary of probability to belong with a class + confidence_threshold (float): boundary of probability to belong with a class classes: array of classes' names Returns: @@ -65,7 +65,7 @@ def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: """ y = [] for sample in proba: - to_add = np.where(sample > confident_threshold)[0] + to_add = np.where(sample > confidence_threshold)[0] if len(to_add) > 0: y.append(np.array(classes)[to_add].tolist()) else: @@ -74,16 +74,16 @@ def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: return y -def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray: +def proba2onehot(proba: [list, np.ndarray], confidence_threshold: float, classes: [list, np.ndarray]) -> np.ndarray: """ Convert vectors of probabilities to one-hot representations using confident threshold Args: proba: samples where each sample is a vector of probabilities to belong with given classes - confident_threshold: boundary of probability to belong with a class + confidence_threshold: boundary of probability to belong with a class classes: array of classes' names Returns: 2d array with one-hot representation of given samples """ - return labels2onehot(proba2labels(proba, confident_threshold, classes), classes) + return labels2onehot(proba2labels(proba, confidence_threshold, classes), classes) diff --git a/deeppavlov/models/embedders/transformers_embedder.py b/deeppavlov/models/embedders/transformers_embedder.py index 6a733e5764..afce923844 100644 --- a/deeppavlov/models/embedders/transformers_embedder.py +++ b/deeppavlov/models/embedders/transformers_embedder.py @@ -52,7 +52,9 @@ def load(self): self.model = transformers.BertModel.from_pretrained(self.load_path, config=self.config).eval().to(self.device) self.dim = self.model.config.hidden_size - def __call__(self, subtoken_ids_batch: Collection[Collection[int]], startofwords_batch: Collection[Collection[int]], + def __call__(self, + subtoken_ids_batch: Collection[Collection[int]], + startofwords_batch: Collection[Collection[int]], attention_batch: Collection[Collection[int]]) -> Tuple[Collection[Collection[Collection[float]]], Collection[Collection[Collection[float]]], Collection[Collection[float]], @@ -66,11 +68,13 @@ def __call__(self, subtoken_ids_batch: Collection[Collection[int]], startofwords for every other subtoken attention_batch: a mask matrix with ``1`` for every significant subtoken and ``0`` for paddings """ - ids_tensor = torch.tensor(subtoken_ids_batch, 
device=self.device, dtype = torch.long) + ids_tensor = torch.tensor(subtoken_ids_batch, device=self.device, dtype=torch.long) startofwords_tensor = torch.tensor(startofwords_batch, device=self.device).bool() attention_tensor = torch.tensor(attention_batch, device=self.device) with torch.no_grad(): - last_hidden, pooler_output = self.model(ids_tensor, attention_tensor) + output = self.model(ids_tensor, attention_tensor) + last_hidden = output.last_hidden_state + pooler_output = output.pooler_output attention_tensor = attention_tensor.unsqueeze(-1) max_emb = torch.max(last_hidden - 1e9 * (1 - attention_tensor), dim=1)[0] subword_emb = last_hidden * attention_tensor diff --git a/deeppavlov/models/kbqa/tree_to_sparql.py b/deeppavlov/models/kbqa/tree_to_sparql.py index 98f7b8da26..1793164cc5 100644 --- a/deeppavlov/models/kbqa/tree_to_sparql.py +++ b/deeppavlov/models/kbqa/tree_to_sparql.py @@ -17,12 +17,10 @@ from logging import getLogger from collections import defaultdict -import nltk import numpy as np import pymorphy2 import re from scipy.sparse import csr_matrix -from ufal_udpipe import Model as udModel, Pipeline from udapi.block.read.conllu import Conllu from udapi.core.node import Node @@ -112,33 +110,6 @@ def make_sparse_matrix(self, words: List[str]): return matrix -@register('udpipe_parser') -class UdpipeParser(Component): - """ - Class for building syntactic trees from sentences using UDPipe - """ - - def __init__(self, udpipe_filename: str, **kwargs): - """ - - Args: - udpipe_filename: file with UDPipe model - **kwargs: - """ - self.udpipe_filename = udpipe_filename - self.ud_model = udModel.load(str(expand_path(self.udpipe_filename))) - self.full_ud_model = Pipeline(self.ud_model, "vertical", Pipeline.DEFAULT, Pipeline.DEFAULT, "conllu") - - def __call__(self, sentences_batch: List[str]): - conll_outputs = [] - for sentence in sentences_batch: - sentence_tokens = nltk.word_tokenize(sentence) - sentence_inp = '\n'.join(sentence_tokens) - conll_output = self.full_ud_model.process(sentence_inp) - conll_outputs.append(conll_output) - return conll_outputs - - @register('tree_to_sparql') class TreeToSparql(Component): """ diff --git a/deeppavlov/models/preprocessors/squad_preprocessor.py b/deeppavlov/models/preprocessors/squad_preprocessor.py index f7558c296e..c342902d4f 100644 --- a/deeppavlov/models/preprocessors/squad_preprocessor.py +++ b/deeppavlov/models/preprocessors/squad_preprocessor.py @@ -19,7 +19,7 @@ from collections import Counter from logging import getLogger from pathlib import Path -from typing import Tuple, List, Union +from typing import Tuple, List, Union, Dict import numpy as np from nltk import word_tokenize @@ -393,9 +393,11 @@ def __init__(self, do_lower_case: bool = True, *args, **kwargs): self.do_lower_case = do_lower_case def __call__(self, contexts, bert_features, *args, **kwargs): - subtok2chars = [] - char2subtoks = [] + subtok2chars: List[Dict[int, int]] = [] + char2subtoks: List[Dict[int, int]] = [] + for batch_counter, (context, features) in enumerate(zip(contexts, bert_features)): + subtokens: List[str] if self.do_lower_case: context = context.lower() if len(args) > 0: @@ -404,8 +406,8 @@ def __call__(self, contexts, bert_features, *args, **kwargs): subtokens = features.tokens context_start = subtokens.index('[SEP]') + 1 idx = 0 - subtok2char = {} - char2subtok = {} + subtok2char: Dict[int, int] = {} + char2subtok: Dict[int, int] = {} for i, subtok in list(enumerate(subtokens))[context_start:-1]: subtok = subtok[2:] if subtok.startswith('##') else 
subtok subtok_pos = context[idx:].find(subtok) @@ -421,7 +423,6 @@ def __call__(self, contexts, bert_features, *args, **kwargs): idx += len(subtok) subtok2chars.append(subtok2char) char2subtoks.append(char2subtok) - return subtok2chars, char2subtoks diff --git a/deeppavlov/models/preprocessors/torch_transformers_preprocessor.py b/deeppavlov/models/preprocessors/torch_transformers_preprocessor.py index ca28bf2dec..aa67557185 100644 --- a/deeppavlov/models/preprocessors/torch_transformers_preprocessor.py +++ b/deeppavlov/models/preprocessors/torch_transformers_preprocessor.py @@ -17,7 +17,7 @@ from logging import getLogger from pathlib import Path import torch -from typing import Tuple, List, Optional, Union +from typing import Tuple, List, Optional, Union, Dict from transformers import AutoTokenizer from transformers.data.processors.utils import InputFeatures @@ -31,6 +31,88 @@ log = getLogger(__name__) +@register('torch_transformers_multiplechoice_preprocessor') +class TorchTransformersMultiplechoicePreprocessor(Component): + """Tokenize text on subtokens, encode subtokens with their indices, create tokens and segment masks. + + Check details in :func:`bert_dp.preprocessing.convert_examples_to_features` function. + + Args: + vocab_file: path to vocabulary + do_lower_case: set True if lowercasing is needed + max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens + return_tokens: whether to return tuple of input features and tokens, or only input features + + Attributes: + max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens + return_tokens: whether to return tuple of input features and tokens, or only input features + tokenizer: instance of Bert FullTokenizer + + """ + + def __init__(self, + vocab_file: str, + do_lower_case: bool = True, + max_seq_length: int = 512, + return_tokens: bool = False, + **kwargs) -> None: + self.max_seq_length = max_seq_length + self.return_tokens = return_tokens + if Path(vocab_file).is_file(): + vocab_file = str(expand_path(vocab_file)) + self.tokenizer = AutoTokenizer(vocab_file=vocab_file, + do_lower_case=do_lower_case) + else: + self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) + + def tokenize_mc_examples(self, + contexts: List[List[str]], + choices: List[List[str]]) -> Dict[str, torch.tensor]: + + num_choices = len(contexts[0]) + batch_size = len(contexts) + + # tokenize examples in groups of `num_choices` + examples = [] + for context_list, choice_list in zip(contexts, choices): + for context, choice in zip(context_list, choice_list): + tokenized_input = self.tokenizer.encode_plus(text=context, + text_pair=choice, + return_attention_mask=True, + add_special_tokens=True, + truncation=True) + + examples.append(tokenized_input) + + padded_examples = self.tokenizer.pad( + examples, + padding=True, + max_length=self.max_seq_length, + return_tensors='pt', + ) + + padded_examples = {k: v.view(batch_size, num_choices, -1) for k, v in padded_examples.items()} + + return padded_examples + + def __call__(self, texts_a: List[List[str]], texts_b: List[List[str]] = None) -> Dict[str, torch.tensor]: + """Tokenize and create masks. + + texts_a and texts_b are separated by [SEP] token + + Args: + texts_a: list of texts, + texts_b: list of texts, it could be None, e.g. 
single sentence classification task + + Returns: + batch of :class:`transformers.data.processors.utils.InputFeatures` with subtokens, subtoken ids, \ + subtoken mask, segment mask, or tuple of batch of InputFeatures and Batch of subtokens + """ + + input_features = self.tokenize_mc_examples(texts_a, texts_b) + return input_features + + @register('torch_transformers_preprocessor') class TorchTransformersPreprocessor(Component): """Tokenize text on subtokens, encode subtokens with their indices, create tokens and segment masks. @@ -41,11 +123,11 @@ class TorchTransformersPreprocessor(Component): vocab_file: path to vocabulary do_lower_case: set True if lowercasing is needed max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens - return_tokens: whether to return tuple of inputfeatures and tokens, or only inputfeatures + return_tokens: whether to return tuple of input features and tokens, or only input features Attributes: max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens - return_tokens: whether to return tuple of inputfeatures and tokens, or only inputfeatures + return_tokens: whether to return tuple of input features and tokens, or only input features tokenizer: instance of Bert FullTokenizer """ @@ -65,8 +147,76 @@ def __init__(self, else: self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) - def __call__(self, texts_a: List[str], texts_b: Optional[List[str]] = None) -> Union[ - List[InputFeatures], Tuple[List[InputFeatures], List[List[str]]]]: + def __call__(self, texts_a: List[str], texts_b: Optional[List[str]] = None) -> Union[List[InputFeatures], + Tuple[List[InputFeatures], + List[List[str]]]]: + """Tokenize and create masks. + + texts_a and texts_b are separated by [SEP] token + + Args: + texts_a: list of texts, + texts_b: list of texts, it could be None, e.g. single sentence classification task + + Returns: + batch of :class:`transformers.data.processors.utils.InputFeatures` with subtokens, subtoken ids, \ + subtoken mask, segment mask, or tuple of batch of InputFeatures and Batch of subtokens + """ + + # in case of iterator's strange behaviour + if isinstance(texts_a, tuple): + texts_a = list(texts_a) + + input_features = self.tokenizer(text=texts_a, + text_pair=texts_b, + add_special_tokens=True, + max_length=self.max_seq_length, + padding='max_length', + return_attention_mask=True, + truncation=True, + return_tensors='pt') + return input_features + + +@register('torch_squad_transformers_preprocessor') +class TorchSquadTransformersPreprocessor(Component): + """Tokenize text on subtokens, encode subtokens with their indices, create tokens and segment masks. + + Check details in :func:`bert_dp.preprocessing.convert_examples_to_features` function. 
+ + Args: + vocab_file: path to vocabulary + do_lower_case: set True if lowercasing is needed + max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens + return_tokens: whether to return tuple of input features and tokens, or only input features + + Attributes: + max_seq_length: max sequence length in subtokens, including [SEP] and [CLS] tokens + return_tokens: whether to return tuple of input features and tokens, or only input features + tokenizer: instance of Bert FullTokenizer + + """ + + def __init__(self, + vocab_file: str, + do_lower_case: bool = True, + max_seq_length: int = 512, + return_tokens: bool = False, + add_token_type_ids: bool = False, + **kwargs) -> None: + self.max_seq_length = max_seq_length + self.return_tokens = return_tokens + self.add_token_type_ids = add_token_type_ids + if Path(vocab_file).is_file(): + vocab_file = str(expand_path(vocab_file)) + self.tokenizer = AutoTokenizer(vocab_file=vocab_file, + do_lower_case=do_lower_case) + else: + self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) + + def __call__(self, texts_a: List[str], texts_b: Optional[List[str]] = None) -> Union[List[InputFeatures], + Tuple[List[InputFeatures], + List[List[str]]]]: """Tokenize and create masks. texts_a and texts_b are separated by [SEP] token @@ -84,14 +234,28 @@ def __call__(self, texts_a: List[str], texts_b: Optional[List[str]] = None) -> U texts_b = [None] * len(texts_a) input_features = [] - tokens = [] + tokens = [] for text_a, text_b in zip(texts_a, texts_b): encoded_dict = self.tokenizer.encode_plus( - text=text_a, text_pair=text_b, add_special_tokens=True, max_length=self.max_seq_length, - pad_to_max_length=True, return_attention_mask=True, return_tensors='pt') + text=text_a, text_pair=text_b, + add_special_tokens=True, + max_length=self.max_seq_length, + truncation=True, + padding='max_length', + return_attention_mask=True, + return_tensors='pt') if 'token_type_ids' not in encoded_dict: - encoded_dict['token_type_ids'] = torch.tensor([0]) + if self.add_token_type_ids: + input_ids = encoded_dict['input_ids'] + seq_len = input_ids.size(1) + sep = torch.where(input_ids == self.tokenizer.sep_token_id)[1][0].item() + len_a = min(sep + 1, seq_len) + len_b = seq_len - len_a + encoded_dict['token_type_ids'] = torch.cat((torch.zeros(1, len_a, dtype=int), + torch.ones(1, len_b, dtype=int)), dim=1) + else: + encoded_dict['token_type_ids'] = torch.tensor([0]) curr_features = InputFeatures(input_ids=encoded_dict['input_ids'], attention_mask=encoded_dict['attention_mask'], @@ -151,7 +315,7 @@ def __init__(self, self.tokenizer = AutoTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case) else: - self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=True) + self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) self.token_masking_prob = token_masking_prob def __call__(self, @@ -194,7 +358,7 @@ def __call__(self, if tags is not None: if self.provide_subword_tags: return tokens, subword_tokens, subword_tok_ids, \ - attention_mask, startofword_markers, subword_tags + attention_mask, startofword_markers, subword_tags else: nonmasked_tags = [[t for t in ts if t != 'X'] for ts in tags] for swts, swids, swms, ts in zip(subword_tokens, @@ -208,7 +372,7 @@ def __call__(self, log.warning(f'Masks: {swms}') log.warning(f'Tags len: {len(ts)}\n Tags: {ts}') return tokens, subword_tokens, subword_tok_ids, \ - attention_mask, startofword_markers, nonmasked_tags + attention_mask, 
startofword_markers, nonmasked_tags return tokens, subword_tokens, subword_tok_ids, startofword_markers, attention_mask @staticmethod diff --git a/deeppavlov/models/ranking/deep_attention_matching_network.py b/deeppavlov/models/ranking/deep_attention_matching_network.py deleted file mode 100644 index 71b74a64a6..0000000000 --- a/deeppavlov/models/ranking/deep_attention_matching_network.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2018 Neural Networks and Deep Learning lab, MIPT -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from logging import getLogger -from typing import Optional - -import numpy as np -import tensorflow as tf - -from deeppavlov.core.common.registry import register -from deeppavlov.models.ranking.matching_models.dam_utils import layers -from deeppavlov.models.ranking.matching_models.dam_utils import operations as op -from deeppavlov.models.ranking.tf_base_matching_model import TensorflowBaseMatchingModel - -log = getLogger(__name__) - - -@register('dam_nn') -class DAMNetwork(TensorflowBaseMatchingModel): - """ - Tensorflow implementation of Deep Attention Matching Network (DAM) - - ``` - @inproceedings{ , - title={Multi-Turn Response Selection for Chatbots with Deep Attention Matching Network}, - author={Xiangyang Zhou, Lu Li, Daxiang Dong, Yi Liu, Ying Chen, Wayne Xin Zhao, Dianhai Yu and Hua Wu}, - booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, - volume={1}, - pages={ -- }, - year={2018} - } - ``` - http://aclweb.org/anthology/P18-1103 - - Based on authors' Tensorflow code: https://github.com/baidu/Dialogue/tree/master/DAM - - Args: - num_context_turns (int): A number of ``context`` turns in data samples. - max_sequence_length(int): A maximum length of text sequences in tokens. - Longer sequences will be truncated and shorter ones will be padded. - learning_rate (float): Initial learning rate. - emb_matrix (np.ndarray): An embeddings matrix to initialize an embeddings layer of a model. - trainable_embeddings (bool): Whether train embeddings matrix or not. - embedding_dim (int): Dimensionality of token (word) embeddings. - is_positional (bool): Adds a bunch of sinusoids of different frequencies to an embeddings. - filters2_conv3d (int): number of filters in the second conv3d layer (cnn aggregation). Default: 16. - stack_num (int): Number of stack layers, default is 5. - seed (int): Random seed. - decay_steps (int): Number of steps after which is to decay the learning rate. 
- """ - - def __init__(self, - embedding_dim: int = 200, - max_sequence_length: int = 50, - learning_rate: float = 1e-3, - emb_matrix: Optional[np.ndarray] = None, - trainable_embeddings: bool = False, - is_positional: bool = True, - filters2_conv3d: int = 16, - stack_num: int = 5, - seed: int = 65, - decay_steps: int = 600, - *args, - **kwargs): - - self.seed = seed - tf.set_random_seed(self.seed) - - self.max_sentence_len = max_sequence_length - self.word_embedding_size = embedding_dim - self.trainable = trainable_embeddings - self.is_positional = is_positional - self.stack_num = stack_num - self.filters2_conv3d = filters2_conv3d - - self.learning_rate = learning_rate - self.emb_matrix = emb_matrix - self.decay_steps = decay_steps - - super(DAMNetwork, self).__init__(*args, **kwargs) - - self.sess_config = tf.ConfigProto(allow_soft_placement=True) - self.sess_config.gpu_options.allow_growth = True - self.sess = tf.Session(config=self.sess_config) - self._init_graph() - self.sess.run(tf.global_variables_initializer()) - - if self.load_path is not None: - self.load() - - def _init_placeholders(self): - with tf.variable_scope('inputs'): - # Utterances and their lengths - self.utterance_ph = tf.placeholder(tf.int32, shape=(None, self.num_context_turns, self.max_sentence_len)) - self.all_utterance_len_ph = tf.placeholder(tf.int32, shape=(None, self.num_context_turns)) - - # Responses and their lengths - self.response_ph = tf.placeholder(tf.int32, shape=(None, self.max_sentence_len)) - self.response_len_ph = tf.placeholder(tf.int32, shape=(None,)) - - # Labels - self.y_true = tf.placeholder(tf.int32, shape=(None,)) - - def _init_graph(self): - self._init_placeholders() - - with tf.variable_scope('embedding_matrix_init'): - word_embeddings = tf.get_variable("word_embeddings_v", - initializer=tf.constant(self.emb_matrix, dtype=tf.float32), - trainable=self.trainable) - with tf.variable_scope('embedding_lookup'): - response_embeddings = tf.nn.embedding_lookup(word_embeddings, self.response_ph) - - Hr = response_embeddings - if self.is_positional and self.stack_num > 0: - with tf.variable_scope('positional'): - Hr = op.positional_encoding_vector(Hr, max_timescale=10) - - Hr_stack = [Hr] - - for index in range(self.stack_num): - with tf.variable_scope('self_stack_' + str(index)): - Hr = layers.block( - Hr, Hr, Hr, - Q_lengths=self.response_len_ph, K_lengths=self.response_len_ph, attention_type='dot') - Hr_stack.append(Hr) - - # context part - # a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len] - list_turn_t = tf.unstack(self.utterance_ph, axis=1) - list_turn_length = tf.unstack(self.all_utterance_len_ph, axis=1) - - sim_turns = [] - # for every turn_t calculate matching vector - for turn_t, t_turn_length in zip(list_turn_t, list_turn_length): - Hu = tf.nn.embedding_lookup(word_embeddings, turn_t) # [batch, max_turn_len, emb_size] - - if self.is_positional and self.stack_num > 0: - with tf.variable_scope('positional', reuse=True): - Hu = op.positional_encoding_vector(Hu, max_timescale=10) - Hu_stack = [Hu] - - for index in range(self.stack_num): - with tf.variable_scope('self_stack_' + str(index), reuse=True): - Hu = layers.block( - Hu, Hu, Hu, - Q_lengths=t_turn_length, K_lengths=t_turn_length, attention_type='dot') - - Hu_stack.append(Hu) - - r_a_t_stack = [] - t_a_r_stack = [] - for index in range(self.stack_num + 1): - - with tf.variable_scope('t_attend_r_' + str(index)): - try: - t_a_r = layers.block( - Hu_stack[index], Hr_stack[index], Hr_stack[index], - 
Q_lengths=t_turn_length, K_lengths=self.response_len_ph, attention_type='dot') - except ValueError: - tf.get_variable_scope().reuse_variables() - t_a_r = layers.block( - Hu_stack[index], Hr_stack[index], Hr_stack[index], - Q_lengths=t_turn_length, K_lengths=self.response_len_ph, attention_type='dot') - - with tf.variable_scope('r_attend_t_' + str(index)): - try: - r_a_t = layers.block( - Hr_stack[index], Hu_stack[index], Hu_stack[index], - Q_lengths=self.response_len_ph, K_lengths=t_turn_length, attention_type='dot') - except ValueError: - tf.get_variable_scope().reuse_variables() - r_a_t = layers.block( - Hr_stack[index], Hu_stack[index], Hu_stack[index], - Q_lengths=self.response_len_ph, K_lengths=t_turn_length, attention_type='dot') - - t_a_r_stack.append(t_a_r) - r_a_t_stack.append(r_a_t) - - t_a_r_stack.extend(Hu_stack) - r_a_t_stack.extend(Hr_stack) - - t_a_r = tf.stack(t_a_r_stack, axis=-1) - r_a_t = tf.stack(r_a_t_stack, axis=-1) - - # log.info(t_a_r, r_a_t) # debug - - # calculate similarity matrix - with tf.variable_scope('similarity'): - # sim shape [batch, max_turn_len, max_turn_len, 2*stack_num+1] - # divide sqrt(200) to prevent gradient explosion - sim = tf.einsum('biks,bjks->bijs', t_a_r, r_a_t) / tf.sqrt(float(self.word_embedding_size)) - - sim_turns.append(sim) - - # cnn and aggregation - sim = tf.stack(sim_turns, axis=1) - log.info('sim shape: %s' % sim.shape) - with tf.variable_scope('cnn_aggregation'): - final_info = layers.CNN_3d(sim, 32, self.filters2_conv3d) - # for douban - # final_info = layers.CNN_3d(sim, 16, 16) - - # loss and train - with tf.variable_scope('loss'): - self.loss, self.logits = layers.loss(final_info, self.y_true, clip_value=10.) - self.y_pred = tf.nn.softmax(self.logits, name="y_pred") - tf.summary.scalar('loss', self.loss) - - self.global_step = tf.Variable(0, trainable=False) - initial_learning_rate = self.learning_rate - self.learning_rate = tf.train.exponential_decay( - initial_learning_rate, - global_step=self.global_step, - decay_steps=self.decay_steps, - decay_rate=0.9, - staircase=True) - - Optimizer = tf.train.AdamOptimizer(self.learning_rate) - self.grads_and_vars = Optimizer.compute_gradients(self.loss) - - for grad, var in self.grads_and_vars: - if grad is None: - log.info(var) - - self.capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in self.grads_and_vars] - self.train_op = Optimizer.apply_gradients( - self.capped_gvs, - global_step=self.global_step) - - # Debug - self.print_number_of_parameters() diff --git a/deeppavlov/models/torch_bert/torch_bert_ranker.py b/deeppavlov/models/torch_bert/torch_bert_ranker.py index 184a292553..8990e8ef0e 100644 --- a/deeppavlov/models/torch_bert/torch_bert_ranker.py +++ b/deeppavlov/models/torch_bert/torch_bert_ranker.py @@ -13,20 +13,25 @@ # limitations under the License. 
from logging import getLogger +from pathlib import Path from typing import List, Dict, Union, Optional import numpy as np import torch +from overrides import overrides +from transformers import AutoModelForSequenceClassification, AutoConfig from transformers.data.processors.utils import InputFeatures +from deeppavlov.core.commands.utils import expand_path +from deeppavlov.core.common.errors import ConfigError from deeppavlov.core.common.registry import register -from deeppavlov.models.torch_bert.torch_transformers_classifier import TorchTransformersClassifierModel +from deeppavlov.core.models.torch_model import TorchModel -logger = getLogger(__name__) +log = getLogger(__name__) @register('torch_bert_ranker') -class TorchBertRankerModel(TorchTransformersClassifierModel): +class TorchBertRankerModel(TorchModel): """BERT-based model for interaction-based text ranking on PyTorch. Linear transformation is trained over the BERT pooled output from [CLS] token. @@ -47,11 +52,27 @@ def __init__(self, pretrained_bert: str, n_classes: int = 2, return_probas: bool = True, optimizer: str = "AdamW", - optimizer_parameters: dict = {"lr": 2e-5, "weight_decay": 0.01, "betas": (0.9, 0.999), "eps": 1e-6}, + clip_norm: Optional[float] = None, + optimizer_parameters: Optional[dict] = None, **kwargs) -> None: - super().__init__(pretrained_bert=pretrained_bert, bert_config_file=bert_config_file, - n_classes=n_classes, return_probas=return_probas, - optimizer=optimizer, optimizer_parameters=optimizer_parameters, + + if not optimizer_parameters: + optimizer_parameters = {"lr": 2e-5, + "weight_decay": 0.01, + "betas": (0.9, 0.999), + "eps": 1e-6} + + self.return_probas = return_probas + self.pretrained_bert = pretrained_bert + self.bert_config_file = bert_config_file + self.n_classes = n_classes + self.clip_norm = clip_norm + + if self.return_probas and self.n_classes == 1: + raise RuntimeError('Set return_probas to False for regression task!') + + super().__init__(optimizer=optimizer, + optimizer_parameters=optimizer_parameters, **kwargs) def train_on_batch(self, features_li: List[List[InputFeatures]], y: Union[List[int], List[List[int]]]) -> Dict: @@ -101,7 +122,7 @@ def __call__(self, features_li: List[List[InputFeatures]]) -> Union[List[int], L """ if len(features_li) == 1 and len(features_li[0]) == 1: msg = f"It is not intended to use the {self.__class__} in the interact mode." 
- logger.error(msg) + log.error(msg) return [msg] predictions = [] @@ -133,3 +154,79 @@ def __call__(self, features_li: List[List[InputFeatures]]) -> Union[List[int], L predictions = np.hstack([np.expand_dims(el, 1) for el in predictions]) return predictions + + @overrides + def load(self, fname=None): + if fname is not None: + self.load_path = fname + + if self.pretrained_bert: + log.info(f"From pretrained {self.pretrained_bert}.") + config = AutoConfig.from_pretrained(self.pretrained_bert, + # num_labels=self.n_classes, + output_attentions=False, + output_hidden_states=False) + + self.model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_bert, config=config) + + try: + hidden_size = self.model.classifier.out_proj.in_features + + if self.n_classes != self.model.num_labels: + self.model.classifier.out_proj.weight = torch.nn.Parameter(torch.randn(self.n_classes, hidden_size)) + self.model.classifier.out_proj.bias = torch.nn.Parameter(torch.randn(self.n_classes)) + self.model.classifier.out_proj.out_features = self.n_classes + self.model.num_labels = self.n_classes + + except torch.nn.modules.module.ModuleAttributeError: + hidden_size = self.model.classifier.in_features + + if self.n_classes != self.model.num_labels: + self.model.classifier.weight = torch.nn.Parameter(torch.randn(self.n_classes, hidden_size)) + self.model.classifier.bias = torch.nn.Parameter(torch.randn(self.n_classes)) + self.model.classifier.out_features = self.n_classes + self.model.num_labels = self.n_classes + + + elif self.bert_config_file and Path(self.bert_config_file).is_file(): + self.bert_config = AutoConfig.from_json_file(str(expand_path(self.bert_config_file))) + if self.attention_probs_keep_prob is not None: + self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob + if self.hidden_keep_prob is not None: + self.bert_config.hidden_dropout_prob = 1.0 - self.hidden_keep_prob + self.model = AutoModelForSequenceClassification.from_config(config=self.bert_config) + else: + raise ConfigError("No pre-trained BERT model is given.") + + self.model.to(self.device) + + self.optimizer = getattr(torch.optim, self.optimizer_name)( + self.model.parameters(), **self.optimizer_parameters) + if self.lr_scheduler_name is not None: + self.lr_scheduler = getattr(torch.optim.lr_scheduler, self.lr_scheduler_name)( + self.optimizer, **self.lr_scheduler_parameters) + + if self.load_path: + log.info(f"Load path {self.load_path} is given.") + if isinstance(self.load_path, Path) and not self.load_path.parent.is_dir(): + raise ConfigError("Provided load path is incorrect!") + + weights_path = Path(self.load_path.resolve()) + weights_path = weights_path.with_suffix(f".pth.tar") + if weights_path.exists(): + log.info(f"Load path {weights_path} exists.") + log.info(f"Initializing `{self.__class__.__name__}` from saved.") + + # now load the weights, optimizer from saved + log.info(f"Loading weights from {weights_path}.") + checkpoint = torch.load(weights_path, map_location=self.device) + # set strict flag to False if position_ids are missing + # this is needed to load models trained on older versions + # of transformers library + strict_load_flag = bool([key for key in checkpoint["model_state_dict"].keys() + if key.endswith("embeddings.position_ids")]) + self.model.load_state_dict(checkpoint["model_state_dict"], strict=strict_load_flag) + self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + self.epochs_done = checkpoint.get("epochs_done", 0) + else: + log.info(f"Init from 
scratch. Load path {weights_path} does not exist.") diff --git a/deeppavlov/models/torch_bert/torch_transformers_classifier.py b/deeppavlov/models/torch_bert/torch_transformers_classifier.py index 20afc25700..3bf8077518 100644 --- a/deeppavlov/models/torch_bert/torch_transformers_classifier.py +++ b/deeppavlov/models/torch_bert/torch_transformers_classifier.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re from logging import getLogger from pathlib import Path -from typing import List, Dict, Union, Optional +from typing import List, Dict, Union, Optional, Tuple import numpy as np import torch from overrides import overrides -from transformers import AutoModelForSequenceClassification, AutoConfig -from transformers.data.processors.utils import InputFeatures +from torch.nn import BCEWithLogitsLoss +from transformers import AutoModelForSequenceClassification, AutoConfig, AutoModel +from transformers.modeling_outputs import SequenceClassifierOutput from deeppavlov.core.common.errors import ConfigError from deeppavlov.core.commands.utils import expand_path @@ -59,11 +61,18 @@ def __init__(self, n_classes, attention_probs_keep_prob: Optional[float] = None, hidden_keep_prob: Optional[float] = None, optimizer: str = "AdamW", - optimizer_parameters: dict = {"lr": 1e-3, "weight_decay": 0.01, "betas": (0.9, 0.999), "eps": 1e-6}, + optimizer_parameters: Optional[dict] = None, clip_norm: Optional[float] = None, bert_config_file: Optional[str] = None, + is_binary: Optional[bool] = False, **kwargs) -> None: + if not optimizer_parameters: + optimizer_parameters = {"lr": 1e-3, + "weight_decay": 0.01, + "betas": (0.9, 0.999), + "eps": 1e-6} + self.return_probas = return_probas self.one_hot_labels = one_hot_labels self.multilabel = multilabel @@ -73,6 +82,8 @@ def __init__(self, n_classes, self.hidden_keep_prob = hidden_keep_prob self.n_classes = n_classes self.clip_norm = clip_norm + self.is_binary = is_binary + self.bert_config = None if self.multilabel and not self.one_hot_labels: raise RuntimeError('Use one-hot encoded labels for multilabel classification!') @@ -82,12 +93,12 @@ def __init__(self, n_classes, if self.return_probas and self.n_classes == 1: raise RuntimeError('Set return_probas to False for regression task!') - + super().__init__(optimizer=optimizer, optimizer_parameters=optimizer_parameters, **kwargs) - def train_on_batch(self, features: List[InputFeatures], y: Union[List[int], List[List[int]]]) -> Dict: + def train_on_batch(self, features: Dict[str, torch.tensor], y: Union[List[int], List[List[int]]]) -> Dict: """Train model on given batch. This method calls train_op using features and y (labels).
@@ -99,25 +110,23 @@ def train_on_batch(self, features: List[InputFeatures], y: Union[List[int], List dict with loss and learning_rate values """ - _input = {} - for elem in ['input_ids', 'attention_mask', 'token_type_ids']: - _input[elem] = [getattr(f, elem) for f in features] + _input = {key: value.to(self.device) for key, value in features.items()} - for elem in ['input_ids', 'attention_mask', 'token_type_ids']: - _input[elem] = torch.cat(_input[elem], dim=0).to(self.device) + if self.n_classes > 1 and not self.is_binary: + _input["labels"] = torch.from_numpy(np.array(y)).to(self.device) - if self.n_classes > 1: - _input['labels'] = torch.from_numpy(np.array(y)).to(self.device) + # regression else: - _input['labels'] = torch.from_numpy(np.array(y, dtype=np.float32)).to(self.device) + _input["labels"] = torch.from_numpy(np.array(y, dtype=np.float32)).unsqueeze(1).to(self.device) self.optimizer.zero_grad() - tokenized = {key:value for (key,value) in _input.items() if key in self.model.forward.__code__.co_varnames} - - # Token_type_id is omitted for Text Classification + tokenized = {key: value for (key, value) in _input.items() + if key in self.accepted_keys} - loss, logits = self.model(**tokenized) + loss = self.model(**tokenized).loss + if self.is_data_parallel: + loss = loss.mean() loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. @@ -130,7 +139,7 @@ def train_on_batch(self, features: List[InputFeatures], y: Union[List[int], List return {'loss': loss.item()} - def __call__(self, features: List[InputFeatures]) -> Union[List[int], List[List[float]]]: + def __call__(self, features: Dict[str, torch.tensor]) -> Union[List[int], List[List[float]]]: """Make prediction for given features (texts). 
Args: @@ -141,22 +150,20 @@ def __call__(self, features: List[InputFeatures]) -> Union[List[int], List[List[ """ - _input = {} - for elem in ['input_ids', 'attention_mask', 'token_type_ids']: - _input[elem] = [getattr(f, elem) for f in features] - - for elem in ['input_ids', 'attention_mask', 'token_type_ids']: - _input[elem] = torch.cat(_input[elem], dim=0).to(self.device) + _input = {key: value.to(self.device) for key, value in features.items()} with torch.no_grad(): - tokenized = {key:value for (key,value) in _input.items() if key in self.model.forward.__code__.co_varnames} + tokenized = {key: value for (key, value) in _input.items() + if key in self.accepted_keys} # Forward pass, calculate logit predictions logits = self.model(**tokenized) logits = logits[0] if self.return_probas: - if not self.multilabel: + if self.is_binary: + pred = torch.sigmoid(logits).squeeze(1) + elif not self.multilabel: pred = torch.nn.functional.softmax(logits, dim=-1) else: pred = torch.nn.functional.sigmoid(logits) @@ -164,11 +171,27 @@ def __call__(self, features: List[InputFeatures]) -> Union[List[int], List[List[ elif self.n_classes > 1: logits = logits.detach().cpu().numpy() pred = np.argmax(logits, axis=1) - else: # regression + # regression + else: pred = logits.squeeze(-1).detach().cpu().numpy() return pred + # TODO move to the super class + @property + def accepted_keys(self) -> Tuple[str]: + if self.is_data_parallel: + accepted_keys = self.model.module.forward.__code__.co_varnames + else: + accepted_keys = self.model.forward.__code__.co_varnames + return accepted_keys + + # TODO move to the super class + @property + def is_data_parallel(self) -> bool: + return isinstance(self.model, torch.nn.DataParallel) + + # TODO this method requires massive refactoring @overrides def load(self, fname=None): if fname is not None: @@ -176,13 +199,39 @@ def load(self, fname=None): if self.pretrained_bert: log.info(f"From pretrained {self.pretrained_bert}.") - config = AutoConfig.from_pretrained(self.pretrained_bert, num_labels=self.n_classes, - output_attentions=False, output_hidden_states=False) + config = AutoConfig.from_pretrained(self.pretrained_bert, + # num_labels=self.n_classes, + output_attentions=False, + output_hidden_states=False) + + if self.is_binary: + config.add_pooling_layer = False + self.model = AutoModelForBinaryClassification(self.pretrained_bert, config) + else: + self.model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_bert, config=config) - self.model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_bert, config=config) + # TODO need a better solution here + try: + hidden_size = self.model.classifier.out_proj.in_features + + if self.n_classes != self.model.num_labels: + self.model.classifier.out_proj.weight = torch.nn.Parameter(torch.randn(self.n_classes, + hidden_size)) + self.model.classifier.out_proj.bias = torch.nn.Parameter(torch.randn(self.n_classes)) + self.model.classifier.out_proj.out_features = self.n_classes + self.model.num_labels = self.n_classes + + except torch.nn.modules.module.ModuleAttributeError: + hidden_size = self.model.classifier.in_features + + if self.n_classes != self.model.num_labels: + self.model.classifier.weight = torch.nn.Parameter(torch.randn(self.n_classes, hidden_size)) + self.model.classifier.bias = torch.nn.Parameter(torch.randn(self.n_classes)) + self.model.classifier.out_features = self.n_classes + self.model.num_labels = self.n_classes elif self.bert_config_file and Path(self.bert_config_file).is_file(): - 
self.bert_config = AutoConfig.from_json_file(str(expand_path(self.bert_config_file))) + self.bert_config = AutoConfig.from_pretrained(str(expand_path(self.bert_config_file))) if self.attention_probs_keep_prob is not None: self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob if self.hidden_keep_prob is not None: @@ -191,6 +240,10 @@ def load(self, fname=None): else: raise ConfigError("No pre-trained BERT model is given.") + # TODO that should probably be parametrized in config + if self.device.type == "cuda" and torch.cuda.device_count() > 1: + self.model = torch.nn.DataParallel(self.model) + self.model.to(self.device) self.optimizer = getattr(torch.optim, self.optimizer_name)( @@ -213,8 +266,101 @@ def load(self, fname=None): # now load the weights, optimizer from saved log.info(f"Loading weights from {weights_path}.") checkpoint = torch.load(weights_path, map_location=self.device) - self.model.load_state_dict(checkpoint["model_state_dict"]) - self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + model_state = checkpoint["model_state_dict"] + optimizer_state = checkpoint["optimizer_state_dict"] + + # load a multi-gpu model on a single device + if not self.is_data_parallel and "module." in list(model_state.keys())[0]: + tmp_model_state = {} + for key, value in model_state.items(): + tmp_model_state[re.sub("module.", "", key)] = value + model_state = tmp_model_state + + # set strict flag to False if position_ids are missing + # this is needed to load models trained on older versions + # of transformers library + strict_load_flag = bool([key for key in checkpoint["model_state_dict"].keys() + if key.endswith("embeddings.position_ids")]) + self.model.load_state_dict(model_state, strict=strict_load_flag) + self.optimizer.load_state_dict(optimizer_state) self.epochs_done = checkpoint.get("epochs_done", 0) else: log.info(f"Init from scratch. 
Load path {weights_path} does not exist.") + + +class AutoModelForBinaryClassification(torch.nn.Module): + + def __init__(self, pretrained_bert, config): + super().__init__() + self.pretrained_bert = pretrained_bert + self.config = config + + self.model = AutoModel.from_pretrained(self.pretrained_bert, self.config) + self.classifier = BinaryClassificationHead(config) + + self.classifier.init_weights() + + def forward(self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None): + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict) + + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput(loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions) + + +class BinaryClassificationHead(torch.nn.Module): + def __init__(self, config): + super().__init__() + + self.config = config + + self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) + self.out_proj = torch.nn.Linear(config.hidden_size, 1) + + def init_weights(self): + self.dense.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if self.dense.bias is not None: + self.dense.bias.data.zero_() + + def forward(self, features, **kwargs): + x = features[:, 0, :] + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x diff --git a/deeppavlov/models/torch_bert/torch_transformers_multiplechoice.py b/deeppavlov/models/torch_bert/torch_transformers_multiplechoice.py new file mode 100644 index 0000000000..efaae6c206 --- /dev/null +++ b/deeppavlov/models/torch_bert/torch_transformers_multiplechoice.py @@ -0,0 +1,209 @@ +# Copyright 2017 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from logging import getLogger +from pathlib import Path +from typing import List, Dict, Union, Optional + +import numpy as np +import torch +from overrides import overrides +from transformers import AutoModelForMultipleChoice, AutoConfig + +from deeppavlov.core.common.errors import ConfigError +from deeppavlov.core.commands.utils import expand_path +from deeppavlov.core.common.registry import register +from deeppavlov.core.models.torch_model import TorchModel + +log = getLogger(__name__) + + +@register('torch_transformers_multiplechoice') +class TorchTransformersMultiplechoiceModel(TorchModel): + """Bert-based model for multiple choice classification on PyTorch. + + It uses output from [CLS] token and predicts labels using linear transformation. + + Args: + n_classes: number of classes + pretrained_bert: pretrained Bert checkpoint path or key title (e.g. "bert-base-uncased") + one_hot_labels: set True if one-hot encoding for labels is used + multilabel: set True if it is multi-label classification + return_probas: set True to return class probabilities instead of the most probable label + attention_probs_keep_prob: keep_prob for Bert self-attention layers + hidden_keep_prob: keep_prob for Bert hidden layers + optimizer: optimizer name from `torch.optim` + optimizer_parameters: dictionary with optimizer's parameters, + e.g. {'lr': 0.1, 'weight_decay': 0.001, 'momentum': 0.9} + clip_norm: clip gradients by norm coefficient + bert_config_file: path to Bert configuration file (not used if pretrained_bert is key title) + """ + + def __init__(self, n_classes, + pretrained_bert, + one_hot_labels: bool = False, + multilabel: bool = False, + return_probas: bool = False, + attention_probs_keep_prob: Optional[float] = None, + hidden_keep_prob: Optional[float] = None, + optimizer: str = "AdamW", + optimizer_parameters: Optional[dict] = None, + clip_norm: Optional[float] = None, + bert_config_file: Optional[str] = None, + **kwargs) -> None: + + self.return_probas = return_probas + self.one_hot_labels = one_hot_labels + self.multilabel = multilabel + self.pretrained_bert = pretrained_bert + self.bert_config_file = bert_config_file + self.attention_probs_keep_prob = attention_probs_keep_prob + self.hidden_keep_prob = hidden_keep_prob + self.n_classes = n_classes + self.clip_norm = clip_norm + + if self.multilabel and not self.one_hot_labels: + raise RuntimeError('Use one-hot encoded labels for multilabel classification!') + + if self.multilabel and not self.return_probas: + raise RuntimeError('Set return_probas to True for multilabel classification!') + + if self.return_probas and self.n_classes == 1: + raise RuntimeError('Set return_probas to False for regression task!') + + if optimizer_parameters is None: + optimizer_parameters = {"lr": 1e-3, "weight_decay": 0.01, "betas": (0.9, 0.999), "eps": 1e-6} + + super().__init__(optimizer=optimizer, + optimizer_parameters=optimizer_parameters, + **kwargs) + + def train_on_batch(self, features: Dict[str, torch.tensor], y: Union[List[int], List[List[int]]]) -> Dict: + """Train model on given batch. + This method calls train_op using features and y (labels).
+ + Args: + features: batch of InputFeatures + y: batch of labels (class id or one-hot encoding) + + Returns: + dict with loss and learning_rate values + """ + + _input = {key: value.to(self.device) for key, value in features.items()} + + _input["labels"] = torch.tensor(y).long().to(self.device) + + self.optimizer.zero_grad() + + tokenized = {key: value for (key, value) in _input.items() if key in self.model.forward.__code__.co_varnames} + + loss = self.model(**tokenized).loss + + loss.backward() + # Clip the norm of the gradients to 1.0. + # This is to help prevent the "exploding gradients" problem. + if self.clip_norm: + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm) + + self.optimizer.step() + if self.lr_scheduler is not None: + self.lr_scheduler.step() + + return {'loss': loss.item()} + + def __call__(self, features: Dict[str, torch.tensor]) -> Union[List[int], List[List[float]]]: + """Make prediction for given features (texts). + + Args: + features: batch of InputFeatures + + Returns: + predicted classes or probabilities of each class + + """ + + _input = {key: value.to(self.device) for key, value in features.items()} + + with torch.no_grad(): + tokenized = {key: value for (key, value) in _input.items() + if key in self.model.forward.__code__.co_varnames} + + # Forward pass, calculate logit predictions + logits = self.model(**tokenized) + logits = logits[0] + + if self.return_probas: + if not self.multilabel: + pred = torch.nn.functional.softmax(logits, dim=-1) + else: + pred = torch.nn.functional.sigmoid(logits) + pred = pred.detach().cpu().numpy() + elif self.n_classes > 1: + logits = logits.detach().cpu().numpy() + pred = np.argmax(logits, axis=1) + else: # regression + pred = logits.squeeze(-1).detach().cpu().numpy() + + return pred + + @overrides + def load(self, fname = None): + if fname is not None: + self.load_path = fname + + if self.pretrained_bert: + log.info(f"From pretrained {self.pretrained_bert}.") + config = AutoConfig.from_pretrained(self.pretrained_bert, num_labels=self.n_classes, + output_attentions=False, output_hidden_states=False) + + self.model = AutoModelForMultipleChoice.from_pretrained(self.pretrained_bert, config=config) + + elif self.bert_config_file and Path(self.bert_config_file).is_file(): + self.bert_config = AutoConfig.from_json_file(str(expand_path(self.bert_config_file))) + if self.attention_probs_keep_prob is not None: + self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob + if self.hidden_keep_prob is not None: + self.bert_config.hidden_dropout_prob = 1.0 - self.hidden_keep_prob + self.model = AutoModelForMultipleChoice.from_config(config=self.bert_config) + else: + raise ConfigError("No pre-trained BERT model is given.") + + self.model.to(self.device) + + self.optimizer = getattr(torch.optim, self.optimizer_name)( + self.model.parameters(), **self.optimizer_parameters) + if self.lr_scheduler_name is not None: + self.lr_scheduler = getattr(torch.optim.lr_scheduler, self.lr_scheduler_name)( + self.optimizer, **self.lr_scheduler_parameters) + + if self.load_path: + log.info(f"Load path {self.load_path} is given.") + if isinstance(self.load_path, Path) and not self.load_path.parent.is_dir(): + raise ConfigError("Provided load path is incorrect!") + + weights_path = Path(self.load_path.resolve()) + weights_path = weights_path.with_suffix(f".pth.tar") + if weights_path.exists(): + log.info(f"Load path {weights_path} exists.") + log.info(f"Initializing `{self.__class__.__name__}` from saved.") + + 
# now load the weights, optimizer from saved + log.info(f"Loading weights from {weights_path}.") + checkpoint = torch.load(weights_path, map_location=self.device) + self.model.load_state_dict(checkpoint["model_state_dict"]) + self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + self.epochs_done = checkpoint.get("epochs_done", 0) + else: + log.info(f"Init from scratch. Load path {weights_path} does not exist.") diff --git a/deeppavlov/models/torch_bert/torch_transformers_sequence_tagger.py b/deeppavlov/models/torch_bert/torch_transformers_sequence_tagger.py index 9c1959a3ac..1ca16eb637 100644 --- a/deeppavlov/models/torch_bert/torch_transformers_sequence_tagger.py +++ b/deeppavlov/models/torch_bert/torch_transformers_sequence_tagger.py @@ -136,6 +136,7 @@ def token_from_subtoken(units: torch.Tensor, mask: torch.Tensor) -> torch.Tensor full_range = torch.arange(batch_size * max_token_seq_len).to(torch.int64) # full_range -> [0, 1, 2, 3, 4, 5, 6, 7, 8] nonword_indices_flat = torch.masked_select(full_range, torch.logical_not(x_mask)) + # # y_idxs -> [5, 7, 8] # get a sequence of units corresponding to the start subtokens of the words @@ -276,8 +277,9 @@ def train_on_batch(self, b_labels = torch.from_numpy(np.array(subtoken_labels)).to(torch.int64).to(self.device) self.optimizer.zero_grad() - loss, logits = self.model(input_ids=b_input_ids, attention_mask=b_input_masks, - labels=b_labels) + loss = self.model(input_ids=b_input_ids, + attention_mask=b_input_masks, + labels=b_labels).loss loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. @@ -347,7 +349,7 @@ def load(self, fname=None): raise ConfigError("No pre-trained BERT model is given.") self.model.to(self.device) - + self.optimizer = getattr(torch.optim, self.optimizer_name)( self.model.parameters(), **self.optimizer_parameters) if self.lr_scheduler_name is not None: diff --git a/deeppavlov/models/torch_bert/torch_bert_squad.py b/deeppavlov/models/torch_bert/torch_transformers_squad.py similarity index 80% rename from deeppavlov/models/torch_bert/torch_bert_squad.py rename to deeppavlov/models/torch_bert/torch_transformers_squad.py index 8b229e1c14..9506ce924e 100644 --- a/deeppavlov/models/torch_bert/torch_bert_squad.py +++ b/deeppavlov/models/torch_bert/torch_transformers_squad.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re import json import math from logging import getLogger @@ -21,7 +22,7 @@ import numpy as np import torch from overrides import overrides -from transformers import BertForQuestionAnswering, BertConfig, BertTokenizer +from transformers import AutoModelForQuestionAnswering, AutoConfig, AutoTokenizer from transformers.data.processors.utils import InputFeatures from deeppavlov import build_model @@ -35,12 +36,12 @@ def softmax_mask(val, mask): - INF = 1e30 - return -INF * (1 - mask.to(torch.float32)) + val + inf = 1e30 + return -inf * (1 - mask.to(torch.float32)) + val -@register('torch_squad_bert_model') -class TorchBertSQuADModel(TorchModel): +@register('torch_transformers_squad') +class TorchTransformersSquad(TorchModel): """Bert-based on PyTorch model for SQuAD-like problem setting: It predicts start and end position of answer for given question and context. 
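# --- Illustrative aside (not part of this diff) --------------------------------
# The TorchTransformersSquad class above predicts answer start and end positions.
# As a minimal sketch of that mechanism with the transformers 4.x API: a question
# answering head emits one start logit and one end logit per subtoken, and the
# answer span is read off by argmax. The checkpoint name and the example strings
# below are assumptions chosen only for illustration, not DeepPavlov defaults.
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

checkpoint = "bert-large-uncased-whole-word-masking-finetuned-squad"  # assumed QA checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)

question = "Where did Bob Ross live?"
context = "Bob Ross lived in Florida."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, return_dict=True)

# argmax over the per-subtoken logits gives the most likely start/end of the span
start = int(torch.argmax(outputs.start_logits, dim=-1))
end = int(torch.argmax(outputs.end_logits, dim=-1))
print(tokenizer.decode(inputs["input_ids"][0][start:end + 1]))
# --------------------------------------------------------------------------------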
@@ -71,8 +72,7 @@ def __init__(self, attention_probs_keep_prob: Optional[float] = None, hidden_keep_prob: Optional[float] = None, optimizer: str = "AdamW", - optimizer_parameters: dict = {"lr": 0.01, "weight_decay": 0.01, - "betas": (0.9, 0.999), "eps": 1e-6}, + optimizer_parameters: Optional[dict] = None, bert_config_file: Optional[str] = None, learning_rate_drop_patience: int = 20, learning_rate_drop_div: float = 2.0, @@ -81,6 +81,12 @@ def __init__(self, min_learning_rate: float = 1e-06, **kwargs) -> None: + if not optimizer_parameters: + optimizer_parameters = {"lr": 0.01, + "weight_decay": 0.01, + "betas": (0.9, 0.999), + "eps": 1e-6} + self.attention_probs_keep_prob = attention_probs_keep_prob self.hidden_keep_prob = hidden_keep_prob self.clip_norm = clip_norm @@ -109,6 +115,7 @@ def train_on_batch(self, features: List[InputFeatures], y_st: List[List[int]], y dict with loss and learning_rate values """ + input_ids = [f.input_ids for f in features] input_masks = [f.attention_mask for f in features] input_type_ids = [f.token_type_ids for f in features] @@ -121,13 +128,21 @@ def train_on_batch(self, features: List[InputFeatures], y_st: List[List[int]], y y_end = [x[0] for x in y_end] b_y_st = torch.from_numpy(np.array(y_st)).to(self.device) b_y_end = torch.from_numpy(np.array(y_end)).to(self.device) + + input_ = { + 'input_ids': b_input_ids, + 'attention_mask': b_input_masks, + 'token_type_ids': b_input_type_ids, + 'start_positions': b_y_st, + 'end_positions': b_y_end, + 'return_dict': True + } self.optimizer.zero_grad() - - outputs = self.model(input_ids=b_input_ids, attention_mask=b_input_masks, - token_type_ids=b_input_type_ids, - start_positions=b_y_st, end_positions=b_y_end) - loss = outputs[0] + input_ = {arg_name: arg_value for arg_name, arg_value in input_.items() if arg_name in self.accepted_keys} + loss = self.model(**input_).loss + if self.is_data_parallel: + loss = loss.mean() loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. 
@@ -140,6 +155,18 @@ def train_on_batch(self, features: List[InputFeatures], y_st: List[List[int]], y return {'loss': loss.item()} + @property + def accepted_keys(self) -> Tuple[str]: + if self.is_data_parallel: + accepted_keys = self.model.module.forward.__code__.co_varnames + else: + accepted_keys = self.model.forward.__code__.co_varnames + return accepted_keys + + @property + def is_data_parallel(self) -> bool: + return isinstance(self.model, torch.nn.DataParallel) + def __call__(self, features: List[InputFeatures]) -> Tuple[List[int], List[int], List[float], List[float]]: """get predictions using features as input @@ -157,11 +184,21 @@ def __call__(self, features: List[InputFeatures]) -> Tuple[List[int], List[int], b_input_ids = torch.cat(input_ids, dim=0).to(self.device) b_input_masks = torch.cat(input_masks, dim=0).to(self.device) b_input_type_ids = torch.cat(input_type_ids, dim=0).to(self.device) + + input_ = { + 'input_ids': b_input_ids, + 'attention_mask': b_input_masks, + 'token_type_ids': b_input_type_ids, + 'return_dict': True + } with torch.no_grad(): + input_ = {arg_name: arg_value for arg_name, arg_value in input_.items() if arg_name in self.accepted_keys} # Forward pass, calculate logit predictions - outputs = self.model(input_ids=b_input_ids, attention_mask=b_input_masks, token_type_ids=b_input_type_ids) - logits_st, logits_end = outputs[:2] + outputs = self.model(**input_) + + logits_st = outputs.start_logits + logits_end = outputs.end_logits bs = b_input_ids.size()[0] seq_len = b_input_ids.size()[-1] @@ -205,20 +242,28 @@ def load(self, fname=None): if fname is not None: self.load_path = fname - if self.pretrained_bert and not Path(self.pretrained_bert).is_file(): - self.model = BertForQuestionAnswering.from_pretrained( - self.pretrained_bert, output_attentions=False, output_hidden_states=False) + if self.pretrained_bert: + logger.info(f"From pretrained {self.pretrained_bert}.") + config = AutoConfig.from_pretrained(self.pretrained_bert, + output_attentions=False, + output_hidden_states=False) + + self.model = AutoModelForQuestionAnswering.from_pretrained(self.pretrained_bert, config=config) + elif self.bert_config_file and Path(self.bert_config_file).is_file(): - self.bert_config = BertConfig.from_json_file(str(expand_path(self.bert_config_file))) + self.bert_config = AutoConfig.from_pretrained(str(expand_path(self.bert_config_file))) if self.attention_probs_keep_prob is not None: self.bert_config.attention_probs_dropout_prob = 1.0 - self.attention_probs_keep_prob if self.hidden_keep_prob is not None: self.bert_config.hidden_dropout_prob = 1.0 - self.hidden_keep_prob - self.model = BertForQuestionAnswering(config=self.bert_config) + self.model = AutoModelForQuestionAnswering.from_config(config=self.bert_config) else: raise ConfigError("No pre-trained BERT model is given.") + if self.device.type == "cuda" and torch.cuda.device_count() > 1: + self.model = torch.nn.DataParallel(self.model) + self.model.to(self.device) self.optimizer = getattr(torch.optim, self.optimizer_name)( self.model.parameters(), **self.optimizer_parameters) @@ -240,15 +285,27 @@ def load(self, fname=None): # now load the weights, optimizer from saved logger.info(f"Loading weights from {weights_path}.") checkpoint = torch.load(weights_path, map_location=self.device) - self.model.load_state_dict(checkpoint["model_state_dict"]) - self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + model_state = checkpoint["model_state_dict"] + optimizer_state = checkpoint["optimizer_state_dict"] + + # load a
multi-gpu model on a single device + if not self.is_data_parallel and "module." in list(model_state.keys())[0]: + tmp_model_state = {} + for key, value in model_state.items(): + tmp_model_state[re.sub("module.", "", key)] = value + model_state = tmp_model_state + + strict_load_flag = bool([key for key in checkpoint["model_state_dict"].keys() + if key.endswith("embeddings.position_ids")]) + self.model.load_state_dict(model_state, strict=strict_load_flag) + self.optimizer.load_state_dict(optimizer_state) self.epochs_done = checkpoint.get("epochs_done", 0) else: logger.info(f"Init from scratch. Load path {weights_path} does not exist.") -@register('torch_squad_bert_infer') -class TorchBertSQuADInferModel(Component): +@register('torch_transformers_squad_infer') +class TorchTransformersSquadInfer(Component): """This model wraps BertSQuADModel to make predictions on longer than 512 tokens sequences. It splits context on chunks with `max_seq_length - 3 - len(question)` length, preserving sentences boundaries. @@ -287,10 +344,10 @@ def __init__(self, squad_model_config: str, if Path(vocab_file).is_file(): vocab_file = str(expand_path(vocab_file)) - self.tokenizer = BertTokenizer(vocab_file=vocab_file, + self.tokenizer = AutoTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case) else: - self.tokenizer = BertTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) + self.tokenizer = AutoTokenizer.from_pretrained(vocab_file, do_lower_case=do_lower_case) self.batch_size = batch_size diff --git a/deeppavlov/requirements/tf-gpu.txt b/deeppavlov/requirements/tf-gpu.txt index d97315f1cc..c6114f09c4 100644 --- a/deeppavlov/requirements/tf-gpu.txt +++ b/deeppavlov/requirements/tf-gpu.txt @@ -1 +1 @@ -tensorflow-gpu==1.15.2 \ No newline at end of file +tensorflow-gpu==1.15.5 \ No newline at end of file diff --git a/deeppavlov/requirements/tf.txt b/deeppavlov/requirements/tf.txt index 26ff9379f8..d5a56dee1e 100644 --- a/deeppavlov/requirements/tf.txt +++ b/deeppavlov/requirements/tf.txt @@ -1 +1 @@ -tensorflow==1.15.2 \ No newline at end of file +tensorflow==1.15.5 \ No newline at end of file diff --git a/deeppavlov/requirements/transformers.txt b/deeppavlov/requirements/transformers.txt index ec122c087f..ac8b9921ca 100644 --- a/deeppavlov/requirements/transformers.txt +++ b/deeppavlov/requirements/transformers.txt @@ -1 +1 @@ -transformers==2.8.0 \ No newline at end of file +transformers==4.6.0 \ No newline at end of file diff --git a/deeppavlov/requirements/transformers28.txt b/deeppavlov/requirements/transformers28.txt new file mode 100644 index 0000000000..ec122c087f --- /dev/null +++ b/deeppavlov/requirements/transformers28.txt @@ -0,0 +1 @@ +transformers==2.8.0 \ No newline at end of file diff --git a/deeppavlov/requirements/udpipe.txt b/deeppavlov/requirements/udapi.txt similarity index 59% rename from deeppavlov/requirements/udpipe.txt rename to deeppavlov/requirements/udapi.txt index 44bb975f46..d923dfbf55 100644 --- a/deeppavlov/requirements/udpipe.txt +++ b/deeppavlov/requirements/udapi.txt @@ -1,2 +1 @@ -ufal.udpipe==1.2.0.2 -git+https://github.com/udapi/udapi-python.git@1e4004f577f3c6e471528ce4b87dd570ce8f2706 +git+https://github.com/udapi/udapi-python.git@1e4004f577f3c6e471528ce4b87dd570ce8f2706 \ No newline at end of file diff --git a/docs/apiref/dataset_readers.rst b/docs/apiref/dataset_readers.rst index 57a183be95..1dd26030f5 100644 --- a/docs/apiref/dataset_readers.rst +++ b/docs/apiref/dataset_readers.rst @@ -19,8 +19,6 @@ Concrete DatasetReader classes. .. 
autoclass:: deeppavlov.dataset_readers.file_paths_reader.FilePathsReader :members: -.. autoclass:: deeppavlov.dataset_readers.insurance_reader.InsuranceReader - .. automodule:: deeppavlov.dataset_readers.kvret_reader :members: @@ -32,10 +30,6 @@ Concrete DatasetReader classes. .. autoclass:: deeppavlov.dataset_readers.paraphraser_reader.ParaphraserReader -.. autoclass:: deeppavlov.dataset_readers.paraphraser_pretrain_reader.ParaphraserPretrainReader - -.. autoclass:: deeppavlov.dataset_readers.quora_question_pairs_reader.QuoraQuestionPairsReader - .. autoclass:: deeppavlov.dataset_readers.siamese_reader.SiameseReader .. autoclass:: deeppavlov.dataset_readers.squad_dataset_reader.SquadDatasetReader diff --git a/docs/apiref/models/kbqa.rst b/docs/apiref/models/kbqa.rst index d5c9ed3536..f873053bd5 100644 --- a/docs/apiref/models/kbqa.rst +++ b/docs/apiref/models/kbqa.rst @@ -43,11 +43,6 @@ deeppavlov.models.kbqa .. automethod:: __init__ .. automethod:: __call__ -.. autoclass:: deeppavlov.models.kbqa.tree_to_sparql.UdpipeParser - - .. automethod:: __init__ - .. automethod:: __call__ - .. autoclass:: deeppavlov.models.kbqa.wiki_parser.WikiParser .. automethod:: __init__ diff --git a/docs/apiref/models/torch_bert.rst b/docs/apiref/models/torch_bert.rst index 403d96b354..a13ec3e52f 100644 --- a/docs/apiref/models/torch_bert.rst +++ b/docs/apiref/models/torch_bert.rst @@ -26,12 +26,12 @@ deeppavlov.models.torch_bert .. automethod:: __call__ .. automethod:: train_on_batch -.. autoclass:: deeppavlov.models.torch_bert.torch_bert_squad.TorchBertSQuADModel +.. autoclass:: deeppavlov.models.torch_bert.torch_transformers_squad.TorchTransformersSquad .. automethod:: __call__ .. automethod:: train_on_batch -.. autoclass:: deeppavlov.models.torch_bert.torch_bert_squad.TorchBertSQuADInferModel +.. autoclass:: deeppavlov.models.torch_bert.torch_transformers_squad.TorchTransformersSquadInfer .. 
automethod:: __call__ diff --git a/docs/conf.py b/docs/conf.py index 9fb9843cd4..088c7a6511 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -193,7 +193,7 @@ autodoc_mock_imports = ['aiml', 'bert_dp', 'bs4', 'faiss', 'fastText', 'fasttext', 'gensim', 'hdt', 'kenlm', 'librosa', 'lxml', 'nemo', 'nemo_asr', 'nemo_tts', 'nltk', 'rapidfuzz', 'rasa', 'russian_tagsets', 'sacremoses', 'sortedcontainers', 'spacy', 'tensorflow', 'tensorflow_hub', 'torch', - 'transformers', 'udapi', 'ufal_udpipe', 'whapi', 'xeger'] + 'transformers', 'udapi', 'whapi', 'xeger'] extlinks = { 'config': (f'https://github.com/deepmipt/DeepPavlov/blob/{release}/deeppavlov/configs/%s', None) diff --git a/docs/features/models/bert.rst b/docs/features/models/bert.rst index 564cd31d73..9e68437742 100644 --- a/docs/features/models/bert.rst +++ b/docs/features/models/bert.rst @@ -29,6 +29,8 @@ We have trained BERT-base model for other languages and domains: `[deeppavlov_pytorch] `__ - Conversational RuBERT, Russian, cased, 12-layer, 768-hidden, 12-heads, 180M parameters: `[deeppavlov] `__, `[deeppavlov_pytorch] `__ +- Conversational DistilRuBERT, Russian, cased, 6-layer, 768-hidden, 12-heads, 135.4M parameters: `[deeppavlov_pytorch] `__ +- Conversational DistilRuBERT-tiny, Russian, cased, 2-layer, 768-hidden, 12-heads, 107M parameters: `[deeppavlov_pytorch] `__ - Sentence Multilingual BERT, 101 languages, cased, 12-layer, 768-hidden, 12-heads, 180M parameters: `[deeppavlov] `__, `[deeppavlov_pytorch] `__ - Sentence RuBERT, Russian, cased, 12-layer, 768-hidden, 12-heads, 180M parameters: `[deeppavlov] `__, @@ -50,6 +52,13 @@ English cased version of BERT-base as initialization for English Conversational Conversational RuBERT was trained on OpenSubtitles [5]_, Dirty, Pikabu, and Social Media segment of Taiga corpus [8]_. We assembled new vocabulary for Conversational RuBERT model on this data and initialized model with RuBERT. +Conversational DistilRuBERT (6 transformer layers) and DistilRuBERT-tiny (2 transformer layers) were trained on the same data as Conversational RuBERT and were highly inspired by DistilBERT [13]_. Namely, the Distil* models (students) used the pretrained Conversational RuBERT as a teacher and a linear combination of the following losses (a schematic sketch of the combined objective is given below): + +1. Masked language modeling loss (between student output logits for tokens and their true labels) +2. Kullback-Leibler divergence (between student and teacher output logits) +3. Cosine embedding loss (between averaged hidden states of the teacher and hidden states of the student) +4. Mean squared error loss (between averaged attention maps of the teacher and attention maps of the student) + Sentence Multilingual BERT is a representation-based sentence encoder for 101 languages of Multilingual BERT. It is initialized with Multilingual BERT and then fine-tuned on english MultiNLI [9]_ and on dev set of multilingual XNLI [10]_. Sentence representations are mean pooled token embeddings in the same manner as in Sentence-BERT [12]_. @@ -148,7 +157,7 @@ BERT for Context Question Answering (SQuAD) Context Question Answering on `SQuAD `__ dataset is a task of looking for an answer on a question in a given context. This task could be formalized as predicting answer start and end position in a given context.
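The combined distillation objective for Conversational DistilRuBERT listed above can be sketched as follows. This is an illustrative outline only, not the actual DeepPavlov training code; the tensor names, the softmax ``temperature``, and the loss weights ``alphas`` are assumptions, and the hidden states and attention maps are assumed to be already averaged to matching shapes.

.. code:: python

    import torch
    import torch.nn.functional as F

    def distillation_loss(student_logits, teacher_logits, labels,
                          student_hidden, teacher_hidden,
                          student_attn, teacher_attn,
                          temperature=2.0, alphas=(1.0, 1.0, 1.0, 1.0)):
        # 1. masked language modeling loss: student token logits vs. true token labels
        mlm = F.cross_entropy(student_logits.view(-1, student_logits.size(-1)),
                              labels.view(-1), ignore_index=-100)
        # 2. KL divergence between softened student and teacher distributions
        kl = F.kl_div(F.log_softmax(student_logits / temperature, dim=-1),
                      F.softmax(teacher_logits / temperature, dim=-1),
                      reduction="batchmean") * temperature ** 2
        # 3. cosine embedding loss between teacher and student hidden states
        target = torch.ones(student_hidden.size(0), device=student_hidden.device)
        cos = F.cosine_embedding_loss(student_hidden, teacher_hidden, target)
        # 4. mean squared error between teacher and student attention maps
        mse = F.mse_loss(student_attn, teacher_attn)
        a1, a2, a3, a4 = alphas
        return a1 * mlm + a2 * kl + a3 * cos + a4 * mse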
:class:`~deeppavlov.models.bert.bert_squad.BertSQuADModel` on TensorFlow and -:class:`~deeppavlov.models.torch_bert.torch_bert_squad.TorchBertSQuADModel` on PyTorch use two linear +:class:`~deeppavlov.models.torch_bert.torch_transformers_squad:TorchTransformersSquad` on PyTorch use two linear transformations to predict probability that current subtoken is start/end position of an answer. For details check :doc:`Context Question Answering documentation page `. @@ -196,3 +205,4 @@ the :doc:`config ` file must be changed to match new BERT .. [10] Williams A., Bowman S. (2018) XNLI: Evaluating Cross-lingual Sentence Representations. arXiv preprint arXiv:1809.05053 .. [11] S. R. Bowman, G. Angeli, C. Potts, and C. D. Manning. (2015) A large annotated corpus for learning natural language inference. arXiv preprint arXiv:1508.05326 .. [12] N. Reimers, I. Gurevych (2019) Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks. arXiv preprint arXiv:1908.10084 +.. [13] Sanh, V., Debut, L., Chaumond, J., & Wolf, T. (2019). DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108. diff --git a/docs/features/models/ner.rst b/docs/features/models/ner.rst index 99172d7d2b..3663bb84ef 100644 --- a/docs/features/models/ner.rst +++ b/docs/features/models/ner.rst @@ -12,15 +12,15 @@ model make sure that all required packages are installed using the command: .. code:: bash - python -m deeppavlov install ner_ontonotes_bert + python -m deeppavlov install ner_ontonotes_bert_torch To use a pre-trained model from CLI use the following command: .. code:: bash - python deeppavlov/deep.py interact ner_ontonotes_bert [-d] + python deeppavlov/deep.py interact ner_ontonotes_bert_torch [-d] -where ``ner_conll2003_bert`` is the name of the config and ``-d`` is an optional download key. The key ``-d`` is used +where ``ner_ontonotes_bert_torch`` is the name of the config and ``-d`` is an optional download key. The key ``-d`` is used to download the pre-trained model along with embeddings and all other files needed to run the model. 
Other possible commands are ``train``, ``evaluate``, and ``download``, @@ -34,15 +34,15 @@ Here is the list of all available configs: +------------------------------------------------------------------------+--------------------+----------+-----------------+------------+------------+ | Model | Dataset | Language | Embeddings Size | Model Size | F1 score | +========================================================================+====================+==========+=================+============+============+ - | :config:`ner_rus_bert ` | Collection3 [1]_ | Ru | 700 MB | 1.4 GB | **98.1** | + | :config:`ner_rus_bert_torch ` | Collection3 [1]_ | Ru | 700 MB | 2.0 GB | **97.7** | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ | :config:`ner_collection3_m1 ` | | | 1.1 GB | 1 GB | 97.8 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ | :config:`ner_rus ` | | | 1.0 GB | 5.6 MB | 95.1 | +------------------------------------------------------------------------+--------------------+----------+-----------------+------------+------------+ - | :config:`ner_ontonotes_bert_mult ` | Ontonotes | Multi | 700 MB | 1.4 GB | **88.8** | + | :config:`` | Ontonotes | Multi | 700 MB | 2.0 GB | **87.2** | +------------------------------------------------------------------------+ +----------+-----------------+------------+------------+ - | :config:`ner_ontonotes_bert ` | | En | 400 MB | 800 MB | 88.6 | + | :config:`ner_ontonotes_bert_torch ` | | En | 400 MB | 1.3 GB | 87.9 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ | :config:`ner_ontonotes_m1 ` | | | 347 MB | 379.4 MB | 87.7 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ @@ -50,7 +50,7 @@ Here is the list of all available configs: +------------------------------------------------------------------------+--------------------+ +-----------------+------------+------------+ | :config:`ner_conll2003_bert ` | CoNLL-2003 | | 400 MB | 850 MB | 91.7 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ - | :config:`ner_conll2003_torch_bert ` | | | --- | 1.1 GB | 88.6 | + | :config:`ner_conll2003_torch_bert ` | | | --- | 1.3 GB | 90.7 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ | :config:`ner_conll2003 ` | | | 331 MB | 3.1 MB | 89.9 | +------------------------------------------------------------------------+ + +-----------------+------------+------------+ @@ -67,7 +67,7 @@ Models can be used from Python using the following code: from deeppavlov import configs, build_model - ner_model = build_model(configs.ner.ner_ontonotes_bert, download=True) + ner_model = build_model(configs.ner.ner_ontonotes_bert_torch, download=True) ner_model(['Bob Ross lived in Florida']) >>> [[['Bob', 'Ross', 'lived', 'in', 'Florida']], [['B-PERSON', 'I-PERSON', 'O', 'O', 'B-GPE']]] @@ -78,7 +78,7 @@ The model also can be trained from the Python: from deeppavlov import configs, train_model - ner_model = train_model(configs.ner.ner_ontonotes_bert) + ner_model = train_model(configs.ner.ner_ontonotes_bert_torch) The data for training should be placed in the folder provided in the config: @@ -87,7 +87,7 @@ The data for training should be placed in the 
folder provided in the config: from deeppavlov import configs, train_model from deeppavlov.core.commands.utils import parse_config - config_dict = parse_config(configs.ner.ner_ontonotes_bert) + config_dict = parse_config(configs.ner.ner_ontonotes_bert_torch) print(config_dict['dataset_reader']['data_path']) >>> '~/.deeppavlov/downloads/ontonotes' @@ -102,7 +102,7 @@ Multilingual BERT Zero-Shot Transfer ------------------------------------ Multilingual BERT models allow to perform zero-shot transfer from one language to another. The model -:config:`ner_ontonotes_bert_mult ` was trained on OntoNotes corpus which has 19 types +:config:`ner_ontonotes_bert_mult_torch ` was trained on OntoNotes corpus which has 19 types in the markup schema. The model performance was evaluated on Russian corpus Collection 3 [1]_. Results of the transfer are presented in the table below. @@ -123,7 +123,7 @@ The following Python code can be used to infer the model: from deeppavlov import configs, build_model - ner_model = build_model(configs.ner.ner_ontonotes_bert_mult, download=True) + ner_model = build_model(configs.ner.ner_ontonotes_bert_mult_torch, download=True) ner_model(['Curling World Championship will be held in Antananarivo']) >>> (['Curling', 'World', 'Championship', 'will', 'be', 'held', 'in', 'Antananarivo']], diff --git a/docs/features/models/neural_ranking.rst b/docs/features/models/neural_ranking.rst index 891358d9b8..a02f089f4d 100644 --- a/docs/features/models/neural_ranking.rst +++ b/docs/features/models/neural_ranking.rst @@ -76,29 +76,6 @@ Next time you will use the model, built vector representations will be loaded. Ranking ~~~~~~~ -Before using the model make sure that all required packages are installed running the command: - -.. code:: bash - - python -m deeppavlov install ranking_insurance - -To train the model on the `InsuranceQA V1`_ dataset one can use the following code in python: - -.. code:: python - - from deeppavlov import configs, train_model - - rank_model = train_model(configs.ranking.ranking_insurance, download=True) - -To train from command line: - -:: - - python -m deeppavlov train deeppavlov/configs/ranking/ranking_insurance.json [-d] - -As an example of configuration file see -:config:`ranking_insurance.json `. - To use Sequential Matching Network (SMN) or Deep Attention Matching Network (DAM) or Deep Attention Matching Network with Universal Sentence Encoder (DAM-USE-T) on the `Ubuntu V2`_ for inference, please run one of the following commands: @@ -106,7 +83,6 @@ on the `Ubuntu V2`_ for inference, please run one of the following commands: :: python -m deeppavlov interact -d ranking_ubuntu_v2_mt_word2vec_smn - python -m deeppavlov interact -d ranking_ubuntu_v2_mt_word2vec_dam python -m deeppavlov interact -d ranking_ubuntu_v2_mt_word2vec_dam_transformer Now a user can enter a dialog consists of 10 context sentences and several (>=1) candidate response sentences separated by '&' @@ -122,60 +98,17 @@ To train the models on the `Ubuntu V2`_ dataset please run one of the following :: python -m deeppavlov train -d ranking_ubuntu_v2_mt_word2vec_smn - python -m deeppavlov train -d ranking_ubuntu_v2_mt_word2vec_dam python -m deeppavlov train -d ranking_ubuntu_v2_mt_word2vec_dam_transformer As an example of configuration file see :config:`ranking_ubuntu_v2_mt_word2vec_smn.json `. - -To use the model trained on the `InsuranceQA V1`_ dataset for -inference one can use the following code in python: - -.. 
code:: python - - from deeppavlov import build_model, configs - - rank_model = build_model(configs.ranking.ranking_insurance_interact, download=True) - rank_model(['how much to pay for auto insurance?']) - - >>> ['the cost of auto insurance be based on several factor include your driving record , claim history , type of vehicle , credit score where you live and how far you travel to and from work I will recommend work with an independent agent who can shop several company find the good policy for you', 'there be not any absolute answer to this question rate for auto insurance coverage can vary greatly from carrier to carrier and from area to area contact local agent in your area find out about coverage availablity and pricing within your area look for an agent that you be comfortable working with as they will be the first last point of contact in most instance', 'the cost of auto insurance coverage for any vehicle or driver can vary greatly thing that effect your auto insurance rate be geographical location , vehicle , age (s) of driver (s) , type of coverage desire , motor vehicle record of all driver , credit rating of all driver and more contact a local agent get a quote a quote cost nothing but will let you know where your rate will'] - - -By default the model returns the ``interact_pred_num`` most relevant responses from all responses the model saw during training time. -To get predictions on your own list of responses use the following code: - -.. code:: python - - from deeppavlov import build_model, configs - - rank_model = build_model(configs.ranking.ranking_insurance_interact, download=True) - predictor = rank_model.pipe[-1][-1] - candidates = ['auto insurance', 'life insurance', 'home insurance'] - predictor.rebuild_responses(candidates) - rank_model(['how much to pay for auto insurance?']) - - >>> [['auto insurance']] - If the model with multi-turn context is used (such as :class:`~deeppavlov.models.ranking.bilstm_gru_siamese_network.BiLSTMGRUSiameseNetwork` with the parameter ``num_context_turns`` set to the value higher than 1 in the configuration JSON file) then the ``context`` to evaluate should consist of ``num_context_turns`` strings connected by the ampersand. Some of these strings can be empty, i.e. equal to ``''``. -To run the model for inference from command line: - -:: - - python -m deeppavlov interact deeppavlov/configs/ranking/ranking_insurance_interact.json [-d] - -Then a user can enter a context and get responses: - -:: - - :: how much to pay for auto insurance? 
- >> ['the cost of auto insurance be based on several factor include your driving record , claim history , type of vehicle , credit score where you live and how far you travel to and from work I will recommend work with an independent agent who can shop several company find the good policy for you', 'there be not any absolute answer to this question rate for auto insurance coverage can vary greatly from carrier to carrier and from area to area contact local agent in your area find out about coverage availablity and pricing within your area look for an agent that you be comfortable working with as they will be the first last point of contact in most instance', 'the cost of auto insurance coverage for any vehicle or driver can vary greatly thing that effect your auto insurance rate be geographical location , vehicle , age (s) of driver (s) , type of coverage desire , motor vehicle record of all driver , credit rating of all driver and more contact a local agent get a quote a quote cost nothing but will let you know where your rate will'] - Paraphrase identification ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -189,9 +122,6 @@ Before using the model make sure that all required packages are installed runnin python -m deeppavlov install paraphrase_ident_paraphraser python -m deeppavlov install elmo_paraphraser_fine_tuning - python -m deeppavlov install paraphrase_ident_paraphraser_elmo - python -m deeppavlov install paraphrase_ident_paraphraser_pretrain - python -m deeppavlov install paraphrase_ident_paraphraser_tune To train the model on the `paraphraser.ru`_ dataset with fasttext embeddings one can use the following code in python: @@ -210,101 +140,6 @@ To train the model on the `paraphraser.ru`_ dataset with fine-tuned ELMO embeddi para_model = train_model(configs.elmo.elmo_paraphraser_fine_tuning, download=True) -To train the model itself with fine-tuned embeddings: - -.. code:: python - - from deeppavlov import configs, train_model - - para_model = train_model(configs.elmo.paraphrase_ident_paraphraser_elmo, download=True) - -The fine-tuned ELMO embeddings obtained at the previous step can be downloaded directly -from the :config:`paraphrase_ident_paraphraser_elmo.json `. - -To train the model on the `paraphraser.ru`_ dataset with pre-training one should first train the model -on the additionally collected dataset: - -.. code:: python - - from deeppavlov import configs, train_model - - para_model = train_model(configs.elmo.paraphrase_ident_paraphraser_pretrain, download=True) - -To fine-tune the model on the target dataset: - -.. code:: python - - from deeppavlov import configs, train_model - - para_model = train_model(configs.elmo.paraphrase_ident_paraphraser_tune , download=True) - -The pre-trained model obtained at the previous step can be downloaded directly -from the :config:`paraphrase_ident_paraphraser_tune.json `. - -To use the model trained on the `paraphraser.ru`_ dataset for -inference, one can use the following code in python: - -.. code:: python - - from deeppavlov import build_model, configs - - para_model = build_model(configs.ranking.paraphrase_ident_tune_interact, download=True) - para_model(['9 мая метрополитен Петербурга будет работать круглосуточно&Петербургское метро в ночь на 10 мая будет работать круглосуточно']) - >>> 'This is a paraphrase.' - -Quora question pairs dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before using the model make sure that all required packages are installed running the command: - -.. 
code:: bash - - python -m deeppavlov install paraphrase_ident_qqp - -To train the model on the `Quora Question Pairs`_ dataset one can use the following code in python: - -.. code:: python - - from deeppavlov import configs, train_model - - para_model = train_model(configs.ranking.paraphrase_ident_qqp, download=True) - -To train from command line: - -:: - - python -m deeppavlov train deeppavlov/configs/ranking/paraphrase_ident_qqp.json [-d] - -As an example of configuration file see -:config:`paraphrase_ident_qqp.json `. - - -To use the model trained on the `Quora Question Pairs`_ dataset for -inference, one can use the following code in python: - -.. code:: python - - from deeppavlov import build_model, configs - - para_model = build_model(configs.ranking.paraphrase_ident_qqp_interact, download=True) - para_model(['How can I be a good geologist?&What should I do to be a great geologist?']) - >>> 'This is a paraphrase.' - -Note that two sentences to evaluate are connected by the ampersand. - -To use the model for inference from command line: - -:: - - python -m deeppavlov interact deeppavlov/configs/ranking/paraphrase_ident_qqp_interact.json [-d] - -Now a user can enter two sentences and the model will make a prediction whether these sentences are paraphrases or not. - -:: - - :: How can I be a good geologist?&What should I do to be a great geologist? - >> This is a paraphrase. - Training and inference on your own data --------------------------------------- @@ -372,7 +207,5 @@ Instead of ``response`` and ``context`` it can be simply two phrases which are p Classification metrics on the valid and test dataset parts (the parameter ``metrics`` in the JSON configuration file) such as ``f1``, ``acc`` and ``log_loss`` can be calculated. -.. _`InsuranceQA V1`: https://github.com/shuzi/insuranceQA .. _`paraphraser.ru`: https://paraphraser.ru -.. _`Quora Question Pairs`: https://www.kaggle.com/c/quora-question-pairs/data .. _`Ubuntu V2`: https://github.com/rkadlec/ubuntu-ranking-dataset-creator diff --git a/docs/features/models/squad.rst b/docs/features/models/squad.rst index b864948aee..128ea07627 100644 --- a/docs/features/models/squad.rst +++ b/docs/features/models/squad.rst @@ -48,7 +48,7 @@ BERT outputs for each subtoken. First/second linear transformation is used for p subtoken is start/end position of an answer. BERT for SQuAD model documentation on TensorFlow :class:`~deeppavlov.models.bert.bert_squad.BertSQuADModel` -and on PyTorch :class:`~deeppavlov.models.torch_bert.torch_bert_squad.TorchBertSQuADModel`. +and on PyTorch :class:`~deeppavlov.models.torch_bert.torch_transformers_squad:TorchTransformersSquad`. R-Net ~~~~~ @@ -149,7 +149,7 @@ Leadearboad `__. 
+=========================================================+================+=================+ | :config:`DeepPavlov BERT ` | 80.88 | 88.49 | +---------------------------------------------------------+----------------+-----------------+ -| :config:`BERT on PyTorch ` | 80.79 | 88.30 | +| :config:`BERT on PyTorch ` | 78.8 | 86.7 | +---------------------------------------------------------+----------------+-----------------+ | :config:`DeepPavlov R-Net ` | 71.49 | 80.34 | +---------------------------------------------------------+----------------+-----------------+ diff --git a/docs/features/overview.rst b/docs/features/overview.rst index 4c108096ed..7a515a40cb 100644 --- a/docs/features/overview.rst +++ b/docs/features/overview.rst @@ -20,27 +20,31 @@ The second model reproduces architecture from the paper `Application of a Hybrid Bi-LSTM-CRF model to the task of Russian Named Entity Recognition `__ which is inspired by Bi-LSTM+CRF architecture from https://arxiv.org/pdf/1603.01360.pdf. -+---------------------------------------------------------+-------+-----------------------------------------------------------------------------+-------------+ -| Dataset | Lang | Model | Test F1 | -+=========================================================+=======+=============================================================================+=============+ -| Persons-1000 dataset with additional LOC and ORG markup | Ru | :config:`ner_rus_bert.json ` | 98.1 | -+ + +-----------------------------------------------------------------------------+-------------+ -| (Collection 3) | | :config:`ner_rus.json ` | 95.1 | -+---------------------------------------------------------+-------+-----------------------------------------------------------------------------+-------------+ -| Ontonotes | Multi | :config:`ner_ontonotes_bert_mult.json ` | 88.8 | -+ +-------+-----------------------------------------------------------------------------+-------------+ -| | En | :config:`ner_ontonotes_bert.json ` | 88.6 | -+ + +-----------------------------------------------------------------------------+-------------+ -| | | :config:`ner_ontonotes.json ` | 87.1 | -+---------------------------------------------------------+ +-----------------------------------------------------------------------------+-------------+ -| ConLL-2003 | | :config:`ner_conll2003_bert.json ` | 91.7 | -+ + +-----------------------------------------------------------------------------+-------------+ -| | | :config:`ner_conll2003_torch_bert.json ` | 88.6 | -+ + +-----------------------------------------------------------------------------+-------------+ -| | | :config:`ner_conll2003.json ` | 89.9 | -+---------------------------------------------------------+ +-----------------------------------------------------------------------------+-------------+ -| DSTC2 | | :config:`ner_dstc2.json ` | 97.1 | -+---------------------------------------------------------+-------+-----------------------------------------------------------------------------+-------------+ ++---------------------------------------------------------+-------+--------------------------------------------------------------------------------------------+-------------+ +| Dataset | Lang | Model | Test F1 | ++=========================================================+=======+============================================================================================+=============+ +| Persons-1000 dataset with additional LOC and ORG markup | Ru | :config:`ner_rus_bert.json ` | 98.1 | ++ + 
+--------------------------------------------------------------------------------------------+-------------+ +| (Collection 3) | | :config:`ner_rus.json ` | 95.1 | ++ + +--------------------------------------------------------------------------------------------+-------------+ +| | | :config:`ner_rus_convers_distilrubert_2L.json ` | 88.4 ± 0.5 | ++ + +--------------------------------------------------------------------------------------------+-------------+ +| | | :config:`ner_rus_convers_distilrubert_6L.json ` | 93.3 ± 0.3 | ++---------------------------------------------------------+-------+--------------------------------------------------------------------------------------------+-------------+ +| Ontonotes | Multi | :config:`ner_ontonotes_bert_mult.json ` | 88.8 | ++ +-------+--------------------------------------------------------------------------------------------+-------------+ +| | En | :config:`ner_ontonotes_bert.json ` | 88.6 | ++ + +--------------------------------------------------------------------------------------------+-------------+ +| | | :config:`ner_ontonotes.json ` | 87.1 | ++---------------------------------------------------------+ +--------------------------------------------------------------------------------------------+-------------+ +| ConLL-2003 | | :config:`ner_conll2003_bert.json ` | 91.7 | ++ + +--------------------------------------------------------------------------------------------+-------------+ +| | | :config:`ner_conll2003_torch_bert.json ` | 88.6 | ++ + +--------------------------------------------------------------------------------------------+-------------+ +| | | :config:`ner_conll2003.json ` | 89.9 | ++---------------------------------------------------------+ +--------------------------------------------------------------------------------------------+-------------+ +| DSTC2 | | :config:`ner_dstc2.json ` | 97.1 | ++---------------------------------------------------------+-------+--------------------------------------------------------------------------------------------+-------------+ Slot filling models :doc:`[docs] ` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -63,61 +67,65 @@ BiLSTM with self-attention and other models are presented. The model also allows Several pre-trained models are available and presented in Table below. 
-+------------------+--------------------+------+-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Task | Dataset | Lang | Model | Metric | Valid | Test | Downloads | -+==================+====================+======+=================================================================================================+=============+========+========+===========+ -| 28 intents | `DSTC 2`_ | En | :config:`DSTC 2 emb ` | Accuracy | 0.7613 | 0.7733 | 800 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Wiki emb ` | | 0.9629 | 0.9617 | 8.5 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`BERT ` | | 0.9673 | 0.9636 | 800 Mb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| 7 intents | `SNIPS-2017`_ [1]_ | | :config:`DSTC 2 emb ` | F1-macro | 0.8591 | -- | 800 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Wiki emb ` | | 0.9820 | -- | 8.5 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Tfidf + SelectKBest + PCA + Wiki emb ` | | 0.9673 | -- | 8.6 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Wiki emb weighted by Tfidf ` | | 0.9786 | -- | 8.5 Gb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Insult detection | `Insults`_ | | :config:`Reddit emb ` | ROC-AUC | 0.9263 | 0.8556 | 6.2 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`English BERT ` | | 0.9255 | 0.8612 | 1200 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`English Conversational BERT ` | | 0.9389 | 0.8941 | 1200 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`English BERT on PyTorch ` | | 0.9329 | 0.877 | 1.1 Gb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| 5 topics | `AG News`_ | | :config:`Wiki emb ` | Accuracy | 0.8922 | 0.9059 | 8.5 Gb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Intent |`Yahoo-L31`_ | | :config:`Yahoo-L31 on conversational BERT ` | ROC-AUC | 0.9436 | -- | 1200 Mb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Sentiment 
|`SST`_ | | :config:`5-classes SST on conversational BERT ` | Accuracy | 0.6456 | 0.6715 | 400 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`5-classes SST on multilingual BERT ` | | 0.5738 | 0.6024 | 660 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`3-classes SST SWCNN on PyTorch ` | | 0.7379 | 0.6312 | 4.3 Mb | -+ +--------------------+ +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| |`Yelp`_ | | :config:`5-classes Yelp on conversational BERT ` | | 0.6925 | 0.6842 | 400 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`5-classes Yelp on multilingual BERT ` | | 0.5896 | 0.5874 | 660 Mb | -+------------------+--------------------+------+-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Sentiment |`Twitter mokoron`_ | Ru | :config:`RuWiki+Lenta emb w/o preprocessing ` | | 0.9965 | 0.9961 | 6.2 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`RuWiki+Lenta emb with preprocessing ` | | 0.7823 | 0.7759 | 6.2 Gb | -+ +--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| |`RuSentiment`_ | | :config:`RuWiki+Lenta emb ` | F1-weighted | 0.6541 | 0.7016 | 6.2 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Twitter emb super-convergence ` [2]_ | | 0.7301 | 0.7576 | 3.4 Gb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`ELMo ` | | 0.7519 | 0.7875 | 700 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Multi-language BERT ` | | 0.6809 | 0.7193 | 1900 Mb | -+ + + +-------------------------------------------------------------------------------------------------+ +--------+--------+-----------+ -| | | | :config:`Conversational RuBERT ` | | 0.7548 | 0.7742 | 657 Mb | -+------------------+--------------------+ +-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ -| Intent |Ru like`Yahoo-L31`_ | | :config:`Conversational vs Informational on ELMo ` | ROC-AUC | 0.9412 | -- | 700 Mb | -+------------------+--------------------+------+-------------------------------------------------------------------------------------------------+-------------+--------+--------+-----------+ ++------------------+---------------------+------+----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Task | Dataset | Lang | Model | Metric | Valid | Test | Downloads | 
++==================+=====================+======+====================================================================================================+=============+==================+=================+===========+ +| 28 intents | `DSTC 2`_ | En | :config:`DSTC 2 emb ` | Accuracy | 0.7613 | 0.7733 | 800 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Wiki emb ` | | 0.9629 | 0.9617 | 8.5 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`BERT ` | | 0.9673 | 0.9636 | 800 Mb | ++------------------+---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| 7 intents | `SNIPS-2017`_ [1]_ | | :config:`DSTC 2 emb ` | F1-macro | 0.8591 | -- | 800 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Wiki emb ` | | 0.9820 | -- | 8.5 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Tfidf + SelectKBest + PCA + Wiki emb ` | | 0.9673 | -- | 8.6 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Wiki emb weighted by Tfidf ` | | 0.9786 | -- | 8.5 Gb | ++------------------+---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Insult detection | `Insults`_ | | :config:`Reddit emb ` | ROC-AUC | 0.9263 | 0.8556 | 6.2 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`English BERT ` | | 0.9255 | 0.8612 | 1200 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`English Conversational BERT ` | | 0.9389 | 0.8941 | 1200 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`English BERT on PyTorch ` | | 0.9329 | 0.877 | 1.1 Gb | ++------------------+---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| 5 topics | `AG News`_ | | :config:`Wiki emb ` | Accuracy | 0.8922 | 0.9059 | 8.5 Gb | ++------------------+---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Intent | `Yahoo-L31`_ | | :config:`Yahoo-L31 on conversational BERT ` | ROC-AUC | 0.9436 | -- | 1200 Mb | ++------------------+---------------------+ 
+----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Sentiment | `SST`_ | | :config:`5-classes SST on conversational BERT ` | Accuracy | 0.6456 | 0.6715 | 400 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`5-classes SST on multilingual BERT ` | | 0.5738 | 0.6024 | 660 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`3-classes SST SWCNN on PyTorch ` | | 0.7379 | 0.6312 | 4.3 Mb | ++ +---------------------+ +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | `Yelp`_ | | :config:`5-classes Yelp on conversational BERT ` | | 0.6925 | 0.6842 | 400 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`5-classes Yelp on multilingual BERT ` | | 0.5896 | 0.5874 | 660 Mb | ++------------------+---------------------+------+----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Sentiment | `Twitter mokoron`_ | Ru | :config:`RuWiki+Lenta emb w/o preprocessing ` | | 0.9965 | 0.9961 | 6.2 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`RuWiki+Lenta emb with preprocessing ` | | 0.7823 | 0.7759 | 6.2 Gb | ++ +---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| | `RuSentiment`_ | | :config:`RuWiki+Lenta emb ` | F1-weighted | 0.6541 | 0.7016 | 6.2 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Twitter emb super-convergence ` [2]_ | | 0.7301 | 0.7576 | 3.4 Gb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`ELMo ` | | 0.7519 | 0.7875 | 700 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Multi-language BERT ` | | 0.6809 | 0.7193 | 1900 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Conversational RuBERT ` | | 0.7548 | 0.7742 | 657 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Conversational DistilRuBERT-tiny ` | | 0.703 ± 0.0031 | 0.7348 ± 0.0028 | 690 Mb | ++ + + +----------------------------------------------------------------------------------------------------+ +------------------+-----------------+-----------+ +| | | | :config:`Conversational 
DistilRuBERT-base ` | | 0.7376 ± 0.0045 | 0.7645 ± 0.035 | 1.0 Gb | ++------------------+---------------------+ +----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ +| Intent | Ru like`Yahoo-L31`_ | | :config:`Conversational vs Informational on ELMo ` | ROC-AUC | 0.9412 | -- | 700 Mb | ++------------------+---------------------+------+----------------------------------------------------------------------------------------------------+-------------+------------------+-----------------+-----------+ .. [1] Coucke A. et al. Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces //arXiv preprint arXiv:1805.10190. – 2018. .. [2] Smith L. N., Topin N. Super-convergence: Very fast training of residual networks using large learning rates. – 2018. @@ -227,97 +235,41 @@ Available pre-trained models for ranking: | | +-----------+-------+-------+-------+-----------+ | | | R10@1 | R10@1 | R10@2 | R10@5 | Downloads | +===================+======================================================================================================================+===========+=======+=======+=======+===========+ - | `InsuranceQA v1`_ | :config:`ranking_insurance_interact ` | 72.0 | 72.2 | -- | -- | 8374 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_mt_word2vec_dam_transformer ` | 74.32 | 74.46 | 86.77 | 97.38 | 2457 MB | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_mt_word2vec_dam ` | 71.20 | 71.54 | 83.66 | 96.33 | 1645 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_mt_word2vec_smn ` | 68.56 | 67.91 | 81.49 | 95.63 | 1609 MB | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V2`_ |:config:`ranking_ubuntu_v2_bert_uncased ` | 66.5 | 66.6 | -- | -- | 396 MB | + | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_bert_uncased ` | 66.5 | 66.6 | -- | -- | 396 MB | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V2`_ |:config:`ranking_ubuntu_v2_bert_uncased on PyTorch ` | 65.73 | 65.74 | -- | -- | 1.1 Gb | + | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_bert_uncased on PyTorch ` | 65.73 | 65.74 | -- | -- | 1.1 Gb | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V2`_ |:config:`ranking_ubuntu_v2_bert_sep ` | 66.5 | 66.5 | -- | -- | 396 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V2`_ | 
:config:`ranking_ubuntu_v2_interact ` | 52.9 | 52.4 | -- | -- | 8913 MB | + | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_bert_sep ` | 66.5 | 66.5 | -- | -- | 396 MB | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ | `Ubuntu V2`_ | :config:`ranking_ubuntu_v2_mt_interact ` | 59.2 | 58.7 | -- | -- | 8906 MB | +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V1`_ | :config:`ranking_ubuntu_v1_mt_word2vec_dam_transformer ` | -- | 79.57 | 89.32 | 97.34 | 2439 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V1`_ | :config:`ranking_ubuntu_v1_mt_word2vec_dam ` | -- | 77.95 | 88.07 | 97.06 | 1645 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ - | `Ubuntu V1`_ | :config:`ranking_ubuntu_v1_mt_word2vec_smn ` | -- | 75.90 | 87.16 | 96.80 | 1591 MB | - +-------------------+----------------------------------------------------------------------------------------------------------------------+-----------+-------+-------+-------+-----------+ -.. _`InsuranceQA V1`: https://github.com/shuzi/insuranceQA .. _`Ubuntu V2`: https://github.com/rkadlec/ubuntu-ranking-dataset-creator -.. _`Ubuntu V1`: https://arxiv.org/abs/1506.08909 Available pre-trained models for paraphrase identification: .. 
table:: :widths: auto - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - | Dataset |Model config | Val (accuracy)| Test (accuracy)| Val (F1)| Test (F1)| Val (log_loss)| Test (log_loss)|Downloads | - +========================+===============================================================================================+===============+================+=========+==========+===============+================+==========+ - |`paraphraser.ru`_ |:config:`paraphrase_ident_paraphraser_ft ` | 83.8 | 75.4 | 87.9 | 80.9 | 0.468 | 0.616 |5938M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`paraphraser.ru`_ |:config:`paraphrase_ident_paraphraser_elmo ` | 82.7 | 76.0 | 87.3 | 81.4 | 0.391 | 0.510 |5938M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`paraphraser.ru`_ |:config:`paraphrase_ident_paraphraser_tune ` | 82.9 | 76.7 | 87.3 | 82.0 | 0.392 | 0.479 |5938M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`paraphraser.ru`_ |:config:`paraphrase_bert_multilingual ` | 87.4 | 79.3 | 90.2 | 83.4 | -- | -- |1330M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`paraphraser.ru`_ |:config:`paraphrase_rubert ` | 90.2 | 84.9 | 92.3 | 87.9 | -- | -- |1325M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`Quora Question Pairs`_ |:config:`paraphrase_ident_qqp_bilstm ` | 87.1 | 87.0 | 83.0 | 82.6 | 0.300 | 0.305 |8134M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ - |`Quora Question Pairs`_ |:config:`paraphrase_ident_qqp ` | 86.8 | 87.1 | 82.3 | 83.0 | 0.304 | 0.297 |8136M | - +------------------------+-----------------------------------------------------------------------------------------------+---------------+----------------+---------+----------+---------------+----------------+----------+ + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ + | Dataset | Model config | Val (accuracy) | Test (accuracy) | Val (F1) | Test (F1) | Val (log_loss) | Test (log_loss) | Downloads | + 
+========================+======================================================================================================+================+=================+============+============+================+=================+===========+ + | `paraphraser.ru`_ | :config:`paraphrase_ident_paraphraser_ft ` | 83.8 | 75.4 | 87.9 | 80.9 | 0.468 | 0.616 | 5938M | + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ + | `paraphraser.ru`_ | :config:`paraphrase_bert_multilingual ` | 87.4 | 79.3 | 90.2 | 83.4 | -- | -- | 1330M | + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ + | `paraphraser.ru`_ | :config:`paraphrase_rubert ` | 90.2 | 84.9 | 92.3 | 87.9 | -- | -- | 1325M | + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ + | `paraphraser.ru`_ | :config:`paraphraser_convers_distilrubert_2L ` | 76.1 ± 0.2 | 64.5 ± 0.5 | 81.8 ± 0.2 | 73.9 ± 0.8 | -- | -- | 618M | + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ + | `paraphraser.ru`_ | :config:`paraphraser_convers_distilrubert_6L ` | 86.5 ± 0.5 | 78.9 ± 0.4 | 89.6 ± 0.3 | 83.2 ± 0.5 | -- | -- | 930M | + +------------------------+------------------------------------------------------------------------------------------------------+----------------+-----------------+------------+------------+----------------+-----------------+-----------+ .. _`paraphraser.ru`: https://paraphraser.ru/ -.. _`Quora Question Pairs`: https://www.kaggle.com/c/quora-question-pairs/data - -Comparison with other models on the `InsuranceQA V1 `__: - -+------------------------------------------------------------------------+-------------------------+--------------------+ -| Model | Validation (Recall@1) | Test1 (Recall@1) | -+========================================================================+=========================+====================+ -| `Architecture II (HLQA(200) CNNQA(4000) 1-MaxPooling Tanh)`_ | 61.8 | 62.8 | -+------------------------------------------------------------------------+-------------------------+--------------------+ -| `QA-LSTM basic-model(max pooling)`_ | 64.3 | 63.1 | -+------------------------------------------------------------------------+-------------------------+--------------------+ -| :config:`ranking_insurance ` | **72.0** | **72.2** | -+------------------------------------------------------------------------+-------------------------+--------------------+ - -.. _`Architecture II (HLQA(200) CNNQA(4000) 1-MaxPooling Tanh)`: https://arxiv.org/pdf/1508.01585.pdf -.. 
_`QA-LSTM basic-model(max pooling)`: https://arxiv.org/pdf/1511.04108.pdf - -Comparison with other models on the `Ubuntu Dialogue Corpus v1 `__ (test): - -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| Model | R@1 | R@2 | R@5 | -+=============================================================================================================================================+============+============+============+ -| SMN last [`Wu et al., 2017 `_] | 0.723 | 0.842 | 0.956 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| SMN last [DeepPavlov :config:`ranking_ubuntu_v1_mt_word2vec_smn `] | 0.754 | 0.869 | 0.967 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| DAM [`Zhou et al., 2018 `_] | 0.767 | 0.874 | 0.969 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| DAM [DeepPavlov :config:`ranking_ubuntu_v1_mt_word2vec_dam `] | 0.779 | 0.880 | 0.970 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| MRFN-FLS [`Tao et al., 2019 `_] | 0.786 | 0.886 | 0.976 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| IMN [`Gu et al., 2019 `_] | 0.777 | 0.880 | 0.974 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| IMN Ensemble [`Gu et al., 2019 `_] | 0.794 | 0.893 | **0.978** | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ -| DAM-USE-T [DeepPavlov :config:`ranking_ubuntu_v1_mt_word2vec_dam_transformer `] | **0.7957** | **0.8932** | 0.9734 | -+---------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+ Comparison with other models on the `Ubuntu Dialogue Corpus v2 `__ (test): @@ -331,8 +283,6 @@ Comparison with other models on the `Ubuntu Dialogue Corpus v2 `_] | -- | -- | -- | +---------------------------------------------------------------------------------------------------------------------------------------------+-----------+-----------+-----------+ -| DAM [DeepPavlov :config:`ranking_ubuntu_v2_mt_word2vec_dam `] | 0.7154 | 0.8366 | 0.9633 | -+---------------------------------------------------------------------------------------------------------------------------------------------+-----------+-----------+-----------+ | MRFN-FLS [`Tao et al., 2019 `_] | -- | -- | -- | +---------------------------------------------------------------------------------------------------------------------------------------------+-----------+-----------+-----------+ | IMN [`Gu et al., 2019 `_] | 0.771 | 0.886 | 0.979 | @@ 
-381,25 +331,29 @@ BERT-based model is described in `BERT: Pre-training of Deep Bidirectional Tran R-Net model is based on `R-NET: Machine Reading Comprehension with Self-matching Networks `__. -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| Dataset | Model config | lang | EM (dev) | F-1 (dev) | Downloads | -+===============+========================================================================+=======+================+=================+=================+ -| `SQuAD-v1.1`_ | :config:`DeepPavlov BERT ` | en | 80.88 | 88.49 | 806Mb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| `SQuAD-v1.1`_ | :config:`DeepPavlov BERT on PyTorch ` | en | 80.79 | 88.30 | 1.1 Gb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| `SQuAD-v1.1`_ | :config:`DeepPavlov R-Net ` | en | 71.49 | 80.34 | ~2.5Gb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| SDSJ Task B | :config:`DeepPavlov RuBERT ` | ru | 66.30+-0.24 | 84.60+-0.11 | 1325Mb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| SDSJ Task B | :config:`DeepPavlov multilingual BERT `| ru | 64.35+-0.39 | 83.39+-0.08 | 1323Mb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| SDSJ Task B | :config:`DeepPavlov R-Net ` | ru | 60.62 | 80.04 | ~5Gb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| `DRCD`_ | :config:`DeepPavlov multilingual BERT ` | ch | 84.86 | 89.03 | 630Mb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ -| `DRCD`_ | :config:`DeepPavlov Chinese BERT ` | ch | 84.19 | 89.23 | 362Mb | -+---------------+------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| Dataset | Model config | lang | EM (dev) | F-1 (dev) | Downloads | ++================+=============================================================================================+=======+================+=================+=================+ +| `SQuAD-v1.1`_ | :config:`DeepPavlov BERT ` | en | 80.88 | 88.49 | 806Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SQuAD-v1.1`_ | :config:`DeepPavlov BERT on PyTorch ` | en | 80.79 | 88.30 | 1.1 Gb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SQuAD-v1.1`_ | :config:`DeepPavlov R-Net ` | en | 71.49 | 80.34 | ~2.5Gb | 
++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SDSJ Task B`_ | :config:`DeepPavlov RuBERT ` | ru | 66.30 ± 0.24 | 84.60 ± 0.11 | 1325Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SDSJ Task B`_ | :config:`DeepPavlov multilingual BERT ` | ru | 64.35 ± 0.39 | 83.39 ± 0.08 | 1323Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SDSJ Task B`_ | :config:`DeepPavlov R-Net ` | ru | 60.62 | 80.04 | ~5Gb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SDSJ Task B`_ | :config:`DeepPavlov DistilRuBERT-tiny ` | ru | 44.2 ± 0.46 | 65.1 ± 0.36 | 867Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `SDSJ Task B`_ | :config:`DeepPavlov DistilRuBERT-base ` | ru | 61.23 ± 0.42 | 80.36 ± 0.28 | 1.18Gb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `DRCD`_ | :config:`DeepPavlov multilingual BERT ` | ch | 84.86 | 89.03 | 630Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ +| `DRCD`_ | :config:`DeepPavlov Chinese BERT ` | ch | 84.19 | 89.23 | 362Mb | ++----------------+---------------------------------------------------------------------------------------------+-------+----------------+-----------------+-----------------+ In the case when answer is not necessary present in given context we have :config:`squad_noans ` model. This model outputs empty string in case if there is no answer in context. @@ -423,31 +377,31 @@ For more scores see :doc:`full table `. +----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ | Dataset | Model | Word accuracy | Sent. 
accuracy | Download size (MB) | +======================+==============================================================================================================+===============+================+====================+ - |`UD2.3`_ (Russian) |`UD Pipe 2.3`_ (Straka et al., 2017) | 93.5 | | | + | `UD2.3`_ (Russian) | `UD Pipe 2.3`_ (Straka et al., 2017) | 93.5 | | | | +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ | | `UD Pipe Future`_ (Straka et al., 2018) | 96.90 | | | | +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`BERT-based model ` | 97.83 | 72.02 | 661 | + | | :config:`BERT-based model ` | 97.83 | 72.02 | 661 | +----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |`Pymorphy`_ + `russian_tagsets`_ (first tag) | 60.93 | 0.00 | | + | | `Pymorphy`_ + `russian_tagsets`_ (first tag) | 60.93 | 0.00 | | + +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - |`UD2.0`_ (Russian) |`UD Pipe 1.2`_ (Straka et al., 2017) | 93.57 | 43.04 | | + | `UD2.0`_ (Russian) | `UD Pipe 1.2`_ (Straka et al., 2017) | 93.57 | 43.04 | | + +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`Basic model ` | 95.17 | 50.58 | 48.7 | + | | :config:`Basic model ` | 95.17 | 50.58 | 48.7 | + +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`Pymorphy-enhanced model ` | **96.23** | 58.00 | 48.7 | + | | :config:`Pymorphy-enhanced model ` | **96.23** | 58.00 | 48.7 | +----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | `UD2.0`_ (Czech) |`UD Pipe 1.2`_ (Straka et al., 2017) | 91.86 | 42.28 | | + | `UD2.0`_ (Czech) | `UD Pipe 1.2`_ (Straka et al., 2017) | 91.86 | 42.28 | | | +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`Basic model ` | **94.35** | 51.56 | 41.8 | + | | :config:`Basic model ` | **94.35** | 51.56 | 41.8 | +----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - |`UD2.0`_ (English) |`UD Pipe 1.2`_ (Straka et al., 2017) | 92.89 | 55.75 | | + | `UD2.0`_ (English) | `UD Pipe 1.2`_ (Straka et al., 2017) | 92.89 | 55.75 | | | +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`Basic model ` | **93.00** | 55.18 | 16.9 | + | | :config:`Basic model ` | **93.00** | 55.18 | 16.9 | 
+----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - |`UD2.0`_ (German) |`UD Pipe 1.2`_ (Straka et al., 2017) | 76.65 | 10.24 | | + | `UD2.0`_ (German) | `UD Pipe 1.2`_ (Straka et al., 2017) | 76.65 | 10.24 | | | +--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ - | |:config:`Basic model ` | **83.83** | 15.25 | 18.6 | + | | :config:`Basic model ` | **83.83** | 15.25 | 18.6 | +----------------------+--------------------------------------------------------------------------------------------------------------+---------------+----------------+--------------------+ .. _`Pymorphy`: https://pymorphy2.readthedocs.io/en/latest/ @@ -477,7 +431,7 @@ on ``ru_syntagrus`` Russian corpus (version UD 2.3). | +-------------------------------------------------------------------------------------------+---------+----------+ | | `UDify (multilingual BERT)`_ (Kondratyuk, 2018) | 94.8 | 93.1 | | +-------------------------------------------------------------------------------------------+---------+----------+ - | |:config:`our BERT model ` | 95.2 | 93.7 | + | | :config:`our BERT model ` | 95.2 | 93.7 | +-------------------------+-------------------------------------------------------------------------------------------+---------+----------+ .. _`UD2.3`: http://hdl.handle.net/11234/1-2895 @@ -535,13 +489,13 @@ based on its Wikipedia knowledge. +----------------+--------------------------------------------------------------------+-----------------------+--------+-----------+ | Dataset | Model config | Wiki dump | F1 | Downloads | +================+====================================================================+=======================+========+===========+ -| `SQuAD-v1.1`_ |:config:`ODQA ` | enwiki (2018-02-11) | 35.89 | 9.7Gb | +| `SQuAD-v1.1`_ | :config:`ODQA ` | enwiki (2018-02-11) | 35.89 | 9.7Gb | +----------------+--------------------------------------------------------------------+-----------------------+--------+-----------+ -| `SQuAD-v1.1`_ |:config:`ODQA ` | enwiki (2016-12-21) | 37.83 | 9.3Gb | +| `SQuAD-v1.1`_ | :config:`ODQA ` | enwiki (2016-12-21) | 37.83 | 9.3Gb | +----------------+--------------------------------------------------------------------+-----------------------+--------+-----------+ -| `SDSJ Task B`_ |:config:`ODQA ` | ruwiki (2018-04-01) | 28.56 | 7.7Gb | +| `SDSJ Task B`_ | :config:`ODQA ` | ruwiki (2018-04-01) | 28.56 | 7.7Gb | +----------------+--------------------------------------------------------------------+-----------------------+--------+-----------+ -| `SDSJ Task B`_ |:config:`ODQA with RuBERT ` | ruwiki (2018-04-01) | 37.83 | 4.3Gb | +| `SDSJ Task B`_ | :config:`ODQA with RuBERT ` | ruwiki (2018-04-01) | 37.83 | 4.3Gb | +----------------+--------------------------------------------------------------------+-----------------------+--------+-----------+ @@ -616,5 +570,5 @@ goal-oriented bot and a slot-filling model with Telegram UI. .. _`SQuAD-v1.1`: https://arxiv.org/abs/1606.05250 -.. _`SDSJ Task B`: https://sdsj.sberbank.ai/2017/ru/contest.html +.. _`SDSJ Task B`: https://arxiv.org/abs/1912.09723 .. 
_`DRCD`: https://arxiv.org/abs/1806.00920 diff --git a/docs/features/skills/go_bot.rst b/docs/features/skills/go_bot.rst index 267f6add39..e585ab8e55 100644 --- a/docs/features/skills/go_bot.rst +++ b/docs/features/skills/go_bot.rst @@ -339,11 +339,19 @@ If some required packages are missing, install all the requirements by running i How Do I: Build Go-Bot with DSTC2 =================================== -DSTC is a set of competitions originally known as "Dialog State Tracking Challenges" (DSTC, for short). First challenge was organized in 2012-2013. Starting as an initiative to provide a common testbed for the task of Dialog State Tracking, the first Dialog State Tracking Challenge (DSTC) was organized in 2013, followed by DSTC2&3 in 2014, DSTC4 in 2015, and DSTC5 in 2016. Given the remarkable success of the first five editions, and understanding both, the complexity of the dialog phenomenon and the interest of the research community in a wider variety of dialog related problems, the DSTC rebranded itself as "Dialog System Technology Challenges" for its sixth edition. Then, DSTC6 and DSTC7 have been completed in 2017 and 2018, respectively. - -DSTC-2 released a large number of training dialogs related to restaurant search. Compared to DSTC (which was in the bus timetables domain), DSTC 2 introduced changing user goals, tracking 'requested slots' as well as the new Restaurants domain. - -Historically, DeepPavlov's Go-Bot used this DSTC-2 approach to defining domain model and behavior of the goal-oriented bots. In this section you will learn how to use this approach to build a DSTC-2-based Go-Bot. +DSTC is a set of competitions originally known as "Dialog State Tracking Challenges" (DSTC, for short). First challenge +was organized in 2012-2013. Starting as an initiative to provide a common testbed for the task of Dialog State Tracking, +the first Dialog State Tracking Challenge (DSTC) was organized in 2013, followed by DSTC2&3 in 2014, DSTC4 in 2015, +and DSTC5 in 2016. Given the remarkable success of the first five editions, and understanding both, the complexity +of the dialog phenomenon and the interest of the research community in a wider variety of dialog related problems, +the DSTC rebranded itself as "Dialog System Technology Challenges" for its sixth edition. Then, DSTC6 and DSTC7 have +been completed in 2017 and 2018, respectively. + +DSTC-2 released a large number of training dialogs related to restaurant search. Compared to DSTC (which was in the bus +timetables domain), DSTC 2 introduced changing user goals, tracking 'requested slots' as well as the new Restaurants domain. + +Historically, DeepPavlov's Go-Bot used this DSTC-2 approach to defining domain model and behavior of the goal-oriented bots. +In this section you will learn how to use this approach to build a DSTC-2-based Go-Bot. Requirements ^^^^^^^^^^^^ diff --git a/docs/index.rst b/docs/index.rst index 7a4458d740..685c734e3f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -83,6 +83,14 @@ Welcome to DeepPavlov's documentation! Register your model +.. toctree:: + :glob: + :maxdepth: 3 + :caption: Internships + + Internships + + .. toctree:: :glob: :maxdepth: 3 diff --git a/docs/internships/internships.rst b/docs/internships/internships.rst new file mode 100644 index 0000000000..b18384dbd6 --- /dev/null +++ b/docs/internships/internships.rst @@ -0,0 +1,20 @@ + +Internships +=========== + +Do you have ideas on how to improve dialog systems for everyone? Are you ready to make an impact across the world? 
+Great, then join us! + +Let’s shape the future of Conversational AI together. An internship is for aspiring graduate and undergraduate students +who are passionate about Conversational AI technology and offer diverse perspectives. + +As an intern, you will work on some of the most ambitious technical problems, develop new ML solutions that will impact +future DeepPavlov products and make the lives of DeepPavlov users easier. + +All interns are paired with a mentor and will participate directly in DeepPavlov's groundbreaking work. +There are no restrictions on publications based on internships. International candidates are welcome to apply. + +Each of our research teams has specific test assignments for interested candidates, so please familiarize yourself +with our `projects `_ that best match your skills and interests. + +`Apply now at our website `_. diff --git a/docs/intro/quick_start.rst b/docs/intro/quick_start.rst index 2baf71b170..9f31ee475f 100644 --- a/docs/intro/quick_start.rst +++ b/docs/intro/quick_start.rst @@ -138,11 +138,11 @@ Using GPU To run or train **TensorFlow**-based DeepPavlov models on GPU you should have `CUDA `__ 10.0 installed on your host machine and TensorFlow with GPU support (``tensorflow-gpu``) -installed in your python environment. Current supported TensorFlow version is 1.15.2. Run +installed in your python environment. Current supported TensorFlow version is 1.15.5. Run .. code:: bash - pip install tensorflow-gpu==1.15.2 + pip install tensorflow-gpu==1.15.5 before installing model's package requirements to install supported ``tensorflow-gpu`` version. diff --git a/examples/classification_tutorial.ipynb b/examples/classification_tutorial.ipynb index 55d312e3c1..e7792ccd1e 100644 --- a/examples/classification_tutorial.ipynb +++ b/examples/classification_tutorial.ipynb @@ -1326,7 +1326,7 @@ "# \n", "# `Proba2Labels` converts probabilities to indices and supports three different modes:\n", "# if `max_proba` is true, returns indices of the highest probabilities\n", - "# if `confident_threshold` is given, returns indices with probabiltiies higher than threshold\n", + "# if `confidence_threshold` is given, returns indices with probabiltiies higher than threshold\n", "# if `top_n` is given, returns `top_n` indices with highest probabilities\n", "prob2labels = Proba2Labels(max_proba=True)" ] diff --git a/examples/gobot_md_yaml_configs_tutorial.ipynb b/examples/gobot_md_yaml_configs_tutorial.ipynb index bc533f564d..69d9431d51 100644 --- a/examples/gobot_md_yaml_configs_tutorial.ipynb +++ b/examples/gobot_md_yaml_configs_tutorial.ipynb @@ -2931,7 +2931,7 @@ "outputId": "44ecd8d0-2767-45b5-9432-741f995a384f" }, "source": [ - "!pip install rasa\n", + "!pip install rasa==1.10.10\n", "!python -m spacy download en_core_web_md\n", "!python -m spacy link en_core_web_md en" ], diff --git a/tests/test_quick_start.py b/tests/test_quick_start.py index 420b46bd6b..74ae65dedc 100644 --- a/tests/test_quick_start.py +++ b/tests/test_quick_start.py @@ -52,6 +52,8 @@ TWO_ARGUMENTS_INFER_CHECK = ('Dummy text', 'Dummy text', None) FOUR_ARGUMENTS_INFER_CHECK = ('Dummy text', 'Dummy text', 'Dummy text', 'Dummy_text', None) +LIST_ARGUMENTS_INFER_CHECK = (['Dummy text', 'Dummy text'], ['Dummy text', 'Dummy text'], None) + # Mapping from model name to config-model_dir-ispretrained and corresponding queries-response list. 
PARAMS = { "faq": { @@ -116,7 +118,11 @@ ("classifiers/sst_torch_swcnn.json", "classifiers", ('IP', 'TI')): [ONE_ARGUMENT_INFER_CHECK], ("classifiers/insults_kaggle_bert_torch.json", "classifiers", ('IP', 'TI')): [ONE_ARGUMENT_INFER_CHECK], ("classifiers/glue/glue_mrpc_cased_bert_torch.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK], - ("classifiers/glue/glue_stsb_cased_bert_torch.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK] + ("classifiers/glue/glue_stsb_cased_bert_torch.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK], + ("classifiers/glue/glue_mnli_roberta.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK], + ("classifiers/glue/glue_rte_roberta_mnli.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK], + ("classifiers/superglue/superglue_copa_roberta.json", "classifiers", ('TI',)): [LIST_ARGUMENTS_INFER_CHECK], + ("classifiers/superglue/superglue_boolq_roberta_mnli.json", "classifiers", ('TI',)): [TWO_ARGUMENTS_INFER_CHECK] }, "snips": { ("classifiers/intents_snips.json", "classifiers", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], @@ -138,6 +144,18 @@ ("classifiers/intents_sample_csv.json", "classifiers", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("classifiers/intents_sample_json.json", "classifiers", ('TI',)): [ONE_ARGUMENT_INFER_CHECK] }, + "distil": { + ("classifiers/paraphraser_convers_distilrubert_2L.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + ("classifiers/paraphraser_convers_distilrubert_6L.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + ("classifiers/rusentiment_convers_distilrubert_2L.json", "distil", ('IP')): [ONE_ARGUMENT_INFER_CHECK], + ("classifiers/rusentiment_convers_distilrubert_6L.json", "distil", ('IP')): [ONE_ARGUMENT_INFER_CHECK], + ("ner/ner_rus_convers_distilrubert_2L.json", "distil", ('IP')): [ONE_ARGUMENT_INFER_CHECK], + ("ner/ner_rus_convers_distilrubert_6L.json", "distil", ('IP')): [ONE_ARGUMENT_INFER_CHECK], + ("squad/squad_ru_convers_distilrubert_2L.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + ("squad/squad_ru_convers_distilrubert_2L_infer.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + ("squad/squad_ru_convers_distilrubert_6L.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + ("squad/squad_ru_convers_distilrubert_6L_infer.json", "distil", ('IP')): [TWO_ARGUMENTS_INFER_CHECK], + }, "entity_linking": { ("kbqa/entity_linking_rus.json", "entity_linking", ('IP',)): [ @@ -189,7 +207,9 @@ ("ner/slotfill_simple_rasa_raw.json", "slotfill_simple_rasa_raw", ('IP')): [ ("i see 1 cat", ({"number": '1'},))], ("ner/ner_conll2003_torch_bert.json", "ner_conll2003_torch_bert", ('IP', 'TI')): [ONE_ARGUMENT_INFER_CHECK], - ("ner/ner_rus_bert_torch.json", "ner_rus_bert_torch", ('IP', 'TI')): [ONE_ARGUMENT_INFER_CHECK] + ("ner/ner_rus_bert_torch.json", "ner_rus_bert_torch", ('IP', 'TI')): [ONE_ARGUMENT_INFER_CHECK], + ("ner/ner_ontonotes_bert_torch.json", "ner_ontonotes_bert_torch", ('IP')): [ONE_ARGUMENT_INFER_CHECK], + ("ner/ner_ontonotes_bert_mult_torch.json", "ner_ontonotes_bert_mult_torch", ('IP')): [ONE_ARGUMENT_INFER_CHECK] }, "sentence_segmentation": { ("sentence_segmentation/sentseg_dailydialog.json", "sentseg_dailydialog", ('IP', 'TI')): [ @@ -245,35 +265,14 @@ ("elmo/elmo_1b_benchmark_test.json", "elmo_1b_benchmark_test", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], }, "ranking": { - ("ranking/ranking_insurance.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_insurance_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - 
("ranking/ranking_ubuntu_v2.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_ubuntu_v2_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_mt.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_mt_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/paraphrase_ident_paraphraser.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_paraphraser_interact.json", "ranking", - ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_paraphraser_pretrain.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_paraphraser_tune.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_tune_interact.json", "ranking", - ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_paraphraser_elmo.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_elmo_interact.json", "ranking", - ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_qqp.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_qqp_bilstm_interact.json", "ranking", - ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_qqp_bilstm.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/paraphrase_ident_qqp_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], + ("ranking/paraphrase_ident_paraphraser_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_bert_uncased.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_bert_sep.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_bert_sep_interact.json", "ranking", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_ubuntu_v1_mt_word2vec_smn.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_ubuntu_v1_mt_word2vec_dam.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_ubuntu_v1_mt_word2vec_dam_transformer.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_mt_word2vec_smn.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], - ("ranking/ranking_ubuntu_v2_mt_word2vec_dam.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_mt_word2vec_dam_transformer.json", "ranking", ('TI',)): [ONE_ARGUMENT_INFER_CHECK], ("ranking/ranking_ubuntu_v2_mt_word2vec_dam_transformer.json", "ranking", ('IP',)): [(' & & & & & & & & bonhoeffer whar drives do you want to mount what & i have an ext3 usb drive ' @@ -301,6 +300,7 @@ ("squad/squad_zh_bert_zh.json", "squad_zh_bert_zh", ALL_MODES): [TWO_ARGUMENTS_INFER_CHECK], ("squad/squad_torch_bert.json", "squad_torch_bert", ('IP', 'TI')): [TWO_ARGUMENTS_INFER_CHECK], ("squad/squad_torch_bert_infer.json", "squad_torch_bert_infer", ('IP',)): [TWO_ARGUMENTS_INFER_CHECK], + ("squad/squad_ru_torch_bert.json", "squad_ru_torch_bert", ('IP',)): [TWO_ARGUMENTS_INFER_CHECK] }, "odqa": { ("odqa/en_odqa_infer_wiki_test.json", "odqa", ('IP',)): [ONE_ARGUMENT_INFER_CHECK], diff --git a/utils/prepare/upload.py b/utils/prepare/upload.py index d488f5bc3b..718dd8b03b 100644 --- a/utils/prepare/upload.py +++ b/utils/prepare/upload.py @@ -24,24 +24,38 @@ def upload(config_in_file): + + print(config_in_file) config_in = parse_config(config_in_file) config_in_file = find_config(config_in_file) model_path = 
Path(config_in['metadata']['variables']['MODEL_PATH']).expanduser() - + models_path = Path(config_in['metadata']['variables']['MODELS_PATH']).expanduser() model_name, class_name = config_in_file.stem, config_in_file.parent.name + + if str(model_name) not in str(model_path): + raise ValueError(f'{model_name} is not a part of the model path {model_path}') + + arcname = str(model_path).split("models/")[1] + tar_path = models_path/model_name + tmp_folder = '/tmp/' + tmp_tar = tmp_folder + f'{model_name}.tar.gz' - tmp_dir = f'/tmp/{class_name}' - tmp_tar = f'/tmp/{class_name}/{model_name}.tar.gz' - shutil.rmtree(tmp_dir, ignore_errors=True) - os.mkdir(tmp_dir) - - with tarfile.open(tmp_tar, "w:gz") as tar: - tar.add(model_path, arcname=model_name) + print("model_path", model_path) + print("class_name", class_name) + print("model_name", model_name) + + print("Start tarring") + archive = tarfile.open(tmp_tar, "w|gz") + archive.add(model_path, arcname=arcname) + archive.close() + print("Stop tarring") + print("Calculating hash") main(tmp_tar) - command = f'scp -r {tmp_dir} share.ipavlov.mipt.ru:/home/export/v1/' + print("tmp_tar", tmp_tar) + command = f'scp -r {tmp_folder}{model_name}* share.ipavlov.mipt.ru:/home/export/v1/{class_name}' donwload_url = f'http://files.deeppavlov.ai/v1/{class_name}/{model_name}.tar.gz' print(command, donwload_url, sep='\n')
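For reference, below is a minimal, self-contained sketch of the archive and URL naming that the reworked ``upload()`` relies on. It is not part of the patch: the model path, config stem, and class name are illustrative assumptions chosen to mirror the new GLUE config added above.

.. code:: python

    from pathlib import Path

    # Hypothetical inputs: a config named glue_mnli_roberta.json living under
    # deeppavlov/configs/classifiers/ with its MODEL_PATH under ~/.deeppavlov/models/.
    model_path = Path('~/.deeppavlov/models/classifiers/glue_mnli_roberta-large').expanduser()
    model_name = 'glue_mnli_roberta'   # corresponds to config_in_file.stem
    class_name = 'classifiers'         # corresponds to config_in_file.parent.name

    # arcname keeps the part of the path after "models/", so the archive
    # unpacks into the same classifiers/... layout on the file server.
    arcname = str(model_path).split('models/')[1]
    tmp_tar = f'/tmp/{model_name}.tar.gz'
    download_url = f'http://files.deeppavlov.ai/v1/{class_name}/{model_name}.tar.gz'

    print(arcname)        # classifiers/glue_mnli_roberta-large
    print(tmp_tar)        # /tmp/glue_mnli_roberta.tar.gz
    print(download_url)   # http://files.deeppavlov.ai/v1/classifiers/glue_mnli_roberta.tar.gz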