Merge pull request #307 from DeNeutoy/spacy3-transformer
Spacy3 `en_core_sci_scibert`
DeNeutoy authored Feb 12, 2021
2 parents 72c3a1a + aa38d1e commit 8d4d49d
Showing 6 changed files with 420 additions and 6 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -81,6 +81,7 @@ pip install CMD-V (to paste the copied URL)
| en_core_sci_sm | A full spaCy pipeline for biomedical data with a ~100k vocabulary. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_sm-0.4.0.tar.gz)|
| en_core_sci_md | A full spaCy pipeline for biomedical data with a ~360k vocabulary and 50k word vectors. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_md-0.4.0.tar.gz)|
| en_core_sci_lg | A full spaCy pipeline for biomedical data with a ~785k vocabulary and 600k word vectors. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_lg-0.4.0.tar.gz)|
| en_core_sci_scibert | A full spaCy pipeline for biomedical data with a ~785k vocabulary and `allenai/scibert_scivocab_uncased` as the transformer model. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_scibert-0.4.0.tar.gz)|
| en_ner_craft_md| A spaCy NER model trained on the CRAFT corpus.|[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_ner_craft_md-0.4.0.tar.gz)|
| en_ner_jnlpba_md | A spaCy NER model trained on the JNLPBA corpus.| [Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_ner_jnlpba_md-0.4.0.tar.gz)|
| en_ner_bc5cdr_md | A spaCy NER model trained on the BC5CDR corpus. | [Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_ner_bc5cdr_md-0.4.0.tar.gz)|
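For context, a minimal usage sketch of the new model (assuming the `en_core_sci_scibert-0.4.0` package has been installed from the release URL above; the sample sentence and printed attributes are illustrative only):

```python
import spacy

# Load the installed scispacy transformer pipeline.
nlp = spacy.load("en_core_sci_scibert")

doc = nlp("Spinal and bulbar muscular atrophy (SBMA) is an inherited motor neuron disease.")

# The core sci models tag, parse, and detect entity mentions.
print([(ent.text, ent.label_) for ent in doc.ents])
print([(tok.text, tok.tag_, tok.dep_) for tok in doc])
```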
147 changes: 147 additions & 0 deletions configs/base_ner_scibert.cfg
@@ -0,0 +1,147 @@
[paths]
vectors = null
init_tok2vec = null
parser_tagger_path = null
vocab_path = null

[system]
gpu_allocator = null
seed = 0

[nlp]
lang = "en"
pipeline = ["transformer","tagger","attribute_ruler","lemmatizer","parser","ner"]
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null

[components]

[components.attribute_ruler]
source = "en_core_web_sm"

[components.lemmatizer]
source = "en_core_web_sm"

[components.ner]
factory = "ner"
moves = null
update_with_oracle_cut_size = 100

[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v1"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = true
nO = null

[components.ner.model.tok2vec]
@architectures = "spacy.Tok2Vec.v1"

[components.ner.model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v1"
width = 96
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
rows = [5000, 2500, 2500, 2500]
include_static_vectors = false

[components.ner.model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
width = 96
depth = 4
window_size = 1
maxout_pieces = 3

[components.parser]
source = ${paths.parser_tagger_path}

[components.tagger]
source = ${paths.parser_tagger_path}

[components.transformer]
source = ${paths.parser_tagger_path}


[corpora]

[corpora.dev]
@readers = "med_mentions_reader"
directory_path = "assets/"
split = "dev"

[corpora.train]
@readers = "med_mentions_reader"
directory_path = "assets/"
split = "train"

[training]
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.2
accumulate_gradient = 1
patience = 0
max_epochs = 7
max_steps = 0
eval_frequency = 500
frozen_components = ["transformer", "parser", "tagger", "attribute_ruler", "lemmatizer"]
before_to_disk = null

[training.batcher]
@batchers = "spacy.batch_by_sequence.v1"
get_length = null

[training.batcher.size]
@schedules = "compounding.v1"
start = 1
stop = 32
compound = 1.001
t = 0.0

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = true

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.001

[training.score_weights]
dep_las_per_type = null
sents_p = null
sents_r = null
ents_per_type = null
tag_acc = null
dep_uas = null
dep_las = null
sents_f = null
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0

[pretraining]

[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = ${paths.vocab_path}
lookups = null

[initialize.components]

[initialize.tokenizer]

[initialize.before_init]
@callbacks = "replace_tokenizer"
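As a sketch of how this config would be consumed (assuming it is saved as `configs/base_ner_scibert.cfg` and that the custom `med_mentions_reader` and `replace_tokenizer` registered functions from this repo are importable at training time; the override paths below are hypothetical):

```python
from spacy import util

# The null [paths] entries must be filled in before training can start.
overrides = {
    "paths.parser_tagger_path": "path/to/parser_tagger_model",  # hypothetical
    "paths.vocab_path": "path/to/vocab",                        # hypothetical
}
config = util.load_config("configs/base_ner_scibert.cfg", overrides=overrides)

# Only the NER head is trained; the rest of the pipeline stays frozen.
print(config["training"]["frozen_components"])

# Equivalent CLI invocation:
#   spacy train configs/base_ner_scibert.cfg \
#       --paths.parser_tagger_path path/to/parser_tagger_model \
#       --paths.vocab_path path/to/vocab
```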
170 changes: 170 additions & 0 deletions configs/base_parser_tagger_scibert.cfg
@@ -0,0 +1,170 @@
[paths]
genia_train = "project_data/genia_train.spacy"
genia_dev = "project_data/genia_dev.spacy"
onto_train = "project_data/train"
vectors = null
init_tok2vec = null
vocab_path = null

[system]
gpu_allocator = "pytorch"
seed = 0

[nlp]
lang = "en"
pipeline = ["transformer","tagger","attribute_ruler","lemmatizer","parser"]
batch_size = 256
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null

[components]

[components.attribute_ruler]
source = "en_core_web_sm"

[components.lemmatizer]
source = "en_core_web_sm"

[components.parser]
factory = "parser"
learn_tokens = false
min_action_freq = 30
moves = null
update_with_oracle_cut_size = 100

[components.parser.model]
@architectures = "spacy.TransitionBasedParser.v1"
state_type = "parser"
extra_state_tokens = false
hidden_width = 128
maxout_pieces = 3
use_upper = true
nO = null

[components.parser.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"

[components.tagger]
factory = "tagger"

[components.tagger.model]
@architectures = "spacy.Tagger.v1"
nO = null

[components.tagger.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"

[components.transformer]
factory = "transformer"
max_batch_items = 4096
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}

[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v1"
name = "allenai/scibert_scivocab_uncased"
tokenizer_config = {"use_fast": true}

[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96


[corpora]

[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.genia_dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null

[corpora.train]
@readers = "parser_tagger_data"
path = ${paths.genia_train}
mixin_data_path = ${paths.onto_train}
mixin_data_percent = 0.2
max_length = 2000
gold_preproc = false
limit = 0
augmenter = null
seed = ${system.seed}

[training]
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.2
accumulate_gradient = 1
patience = 0
max_epochs = 8
max_steps = 0
eval_frequency = 2300
frozen_components = ["attribute_ruler", "lemmatizer"]
before_to_disk = null

[training.batcher]
@batchers = "spacy.batch_by_sequence.v1"
get_length = null

[training.batcher.size]
@schedules = "compounding.v1"
start = 16
stop = 64
compound = 1.001
t = 0.0

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = true

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.00005


[training.score_weights]
dep_las_per_type = null
sents_p = null
sents_r = null
ents_per_type = null
tag_acc = 0.33
dep_uas = 0.33
dep_las = 0.33
sents_f = 0.0
ents_f = 0.0
ents_p = 0.0
ents_r = 0.0

[pretraining]

[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = ${paths.vocab_path}
lookups = null

[initialize.components]

[initialize.tokenizer]

[initialize.before_init]
@callbacks = "replace_tokenizer"
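The `strided_spans.v1` settings above (window = 128, stride = 96) control how each `Doc` is sliced into overlapping token windows for the transformer. A standalone re-implementation of that slicing logic, for illustration only (this is not the library code itself):

```python
def strided_spans(n_tokens: int, window: int = 128, stride: int = 96):
    """Yield (start, end) token offsets for overlapping windows over a doc."""
    spans = []
    start = 0
    while start < n_tokens:
        spans.append((start, min(start + window, n_tokens)))
        if start + window >= n_tokens:
            break  # final window reached the end of the doc
        start += stride
    return spans

# A 300-token document is covered by three overlapping windows:
print(strided_spans(300))  # [(0, 128), (96, 224), (192, 300)]
```

Consecutive windows overlap by 32 tokens (128 - 96), so tokens near a window boundary still receive transformer context from both sides.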
10 changes: 10 additions & 0 deletions data/meta_scibert.json
@@ -0,0 +1,10 @@
{
"lang":"en",
"name":"core_sci_scibert",
"sources": ["OntoNotes 5", "Common Crawl", "GENIA 1.0"],
"description":"Spacy Models for Biomedical Text.",
"author":"Allen Institute for Artificial Intelligence",
"email": "[email protected]",
"url":"https://allenai.github.io/SciSpaCy/",
"license":"CC BY-SA 3.0"
}
3 changes: 3 additions & 0 deletions docs/index.md
@@ -19,6 +19,7 @@ pip install <Model URL>
|:---------------|:------------------|:----------|
| en_core_sci_sm | A full spaCy pipeline for biomedical data. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.3.0/en_core_sci_sm-0.3.0.tar.gz)|
| en_core_sci_md | A full spaCy pipeline for biomedical data with a larger vocabulary and 50k word vectors. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.3.0/en_core_sci_md-0.3.0.tar.gz)|
| en_core_sci_scibert | A full spaCy pipeline for biomedical data with a ~785k vocabulary and `allenai/scibert_scivocab_uncased` as the transformer model. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_core_sci_scibert-0.4.0.tar.gz)|
| en_core_sci_lg | A full spaCy pipeline for biomedical data with a larger vocabulary and 600k word vectors. |[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.3.0/en_core_sci_lg-0.3.0.tar.gz)|
| en_ner_craft_md| A spaCy NER model trained on the CRAFT corpus.|[Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.3.0/en_ner_craft_md-0.3.0.tar.gz)|
| en_ner_jnlpba_md | A spaCy NER model trained on the JNLPBA corpus.| [Download](https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.3.0/en_ner_jnlpba_md-0.3.0.tar.gz)|
@@ -34,9 +35,11 @@ Our models achieve performance within 3% of published state of the art dependency parsers

| model | UAS | LAS | POS | Mentions (F1) | Web UAS |
|:---------------|:----|:------|:------|:---|:---|
| en_core_sci_sm | 89.54| 87.62 | 98.32 | 68.15 | 87.62 |
| en_core_sci_md | 89.61| 87.77 | 98.56 | 69.64 | 88.05 |
| en_core_sci_lg | 89.63| 87.81 | 98.56 | 69.61 | 88.08 |
| en_core_sci_scibert | 92.03| 90.25 | 98.91 | 67.91 | 92.21 |


| model | F1 | Entity Types|
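A hedged sketch of how mention-level F1 scores like those above could be computed with spaCy's `Scorer` (the single gold annotation here is hypothetical; the reported numbers come from the full evaluation sets):

```python
import spacy
from spacy.scorer import Scorer
from spacy.training import Example

nlp = spacy.load("en_core_sci_scibert")

# Hypothetical gold example; the core sci models use a single "ENTITY" label.
gold = [
    ("Spinal and bulbar muscular atrophy is an inherited motor neuron disease.",
     {"entities": [(0, 34, "ENTITY")]}),
]

examples = []
for text, annotations in gold:
    pred = nlp(text)  # predicted doc
    examples.append(Example.from_dict(pred, annotations))

scores = Scorer().score(examples)
print(scores["ents_p"], scores["ents_r"], scores["ents_f"])
```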