# model_demo.yaml (from kerlomz/captcha_trainer)
# - requirement.txt - GPU: tensorflow-gpu, CPU: tensorflow
# - If you use the GPU version, you also need to install the matching CUDA/cuDNN libraries.
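# - A minimal install sketch based on the line above (package names only; use the exact
# - versions pinned in requirement.txt):
# --   CPU: pip install tensorflow
# --   GPU: pip install tensorflow-gpu   (plus a matching CUDA/cuDNN installation)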
System:
DeviceUsage: 0.7
# ModelName: Corresponds to the model file in the model directory;
# - for a model saved as YourModelName.pb, fill in YourModelName here.
# CharSet: Provides a default optional built-in solution:
# - [ALPHANUMERIC, ALPHANUMERIC_LOWER, ALPHANUMERIC_UPPER,
# -- NUMERIC, ALPHABET_LOWER, ALPHABET_UPPER, ALPHABET, ALPHANUMERIC_LOWER_MIX_CHINESE_3500]
# - Or you can use your own customized character set like: ['a', '1', '2'].
# CharMaxLength: Maximum length of characters, used for label padding.
# CharExclude: CharExclude should be a list, like: ['a', '1', '2']
# - which is convenient for users to freely combine character sets.
# - If you don't want to define the character set manually,
# - you can choose a built-in character set
# - and set the characters to be excluded with the CharExclude parameter.
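# - For example (illustrative values only, not this demo's configuration), a built-in set
# - with visually ambiguous characters removed could be written as:
# --   CharSet: ALPHANUMERIC_LOWER
# --   CharExclude: ['0', 'o', '1', 'l']
# - while a fully custom set could be written as:
# --   CharSet: ['a', 'b', 'c', '1', '2', '3']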
Model:
Sites: [
'YourModelName'
]
ModelName: YourModelName
ModelType: 150x50
CharSet: ALPHANUMERIC_LOWER
CharExclude: []
CharReplace: {}
ImageWidth: 150
ImageHeight: 50
# Binaryzation: [-1: Off, >0 and < 255: On].
# Smoothing: [-1: Off, >0: On].
# Blur: [-1: Off, >0: On].
# Resize: [WIDTH, HEIGHT]
# - If the image is too small, training results will be poor; scale it up via Resize.
# ReplaceTransparent: [True, False]
# - True: Convert transparent images in RGBA format to opaque RGB format,
# - False: Keep the original image
Pretreatment:
Binaryzation: -1
Smoothing: -1
Blur: -1
Resize: [150, 50]
ReplaceTransparent: True
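# An alternative Pretreatment block with every preprocessing step switched on
# (illustrative values only, following the On/Off ranges in the comments above):
# Pretreatment:
#   Binaryzation: 127
#   Smoothing: 3
#   Blur: 3
#   Resize: [150, 50]
#   ReplaceTransparent: True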
# CNNNetwork: [CNN5, ResNet, DenseNet]
# RecurrentNetwork: [BLSTM, LSTM, SRU, BSRU, GRU]
# - The recommended configuration is CNN5+BLSTM / ResNet+BLSTM
# HiddenNum: [64, 128, 256]
# - The number of hidden units in the recurrent layer, i.e. its capacity for storing past states.
# Optimizer: The optimization algorithm used to compute and apply gradient updates.
# - [AdaBound, Adam, Momentum]
NeuralNet:
CNNNetwork: CNN5
RecurrentNetwork: BLSTM
HiddenNum: 64
KeepProb: 0.98
Optimizer: AdaBound
PreprocessCollapseRepeated: False
CTCMergeRepeated: True
CTCBeamWidth: 1
CTCTopPaths: 1
WarpCTC: False
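# A commented-out sketch of the other recommended combination, ResNet + BLSTM
# (HiddenNum 128 and the remaining values are illustrative, not tuned settings):
# NeuralNet:
#   CNNNetwork: ResNet
#   RecurrentNetwork: BLSTM
#   HiddenNum: 128
#   KeepProb: 0.98
#   Optimizer: AdaBound
#   PreprocessCollapseRepeated: False
#   CTCMergeRepeated: True
#   CTCBeamWidth: 1
#   CTCTopPaths: 1
#   WarpCTC: False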
# TrainsPath and TestPath: The local absolute path of your training and testing set.
# DatasetPath: Package a sample of the TFRecords format from this path.
# TrainRegex and TestRegex: Regex used to extract the label from a file name
# - such as apple_20181010121212.jpg. The default is .*?(?=_.*\.)
# TestSetNum: Optional; the number of samples split off the training set as a test set
# - when you are not preparing a test set separately (i.e. TestPath is not set).
# SavedSteps: One Session.run() execution is called a step;
# - training progress is saved every SavedSteps steps. Default value is 100.
# ValidationSteps: Accuracy is evaluated every ValidationSteps steps. Default value is 500.
# EndAcc: Stop training once the accuracy reaches EndAcc (together with the other stop conditions).
# EndCost: Stop training once the cost drops to EndCost (together with the other stop conditions).
# EndEpochs: Stop training once the epoch count exceeds EndEpochs (together with the other stop conditions).
# BatchSize: Number of samples selected for one training step.
# TestBatchSize: Number of samples selected for one validation step.
# LearningRate: Recommended values [0.01: MomentumOptimizer/AdamOptimizer, 0.001: AdaBoundOptimizer]
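# A worked example of the label regex: with TrainRegex '.*?(?=_)', the sample file
# apple_20181010121212.jpg mentioned above is labelled "apple"; the default pattern
# .*?(?=_.*\.) extracts the same label for this naming scheme.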
Trains:
TrainsPath: './dataset/mnist-CNN5BLSTM-H64-28x28_trains.tfrecords'
TestPath: './dataset/mnist-CNN5BLSTM-H64-28x28_test.tfrecords'
DatasetPath: [
"D:/***"
]
TrainRegex: '.*?(?=_)'
TestSetNum: 300
SavedSteps: 100
ValidationSteps: 500
EndAcc: 0.95
EndCost: 0.1
EndEpochs: 2
BatchSize: 128
TestBatchSize: 300
LearningRate: 0.001
DecayRate: 0.98
DecaySteps: 10000