---
# config.yaml — per-dataset hyperparameter settings.
resultDirName: 'DANESample'
movies617:
  layers: [512, 128, 16]
  View_num: 2
  ft_times: 500
  # NOTE(review): beta_W has 3 entries but View_num is 2 — confirm the consumer
  # only reads the first View_num entries.
  beta_W: [10, 10, 10]
  L: [0.001, 0.01, 0.1, 1, 10, 100]
  alpha: [0.001, 0.01, 0.1, 1, 10, 100]
  gama: [0.001, 0.01, 0.1, 1, 10, 100]
  batch_size: 617
  pretrain: false
  learning_rate: [0.0001]  # alternatives: [0.001, 0.0001, 0.00001, 0.000001, 0.0000001]
  sent_outputs_norm: true
cora:
  layers: [512, 128, 16]
  View_num: 2
  ft_times: 500
  beta_W: [50, 60]
  L: [0.1]
  alpha: [0.1]
  gama: [100]
  batch_size: 256
  pretrain: false
  learning_rate: [0.000001]  # alternative: [0.0001]
  sent_outputs_norm: true
BBCSport:
  loadW: false
  layers: [256, 64, 16]
  View_num: 2
  ft_times: 1000
  beta_W: [10, 10, 10]
  L: [0.1]
  alpha: [0.001]
  gama: [100]
  batch_size: 256
  pretrain: false
  learning_rate: [0.00001]  # alternative: [0.0001]
  sent_outputs_norm: true
Digits76240:
  layers: [512, 128, 32]
  View_num: 2
  ft_times: 100
  beta_W: [10, 10, 10]
  L: [0.1]
  alpha: [0.001]
  gama: [100]
  batch_size: 2000
  pretrain: false
  learning_rate: [0.000001]  # alternative: [0.0001]
  sent_outputs_norm: false
100leaves:
  loadW: true
  layers: [500, 100]
  View_num: 3
  ft_times: 1000
  beta_W: [10, 10, 10]
  L: [0.1]
  alpha: [0.001]
  gama: [100]
  batch_size: 1600
  pretrain: false
  learning_rate: [0.000001]  # alternative: [0.0001]
  sent_outputs_norm: true
NUSWIDEOBJ:
  loadW: true
  layers: [512, 128, 32]
  View_num: 5
  ft_times: 1000
  beta_W: [10, 10, 10, 10, 10]
  L: [0.1]
  alpha: [0.001]
  gama: [0.001]
  batch_size: 128
  pretrain: true
  learning_rate: [0.00001]  # alternative: [0.0001]
  sent_outputs_norm: true
Caltech101-all:
  loadW: true
  layers: [512, 128, 32]
  View_num: 6
  ft_times: 100
  beta_W: [0, 0, 0, 0, 0, 0]
  L: [0.1]
  alpha: [0.001]
  gama: [100]
  batch_size: 256
  pretrain: false
  learning_rate: [0.00001]  # alternative: [0.0001]
  sent_outputs_norm: true
ALOI_100:
  loadW: true
  layers: [500, 200, 100]  # alternative: [500, 100]
  View_num: 4
  ft_times: 1000
  # NOTE(review): beta_W has 6 entries but View_num is 4 — confirm the consumer
  # only reads the first View_num entries.
  beta_W: [10, 10, 10, 10, 10, 10]
  L: [0.1]
  alpha: [0.001]
  gama: [100]
  batch_size: 300
  pretrain: false
  learning_rate: [0.0001]
  sent_outputs_norm: true