@article{10278395,
author={Li, Jian-Hang and Gao, Xin-Yue and Lu, Xiang and Liu, Guo-Dong},
journal={IEEE Access},
title={Multi-Head Attention-Based Hybrid Deep Neural Network for Aeroengine Risk Assessment},
year={2023},
volume={11},
pages={113376--113389},
keywords={Aeroengine risk assessment, hybrid deep neural network, multi-head attention mechanism, Time2Vec},
doi={10.1109/ACCESS.2023.3323843}}
@book{pedersen2019efficiently,
title = {Efficiently Inefficient: How Smart Money Invests and Market Prices Are Determined},
author = {Pedersen, Lasse Heje},
year = {2019},
publisher = {Princeton University Press}
}
@article{6795963,
author={Hochreiter, Sepp and Schmidhuber, Jürgen},
journal={Neural Computation},
title={Long Short-Term Memory},
year={1997},
volume={9},
number={8},
pages={1735--1780},
doi={10.1162/neco.1997.9.8.1735}}
@article{DBLP:journals/corr/abs-1907-00235,
author = {Shiyang Li and
Xiaoyong Jin and
Yao Xuan and
Xiyou Zhou and
Wenhu Chen and
Yu{-}Xiang Wang and
Xifeng Yan},
title = {Enhancing the Locality and Breaking the Memory Bottleneck of Transformer
on Time Series Forecasting},
journal = {CoRR},
volume = {abs/1907.00235},
year = {2019},
url = {http://arxiv.org/abs/1907.00235},
eprinttype = {arXiv},
eprint = {1907.00235},
}
@article{DBLP:journals/corr/VaswaniSPUJGKP17,
author = {Ashish Vaswani and
Noam Shazeer and
Niki Parmar and
Jakob Uszkoreit and
Llion Jones and
Aidan N. Gomez and
Lukasz Kaiser and
Illia Polosukhin},
title = {Attention Is All You Need},
journal = {CoRR},
volume = {abs/1706.03762},
year = {2017},
url = {http://arxiv.org/abs/1706.03762},
eprinttype = {arXiv},
eprint = {1706.03762},
}
@article{SEMENOGLOU20211072,
title = {Investigating the accuracy of cross-learning time series forecasting methods},
journal = {International Journal of Forecasting},
volume = {37},
number = {3},
  pages = {1072--1084},
year = {2021},
issn = {0169-2070},
  doi = {10.1016/j.ijforecast.2020.11.009},
url = {https://www.sciencedirect.com/science/article/pii/S0169207020301850},
author = {Artemios-Anargyros Semenoglou and Evangelos Spiliotis and Spyros Makridakis and Vassilios Assimakopoulos},
keywords = {Time series, Cross-learning, Features, Neural networks, M4 competition}
}
@article{gardner1985forecasting,
  title = {Forecasting Trends in Time Series},
  author = {Gardner, Jr., Everette S. and McKenzie, Ed},
  journal = {Management Science},
  volume = {31},
  number = {10},
  pages = {1237--1246},
  year = {1985},
  month = oct,
  url = {http://www.jstor.org/stable/2631439}
}
@article{FARGALLA2024130184,
title = {TimeNet: Time2Vec attention-based CNN-BiGRU neural network for predicting production in shale and sandstone gas reservoirs},
journal = {Energy},
volume = {290},
pages = {130184},
year = {2024},
issn = {0360-5442},
  doi = {10.1016/j.energy.2023.130184},
url = {https://www.sciencedirect.com/science/article/pii/S0360544223035788},
author = {Mandella Ali M. Fargalla and Wei Yan and Jingen Deng and Tao Wu and Wyclif Kiyingi and Guangcong Li and Wei Zhang},
keywords = {Gas Production, Shale gas forecasting, Sandstone gas forecasting, Deep learning, Feature extraction, Attention mechanism, Bidirectional gated recurrent unit}
}
@article{DBLP:journals/corr/abs-1907-05321,
author = {Seyed Mehran Kazemi and
Rishab Goel and
Sepehr Eghbali and
Janahan Ramanan and
Jaspreet Sahota and
Sanjay Thakur and
Stella Wu and
Cathal Smyth and
Pascal Poupart and
Marcus A. Brubaker},
title = {Time2Vec: Learning a Vector Representation of Time},
journal = {CoRR},
volume = {abs/1907.05321},
year = {2019},
url = {http://arxiv.org/abs/1907.05321},
eprinttype = {arXiv},
eprint = {1907.05321},
}
@article{taieb2010forecasting,
  author = {Ben Taieb, Souhaib and Sorjamaa, Antti and Bontempi, Gianluca},
  title = {Multiple-output modeling for multi-step-ahead time series forecasting},
  journal = {Neurocomputing},
  volume = {73},
  pages = {1950--1957},
  year = {2010},
  month = jun,
  doi = {10.1016/j.neucom.2009.11.030}
}
@article{marcellino2006comparison,
title = {A Comparison of Direct and Iterated Multistep AR Methods for Forecasting Macroeconomic Time Series},
author = {Marcellino, Massimiliano and Stock, James H. and Watson, Mark W.},
journal = {Journal of Econometrics},
volume = {135},
  number = {1--2},
pages = {499--526},
year = {2006},
doi = {10.1016/j.jeconom.2005.07.013}
}
@article{Rumelhart1986,
author = {Rumelhart, David E. and Hinton, Geoffrey E. and Williams, Ronald J.},
title = {Learning representations by back-propagating errors},
journal = {Nature},
volume = {323},
number = {6088},
pages = {533--536},
year = {1986},
doi = {10.1038/323533a0},
url = {https://doi.org/10.1038/323533a0},
issn = {1476-4687}
}
@inproceedings{Wang_2019,
  author = {Wang, Jia and Sun, Tong and Liu, Benyuan and Cao, Yu and Zhu, Hongwei},
  title = {CLVSA: A Convolutional LSTM Based Variational Sequence-to-Sequence Model with Attention for Predicting Trends of Financial Markets},
  booktitle = {Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence},
  series = {IJCAI-2019},
  publisher = {International Joint Conferences on Artificial Intelligence Organization},
  year = {2019},
  month = aug,
  doi = {10.24963/ijcai.2019/514},
  url = {http://dx.doi.org/10.24963/ijcai.2019/514}
}
@misc{feng2019enhancing,
title={Enhancing Stock Movement Prediction with Adversarial Training},
author={Fuli Feng and Huimin Chen and Xiangnan He and Ji Ding and Maosong Sun and Tat-Seng Chua},
year={2019},
eprint={1810.09936},
archivePrefix={arXiv},
primaryClass={q-fin.TR}
}
@inproceedings{Shen2012StockMF,
title={Stock Market Forecasting Using Machine Learning Algorithms},
author={Shunrong Shen and Haomiao Jiang and Tongda Zhang},
year={2012},
url={https://api.semanticscholar.org/CorpusID:16643114}
}
@misc{yf,
  title = {Yahoo Finance},
  url = {https://www.finance.yahoo.com},
  note = {Accessed: 2024-05-05}
}
@misc{resnet,
  title = {Residual neural network},
  url = {https://wikipedia.org/wiki/Residual_neural_network},
  note = {Accessed: 2024-05-08}
}
@misc{rmsp,
  title = {RMSProp},
  url = {https://optimization.cbe.cornell.edu/index.php?title=RMSProp},
  note = {Accessed: 2024-05-10}
}
@misc{ada,
  title = {AdaGrad},
  url = {https://optimization.cbe.cornell.edu/index.php?title=AdaGrad},
  note = {Accessed: 2024-05-10}
}
@misc{pic4trans,
  title = {What I Learned from Creating a Large Language Model from Scratch},
  url = {https://blog.stackademic.com/what-i-learned-from-creating-a-large-language-model-from-scratch-dd9a4dc6a73d},
  note = {Accessed: 2024-05-10}
}
@misc{pic4nn,
  title = {Neural Networks from Scratch},
  url = {https://victorzhou.com/series/neural-networks-from-scratch/},
  note = {Accessed: 2024-05-10}
}
@article{pic4rnn,
title = {From model-driven to data-driven: A review of hysteresis modeling in structural and mechanical systems},
journal = {Mechanical Systems and Signal Processing},
volume = {204},
pages = {110785},
year = {2023},
issn = {0888-3270},
  doi = {10.1016/j.ymssp.2023.110785},
url = {https://www.sciencedirect.com/science/article/pii/S0888327023006933},
author = {Tianyu Wang and Mohammad Noori and Wael A. Altabey and Zhishen Wu and Ramin Ghiasi and Sin-Chi Kuok and Ahmed Silik and Nabeel S.D. Farhan and Vasilis Sarhosis and Ehsan Noroozinejad Farsangi},
keywords = {Hysteresis modeling, Structural and mechanical system, Model-driven method, Data-driven method, Model-data hybrid driven method},
abstract = {Hysteresis is a natural phenomenon that widely exists in structural and mechanical systems. The characteristics of structural hysteretic behaviors are complicated. Therefore, numerous methods have been developed to describe hysteresis. In this paper, a review of the available hysteretic modeling methods is carried out. Such methods are divided into: a) model-driven and b) data-driven methods. The model-driven method uses parameter identification to determine parameters. Three types of parametric models are introduced including polynomial models, differential based models, and operator based models. Four algorithms as least mean square error algorithm, Kalman filter algorithm, metaheuristic algorithms, and Bayesian estimation are presented to realize parameter identification. The data-driven method utilizes universal mathematical models to describe hysteretic behavior. Regression model, artificial neural network, least square support vector machine, and deep learning are introduced in turn as the classical data-driven methods. Model-data driven hybrid methods are also discussed to make up for the shortcomings of the two methods. Based on a multi-dimensional evaluation, the existing problems and open challenges of different hysteresis modeling methods are discussed. Some possible research directions about hysteresis description are given in the final section.}
}
@misc{pic4lstm,
  title = {Understanding LSTM Networks},
  url = {https://colah.github.io/posts/2015-08-Understanding-LSTMs/},
  note = {Accessed: 2024-05-10}
}
@inproceedings{devlin-etal-2019-bert,
title = "{BERT}: Pre-training of Deep Bidirectional Transformers for Language Understanding",
author = "Devlin, Jacob and
Chang, Ming-Wei and
Lee, Kenton and
Toutanova, Kristina",
editor = "Burstein, Jill and
Doran, Christy and
Solorio, Thamar",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N19-1423",
doi = "10.18653/v1/N19-1423",
pages = "4171--4186",
abstract = "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7{\%} (4.6{\%} absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).",
}
@misc{wen2023transformers,
title={Transformers in Time Series: A Survey},
author={Qingsong Wen and Tian Zhou and Chaoli Zhang and Weiqi Chen and Ziqing Ma and Junchi Yan and Liang Sun},
year={2023},
eprint={2202.07125},
archivePrefix={arXiv},
primaryClass={cs.LG}
}