% bibliografia.bib
% Examples of various reference types
% https://verbosus.com/bibtex-style-examples.html
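% A minimal LaTeX usage sketch for this file (assuming it is saved as
% bibliografia.bib next to the .tex source; the "plain" style is just an
% illustrative choice):
%
%   \documentclass{article}
%   \begin{document}
%   See \cite{hands} and \cite{morettin}.
%   \bibliographystyle{plain}
%   \bibliography{bibliografia}  % note: no .bib extension here
%   \end{document}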
@Book{hands,
author = {Aurélien Géron},
title = {Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow},
edition = {2nd},
publisher = {O'Reilly},
year = {2019},
}
@Book{classic,
author = {David Kopec},
title = {Problemas Clássicos de Ciência da Computação com Python},
edition = {1st},
publisher = {Novatec},
year = {2019},
}
@Book{data,
author = {Joel Grus},
title = {Data Science do Zero: Primeiras regras com o Python},
edition = {1st},
publisher = {O'Reilly},
year = {2016},
}
@Book{statLearn,
author = {Gareth James and Daniela Witten and Trevor Hastie and Robert Tibshirani},
title = {An Introduction to Statistical Learning},
edition = {1st},
publisher = {Springer},
year = {2013}
}
@Book{apostila,
author = {Pedro Alberto Morettin and Julio Singer},
title = {Introdução à Ciência de Dados - Fundamentos e Aplicações},
publisher = {Departamento de Estatística, Universidade de São Paulo},
year = {2020},
}
@Book{morettin,
author = {Pedro Alberto Morettin and Clélia M. C. Toloi},
title = {Análise de séries temporais, vol. 1: Modelos lineares univariados},
edition = {3rd},
publisher = {Edgard Blücher Ltda.},
year = {2019}
}
@Book{box,
author = {George Edward Pelham Box and Gwilym Meirion Jenkins and Gregory C. Reinsel and Greta M. Ljung},
title = {Time Series Analysis: Forecasting and Control},
edition = {5th},
publisher = {John Wiley \& Sons},
year = {2016}
}
@Book{antonio,
author = {Marcos Nascimento Magalhães and Antonio Carlos Pedroso de Lima},
title = {Noções de Probabilidade e Estatística},
edition = {5th},
publisher = {Edusp},
year = {2002}
}
@article{cox,
author = {George Edward Pelham Box and David Roxbee Cox},
title = {An analysis of transformations},
journal = {Journal of the Royal Statistical Society, Series B (Methodological)},
year = {1964},
volume = {26},
number = {2},
pages = {211--252},
url = {https://www.jstor.org/stable/2984418}
}
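% The paper above introduces the Box-Cox transformation; for reference, its
% standard one-parameter form is
%   y^{(\lambda)} = \begin{cases} (y^{\lambda} - 1)/\lambda, & \lambda \neq 0 \\ \ln y, & \lambda = 0 \end{cases}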
% Summary of the stationarity of the AR, MA, and ARMA models
% http://www.portalaction.com.br/series-temporais/13-processos-estacionarios
% http://www.portalaction.com.br/series-temporais/14-testes-de-estacionariedade
% Explanation of the AIC
% https://towardsdatascience.com/the-akaike-information-criterion-c20c8fd832f2
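% For reference, the criterion discussed in the post above, in its standard
% definition:
%   \mathrm{AIC} = 2k - 2\ln(\hat{L})
% where k is the number of fitted parameters and \hat{L} the maximized
% likelihood; among candidate models, lower AIC is preferred.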
% Fetching dollar exchange-rate data
% https://olinda.bcb.gov.br/olinda/servico/PTAX/versao/v1/aplicacao#!/recursos
% USD EUR GBP
% https://www.youtube.com/watch?v=3JQ3hYko51Y % neural networks
% https://keras.io/api/models/model_training_apis/
@MISC{aic,
author = {Sachin Date},
title = {The Akaike Information Criterion},
howpublished = {\url{https://towardsdatascience.com/the-akaike-information-criterion-c20c8fd832f2}},
month = {9},
year = {2019}
}
@MISC{venn,
author = {Andrew Silver},
title = {The Essential Data Science Venn Diagram},
howpublished = {\url{https://towardsdatascience.com/the-essential-data-science-venn-diagram-35800c3bef40}},
month = {9},
year = {2018}
}
@MISC{prince,
author = {Prince Barpaga},
title = {A Gentle Introduction to Machine Learning},
howpublished = {\url{https://towardsdatascience.com/a-gentle-introduction-to-machine-learning-599210ec34ad}},
month = {6},
year = {2019}
}
@MISC{allen,
author = {Robbie Allen},
title = {A Gentle Introduction to Machine Learning Concepts},
howpublished = {\url{https://medium.com/machine-learning-in-practice/a-gentle-introduction-to-machine-learning-concepts-cfe710910eb}},
month = {2},
year = {2020}
}
@MISC{korbut,
author = {Daniil Korbut},
title = {Machine Learning Algorithms: Which One to Choose for Your Problem},
howpublished = {\url{https://blog.statsbot.co/machine-learning-algorithms-183cc73197c}},
month = {10},
year = {2017}
}
@MISC{means,
author = {Khyati Mahendru},
title = {How to Determine the Optimal K for K-Means?},
howpublished = {\url{https://medium.com/analytics-vidhya/how-to-determine-the-optimal-k-for-k-means-708505d204eb}},
month = {6},
year = {2019}
}
@MISC{yash,
author = {Yash Upadhyay},
title = {Regularization techniques for Neural Networks},
howpublished = {\url{https://towardsdatascience.com/regularization-techniques-for-neural-networks-e55f295f2866}},
month = {3},
year = {2019}
}
% https://medium.com/machina-sapiens/algoritmos-de-aprendizagem-de-m%C3%A1quina-qual-deles-escolher-67040ad68737
@phdthesis{doutorado,
author = {Rosangela Ballini},
title = {Análise e Previsão de Vazões Utilizando Modelos de Séries Temporais, Redes Neurais e Redes Neurais Nebulosas},
school = {Faculdade de Engenharia Elétrica e de Computação da Universidade Estadual de Campinas},
year = {2000},
type = {PhD thesis in Electrical Engineering}
}
@Book{carroll,
author = {Bradley W. Carroll and Dale A. Ostlie},
title = {An Introduction to Modern Astrophysics},
edition = {2nd},
publisher = {Addison-Wesley Longman},
year = {2006},
}
@article{blei,
author = {David M. Blei and Padhraic Smyth},
title = {Science and data science},
journal = {PNAS},
year = {2017},
number = {33},
pages = {8689--8692},
month = {8},
volume = {114}
}
@article{frank,
author = {Frank Rosenblatt},
title = {The perceptron: a probabilistic model for information storage and organization in the brain},
journal = {Psychological Review},
year = {1958},
number = {6},
pages = {386--408},
volume = {65},
url = {https://www.ling.upenn.edu/courses/cogs501/Rosenblatt1958.pdf}
}
@MISC{matheus,
author = {Matheus Facure},
title = {Funções de Ativação - Entendendo a importância da ativação correta nas redes neurais.},
howpublished = {\url{https://matheusfacure.github.io/2017/07/12/activ-func/}},
month = {7},
year = {2017}
}
@MISC{matheus_2,
author = {Matheus Facure},
title = {Dificuldades no Treinamento de Redes Neurais - Examinando o problema de gradientes explodindo ou desvanecendo.},
howpublished = {\url{https://matheusfacure.github.io/2017/07/10/problemas-treinamento/}},
month = {7},
year = {2017}
}
@MISC{layers_1,
author = {James Dellinger},
title = {Weight Initialization in Neural Networks: A Journey From the Basics to Kaiming},
howpublished = {\url{https://towardsdatascience.com/weight-initialization-in-neural-networks-a-journey-from-the-basics-to-kaiming-954fb9b47c79}},
month = {4},
year = {2019}
}
@MISC{layers_2,
author = {Jeff Heaton},
title = {The Number of Hidden Layers},
howpublished = {\url{https://www.heatonresearch.com/2017/06/01/hidden-layers.html}},
month = {6},
year = {2017}
}
@MISC{network_1,
author = {Anas Al-Masri},
title = {What Are Overfitting and Underfitting in Machine Learning?},
howpublished = {\url{https://towardsdatascience.com/what-are-overfitting-and-underfitting-in-machine-learning-a96b30864690}},
month = {6},
year = {2019}
}
@MISC{network_2,
author = {Amar Budhiraja},
title = {Dropout in (Deep) Machine learning},
howpublished = {\url{https://medium.com/@amarbudhiraja/https-medium-com-amarbudhiraja-learning-less-to-learn-better-dropout-in-deep-machine-learning-74334da4bfc5}},
month = {12},
year = {2016}
}
@MISC{automl,
author = {Andre Ye},
title = {AutoML: Creating Top-Performing Neural Networks Without Defining Architectures},
howpublished = {\url{https://towardsdatascience.com/automl-creating-top-performing-neural-networks-without-defining-architectures-c7d3b08cddc}},
month = {9},
year = {2020}
}
@article{xu_relu,
author = {{Xu}, Bing and {Wang}, Naiyan and {Chen}, Tianqi and {Li}, Mu},
title = "{Empirical Evaluation of Rectified Activations in Convolutional Network}",
journal = {arXiv e-prints},
keywords = {Computer Science - Machine Learning, Computer Science - Computer Vision and Pattern Recognition, Statistics - Machine Learning},
year = {2015},
month = {5},
eid = {arXiv:1505.00853},
pages = {arXiv:1505.00853},
archivePrefix = {arXiv},
eprint = {1505.00853},
primaryClass = {cs.LG},
adsurl = {https://ui.adsabs.harvard.edu/abs/2015arXiv150500853X},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@article{clevert,
author = {{Clevert}, Djork-Arné and {Unterthiner}, Thomas and
{Hochreiter}, Sepp},
title = "{Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)}",
journal = {arXiv e-prints},
keywords = {Computer Science - Machine Learning},
year = {2015},
month = {11},
eid = {arXiv:1511.07289},
pages = {arXiv:1511.07289},
archivePrefix = {arXiv},
eprint = {1511.07289},
primaryClass = {cs.LG},
adsurl = {https://ui.adsabs.harvard.edu/abs/2015arXiv151107289C},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@article{fuzzy_1,
title = {Fuzzy sets},
journal = {Information and Control},
volume = {8},
number = {3},
pages = {338--353},
year = {1965},
issn = {0019-9958},
doi = {10.1016/S0019-9958(65)90241-X},
url = {http://www.sciencedirect.com/science/article/pii/S001999586590241X},
author = {Lotfi Aliasker Zadeh},
abstract = {A fuzzy set is a class of objects with a continuum of grades of membership. Such a set is characterized by a membership (characteristic) function which assigns to each object a grade of membership ranging between zero and one. The notions of inclusion, union, intersection, complement, relation, convexity, etc., are extended to such sets, and various properties of these notions in the context of fuzzy sets are established. In particular, a separation theorem for convex fuzzy sets is proved without requiring that the fuzzy sets be disjoint.}
}
@inproceedings{fuzzy_2,
author = {Li-Xin Wang},
title = {Fuzzy systems are universal approximators},
booktitle = {[1992 Proceedings] IEEE International Conference on Fuzzy Systems},
year = {1992},
month = {3},
pages = {1163--1170},
abstract = {The author proves that fuzzy systems are universal approximators. The Stone-Weierstrass theorem is used to prove that fuzzy systems with product inference, centroid defuzzification, and a Gaussian membership function are capable of approximating any real continuous function on a compact set to arbitrary accuracy. This result can be viewed as an existence theorem of an optimal fuzzy system for a wide variety of problems.},
keywords = {fuzzy set theory;inference mechanisms;modelling;universal approximators;Stone-Weierstrass theorem;product inference;centroid defuzzification;Gaussian membership function;real continuous function;compact set;optimal fuzzy system;Fuzzy systems},
doi = {10.1109/FUZZY.1992.258721}
}
@inproceedings{4809024,
author = {M. K. Alsmadi and K. B. Omar and S. A. Noah and I. Almarashdah},
title = {Performance Comparison of Multi-layer Perceptron (Back Propagation, Delta Rule and Perceptron) algorithms in Neural Networks},
booktitle = {2009 IEEE International Advance Computing Conference},
year = {2009},
month = {3},
pages = {296--299},
abstract = {A multilayer perceptron is a feedforward artificial neural network model that maps sets of input data onto a set of appropriate output. It is a modification of the standard linear perceptron in that it uses three or more layers of neurons (nodes) with nonlinear activation functions, and is more powerful than the perceptron in that it can distinguish data that is not linearly separable, or separable by a hyper plane. MLP networks are general-purpose, flexible, nonlinear models consisting of a number of units organised into multiple layers. The complexity of the MLP network can be changed by varying the number of layers and the number of units in each layer. Given enough hidden units and enough data, it has been shown that MLPs can approximate virtually any function to any desired accuracy. This paper presents the performance comparison between Multi-layer Perceptron (back propagation, delta rule and perceptron). Perceptron is a steepest descent type algorithm that normally has slow convergence rate and the search for the global minimum often becomes trapped at poor local minima. The current study investigates the performance of three algorithms to train MLP networks. Its was found that the Perceptron algorithm are much better than others algorithms.},
keywords = {backpropagation;convergence of numerical methods;multilayer perceptrons;recurrent neural nets;performance comparison;multilayer perceptron;back propagation;delta rule;feedforward artificial neural network;nonlinear activation function;steepest descent type algorithm;convergence rate;Multilayer perceptrons;Neural networks;Multi-layer neural network;Artificial neural networks;Neurons;Nervous system;Computer networks;Information science;Computer industry;Power system modeling;Back propagation;perceptron;delta rule learning;classification},
doi = {10.1109/IADCC.2009.4809024}
}
@article{article,
author = {Maier, Holger and Dandy, Graeme},
year = {2000},
month = {1},
pages = {101--124},
title = {Neural networks for the prediction and forecasting of water resources variables: A review of modelling issues and applications},
volume = {15},
journal = {Environmental Modelling and Software},
doi = {10.1016/S1364-8152(99)00007-9}
}
@book{guidorizzi2,
title = {Um curso de c{\'a}lculo},
author = {Guidorizzi, Hamilton Luiz},
volume = {2},
isbn = {8521604254},
year = {1986},
publisher = {LTC}
}
% fbprophet example links
% https://pythondata.com/forecasting-time-series-data-with-prophet-part-1/
% https://nextjournal.com/viebel/forecasting-time-series-data-with-prophet
% https://www.mikulskibartosz.name/prophet-plot-explained/
% Paper on Prophet
% https://peerj.com/preprints/3190v2/
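% A draft BibTeX entry for the Prophet preprint linked above (fields taken
% from the PeerJ preprint page; the key "prophet" is a suggestion, so verify
% the details before citing):
@MISC{prophet,
author = {Sean J. Taylor and Benjamin Letham},
title = {Forecasting at scale},
howpublished = {\url{https://peerj.com/preprints/3190v2/}},
year = {2017}
}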