%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for Calin Teodor at 2021-09-26 21:41:23 +0100
%% Saved with string encoding Unicode (UTF-8)
@article{Pelt254,
abstract = {Popular neural networks for image-processing problems often contain many different operations, multiple layers of connections, and a large number of trainable parameters, often exceeding several million. They are typically tailored to specific applications, making it difficult to apply a network that is successful in one application to different applications. Here, we introduce a neural network architecture that is less complex than existing networks, is easy to train, and achieves accurate results with relatively few trainable parameters. The network automatically adapts to a specific problem, allowing the same network to be applied to a wide variety of different problems. Deep convolutional neural networks have been successfully applied to many image-processing problems in recent works. Popular network architectures often add additional operations and connections to the standard architecture to enable training deeper networks. To achieve accurate results in practice, a large number of trainable parameters are often required. Here, we introduce a network architecture based on using dilated convolutions to capture features at different image scales and densely connecting all feature maps with each other. The resulting architecture is able to achieve accurate results with relatively few parameters and consists of a single set of operations, making it easier to implement, train, and apply in practice, and automatically adapts to different problems. We compare results of the proposed network architecture with popular existing architectures for several segmentation problems, showing that the proposed architecture is able to achieve accurate results with fewer parameters, with a reduced risk of overfitting the training data.},
author = {Pelt, Dani{\"e}l M. and Sethian, James A.},
date-added = {2021-09-26 21:38:23 +0100},
date-modified = {2021-09-26 21:38:23 +0100},
doi = {10.1073/pnas.1715832114},
eprint = {https://www.pnas.org/content/115/2/254.full.pdf},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences},
number = {2},
pages = {254--259},
publisher = {National Academy of Sciences},
title = {A mixed-scale dense convolutional neural network for image analysis},
url = {https://www.pnas.org/content/115/2/254},
volume = {115},
year = {2018},
Bdsk-Url-1 = {https://www.pnas.org/content/115/2/254},
Bdsk-Url-2 = {https://doi.org/10.1073/pnas.1715832114}}
@inproceedings{10.1145/3082031.3083236,
abstract = {This paper presents an empirical study on applying convolutional neural networks (CNNs)
to detecting J-UNIWARD -- one of the most secure JPEG steganographic methods. Experiments
guiding the architectural design of the CNNs have been conducted on the JPEG compressed
BOSSBase containing 10,000 covers of size 512\texttimes{}512. Results have verified that both
the pooling method and the depth of the CNNs are critical for performance. Results
have also proved that a 20-layer CNN, in general, outperforms the most sophisticated
feature-based methods, but its advantage gradually diminishes on hard-to-detect cases.
To show that the performance generalizes to large-scale databases and to different
cover sizes, one experiment has been conducted on the CLS-LOC dataset of ImageNet
containing more than one million covers cropped to a unified size of 256\texttimes{}256. The proposed
20-layer CNN has cut the error achieved by a CNN recently proposed for large-scale
JPEG steganalysis by 35\%. Source code is available via GitHub: https://github.com/GuanshuoXu/deep_cnn_jpeg_steganalysis},
address = {New York, NY, USA},
author = {Xu, Guanshuo},
booktitle = {Proceedings of the 5th ACM Workshop on Information Hiding and Multimedia Security},
date-added = {2021-09-26 21:37:29 +0100},
date-modified = {2021-09-26 21:37:29 +0100},
doi = {10.1145/3082031.3083236},
isbn = {9781450350617},
keywords = {cnn, deep cnn, deep learning, j-uniward, steganalysis},
location = {Philadelphia, Pennsylvania, USA},
numpages = {7},
pages = {67--73},
publisher = {Association for Computing Machinery},
series = {IH\&MMSec '17},
title = {Deep Convolutional Neural Network to Detect J-UNIWARD},
url = {https://doi.org/10.1145/3082031.3083236},
year = {2017},
Bdsk-Url-1 = {https://doi.org/10.1145/3082031.3083236}}
@webpage{Putnam,
author = {Scholes, John},
date-added = {2021-10-05 20:18:22 +0100},
date-modified = {2021-10-05 20:18:59 +0100},
keywords = {Putnam},
title = {53rd Putnam},
url = {https://prase.cz/kalva/putnam/putn92.html},
year = {1992}}
@article{5352485,
author = {Wilamowski, Bogdan M.},
date-added = {2021-09-26 21:36:32 +0100},
date-modified = {2021-09-26 21:36:32 +0100},
doi = {10.1109/MIE.2009.934790},
journal = {IEEE Industrial Electronics Magazine},
number = {4},
pages = {56--63},
title = {Neural network architectures and learning algorithms},
volume = {3},
year = {2009},
Bdsk-Url-1 = {https://doi.org/10.1109/MIE.2009.934790}}
@article{80269,
author = {Wan, E.A.},
date-added = {2021-09-26 21:36:26 +0100},
date-modified = {2021-09-26 21:36:26 +0100},
doi = {10.1109/72.80269},
journal = {IEEE Transactions on Neural Networks},
number = {4},
pages = {303--305},
title = {Neural network classification: a Bayesian interpretation},
volume = {1},
year = {1990},
Bdsk-Url-1 = {https://doi.org/10.1109/72.80269}}
@inbook{Kim2017,
abstract = {The importance of the deep neural network lies in the fact that it opened the door to the complicated non-linear model and systematic approach for the hierarchical processing of knowledge.},
address = {Berkeley, CA},
author = {Kim, Phil},
booktitle = {MATLAB Deep Learning: With Machine Learning, Neural Networks and Artificial Intelligence},
date-added = {2021-09-26 21:36:23 +0100},
date-modified = {2021-09-26 21:36:23 +0100},
doi = {10.1007/978-1-4842-2845-6_6},
isbn = {978-1-4842-2845-6},
pages = {121--147},
publisher = {Apress},
title = {Convolutional Neural Network},
url = {https://doi.org/10.1007/978-1-4842-2845-6_6},
year = {2017},
Bdsk-Url-1 = {https://doi.org/10.1007/978-1-4842-2845-6_6}}
@inproceedings{8308186,
author = {Albawi, Saad and Mohammed, Tareq Abed and Al-Zawi, Saad},
booktitle = {2017 International Conference on Engineering and Technology (ICET)},
date-added = {2021-09-26 21:34:26 +0100},
date-modified = {2021-09-26 21:34:26 +0100},
doi = {10.1109/ICEngTechnol.2017.8308186},
pages = {1--6},
title = {Understanding of a convolutional neural network},
year = {2017},
Bdsk-Url-1 = {https://doi.org/10.1109/ICEngTechnol.2017.8308186}}
@book{anthony1999neural,
author = {Anthony, Martin and Bartlett, Peter L.},
date-added = {2021-09-26 21:33:46 +0100},
date-modified = {2021-09-26 21:33:46 +0100},
isbn = {9780521573535},
lccn = {98053260},
publisher = {Cambridge University Press},
title = {Neural Network Learning: Theoretical Foundations},
url = {https://books.google.fr/books?id=UH6XRoEQ4h8C},
year = {1999},
Bdsk-Url-1 = {https://books.google.fr/books?id=UH6XRoEQ4h8C}}
@inbook{Wang2003,
abstract = {Inspired by the sophisticated functionality of human brains where hundreds of billions of interconnected neurons process information in parallel, researchers have successfully tried demonstrating certain levels of intelligence on silicon. Examples include language translation and pattern recognition software. While simulation of human consciousness and emotion is still in the realm of science fiction, we, in this chapter, consider artificial neural networks as universal function approximators. Especially, we introduce neural networks which are suited for time series forecasts.},
address = {Boston, MA},
author = {Wang, Sun-Chong},
booktitle = {Interdisciplinary Computing in Java Programming},
date-added = {2021-09-26 21:33:27 +0100},
date-modified = {2021-09-26 21:33:27 +0100},
doi = {10.1007/978-1-4615-0377-4_5},
isbn = {978-1-4615-0377-4},
pages = {81--100},
publisher = {Springer US},
title = {Artificial Neural Network},
url = {https://doi.org/10.1007/978-1-4615-0377-4_5},
year = {2003},
Bdsk-Url-1 = {https://doi.org/10.1007/978-1-4615-0377-4_5}}
@article{FEINDT2006190,
abstract = {Detailed analysis of correlated data plays a vital role in modern analyses. We present a sophisticated neural network package based on Bayesian statistics which can be used for both classification and event-by-event prediction of the complete probability density distribution for continuous quantities. The network provides numerous possibilities to automatically preprocess the input variables and uses advanced regularisation and pruning techniques to essentially eliminate the risk of overtraining. Examples from physics and industry are given.},
author = {M. Feindt and U. Kerzel},
date-added = {2021-09-26 21:32:57 +0100},
date-modified = {2021-09-26 21:32:57 +0100},
doi = {10.1016/j.nima.2005.11.166},
issn = {0168-9002},
journal = {Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment},
keywords = {Bayes, Neural network, Classification, Density reconstruction, Data-mining, Preprocessing},
note = {Proceedings of the X International Workshop on Advanced Computing and Analysis Techniques in Physics Research},
number = {1},
pages = {190--194},
title = {The NeuroBayes neural network package},
url = {https://www.sciencedirect.com/science/article/pii/S0168900205022679},
volume = {559},
year = {2006},
Bdsk-Url-1 = {https://www.sciencedirect.com/science/article/pii/S0168900205022679},
Bdsk-Url-2 = {https://doi.org/10.1016/j.nima.2005.11.166}}