@misc{google-vision-api,
author = {Google},
title = {Vision {AI} | Google Cloud},
year = {2023},
url = {https://cloud.google.com/vision?hl=en}
}
@misc{amazon-rekognition,
title = {{What Is Amazon Rekognition? (1:42)}},
journal = {Amazon Web Services, Inc},
year = {2023},
month = dec,
note = {[Online; accessed 18. Dec. 2023]},
url = {https://aws.amazon.com/rekognition}
}
@article{lecun1989handwritten,
title={Handwritten digit recognition with a back-propagation network},
author={LeCun, Yann and Boser, Bernhard and Denker, John and Henderson, Donnie and Howard, Richard and Hubbard, Wayne and Jackel, Lawrence},
journal={Advances in Neural Information Processing Systems},
volume={2},
year={1989}
}
@article{krizhevsky2012imagenet,
title={{ImageNet} classification with deep convolutional neural networks},
author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
journal={Advances in Neural Information Processing Systems},
volume={25},
year={2012}
}
@article{fukushima1980neocognitron,
title={Neocognitron: A self-organizing neural network model for a mechanism of pattern recognition unaffected by shift in position},
author={Fukushima, Kunihiko},
journal={Biological Cybernetics},
volume={36},
number={4},
pages={193--202},
year={1980},
publisher={Springer}
}
@misc{tensorflow2015-whitepaper,
title={{TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
url={https://www.tensorflow.org/},
note={Software available from tensorflow.org},
author={
Mart\'{i}n~Abadi and
Ashish~Agarwal and
Paul~Barham and
Eugene~Brevdo and
Zhifeng~Chen and
Craig~Citro and
Greg~S.~Corrado and
Andy~Davis and
Jeffrey~Dean and
Matthieu~Devin and
Sanjay~Ghemawat and
Ian~Goodfellow and
Andrew~Harp and
Geoffrey~Irving and
Michael~Isard and
Yangqing Jia and
Rafal~Jozefowicz and
Lukasz~Kaiser and
Manjunath~Kudlur and
Josh~Levenberg and
Dandelion~Man\'{e} and
Rajat~Monga and
Sherry~Moore and
Derek~Murray and
Chris~Olah and
Mike~Schuster and
Jonathon~Shlens and
Benoit~Steiner and
Ilya~Sutskever and
Kunal~Talwar and
Paul~Tucker and
Vincent~Vanhoucke and
Vijay~Vasudevan and
Fernanda~Vi\'{e}gas and
Oriol~Vinyals and
Pete~Warden and
Martin~Wattenberg and
Martin~Wicke and
Yuan~Yu and
Xiaoqiang~Zheng},
year={2015},
}
@misc{chollet2015keras,
title={Keras},
author={Chollet, Fran\c{c}ois and others},
year={2015},
howpublished={\url{https://keras.io}},
}
@misc{htmx,
title = {{{$<$}/{$>$} htmx - high power tools for html}},
year = {2023},
month = nov,
note = {[Online; accessed 1. Nov. 2023]},
url = {https://htmx.org}
}
@misc{go,
title = {{The Go Programming Language}},
year = {2023},
month = nov,
note = {[Online; accessed 1. Nov. 2023]},
url = {https://go.dev}
}
@misc{node-to-go,
title = {{A journey from Node to GoLang}},
year = {2023},
month = nov,
note = {[Online; accessed 5. Nov. 2023]},
url = {https://www.loginradius.com/blog/engineering/a-journey-from-node-to-golang}
}
@misc{amazon-machine-learning,
title = {{An overview of AI and machine learning services from AWS}},
journal = {Amazon Web Services, Inc},
year = {2023},
month = dec,
note = {[Online; accessed 18. Dec. 2023]},
url = {https://aws.amazon.com/machine-learning}
}
@misc{amazon-rekognition-custom-labels,
title = {{What is Amazon Rekognition Custom Labels? - Rekognition}},
year = {2023},
month = dec,
note = {[Online; accessed 18. Dec. 2023]},
url = {https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/what-is.html?pg=ln&sec=ft}
}
@misc{amazon-rekognition-custom-labels-training,
title = {{Training an Amazon Rekognition Custom Labels model - Rekognition}},
year = {2023},
month = dec,
note = {[Online; accessed 18. Dec. 2023]},
url = {https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/training-model.html#tm-console}
}
@misc{google-vision-price-sheet,
title = {{Pricing {$\vert$} Vertex AI Vision {$\vert$} Google Cloud}},
journal = {Google Cloud},
year = {2023},
month = dec,
note = {[Online; accessed 20. Dec. 2023]},
url = {https://cloud.google.com/vision-ai/pricing}
}
@misc{google-vision-product-recognizer-guide,
title = {{Product Recognizer guide}},
year = {2023},
month = dec,
note = {[Online; accessed 20. Dec. 2023]},
url = {https://cloud.google.com/vision-ai/docs/product-recognizer}
}
@article{mnist,
title={The {MNIST} database of handwritten digit images for machine learning research},
author={Deng, Li},
journal={IEEE Signal Processing Magazine},
volume={29},
number={6},
pages={141--142},
year={2012},
publisher={IEEE}
}
@article{mist-high-accuracy,
author = {Sanghyeon An and
Min Jun Lee and
Sanglee Park and
Heerin Yang and
Jungmin So},
title = {An Ensemble of Simple Convolutional Neural Network Models for {MNIST}
Digit Recognition},
journal = {CoRR},
volume = {abs/2008.10400},
year = {2020},
url = {https://arxiv.org/abs/2008.10400},
eprinttype = {arXiv},
eprint = {2008.10400},
timestamp = {Fri, 28 Aug 2020 12:11:44 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2008-10400.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{lecun-98,
author = {LeCun, Y. and Bottou, L. and Bengio, Y. and Haffner, P.},
title = {Gradient-Based Learning Applied to Document Recognition},
journal = {Proceedings of the IEEE},
month = nov,
volume = {86},
number = {11},
pages = {2278--2324},
year = {1998}
}
@inproceedings{imagenet,
title={{ImageNet}: A large-scale hierarchical image database},
author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},
pages={248--255},
year={2009},
organization={IEEE}
}
@article{resnet-152,
author = {Qilong Wang and
Banggu Wu and
Pengfei Zhu and
Peihua Li and
Wangmeng Zuo and
Qinghua Hu},
title = {ECA-Net: Efficient Channel Attention for Deep Convolutional Neural
Networks},
journal = {CoRR},
volume = {abs/1910.03151},
year = {2019},
url = {http://arxiv.org/abs/1910.03151},
eprinttype = {arXiv},
eprint = {1910.03151},
timestamp = {Mon, 04 Dec 2023 21:30:01 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1910-03151.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{efficientnet,
author = {Mingxing Tan and
Quoc V. Le},
title = {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
journal = {CoRR},
volume = {abs/1905.11946},
year = {2019},
url = {http://arxiv.org/abs/1905.11946},
eprinttype = {arXiv},
eprint = {1905.11946},
timestamp = {Mon, 03 Jun 2019 13:42:33 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-11946.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{resnet,
title={Deep Residual Learning for Image Recognition},
author={Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun},
year={2015},
eprint={1512.03385},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{going-deeper-with-convolutions,
title={Going Deeper with Convolutions},
author={Christian Szegedy and Wei Liu and Yangqing Jia and Pierre Sermanet and Scott Reed and Dragomir Anguelov and Dumitru Erhan and Vincent Vanhoucke and Andrew Rabinovich},
year={2014},
eprint={1409.4842},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{very-deep-convolution-networks-for-large-scale-image-recognition,
title={Very Deep Convolutional Networks for Large-Scale Image Recognition},
author={Karen Simonyan and Andrew Zisserman},
year={2015},
eprint={1409.1556},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{efficient-net,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
@inproceedings{inverted-bottleneck-mobilenet,
author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh},
booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
title={{MobileNetV2}: Inverted Residuals and Linear Bottlenecks},
year={2018},
pages={4510--4520},
doi={10.1109/CVPR.2018.00474}
}
@article{json-api-usage-stats,
author = {Hnatyuk, Kolya},
title = {{130+ API Statistics: Usage, Growth {\&} Security}},
journal = {MarketSplash},
year = {2023},
month = oct,
publisher = {MarketSplash},
url = {https://marketsplash.com/api-statistics}
}
@misc{svelte,
title = {{Svelte {\ifmmode\bullet\else\textbullet\fi} Cybernetically enhanced web apps}},
year = {2024},
month = mar,
note = {[Online; accessed 12. Mar. 2024]},
url = {https://svelte.dev}
}
@misc{state-of-js-2022,
title = {{State of JavaScript 2022: Front-end Frameworks}},
year = {2023},
month = nov,
note = {[Online; accessed 12. Mar. 2024]},
url = {https://2022.stateofjs.com/en-US/libraries/front-end-frameworks}
}
@misc{js-frontend-frameworks-performance,
title = {{Interactive Results}},
year = {2024},
month = mar,
note = {[Online; accessed 12. Mar. 2024]},
url = {https://krausest.github.io/js-framework-benchmark/current.html}
}
@misc{svelte-kit,
title = {{SvelteKit {\ifmmode\bullet\else\textbullet\fi} Web development, streamlined}},
year = {2024},
month = mar,
note = {[Online; accessed 12. Mar. 2024]},
url = {https://kit.svelte.dev}
}
@misc{nginx,
title = {{Advanced Load Balancer, Web Server, {\&} Reverse Proxy - NGINX}},
journal = {NGINX},
year = {2024},
month = feb,
note = {[Online; accessed 12. Mar. 2024]},
url = {https://www.nginx.com}
}
@inproceedings{bycrpt,
author = {Provos, Niels and Mazi\`{e}res, David},
title = {A Future-Adaptable Password Scheme},
booktitle = {Proceedings of the FREENIX Track: 1999 USENIX Annual Technical Conference},
year = {1999}
}
@techreport{cifar10,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {University of Toronto},
year = {2009}
}
@misc{stl10,
title = {{STL-10 dataset}},
year = {2015},
month = nov,
note = {[Online; accessed 11. May 2024]},
url = {https://cs.stanford.edu/~acoates/stl10}
}
@misc{caltech256,
title = {Caltech 256},
author = {Griffin, Gregory and Holub, Alex and Perona, Pietro},
year = {2022},
month = apr,
doi = {10.22002/D1.20087},
publisher = {CaltechDATA}
}
@techreport{fgvca,
title = {Fine-Grained Visual Classification of Aircraft},
author = {S. Maji and J. Kannala and E. Rahtu and M. Blaschko and A. Vedaldi},
year = {2013},
archivePrefix = {arXiv},
eprint = {1306.5151},
primaryClass = {cs.CV}
}
@article{fooddataset,
title={{FoodX-251: A Dataset for Fine-grained Food Classification}},
author={Kaur, Parneet and Sikka, Karan and Wang, Weijun and Belongie, Serge and Divakaran, Ajay},
journal={arXiv preprint arXiv:1907.06167},
year={2019}
}
@incollection{pytorch,
title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
booktitle = {Advances in Neural Information Processing Systems 32},
pages = {8024--8035},
year = {2019},
publisher = {Curran Associates, Inc.},
url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}
}
@misc{pytorch-vs-tensorflow-1,
title = {{PyTorch vs TensorFlow: Deep Learning Frameworks [2024]}},
year = {2023},
month = dec,
note = {[Online; accessed 14. May 2024]},
url = {https://www.knowledgehut.com/blog/data-science/pytorch-vs-tensorflow}
}
@article{pytorch-vs-tensorflow-2,
author = {O'Connor, Ryan},
title = {{PyTorch vs TensorFlow in 2023}},
journal = {AssemblyAI Blog},
year = {2023},
month = apr,
url = {https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2023}
}
@article{artbench,
title={The ArtBench Dataset: Benchmarking Generative Models with Artworks},
author={Liao, Peiyuan and Li, Xiuyu and Liu, Xihui and Keutzer, Kurt},
journal={arXiv preprint arXiv:2206.11404},
year={2022}
}
@misc{postgressql,
title = {{PostgreSQL}},
journal = {PostgreSQL},
year = {2024},
month = may,
note = {[Online; accessed 14. May 2024]},
url = {https://www.postgresql.org}
}