diff --git a/images for report/artbench1.jpg b/images for report/artbench1.jpg
new file mode 100644
index 0000000..ae552ba
Binary files /dev/null and b/images for report/artbench1.jpg differ
diff --git a/images for report/artbench2.jpg b/images for report/artbench2.jpg
new file mode 100644
index 0000000..a95b332
Binary files /dev/null and b/images for report/artbench2.jpg differ
diff --git a/images for report/cifar_1.jpg b/images for report/cifar_1.jpg
new file mode 100644
index 0000000..7e675d9
Binary files /dev/null and b/images for report/cifar_1.jpg differ
diff --git a/images for report/cifar_2.jpg b/images for report/cifar_2.jpg
new file mode 100644
index 0000000..3e30844
Binary files /dev/null and b/images for report/cifar_2.jpg differ
diff --git a/images for report/incompatible_images.png b/images for report/incompatible_images.png
new file mode 100644
index 0000000..c88c4a7
Binary files /dev/null and b/images for report/incompatible_images.png differ
diff --git a/images for report/max-no-1.png b/images for report/max-no-1.png
new file mode 100644
index 0000000..622655d
Binary files /dev/null and b/images for report/max-no-1.png differ
diff --git a/images for report/max.png b/images for report/max.png
new file mode 100644
index 0000000..5ff55a8
Binary files /dev/null and b/images for report/max.png differ
diff --git a/images for report/mean-no-1.png b/images for report/mean-no-1.png
new file mode 100644
index 0000000..52aedcc
Binary files /dev/null and b/images for report/mean-no-1.png differ
diff --git a/images for report/mean.png b/images for report/mean.png
new file mode 100644
index 0000000..c683de4
Binary files /dev/null and b/images for report/mean.png differ
diff --git a/images for report/minst_1.png b/images for report/minst_1.png
new file mode 100644
index 0000000..69031fb
Binary files /dev/null and b/images for report/minst_1.png differ
diff --git a/images for report/minst_2.png b/images for report/minst_2.png
new file mode 100644
index 0000000..28bba6d
Binary files /dev/null and b/images for report/minst_2.png differ
diff --git a/images for report/stl_1.png b/images for report/stl_1.png
new file mode 100644
index 0000000..13e010d
Binary files /dev/null and b/images for report/stl_1.png differ
diff --git a/images for report/stl_2.png b/images for report/stl_2.png
new file mode 100644
index 0000000..43728c8
Binary files /dev/null and b/images for report/stl_2.png differ
diff --git a/main.bib b/main.bib
index 6a3a085..26672f3 100644
--- a/main.bib
+++ b/main.bib
@@ -322,10 +322,73 @@ month = {03},
pages = {},
title = {A Future-Adaptable Password Scheme}
}
-
-
-
-
-
-
-
+@TECHREPORT{cifar10,
+ author = {Alex Krizhevsky},
+ title = {Learning multiple layers of features from tiny images},
+ institution = {},
+ year = {2009}
+}
+@misc{stl10,
+ title = {{STL-10 dataset}},
+ year = {2015},
+ month = nov,
+ note = {[Online; accessed 11. May 2024]},
+ url = {https://cs.stanford.edu/~acoates/stl10}
+}
+ @misc{caltech256, title={Caltech 256}, DOI={10.22002/D1.20087}, abstractNote={We introduce a challenging set of 256 object categories containing a total of 30607 images. The original Caltech-101 was collected by choosing a set of object categories, downloading examples from Google Images and then manually screening out all images that did not fit the category. Caltech-256 is collected in a similar manner with several improvements: a) the number of categories is more than doubled, b) the minimum number of images in any category is increased from 31 to 80, c) artifacts due to image rotation are avoided and d) a new and larger clutter category is introduced for testing background rejection. We suggest several testing paradigms to measure classification performance, then benchmark the dataset using two simple metrics as well as a state-of-the-art spatial pyramid matching algorithm. Finally we use the clutter category to train an interest detector which rejects uninformative background regions.}, publisher={CaltechDATA}, author={Griffin, Gregory and Holub, Alex and Perona, Pietro}, year={2022}, month={Apr} }
+@techreport{fgvca,
+ title = {Fine-Grained Visual Classification of Aircraft},
+ author = {S. Maji and J. Kannala and E. Rahtu
+ and M. Blaschko and A. Vedaldi},
+ year = {2013},
+ archivePrefix = {arXiv},
+ eprint = {1306.5151},
+ primaryClass = "cs-cv",
+}
+@article{fooddataset,
+ title={{FoodX-251: A Dataset for Fine-grained Food Classification}},
+ author={Kaur, Parneet and Sikka, Karan and Wang, Weijun and Belongie, serge and Divakaran, Ajay},
+ journal={arXiv preprint arXiv:1907.06167},
+ year={2019}
+}
+@incollection{pytorch,
+title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
+author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
+booktitle = {Advances in Neural Information Processing Systems 32},
+pages = {8024--8035},
+year = {2019},
+publisher = {Curran Associates, Inc.},
+url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}
+}
+@misc{pytorch-vs-tensorflow-1,
+ title = {{PyTorch vs TensorFlow: Deep Learning Frameworks [2024]}},
+ year = {2023},
+ month = dec,
+ note = {[Online; accessed 14. May 2024]},
+ url = {https://www.knowledgehut.com/blog/data-science/pytorch-vs-tensorflow}
+}
+@article{pytorch-vs-tensorflow-2,
+ author = {O'Connor, Ryan},
+ title = {{PyTorch vs TensorFlow in 2023}},
+ journal = {News, Tutorials, AI Research},
+ year = {2023},
+ month = apr,
+ publisher = {News, Tutorials, AI Research},
+ url = {https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2023}
+}
+@article{artbench,
+ title={The ArtBench Dataset: Benchmarking Generative Models with Artworks},
+ author={Liao, Peiyuan and Li, Xiuyu and Liu, Xihui and Keutzer, Kurt},
+ journal={arXiv preprint arXiv:2206.11404},
+ year={2022}
+}
+https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2023/
+https://www.knowledgehut.com/blog/data-science/pytorch-vs-tensorflow
+@misc{postgressql,
+ title = {{PostgreSQL}},
+ journal = {PostgreSQL},
+ year = {2024},
+ month = may,
+ note = {[Online; accessed 14. May 2024]},
+ url = {https://www.postgresql.org}
+}
diff --git a/report/design.tex b/report/design.tex
index f27f6e0..3f10e41 100644
--- a/report/design.tex
+++ b/report/design.tex
@@ -1,9 +1,8 @@
\section{Service Design} \label{sec:sd}
- This section will discuss the design of the service.
- The design on this section is an ideal design solution, where no time limitations or engineering limitations were considered.
- This section tries to provide a description of a designed solution that would allow for the best user experience possible.
-
- The design proposed in this section can be viewed as a scoped version of this project, and the \hyperref[sec:si]{Service Implementation} section will discuss how the scope was limited so that the service would achieve the primary goals of the project while following the design, within the time frame of this project.
+ This chapter presents an idealised design, such design is open-ended to allow for multiple possible implementations that still meet the project requirements.
+ This idealised design is also envisioned to not be limited by time or engineering constraints.
+ The chapter \ref{sec:si} will discuss in more details how this design was further scoped to be able to be implemented in the timeframe available.
+ This chapter will transform the requirements discussed in the previous chapter into a more specialized technical design that can be used as a guide to implement such a service.
\subsection{Structure of the Service}
@@ -21,12 +20,10 @@
The presentation layer requires interactivity of the user, and therefore it needs to be accessible from the outside, and be simple to use.
The presentation layer was limited from being any interaction method to be a web page.
The web page can a separate server, or as part of the main API application, if it is in the same.
-
- The API layer, is one of the most important parts of the service. As it's going to be the most used way to interact with the service.
+ The API layer, is one of the most important parts of the service.
+ As it will be the most used way to interact with the service.
The user can use the API to control their entire model process from importing, to classification of images.
-
The Worker layer, consists of a set of servers available to perform GPU loads.
-
The Data layer, consists of stored images, models, and user data.
@@ -57,7 +54,7 @@
Aside from being able to perform the above tasks, there are no restrictions on how the application needs to be architected.
\subsection{API}
- As a software as a service, one of the main requirements is to be able to communicate with other services.
+ As a SaaS, one of the main requirements is to be able to communicate with other services.
The API provides the simplest way for other services to interact with this service.
The API needs to be able to perform all the tasks that the application can do, which include:
@@ -77,11 +74,8 @@
While implementing all the features that mentioned above, the API has to handle multiple simultaneous requests.
Ideally, those requests should be handled as fast as possible.
-
The API should be implemented such that it can be easily expandable and maintainable, so that future improvements can happen.
-
- The API should be consistent and easy to use, information on how to use the API should also be available to possible users.
-
+ It should be consistent and easy to use, information on how to use the API should also be available to possible users.
The API should be structured as a REST JSON API, per the requirements.
The API should only accept inputs via the URL parameters of GET requests or via JSON on POST requests.
Binary formats can also be used to handle file upload and downloads, as transferring files via JSON extremely inefficient.
@@ -105,16 +99,14 @@
\begin{multicols}{2} % TODO think of more ways
\begin{itemize}
\item Separating the training to different machines.
- \item Control the number of resources that training machine can utilize
+ \item Control the number of resources that training machine can utilise
\item Control the time when the shared training and inference machine can be used for training.
\item Allow users to have their own ``Runners'' where the training tasks can happen.
\end{itemize}
\end{multicols}
- \subsection{Conclusion}
- This section introduced multiple possible designs options for a service, that intends to achieve automated image classification, can follow to implement a robust system.
-
- The next section will be discussing how the system was implemented and which of the possible design options were chosen when implementing the system.
+ \subsection{Summary}
+ This chapter introduced multiple possible designs options for a service, that intends to achieve automated image classification, can follow to implement a robust system. The next chapter will be discussing how the system was implemented and which of the possible design options were chosen when implementing the system.
\pagebreak
diff --git a/report/eval.tex b/report/eval.tex
new file mode 100644
index 0000000..5225d4c
--- /dev/null
+++ b/report/eval.tex
@@ -0,0 +1,221 @@
+\section{Service Evaluation} \label{sec:se}
+ This section will discuss how the service can be evaluated from a technical standpoint and its results.
+
+ With the goals of the project, there are two kinds of tests that need to be accounted for.
+ User testing tests that relate to the experience of the user while using the project and tests that quantitive test the project.
+
+ Such as accuracy of the generated models, response time to queries.
+
+ \subsection{Testing the model creation}
+ To test the system, a few datasets were selected.
+ The datasets were selected to represent different possible sizes of models, and sizes of output labels.
+
+ The ImageNet\cite{imagenet} was not selected as one of the datasets that will be tested, as it does not represent the target problem that this project is trying to tackle.
+
+ The tests will measure:
+ \begin{itemize}
+ \item Time to process and validate the entire dataset upon upload
+ \item Time to train the dataset
+ \item Time to classify the image once the dataset has been trained
+ \item Time to extend the model
+ \item Accuracy of the newly created model
+ \end{itemize}
+
+ The results will be placed in the results table.
+
+ \subsubsection*{MNIST}
+
+ The MNIST \cite{mnist} is a large dataset of handwritten digits, that is commonly used to trains and test machine learning systems.
+ This dataset was selected due to its size. It is a small dataset that can be trained quickly and can be used to verify other internal systems of the service.
+ During testing, only the 9 out of 10 classes are trained and the 10th is added during the retraining process.
+
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.2\linewidth]{minst_1}}}
+ \qquad
+ \subfloat{{\includegraphics[width=.2\linewidth]{minst_2}}}
+ \caption{Examples of the images in the MNIST dataset}
+ \end{figure}
+
+
+ \subsubsection*{CIFAR-10}
+
+ The CIFAR-10 \cite{cifar10} dataset contains various images that are commonly used to train and test machine learning algorithms.
+ This dataset was selected due to its size. It is a small dataset that can be trained quickly, but it has bigger, and coloured images, which makes it harder than MNIST.
+
+ During testing, only the 9 out of 10 classes are trained and the 10th is added during the retraining process.
+
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.2\linewidth]{cifar_1}}}
+ \qquad
+ \subfloat{{\includegraphics[width=.2\linewidth]{cifar_2}}}
+ \caption{Examples of the images in the CIFAR-10 dataset}
+ \end{figure}
+
+ \subsubsection*{STL-10}
+ The STL-10 \cite{stl10} dataset that was inspired by the CIFAR-10 \cite{cifar10}, but it has bigger images.
+ This dataset was selected because of the bigger image. The images are bigger than both CIFAR-10 and MNIST which makes the model harder to create, and train.
+
+ During testing, only the 9 out of 10 classes are trained and the 10th is added during the retraining process.
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.2\linewidth]{stl_1}}}
+ \qquad
+ \subfloat{{\includegraphics[width=.2\linewidth]{stl_2}}}
+ \caption{Examples of the images in the STL-10 dataset}
+ \end{figure}
+
+ \subsubsection*{ArtBench}
+ The ArtBench \cite{artbench} dataset is a dataset that contains artworks annotated with their art style that is intended to train generative models.
+ This dataset was selected due to the even bigger images than the previously tested models.
+
+ During testing, only the 9 out of 10 classes are trained and the 10th is added during the retraining process.
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.2\linewidth]{artbench1}}}
+ \qquad
+ \subfloat{{\includegraphics[width=.2\linewidth]{artbench2}}}
+ \caption{Examples of the images in the ArtBench dataset}
+ \end{figure}
+
+ \subsubsection*{Incompatible datasets}
+
+ There were attempts to test other datasets against the system, but those datasets were incompatible.
+ The datasets had irregular images sizes, which, as it was mentioned previously, the system does not support.
+ This caused a large section of images inputted being rejected, which means that it would have not trained.
+
+ A list of datasets that are incompatible because of this are:
+
+ \begin{multicols}{2}
+ \begin{itemize}
+ \item Caltech 256 \cite{caltech256}
+ \item FGVC-Aircraft \cite{fgvca}
+ \item IFood 2019 \cite{fooddataset}
+ \end{itemize}
+ \end{multicols}
+
+
+ \subsubsection*{Results}
+ \begin{longtable}{ | c | c | c | c | c | c |}
+ \hline
+ Dataset & Import Time & Train Time & Classification Time & Extend Time & Accuracy \\ \hline
+ MNIST & $8s$ & $2m$ & $>1s$ & $50s$ & $98\%$ \\ \hline
+ CIFAR-10 & $6s$ & $41m 38s$ & $>1s$ & $1m 11s$ & $95.2\%$ \\ \hline
+ STL-10 & $1s$ & $37m 50s$ & $>1s$ & $1m 10s$ & $95.3\%$ \\ \hline
+ Art Bench & $10s$ & $4h 20m 31$ & $>1s$ & $1m 41s$ & $41.86\%$ \\ \hline
+ \caption{Evaluation Results}
+ \label{tab:eval-results}
+ \end{longtable}
+
+ The system was able to easily import all the datasets provided in an incredibly fast time, this included the incompatible datasets.
+ While the system was able to load and verify the images of the incompatible datasets, it correctly marked the images as incompatible, which can be seen in Figure \ref{fig:incompatible_images}.
+ Which would make them not being able to be used for training, which would mean the model would have not had any data to train, which would obviously result in terrible accuracy results.
+
+ \begin{figure}[h!]
+ \centering
+ \includegraphics[width=0.7\textheight]{incompatible_images}
+ \caption{Screenshot of a web application showing many images that do not have the correct format.}
+ \label{fig:incompatible_images}
+ \end{figure}
+
+ The system was able to train, classify, and extend the MNIST, CIFAR-10, and STL-10 datasets, with high accuracy rates.
+ This is expected as these models are models that are commonly known for being easy to train.
+ The system could also train these models in a relatively short, small amount of time.
+ The classification time is optimal, with all datasets being able to classify an image in less than a second.
+ The time to extend is also very promising, and the system could extend a new set of classes fairly quickly.
+
+ The system was unable to achieve a high level of accuracy while training for the ArtBench dataset.
+ And the training time to achieve that lower level of accuracy was also much higher than the other datasets.
+ The longer training time can be attributed to the larger images, which make the model harder to train, as the model has to make more computations.
+ Another factor for the increased training time is the necessity for the model to train longer to achieve a higher accuracy, due to the model's decreased learning rate.
+ As for the low accuracy ratting, I hypothesise that this is caused by the nature of the dataset.
+ The dataset is categorized into various art styles.
+ Even within a single art style, artists' individual styles can vary significantly.
+ Given the relatively small sample size of only 5000 training images per art style, this variability poses a challenge for the model's ability to discern between distinct styles.
+ Another option is that the system did not generate a good enough model for this dataset.
+ The system was still able to fairly quickly classify and image, with the classification time still being under less than a second.
+ The expansion time was also fairy quick, being on par with the other models.
+
+ \subsubsection*{Testing limitations}
+ There are some limitations caused by this testing.
+ The biggest problem is in the training, classification and expansion timings, this value will depend on what hardware the system that is running the model has.
+ The small sample size of the datasets is also limiting, as it does not fully prove that the system can create generalized models.
+
+
+ % api benchmarking if there is time
+
+ \subsection{API Performance Testing}
+ The application performance was also tested.
+ To test the performance of the API, a small program was written that would simultaneously request an image to be classified.
+ The selected image was one of the sample images provided in the MNIST dataset.
+ The program tries to perform 1, 10, 100, 1000, 10000 simultaneous requests, and waits 2 seconds between each set.
+ The program would then record how much time it would take for the image classification task to be completed.
+ And after all requests are completed, the program call calculates the mean and max requests times.
+
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.5\linewidth]{max}}}
+ \subfloat{{\includegraphics[width=.5\linewidth]{mean}}}
+ \caption{Results of the API testing}
+ \label{fig:results-api}
+ \end{figure}
+
+ The values shown in Figure \ref{fig:results-api} show that if you configure the system to only have one runner, it will struggle to handle large amounts of simultaneous requests.
+ This is expected, as only having one process trying to classify large amounts of images would be unwise.
+ In reality this would never be set up this way since only having one runner in a production environment would never be acceptable.
+
+ \begin{figure}[H]
+ \centering
+ \subfloat{{\includegraphics[width=.5\linewidth]{max-no-1}}}
+ \subfloat{{\includegraphics[width=.5\linewidth]{mean-no-1}}}
+ \caption{Results of the API testing}
+ \label{fig:results-api-no-one}
+ \end{figure}
+
+ Figure \ref{fig:results-api-no-one} shows the same graph as Figure \ref{fig:results-api} but with the results for the test where the API only had one runner removed.
+ The graph indicates that the system was able to handle, 10000 simultaneous requests in less than 30 seconds, which more than exceeds the expectations of the project.
+ The results also indicate that the numbers of runners have demising returns, as the values maximum and mean request time are within a small range.
+ This can be caused by multiple reasons.
+ One such reason is that were not enough requests to show a significant difference between then number of runners.
+ Another reason is that the amount of work that the system has to perform to manage all the runners outweighs the benefits of having more runners.
+
+ While testing, the ram usage was monitored but not recorded.
+ As expected, the memory usage significant increased with the number of runners, but did not exceed 5Ā GiB.
+ The higher memory usage is a result of the runners caching the model used.
+ The memory footprint of the system limited by the model selected as the model generated for MNIST dataset is not large.
+ And larger models are expected to generate larger memory footprints.
+ When deploying the application, an administrator should take considerations the expected model sizes as well as the usage frequency expected and configure the application accordingly.
+
+ These results are very positive since the project was running on my personal computer and not on professional server hardware.
+ This indicates that when deployed to a production environment the service is most likely to perform extremely well.
+
+ \subsubsection*{Testing limitations}
+ As with the previous testing, this test has also some limitations.
+ Including the same hardware limitation where different hardware will give different results for this test.
+ Another limiting factor is that the test did not use different models or images which could cause the service to have to reload models from disk, affecting performance.
+
+
+ \subsection{Usability}
+ While if a service is usable differs vastly from user to user, the implemented system is simple enough where a user who does not know anything about image classification could upload images, and obtain a working classification model.
+ This simplicity may pose limitations for users with advanced knowledge, which would fall short of optimal usability standards for that user.
+ As this user might choose not to use the system because it does not allow the level of control that they might want.
+
+ The administrator area is less user-friendly than the rest of the application, but that is less critical.
+ An administrator is not the target user of the application, and is expected to manage this system, which requires prior knowledge about the system.
+
+
+ \subsection{Summary}
+ The service can create models, and train models that service the user's needs.
+ These models will most likely be able to achieve high accuracy targets, but in some cases the system might fail to generate a good enough model for the provided dataset.
+ During testing, the limitations of the strict image size requirements were also shown, as the system, would have failed to train those datasets because most of the images would have been removed before the model started training.
+
+ While classifying images, the service performed extremely well.
+ The API performance tests showed that if configured correctly, a single server configuration can handle a large amount of simultaneous images extremely fast.
+ These results indicate that the system has the performance required to be put in a production environment and perform well.
+
+ As for the usability of the service the system, the system is usable by beginners, but might detract more advanced users from using it.
+
+ Overall, the service evaluation is positive, as the system was able to create and train new models, as well as being user-friendly to users who might not have the skills to perform image classification.
+
+\pagebreak
diff --git a/report/intro.tex b/report/intro.tex
index 489b213..4d510b6 100644
--- a/report/intro.tex
+++ b/report/intro.tex
@@ -1,24 +1,22 @@
\section{Introduction} \label{sec:introduction}
- This section will introduce the project: background, motives, aims, goals, and success criteria.
- The section will end with this report structure.
+ The purpose of this dissertation is to design and implement an automated image classification service that will empower users to connect their existing services that required image classification with the one being implemented in this project.
+ This report will detail what requirements such service might have.
+ How those requirements can be turned into a design for such a service, and how that design can be implemented into software with limited time and resources.
+
+ This chapter will service as an introduction to the project, and will discuss the background, motivations, aims, goals, and success criteria for the project.
+ This chapter will end with an overview of the project structure.
\subsection{Project Background}
- There are many automated tasks that being done manually.
- If those tasks can be done automatically, a lot of productivity could be gained from as human doing those tasks can do tasks that only humans can.
-
- This project aims to provide a software platform, where users with no experience in machine learning, data analysis could create machine learning models to process their data.
- In this project, the platform will be scoped to image classification.
- As an easy-to-use platform needs to be able to handle: image uploads, processing, and verification; model creation, management, and expansion; and image classification.
-
- % This report will do a brief analysis of current image classification systems, followed by an overview of the design of the system, and implementation details. The report will finish with analysis of legal, ethical and societal issues, and evaluation of results, and objectives.
+ There are many automatable tasks that are currently being done manually.
+ If those tasks can be done automatically, a lot of productivity could be gained by having the computers perform those tasks, allowing humans to perform tasks that only humans can do.
+ Moreover, recently, machine learning models have become as good, or even better than humans in tasks such image classification. This project will focus on image classification, as it is an area where automation is still required, and there are few other images automated systems that are easy to use. It is also an area that has not been saturated with new products such as the natural language processing space has.
+
\subsection{Project Motivations}
-
- Currently, there are many classification tasks that are being done manually.
- Thousands of man-hours are used to classify images, this task can be automated.
- There are a few easy-to-use image classification systems that require low to no knowledge of image classification.
- This project aims to fill that role and provide a complete image classification service.
- While still been user-friendly, where a user who has never done any kind of user classification still could get good results, by simply using this service.
+
+ This project allows for the improvement of my learned skills, while being an interesting, complex piece of software to develop.
+ The topics, skills, and knowledge required to build this project, cover all my years in the university, from the simple applications developed in the first year; the soft skills learned during placement; and the complexity of distributed systems and deep learning of the third year.
+ I also wanted to use the opportunity that this project provides to gain experience in emerging technologies such as Go, and improve all my previous abilities and skills.
\subsection{Project Aim}
The project aims to create an easy-to-use software platform, where users can create image classification models without having prior knowledge about machine learning.
@@ -31,12 +29,12 @@
This project's primary objectives are to design and implement:
\begin{itemize}
- \item a system to upload images that will be assigned to a model
+ \item a system to upload images that will be assigned to a model.
\item a system to automatically create and train models.
\item a platform where users can manage their models.
% \item a system to automatically expand and reduce models without fully retraining the models.
\item a system to automatically expand models without fully retraining the models.
- \item an Application Programming Interface(API) that users can interact programmatically with the service.
+ \item an Application Programming Interface (API) that users can interact programmatically with the service.
\end{itemize}
This project's secondary objectives are to:
@@ -47,7 +45,6 @@
\subsection{Success Criteria}
As it was mentioned before, the project can be considered a success when the primary objectives have been completed.
-
Therefore, the success criteria of this project can be defined as:
\begin{itemize}
@@ -56,18 +53,18 @@
\end{itemize}
\subsection{Project Structure}
- The report on the project shows the development and designs stages of the project. With each section addressing a part of the design and development process.
+ The report on the project shows the development and designs stages of the project. With each chapter addressing a part of the design and development process.
\renewcommand*{\arraystretch}{2}
\begin{longtable}{p{7cm} p{8cm}}
- \hyperref[sec:introduction]{Introduction} & The introduction section will do a brief introduction of the project and its objectives. \\
- \hyperref[sec:lit-tech-review]{Literature and Technical Review} & The Literature and Technical Review section will introduce some current existing projects that are similar to this one, and introduce some technologies that can be used to implement this project. \\
- \hyperref[sec:sanr]{Service Analysis and Requirements} & This section will analyse the project requirements. The section will define design requirements that the service will need to implement to be able to achieve the goals that were set up. \\
- \hyperref[sec:sd]{Service Design} & This section will discuss how a service could be designed that it matches the requirements of the service. \\
- \hyperref[sec:sd]{Service Implementation} & Information on how the design of the system was turned into software is in this section. \\
- \hyperref[sec:lsec]{Legal, Societal, Ethical, Professional Considerations} & This section will cover potential legal, societal, ethical and professional, issues that might arise from the service and how they are mitigated. \\
- \hyperref[sec:se]{Service Evaluation} & In this section, the model will be tested and the results of the tests will be analysed. \\
- \hyperref[sec:crpo]{Critical Review of Project Outcomes} & In this section, will compare the project goals with what was achieved. Then, according to the results, the project will either be deemed successful or not.
+ \hyperref[sec:introduction]{Introduction} & The introduction chapter will do a brief introduction of the project and its objectives. \\
+ \hyperref[sec:lit-tech-review]{Literature and Technical Review} & The Literature and Technical Review chapter will introduce some current existing projects that are similar to this one, and introduce some technologies that can be used to implement this project. \\
+ \hyperref[sec:sanr]{Service Analysis and Requirements} & This chapter will analyse the project requirements. The chapter will define design requirements that the service will need to implement to be able to achieve the goals that were set up. \\
+ \hyperref[sec:sd]{Service Design} & This chapter will discuss how a service could be designed that it matches the requirements of the service. \\
+ \hyperref[sec:sd]{Service Implementation} & Information on how the design of the system was turned into software is in this chapter. \\
+ \hyperref[sec:lsec]{Legal, Societal, Ethical, Professional Considerations} & This chapter will cover potential legal, societal, ethical and professional, issues that might arise from the service and how they are mitigated. \\
+ \hyperref[sec:se]{Service Evaluation} & In this chapter, the model will be tested and the results of the tests will be analysed. \\
+ \hyperref[sec:crpo]{Critical Review of Project Outcomes} & In this chapter, will compare the project goals with what was achieved. Then, according to the results, the project will either be deemed successful or not.
\end{longtable}
\pagebreak
diff --git a/report/lit.tex b/report/lit.tex
index 008cb89..2e6a12f 100644
--- a/report/lit.tex
+++ b/report/lit.tex
@@ -1,5 +1,5 @@
\section{Literature and Technical Review} \label{sec:lit-tech-review}
- This section reviews existing technologies in the market that do image classification. It also reviews current image classification technologies, which meet the requirements for the project. This review also analyses methods that are used to distribute the learning between various physical machines, and how to spread the load so minimum reloading of the models is required when running the model.
+ This chapter reviews existing technologies in the market that do image classification. It also reviews current image classification technologies, which meet the requirements for the project. This review also analyses methods that are used to distribute the learning between various physical machines, and how to spread the load so minimum reloading of the models is required when running the model.
\subsection{Existing Classification Platforms}
 There are currently some existing software as a service (SaaS) platforms that do provide similar services to the ones this project will be providing.
@@ -20,7 +20,7 @@
\subsection{Requirements of Image Classification Models}
- The of the main objectives of this project are to be able to create models that can give a class given an image for any dataset. Which means that there will be no ``one solution fits all to the problem''. While the most complex way to solve a problem would most likely result in success, it might not be the most efficient way to achieve the results.
+ One of the main objectives of this project is to be able to create models that can give a class given an image for any dataset. Which means that there will be no ``one solution fits all to the problem''. While the most complex way to solve a problem would most likely result in success, it might not be the most efficient way to achieve the results.
This section will analyse possible models that would obtain the best results. The models for this project have to be the most efficient as possible while resulting in the best accuracy as possible.
@@ -41,7 +41,6 @@
% TODO find some papers to proff this
 The system will use supervised models to classify images, using a combination of different types of models, using neural networks, convolutional neural networks, deep neural networks and deep convolutional neural networks.
-
 These types were decided as they have had a large success in the past in other image classification challenges, for example in the ImageNet challenges \cite{imagenet}, which have ranked different models in classifying 14 million images. The contest ran from 2010 to 2017.
The models that participated in the contest tended to use more and more Deep convolution neural networks, out of the various models that were generated there are a few landmark models that were able to achieve high accuracies, including AlexNet \cite{krizhevsky2012imagenet}, ResNet-152 \cite{resnet-152}, EfficientNet \cite{efficientnet}.
@@ -62,7 +61,7 @@
% This needs some work in terms of gramar
ResNet works by creating shortcuts between sets of layers, the shortcuts allow residual values from previous layers to be used on the upper layers. The hypothesis being that it is easier to optimize the residual mappings than the linear mappings.
 The results proved that using the residual values improved training of the model, as the results of the challenge show.
- It's important to note that using residual networks tends to give better results, the more layers the model has. While this could have a negative impact on performance, the number of parameters per layer does not grow that steeply in ResNet when comparing it with other architectures as it uses other optimizations such as $1x1$ kernel sizes, which are more space efficient. Even with these optimizations, it can still achieve incredible results. Which might make it a good contender to be used in the service as one of the predefined models to use to try to create the machine learning models.
+ It's important to note that using residual networks tends to give better results, the more layers the model has. While this could have a negative impact on performance, the number of parameters per layer does not grow that steeply in ResNet when comparing it with other architectures as it uses other optimisations such as $1x1$ kernel sizes, which are more space efficient. Even with these optimisations, it can still achieve incredible results. Which might make it a good contender to be used in the service as one of the predefined models to use to try to create the machine learning models.
% MobileNet
@@ -71,7 +70,7 @@
To test their results, the EfficientNet team created a baseline model which as a building block used the mobile inverted bottleneck MBConv \cite{inverted-bottleneck-mobilenet}. The baseline model was then scaled using the compound method, which resulted in better top-1 and top-5 accuracy.
While EfficientNets are smaller than their non-EfficientNet counterparts, they are more computational intensive, a ResNet-50 scaled using the EfficientNet compound scaling method is $3\%$ more computational intensive than a ResNet-50 scaled using only depth while improving the top-1 accuracy by $0.7\%$.
 And as the model will be trained and run multiple times, decreasing the computational cost might be a better overall target for sustainability than being able to offer higher accuracies.
- Even though scaling using the EfficientNet compound method might not yield the best results using some EfficientNets what were optimized by the team to would be optimal, for example, EfficientNet-B1 is both small and efficient while still obtaining $79.1\%$ top-1 accuracy in ImageNet, and realistically the datasets that this system will process will be smaller and more scope specific than ImageNet.
+ Even though scaling using the EfficientNet compound method might not yield the best results, using some of the EfficientNets that were optimised by the team would be optimal, for example, EfficientNet-B1 is both small and efficient while still obtaining $79.1\%$ top-1 accuracy in ImageNet, and realistically the datasets that this system will process will be smaller and more scope specific than ImageNet.
% \subsection{Efficiency of transfer learning}
@@ -87,12 +86,26 @@
% There are also unsupervised learning methods that do not have a fixed number of classes. While this method would work as an expandable model method, it would not work for the purpose of this project. This project requires that the model has a specific set of labels which does not work with unsupervised learning which has unlabelled data. Some technics that are used for unsupervised learning might be useful in the process of creating expandable models.
+ \subsection{Machine learning libraries}
+ While there are various machine learning libraries, the two biggest ones are TensorFlow and PyTorch.
+ This section will compare the two different libraries.
+ TensorFlow \cite{tensorflow2015-whitepaper} is an open-source machine learning platform created by Google to develop their production and research systems.
+ PyTorch \cite{pytorch} is an open-source machine learning library developed by Meta to power their systems.
- \subsection{Conclusion}
- The technical review of current systems reveals that there are current systems that exist that can perform image classification tasks, but they are not friendly in ways to easily expand currently existing models.
+ While both libraries can achieve the same tasks with similar levels of accuracy \cite{pytorch-vs-tensorflow-1}, PyTorch is mostly used in research-oriented applications rather than applications that might require deployment \cite{pytorch-vs-tensorflow-1,pytorch-vs-tensorflow-2}.
+ This is generally attributed to the maturity of TensorFlow and TensorFlow's ability to create static graphs, which are optimised for inference.
+
+ More important for the project is compatibility with other technologies that the project will use.
+ In this case, TensorFlow has native support for Go while PyTorch does not.
+ Due to TensorFlow's advantages in deployment and compatibility, it is the clear choice for the project.
+
+ \subsection{Summary}
+ The technical review of current systems shows that there are existing systems that can perform image classification tasks, but they do not make it easy to expand currently existing models.
The current methods that exist for image classification seem to have reached a classification accuracy and efficiency that make a project like this feasible.
+ Model architectures such as ResNet and EfficientNet have been able to perform image classification on large datasets and achieve higher-than-human performance.
+ With these architectures in mind, the system should be able to create machine learning models that perform equally well.
- % TODO talk about web serving thechnlogies
+ As for what technologies to use to build such models, TensorFlow seems to be the correct choice as it has better performance when deploying to production, and can more easily integrate with the chosen web technologies.
\pagebreak
diff --git a/report/report.bbl b/report/report.bbl
new file mode 100644
index 0000000..eef8ac6
--- /dev/null
+++ b/report/report.bbl
@@ -0,0 +1,1343 @@
+% $ biblatex auxiliary file $
+% $ biblatex bbl format version 3.2 $
+% Do not modify the above lines!
+%
+% This is an auxiliary file used by the 'biblatex' package.
+% This file may safely be deleted. It will be recreated by
+% biber as required.
+%
+\begingroup
+\makeatletter
+\@ifundefined{ver@biblatex.sty}
+ {\@latex@error
+ {Missing 'biblatex' package}
+ {The bibliography requires the 'biblatex' package.}
+ \aftergroup\endinput}
+ {}
+\endgroup
+
+
+\refsection{0}
+ \datalist[entry]{none/global//global/global}
+ \entry{amazon-rekognition}{misc}{}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{Amazon Web Services, Inc}
+ \field{month}{12}
+ \field{note}{[Online; accessed 18. Dec. 2023]}
+ \field{title}{{What Is Amazon Rekognition? (1:42)}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://aws.amazon.com/rekognition
+ \endverb
+ \verb{url}
+ \verb https://aws.amazon.com/rekognition
+ \endverb
+ \endentry
+ \entry{amazon-rekognition-custom-labels}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{month}{12}
+ \field{note}{[Online; accessed 18. Dec. 2023]}
+ \field{title}{{What is Amazon Rekognition Custom Labels? - Rekognition}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/what-is.html?pg=ln&sec=ft
+ \endverb
+ \verb{url}
+ \verb https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/what-is.html?pg=ln&sec=ft
+ \endverb
+ \endentry
+ \entry{amazon-rekognition-custom-labels-training}{misc}{}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labeltitlesource}{title}
+ \field{month}{12}
+ \field{note}{[Online; accessed 18. Dec. 2023]}
+ \field{title}{{Training an Amazon Rekognition Custom Labels model - Rekognition}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/training-model.html#tm-console
+ \endverb
+ \verb{url}
+ \verb https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/training-model.html#tm-console
+ \endverb
+ \endentry
+ \entry{google-vision-api}{online}{}
+ \name{author}{1}{}{%
+ {{hash=8b36e9207c24c76e6719268e49201d94}{%
+ family={Google},
+ familyi={G\bibinitperiod}}}%
+ }
+ \strng{namehash}{8b36e9207c24c76e6719268e49201d94}
+ \strng{fullhash}{8b36e9207c24c76e6719268e49201d94}
+ \strng{bibnamehash}{8b36e9207c24c76e6719268e49201d94}
+ \strng{authorbibnamehash}{8b36e9207c24c76e6719268e49201d94}
+ \strng{authornamehash}{8b36e9207c24c76e6719268e49201d94}
+ \strng{authorfullhash}{8b36e9207c24c76e6719268e49201d94}
+ \field{sortinit}{4}
+ \field{sortinithash}{9381316451d1b9788675a07e972a12a7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{title}{Vision {AI} | Google Cloud}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://cloud.google.com/vision?hl=en
+ \endverb
+ \verb{url}
+ \verb https://cloud.google.com/vision?hl=en
+ \endverb
+ \endentry
+ \entry{google-vision-price-sheet}{misc}{}
+ \field{sortinit}{5}
+ \field{sortinithash}{20e9b4b0b173788c5dace24730f47d8c}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{Google Cloud}
+ \field{month}{12}
+ \field{note}{[Online; accessed 20. Dec. 2023]}
+ \field{title}{{Pricing {$\vert$} Vertex AI Vision {$\vert$} Google Cloud}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://cloud.google.com/vision-ai/pricing
+ \endverb
+ \verb{url}
+ \verb https://cloud.google.com/vision-ai/pricing
+ \endverb
+ \endentry
+ \entry{google-vision-product-recognizer-guide}{misc}{}
+ \field{sortinit}{6}
+ \field{sortinithash}{b33bc299efb3c36abec520a4c896a66d}
+ \field{labeltitlesource}{title}
+ \field{month}{12}
+ \field{note}{[Online; accessed 20. Dec. 2023]}
+ \field{title}{{Product Recognizer guide}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://cloud.google.com/vision-ai/docs/product-recognizer
+ \endverb
+ \verb{url}
+ \verb https://cloud.google.com/vision-ai/docs/product-recognizer
+ \endverb
+ \endentry
+ \entry{mnist}{article}{}
+ \name{author}{1}{}{%
+ {{hash=2f5fbdc5c3cf91f62a64663cd72397b3}{%
+ family={Deng},
+ familyi={D\bibinitperiod},
+ given={Li},
+ giveni={L\bibinitperiod}}}%
+ }
+ \list{publisher}{1}{%
+ {IEEE}%
+ }
+ \strng{namehash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \strng{fullhash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \strng{bibnamehash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \strng{authorbibnamehash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \strng{authornamehash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \strng{authorfullhash}{2f5fbdc5c3cf91f62a64663cd72397b3}
+ \field{sortinit}{7}
+ \field{sortinithash}{108d0be1b1bee9773a1173443802c0a3}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{IEEE Signal Processing Magazine}
+ \field{number}{6}
+ \field{title}{The mnist database of handwritten digit images for machine learning research}
+ \field{volume}{29}
+ \field{year}{2012}
+ \field{pages}{141\bibrangedash 142}
+ \range{pages}{2}
+ \endentry
+ \entry{mist-high-accuracy}{article}{}
+ \name{author}{5}{}{%
+ {{hash=ec3e94c9a6fa7655f35a1faac83a709d}{%
+ family={An},
+ familyi={A\bibinitperiod},
+ given={Sanghyeon},
+ giveni={S\bibinitperiod}}}%
+ {{hash=7adc548246fbb4e1d8a091761cb1af95}{%
+ family={Lee},
+ familyi={L\bibinitperiod},
+ given={Min\bibnamedelima Jun},
+ giveni={M\bibinitperiod\bibinitdelim J\bibinitperiod}}}%
+ {{hash=d9730f4c920b674f1b84c99a703b797e}{%
+ family={Park},
+ familyi={P\bibinitperiod},
+ given={Sanglee},
+ giveni={S\bibinitperiod}}}%
+ {{hash=164495751deac883eb2c56aa3db5ac5f}{%
+ family={Yang},
+ familyi={Y\bibinitperiod},
+ given={Heerin},
+ giveni={H\bibinitperiod}}}%
+ {{hash=f463ebb21b49a0a93666c7254b0a49fb}{%
+ family={So},
+ familyi={S\bibinitperiod},
+ given={Jungmin},
+ giveni={J\bibinitperiod}}}%
+ }
+ \strng{namehash}{c9a0af631b7f33929b8a52219b998925}
+ \strng{fullhash}{e30223f61e9d3e240aec91c5f315b95e}
+ \strng{bibnamehash}{e30223f61e9d3e240aec91c5f315b95e}
+ \strng{authorbibnamehash}{e30223f61e9d3e240aec91c5f315b95e}
+ \strng{authornamehash}{c9a0af631b7f33929b8a52219b998925}
+ \strng{authorfullhash}{e30223f61e9d3e240aec91c5f315b95e}
+ \field{sortinit}{8}
+ \field{sortinithash}{a231b008ebf0ecbe0b4d96dcc159445f}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprinttype}{arXiv}
+ \field{journaltitle}{CoRR}
+ \field{title}{An Ensemble of Simple Convolutional Neural Network Models for {MNIST} Digit Recognition}
+ \field{volume}{abs/2008.10400}
+ \field{year}{2020}
+ \verb{eprint}
+ \verb 2008.10400
+ \endverb
+ \verb{urlraw}
+ \verb https://arxiv.org/abs/2008.10400
+ \endverb
+ \verb{url}
+ \verb https://arxiv.org/abs/2008.10400
+ \endverb
+ \endentry
+ \entry{lecun-98}{article}{}
+ \name{author}{4}{}{%
+ {{hash=9ae8dc3a930d73e11e1b22a9ef065055}{%
+ family={LeCun},
+ familyi={L\bibinitperiod},
+ given={Y.},
+ giveni={Y\bibinitperiod}}}%
+ {{hash=bbfb0f3936c83b7b099561e6f0e32ef3}{%
+ family={Bottou},
+ familyi={B\bibinitperiod},
+ given={L.},
+ giveni={L\bibinitperiod}}}%
+ {{hash=419350ebbeb4eba5351469f378dee007}{%
+ family={Bengio},
+ familyi={B\bibinitperiod},
+ given={Y.},
+ giveni={Y\bibinitperiod}}}%
+ {{hash=00f962380d25c4d7f23fa6c7e926c3ed}{%
+ family={Haffner},
+ familyi={H\bibinitperiod},
+ given={P.},
+ giveni={P\bibinitperiod}}}%
+ }
+ \strng{namehash}{a8a87a12538dbdb246c3bc01b5a3dcc2}
+ \strng{fullhash}{46d2e204dc0bc08d019fcff6774ca34f}
+ \strng{bibnamehash}{46d2e204dc0bc08d019fcff6774ca34f}
+ \strng{authorbibnamehash}{46d2e204dc0bc08d019fcff6774ca34f}
+ \strng{authornamehash}{a8a87a12538dbdb246c3bc01b5a3dcc2}
+ \strng{authorfullhash}{46d2e204dc0bc08d019fcff6774ca34f}
+ \field{sortinit}{9}
+ \field{sortinithash}{0a5ebc79d83c96b6579069544c73c7d4}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{Proceedings of the IEEE}
+ \field{month}{11}
+ \field{number}{11}
+ \field{title}{Gradient-Based Learning Applied to Document Recognition}
+ \field{volume}{86}
+ \field{year}{1998}
+ \field{pages}{2278\bibrangedash 2324}
+ \range{pages}{47}
+ \endentry
+ \entry{imagenet}{inproceedings}{}
+ \name{author}{6}{}{%
+ {{hash=0ae7fdc13773f928525f673b05f37149}{%
+ family={Deng},
+ familyi={D\bibinitperiod},
+ given={Jia},
+ giveni={J\bibinitperiod}}}%
+ {{hash=7d87c5957b07153c7f18918b92830bf8}{%
+ family={Dong},
+ familyi={D\bibinitperiod},
+ given={Wei},
+ giveni={W\bibinitperiod}}}%
+ {{hash=d5670b2600fea169724521e252d9d09d}{%
+ family={Socher},
+ familyi={S\bibinitperiod},
+ given={Richard},
+ giveni={R\bibinitperiod}}}%
+ {{hash=2afdae52015b97674d81efea449edce2}{%
+ family={Li},
+ familyi={L\bibinitperiod},
+ given={Li-Jia},
+ giveni={L\bibinithyphendelim J\bibinitperiod}}}%
+ {{hash=4838f7fdd28d5cefb28f3b3c734976d4}{%
+ family={Li},
+ familyi={L\bibinitperiod},
+ given={Kai},
+ giveni={K\bibinitperiod}}}%
+ {{hash=cd00ce5bc45f687c432e52e0fa1a7aa6}{%
+ family={Fei-Fei},
+ familyi={F\bibinithyphendelim F\bibinitperiod},
+ given={Li},
+ giveni={L\bibinitperiod}}}%
+ }
+ \list{organization}{1}{%
+ {Ieee}%
+ }
+ \strng{namehash}{990420f755e01028377fcad1464c9706}
+ \strng{fullhash}{a16fdd05c52c264b99fe98f4a5e24c60}
+ \strng{bibnamehash}{a16fdd05c52c264b99fe98f4a5e24c60}
+ \strng{authorbibnamehash}{a16fdd05c52c264b99fe98f4a5e24c60}
+ \strng{authornamehash}{990420f755e01028377fcad1464c9706}
+ \strng{authorfullhash}{a16fdd05c52c264b99fe98f4a5e24c60}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{booktitle}{2009 IEEE conference on computer vision and pattern recognition}
+ \field{title}{Imagenet: A large-scale hierarchical image database}
+ \field{year}{2009}
+ \field{pages}{248\bibrangedash 255}
+ \range{pages}{8}
+ \endentry
+ \entry{krizhevsky2012imagenet}{article}{}
+ \name{author}{3}{}{%
+ {{hash=c5e3a676e2ac1164b3afcd539c131fc9}{%
+ family={Krizhevsky},
+ familyi={K\bibinitperiod},
+ given={Alex},
+ giveni={A\bibinitperiod}}}%
+ {{hash=8d569d1d5b8b5a7836017a98b430f959}{%
+ family={Sutskever},
+ familyi={S\bibinitperiod},
+ given={Ilya},
+ giveni={I\bibinitperiod}}}%
+ {{hash=813bd95fe553e6079cd53a567b238287}{%
+ family={Hinton},
+ familyi={H\bibinitperiod},
+ given={Geoffrey\bibnamedelima E},
+ giveni={G\bibinitperiod\bibinitdelim E\bibinitperiod}}}%
+ }
+ \strng{namehash}{dd291871bfa8ee64447232f1cca429aa}
+ \strng{fullhash}{1a23c09aa65b3c2ade45ed18d8127375}
+ \strng{bibnamehash}{1a23c09aa65b3c2ade45ed18d8127375}
+ \strng{authorbibnamehash}{1a23c09aa65b3c2ade45ed18d8127375}
+ \strng{authornamehash}{dd291871bfa8ee64447232f1cca429aa}
+ \strng{authorfullhash}{1a23c09aa65b3c2ade45ed18d8127375}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{Advances in neural information processing systems}
+ \field{title}{Imagenet classification with deep convolutional neural networks}
+ \field{volume}{25}
+ \field{year}{2012}
+ \endentry
+ \entry{resnet-152}{article}{}
+ \name{author}{6}{}{%
+ {{hash=5f03ce4b1164a38128cb989bcb5142cd}{%
+ family={Wang},
+ familyi={W\bibinitperiod},
+ given={Qilong},
+ giveni={Q\bibinitperiod}}}%
+ {{hash=76bcb192eca5ac9560d1bf6c8cefad99}{%
+ family={Wu},
+ familyi={W\bibinitperiod},
+ given={Banggu},
+ giveni={B\bibinitperiod}}}%
+ {{hash=3186f5ead79be8894d8c19bcc4c5822e}{%
+ family={Zhu},
+ familyi={Z\bibinitperiod},
+ given={Pengfei},
+ giveni={P\bibinitperiod}}}%
+ {{hash=e1ed1573fe71f793460e20e08134ca17}{%
+ family={Li},
+ familyi={L\bibinitperiod},
+ given={Peihua},
+ giveni={P\bibinitperiod}}}%
+ {{hash=509ab736fbc2f1e10f41c1bddb2531ed}{%
+ family={Zuo},
+ familyi={Z\bibinitperiod},
+ given={Wangmeng},
+ giveni={W\bibinitperiod}}}%
+ {{hash=b8a0fb91ee1e348c1963577d64c2b566}{%
+ family={Hu},
+ familyi={H\bibinitperiod},
+ given={Qinghua},
+ giveni={Q\bibinitperiod}}}%
+ }
+ \strng{namehash}{ec89b10ac64649f9959ff86aa3160671}
+ \strng{fullhash}{a61f51e99239d4d969872b0436c3bc29}
+ \strng{bibnamehash}{a61f51e99239d4d969872b0436c3bc29}
+ \strng{authorbibnamehash}{a61f51e99239d4d969872b0436c3bc29}
+ \strng{authornamehash}{ec89b10ac64649f9959ff86aa3160671}
+ \strng{authorfullhash}{a61f51e99239d4d969872b0436c3bc29}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprinttype}{arXiv}
+ \field{journaltitle}{CoRR}
+ \field{title}{ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks}
+ \field{volume}{abs/1910.03151}
+ \field{year}{2019}
+ \verb{eprint}
+ \verb 1910.03151
+ \endverb
+ \verb{urlraw}
+ \verb http://arxiv.org/abs/1910.03151
+ \endverb
+ \verb{url}
+ \verb http://arxiv.org/abs/1910.03151
+ \endverb
+ \endentry
+ \entry{efficientnet}{article}{}
+ \name{author}{2}{}{%
+ {{hash=1e7d6abb8225099acc71f672b9ec336e}{%
+ family={Tan},
+ familyi={T\bibinitperiod},
+ given={Mingxing},
+ giveni={M\bibinitperiod}}}%
+ {{hash=c636f146591d51579a8119b777394878}{%
+ family={Le},
+ familyi={L\bibinitperiod},
+ given={Quoc\bibnamedelima V.},
+ giveni={Q\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ }
+ \strng{namehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{fullhash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{bibnamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authorbibnamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authornamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authorfullhash}{d029e8131f7a0bef056d7edc726f1529}
+ \field{extraname}{1}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprinttype}{arXiv}
+ \field{journaltitle}{CoRR}
+ \field{title}{EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}
+ \field{volume}{abs/1905.11946}
+ \field{year}{2019}
+ \verb{eprint}
+ \verb 1905.11946
+ \endverb
+ \verb{urlraw}
+ \verb http://arxiv.org/abs/1905.11946
+ \endverb
+ \verb{url}
+ \verb http://arxiv.org/abs/1905.11946
+ \endverb
+ \endentry
+ \entry{resnet}{misc}{}
+ \name{author}{4}{}{%
+ {{hash=6b4b60e909e78633945f3f9c9dc83e01}{%
+ family={He},
+ familyi={H\bibinitperiod},
+ given={Kaiming},
+ giveni={K\bibinitperiod}}}%
+ {{hash=5e72bc22dbcf0984c6d113d280e36990}{%
+ family={Zhang},
+ familyi={Z\bibinitperiod},
+ given={Xiangyu},
+ giveni={X\bibinitperiod}}}%
+ {{hash=bb295293acacd54387339079ebbe4ead}{%
+ family={Ren},
+ familyi={R\bibinitperiod},
+ given={Shaoqing},
+ giveni={S\bibinitperiod}}}%
+ {{hash=f85751488058842b5777c7b4074077b5}{%
+ family={Sun},
+ familyi={S\bibinitperiod},
+ given={Jian},
+ giveni={J\bibinitperiod}}}%
+ }
+ \strng{namehash}{6edb98fe38401d2fe4a026f5ce6e8451}
+ \strng{fullhash}{42c4b52dc3a62cebabbc11c73e1afb53}
+ \strng{bibnamehash}{42c4b52dc3a62cebabbc11c73e1afb53}
+ \strng{authorbibnamehash}{42c4b52dc3a62cebabbc11c73e1afb53}
+ \strng{authornamehash}{6edb98fe38401d2fe4a026f5ce6e8451}
+ \strng{authorfullhash}{42c4b52dc3a62cebabbc11c73e1afb53}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprintclass}{cs.CV}
+ \field{eprinttype}{arXiv}
+ \field{title}{Deep Residual Learning for Image Recognition}
+ \field{year}{2015}
+ \verb{eprint}
+ \verb 1512.03385
+ \endverb
+ \endentry
+ \entry{going-deeper-with-convolutions}{misc}{}
+ \name{author}{9}{}{%
+ {{hash=ed568d9c3bb059e6bf22899fbf170f86}{%
+ family={Szegedy},
+ familyi={S\bibinitperiod},
+ given={Christian},
+ giveni={C\bibinitperiod}}}%
+ {{hash=c0e0d23e2d09e45e6f51cc2bcea6d9f9}{%
+ family={Liu},
+ familyi={L\bibinitperiod},
+ given={Wei},
+ giveni={W\bibinitperiod}}}%
+ {{hash=9fce03efe6b3331a1b93ed2e7c0da9d5}{%
+ family={Jia},
+ familyi={J\bibinitperiod},
+ given={Yangqing},
+ giveni={Y\bibinitperiod}}}%
+ {{hash=15f5333df96deaf51c72d065bded37d8}{%
+ family={Sermanet},
+ familyi={S\bibinitperiod},
+ given={Pierre},
+ giveni={P\bibinitperiod}}}%
+ {{hash=698ee61a2f3fa29734204496d2d36aef}{%
+ family={Reed},
+ familyi={R\bibinitperiod},
+ given={Scott},
+ giveni={S\bibinitperiod}}}%
+ {{hash=c1826f3465579186aff299a9b0e16ed7}{%
+ family={Anguelov},
+ familyi={A\bibinitperiod},
+ given={Dragomir},
+ giveni={D\bibinitperiod}}}%
+ {{hash=8bbc4c5d96f205bada839e74e0202146}{%
+ family={Erhan},
+ familyi={E\bibinitperiod},
+ given={Dumitru},
+ giveni={D\bibinitperiod}}}%
+ {{hash=8051922e7bd286f884bfbd1023ef62f5}{%
+ family={Vanhoucke},
+ familyi={V\bibinitperiod},
+ given={Vincent},
+ giveni={V\bibinitperiod}}}%
+ {{hash=aa04c4d6213a1e867b1650e298cb2668}{%
+ family={Rabinovich},
+ familyi={R\bibinitperiod},
+ given={Andrew},
+ giveni={A\bibinitperiod}}}%
+ }
+ \strng{namehash}{80f8e6bfc3aff3e75b2807a6f6962740}
+ \strng{fullhash}{64fbaf3c8a6b53523f74f0087b58e7e6}
+ \strng{bibnamehash}{80f8e6bfc3aff3e75b2807a6f6962740}
+ \strng{authorbibnamehash}{80f8e6bfc3aff3e75b2807a6f6962740}
+ \strng{authornamehash}{80f8e6bfc3aff3e75b2807a6f6962740}
+ \strng{authorfullhash}{64fbaf3c8a6b53523f74f0087b58e7e6}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprintclass}{cs.CV}
+ \field{eprinttype}{arXiv}
+ \field{title}{Going Deeper with Convolutions}
+ \field{year}{2014}
+ \verb{eprint}
+ \verb 1409.4842
+ \endverb
+ \endentry
+ \entry{very-deep-convolution-networks-for-large-scale-image-recognition}{misc}{}
+ \name{author}{2}{}{%
+ {{hash=9d16b7284df92c9adaee86c37ab992df}{%
+ family={Simonyan},
+ familyi={S\bibinitperiod},
+ given={Karen},
+ giveni={K\bibinitperiod}}}%
+ {{hash=c72fc39e94030f67717052309266a44d}{%
+ family={Zisserman},
+ familyi={Z\bibinitperiod},
+ given={Andrew},
+ giveni={A\bibinitperiod}}}%
+ }
+ \strng{namehash}{25d2f3c4577a6632d37f0126cc781232}
+ \strng{fullhash}{25d2f3c4577a6632d37f0126cc781232}
+ \strng{bibnamehash}{25d2f3c4577a6632d37f0126cc781232}
+ \strng{authorbibnamehash}{25d2f3c4577a6632d37f0126cc781232}
+ \strng{authornamehash}{25d2f3c4577a6632d37f0126cc781232}
+ \strng{authorfullhash}{25d2f3c4577a6632d37f0126cc781232}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprintclass}{cs.CV}
+ \field{eprinttype}{arXiv}
+ \field{title}{Very Deep Convolutional Networks for Large-Scale Image Recognition}
+ \field{year}{2015}
+ \verb{eprint}
+ \verb 1409.1556
+ \endverb
+ \endentry
+ \entry{efficient-net}{misc}{}
+ \name{author}{2}{}{%
+ {{hash=1e7d6abb8225099acc71f672b9ec336e}{%
+ family={Tan},
+ familyi={T\bibinitperiod},
+ given={Mingxing},
+ giveni={M\bibinitperiod}}}%
+ {{hash=c636f146591d51579a8119b777394878}{%
+ family={Le},
+ familyi={L\bibinitperiod},
+ given={Quoc\bibnamedelima V.},
+ giveni={Q\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ }
+ \strng{namehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{fullhash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{bibnamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authorbibnamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authornamehash}{d029e8131f7a0bef056d7edc726f1529}
+ \strng{authorfullhash}{d029e8131f7a0bef056d7edc726f1529}
+ \field{extraname}{2}
+ \field{sortinit}{1}
+ \field{sortinithash}{4f6aaa89bab872aa0999fec09ff8e98a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprintclass}{cs.LG}
+ \field{eprinttype}{arXiv}
+ \field{title}{EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}
+ \field{year}{2020}
+ \verb{eprint}
+ \verb 1905.11946
+ \endverb
+ \endentry
+ \entry{inverted-bottleneck-mobilenet}{inproceedings}{}
+ \name{author}{5}{}{%
+ {{hash=8f90fd131c2bbfde4d0e9fdd7ed4ea8b}{%
+ family={Sandler},
+ familyi={S\bibinitperiod},
+ given={Mark},
+ giveni={M\bibinitperiod}}}%
+ {{hash=315c4166fc1f7cb66324a7f0d82827cd}{%
+ family={Howard},
+ familyi={H\bibinitperiod},
+ given={Andrew},
+ giveni={A\bibinitperiod}}}%
+ {{hash=d767e8e4d733bcf728bcdf2c193462f7}{%
+ family={Zhu},
+ familyi={Z\bibinitperiod},
+ given={Menglong},
+ giveni={M\bibinitperiod}}}%
+ {{hash=48f4090a93cf9f445057a9d6defe7973}{%
+ family={Zhmoginov},
+ familyi={Z\bibinitperiod},
+ given={Andrey},
+ giveni={A\bibinitperiod}}}%
+ {{hash=9dddfcfd529634a150ee38ee5c0203d7}{%
+ family={Chen},
+ familyi={C\bibinitperiod},
+ given={Liang-Chieh},
+ giveni={L\bibinithyphendelim C\bibinitperiod}}}%
+ }
+ \strng{namehash}{70179dd377b16c4cfcc0748ef0bd028a}
+ \strng{fullhash}{8d58d04315a5e1c06300772330b235d7}
+ \strng{bibnamehash}{8d58d04315a5e1c06300772330b235d7}
+ \strng{authorbibnamehash}{8d58d04315a5e1c06300772330b235d7}
+ \strng{authornamehash}{70179dd377b16c4cfcc0748ef0bd028a}
+ \strng{authorfullhash}{8d58d04315a5e1c06300772330b235d7}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{booktitle}{2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}
+ \field{title}{MobileNetV2: Inverted Residuals and Linear Bottlenecks}
+ \field{year}{2018}
+ \field{pages}{4510\bibrangedash 4520}
+ \range{pages}{11}
+ \verb{doi}
+ \verb 10.1109/CVPR.2018.00474
+ \endverb
+ \keyw{Manifolds;Neural networks;Computer architecture;Standards;Computational modeling;Task analysis}
+ \endentry
+ \entry{tensorflow2015-whitepaper}{misc}{}
+ \name{author}{40}{}{%
+ {{hash=396d6419316ec52f4c63b2f85912b61b}{%
+ family={MartĆn\bibnamedelima Abadi},
+ familyi={M\bibinitperiod\bibinitdelim A\bibinitperiod}}}%
+ {{hash=f337a7c116835c22bb206d2f0d7c70e0}{%
+ family={Ashish\bibnamedelima Agarwal},
+ familyi={A\bibinitperiod\bibinitdelim A\bibinitperiod}}}%
+ {{hash=84ac9fcb6c15dcd79c092bc8e20586ba}{%
+ family={Paul\bibnamedelima Barham},
+ familyi={P\bibinitperiod\bibinitdelim B\bibinitperiod}}}%
+ {{hash=d8574748e3086e0b279a58cdba71763d}{%
+ family={Eugene\bibnamedelima Brevdo},
+ familyi={E\bibinitperiod\bibinitdelim B\bibinitperiod}}}%
+ {{hash=c0b56f741b5a5bddfe77f1881c3cc67a}{%
+ family={Zhifeng\bibnamedelima Chen},
+ familyi={Z\bibinitperiod\bibinitdelim C\bibinitperiod}}}%
+ {{hash=8b8dd2e01366c855f42e47027cf23e98}{%
+ family={Craig\bibnamedelima Citro},
+ familyi={C\bibinitperiod\bibinitdelim C\bibinitperiod}}}%
+ {{hash=978a7d9601bf09e03d1bb3f6cce7a0ce}{%
+ family={Greg\bibnamedelima S.\bibnamedelimi Corrado},
+ familyi={G\bibinitperiod\bibinitdelim S\bibinitperiod\bibinitdelim C\bibinitperiod}}}%
+ {{hash=3b500b0dfd88e6e151d29108fdcb82f0}{%
+ family={Andy\bibnamedelima Davis},
+ familyi={A\bibinitperiod\bibinitdelim D\bibinitperiod}}}%
+ {{hash=2fd376ea3b3a3da11704c0ee86753dcf}{%
+ family={Jeffrey\bibnamedelima Dean},
+ familyi={J\bibinitperiod\bibinitdelim D\bibinitperiod}}}%
+ {{hash=5b34e641dd8a00f97c6242ae0353eb90}{%
+ family={Matthieu\bibnamedelima Devin},
+ familyi={M\bibinitperiod\bibinitdelim D\bibinitperiod}}}%
+ {{hash=5b4490947d4e91359646ce3c93cbd2f7}{%
+ family={Sanjay\bibnamedelima Ghemawat},
+ familyi={S\bibinitperiod\bibinitdelim G\bibinitperiod}}}%
+ {{hash=1fdef10b94ee122ef6136197f99e3df3}{%
+ family={Ian\bibnamedelima Goodfellow},
+ familyi={I\bibinitperiod\bibinitdelim G\bibinitperiod}}}%
+ {{hash=166ae8a0b435eded68e39e9e2d2a1ee8}{%
+ family={Andrew\bibnamedelima Harp},
+ familyi={A\bibinitperiod\bibinitdelim H\bibinitperiod}}}%
+ {{hash=7e9f7006151cf312bc133568336c68c6}{%
+ family={Geoffrey\bibnamedelima Irving},
+ familyi={G\bibinitperiod\bibinitdelim I\bibinitperiod}}}%
+ {{hash=08c1890e1c33279b8c63c71fa8f19263}{%
+ family={Michael\bibnamedelima Isard},
+ familyi={M\bibinitperiod\bibinitdelim I\bibinitperiod}}}%
+ {{hash=9fce03efe6b3331a1b93ed2e7c0da9d5}{%
+ family={Jia},
+ familyi={J\bibinitperiod},
+ given={Yangqing},
+ giveni={Y\bibinitperiod}}}%
+ {{hash=c0c0eea5379268c0c5b68732c90984b6}{%
+ family={Rafal\bibnamedelima Jozefowicz},
+ familyi={R\bibinitperiod\bibinitdelim J\bibinitperiod}}}%
+ {{hash=cff46cb4603a73d83b11ea7a9ded9d79}{%
+ family={Lukasz\bibnamedelima Kaiser},
+ familyi={L\bibinitperiod\bibinitdelim K\bibinitperiod}}}%
+ {{hash=d088e0f635523b8b5b18662331e4f44a}{%
+ family={Manjunath\bibnamedelima Kudlur},
+ familyi={M\bibinitperiod\bibinitdelim K\bibinitperiod}}}%
+ {{hash=1c24291ae15b979c82aa09a33790cb62}{%
+ family={Josh\bibnamedelima Levenberg},
+ familyi={J\bibinitperiod\bibinitdelim L\bibinitperiod}}}%
+ {{hash=796a3a98ff7545fe10f6a4c17ba016fa}{%
+ family={Dandelion\bibnamedelima ManƩ},
+ familyi={D\bibinitperiod\bibinitdelim M\bibinitperiod}}}%
+ {{hash=1ee98d232eb1fc1208a8f8ca649e970b}{%
+ family={Rajat\bibnamedelima Monga},
+ familyi={R\bibinitperiod\bibinitdelim M\bibinitperiod}}}%
+ {{hash=b2a15ec3d90955ece50ea26d31100b9a}{%
+ family={Sherry\bibnamedelima Moore},
+ familyi={S\bibinitperiod\bibinitdelim M\bibinitperiod}}}%
+ {{hash=1494c573fadad736c58cf1119ac59239}{%
+ family={Derek\bibnamedelima Murray},
+ familyi={D\bibinitperiod\bibinitdelim M\bibinitperiod}}}%
+ {{hash=ecf58eb1684af6cba2c1f126405eedab}{%
+ family={Chris\bibnamedelima Olah},
+ familyi={C\bibinitperiod\bibinitdelim O\bibinitperiod}}}%
+ {{hash=9f43befd94cd09a9aaa7ea8489405a83}{%
+ family={Mike\bibnamedelima Schuster},
+ familyi={M\bibinitperiod\bibinitdelim S\bibinitperiod}}}%
+ {{hash=4712800a228b1179529b9f6e0d1b1838}{%
+ family={Jonathon\bibnamedelima Shlens},
+ familyi={J\bibinitperiod\bibinitdelim S\bibinitperiod}}}%
+ {{hash=41ad6ff6c026d5a3730269072b31caf1}{%
+ family={Benoit\bibnamedelima Steiner},
+ familyi={B\bibinitperiod\bibinitdelim S\bibinitperiod}}}%
+ {{hash=b02f7871db6fc5524cec4ce38e104410}{%
+ family={Ilya\bibnamedelima Sutskever},
+ familyi={I\bibinitperiod\bibinitdelim S\bibinitperiod}}}%
+ {{hash=63288446e47b1d383f522ede84aa6fcc}{%
+ family={Kunal\bibnamedelima Talwar},
+ familyi={K\bibinitperiod\bibinitdelim T\bibinitperiod}}}%
+ {{hash=1dec75595b55bf77971f6a932d146b81}{%
+ family={Paul\bibnamedelima Tucker},
+ familyi={P\bibinitperiod\bibinitdelim T\bibinitperiod}}}%
+ {{hash=b6680dbb0176cb9bd87a3b26fa6f5cfb}{%
+ family={Vincent\bibnamedelima Vanhoucke},
+ familyi={V\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ {{hash=e030c9d199c66657e26138be29814d81}{%
+ family={Vijay\bibnamedelima Vasudevan},
+ familyi={V\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ {{hash=04426b798803cfaf3e8aa9280a5d0a58}{%
+ family={Fernanda\bibnamedelima ViƩgas},
+ familyi={F\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ {{hash=fa7242e11c7d955de2ac1be94ca29073}{%
+ family={Oriol\bibnamedelima Vinyals},
+ familyi={O\bibinitperiod\bibinitdelim V\bibinitperiod}}}%
+ {{hash=8c9ee8f70a3c3d97f85efd01c4e9cbe6}{%
+ family={Pete\bibnamedelima Warden},
+ familyi={P\bibinitperiod\bibinitdelim W\bibinitperiod}}}%
+ {{hash=8e4243c228c72a5e5279e31252887b32}{%
+ family={Martin\bibnamedelima Wattenberg},
+ familyi={M\bibinitperiod\bibinitdelim W\bibinitperiod}}}%
+ {{hash=c6a6eb2597f23589fc9141bdda275996}{%
+ family={Martin\bibnamedelima Wicke},
+ familyi={M\bibinitperiod\bibinitdelim W\bibinitperiod}}}%
+ {{hash=3ea39e6dc6ef47029ae996c7e63f1a48}{%
+ family={Yuan\bibnamedelima Yu},
+ familyi={Y\bibinitperiod\bibinitdelim Y\bibinitperiod}}}%
+ {{hash=b69feb3a3d59a312b20dbef0b1d2d6de}{%
+ family={Xiaoqiang\bibnamedelima Zheng},
+ familyi={X\bibinitperiod\bibinitdelim Z\bibinitperiod}}}%
+ }
+ \strng{namehash}{7fdd865be502254047a3b2638dc0cfeb}
+ \strng{fullhash}{9b332dc9b33a2f6316d71d525269bd0f}
+ \strng{bibnamehash}{7fdd865be502254047a3b2638dc0cfeb}
+ \strng{authorbibnamehash}{7fdd865be502254047a3b2638dc0cfeb}
+ \strng{authornamehash}{7fdd865be502254047a3b2638dc0cfeb}
+ \strng{authorfullhash}{9b332dc9b33a2f6316d71d525269bd0f}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{note}{Software available from tensorflow.org}
+ \field{title}{{TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems}
+ \field{year}{2015}
+ \verb{urlraw}
+ \verb https://www.tensorflow.org/
+ \endverb
+ \verb{url}
+ \verb https://www.tensorflow.org/
+ \endverb
+ \endentry
+ \entry{pytorch}{incollection}{}
+ \name{author}{21}{}{%
+ {{hash=56bf0b340039cf8594436a624ff548a9}{%
+ family={Paszke},
+ familyi={P\bibinitperiod},
+ given={Adam},
+ giveni={A\bibinitperiod}}}%
+ {{hash=4ba5062e5919c814aceec188d54c01f2}{%
+ family={Gross},
+ familyi={G\bibinitperiod},
+ given={Sam},
+ giveni={S\bibinitperiod}}}%
+ {{hash=e5dfae4582081d649e3a0d5342050016}{%
+ family={Massa},
+ familyi={M\bibinitperiod},
+ given={Francisco},
+ giveni={F\bibinitperiod}}}%
+ {{hash=b5815e1692fa2d0c1f44eecf509bd7c4}{%
+ family={Lerer},
+ familyi={L\bibinitperiod},
+ given={Adam},
+ giveni={A\bibinitperiod}}}%
+ {{hash=b75383e6b48c8360c7a60031424c85cf}{%
+ family={Bradbury},
+ familyi={B\bibinitperiod},
+ given={James},
+ giveni={J\bibinitperiod}}}%
+ {{hash=f897ed422c34d95af2e22778dfc2607e}{%
+ family={Chanan},
+ familyi={C\bibinitperiod},
+ given={Gregory},
+ giveni={G\bibinitperiod}}}%
+ {{hash=046269e070246feb6f394141db80ed87}{%
+ family={Killeen},
+ familyi={K\bibinitperiod},
+ given={Trevor},
+ giveni={T\bibinitperiod}}}%
+ {{hash=c40352c194e60a3ef458ee7e8685afb5}{%
+ family={Lin},
+ familyi={L\bibinitperiod},
+ given={Zeming},
+ giveni={Z\bibinitperiod}}}%
+ {{hash=6e45f49ec618e619efad90c8e8a61f0c}{%
+ family={Gimelshein},
+ familyi={G\bibinitperiod},
+ given={Natalia},
+ giveni={N\bibinitperiod}}}%
+ {{hash=f65a80959d520337ae99a0798515036c}{%
+ family={Antiga},
+ familyi={A\bibinitperiod},
+ given={Luca},
+ giveni={L\bibinitperiod}}}%
+ {{hash=954cf7680b6ce14813973eccdca3c4bc}{%
+ family={Desmaison},
+ familyi={D\bibinitperiod},
+ given={Alban},
+ giveni={A\bibinitperiod}}}%
+ {{hash=c1b8f8db68d6667b9f2f9a9a3567721b}{%
+ family={Kopf},
+ familyi={K\bibinitperiod},
+ given={Andreas},
+ giveni={A\bibinitperiod}}}%
+ {{hash=b9e701339e56fd0b171145b08288a1b7}{%
+ family={Yang},
+ familyi={Y\bibinitperiod},
+ given={Edward},
+ giveni={E\bibinitperiod}}}%
+ {{hash=3f9535be511fd2fa346093e63b8e61a0}{%
+ family={DeVito},
+ familyi={D\bibinitperiod},
+ given={Zachary},
+ giveni={Z\bibinitperiod}}}%
+ {{hash=d814afaa50b9e22ab92cc9f8f9a9e43a}{%
+ family={Raison},
+ familyi={R\bibinitperiod},
+ given={Martin},
+ giveni={M\bibinitperiod}}}%
+ {{hash=3feeeebee8583ecc208f7fb3e0a55068}{%
+ family={Tejani},
+ familyi={T\bibinitperiod},
+ given={Alykhan},
+ giveni={A\bibinitperiod}}}%
+ {{hash=e18536d5cb7543731fbf2ca1a4908732}{%
+ family={Chilamkurthy},
+ familyi={C\bibinitperiod},
+ given={Sasank},
+ giveni={S\bibinitperiod}}}%
+ {{hash=0a0b028c6b85c46f368317d0c5bfe3a0}{%
+ family={Steiner},
+ familyi={S\bibinitperiod},
+ given={Benoit},
+ giveni={B\bibinitperiod}}}%
+ {{hash=998a001f16bb57c079c1d5afb1cb02c8}{%
+ family={Fang},
+ familyi={F\bibinitperiod},
+ given={Lu},
+ giveni={L\bibinitperiod}}}%
+ {{hash=3f19c633bbfb847db6a0e71d3659eacd}{%
+ family={Bai},
+ familyi={B\bibinitperiod},
+ given={Junjie},
+ giveni={J\bibinitperiod}}}%
+ {{hash=8ef51a0906e47d2b4472c4e714ed598f}{%
+ family={Chintala},
+ familyi={C\bibinitperiod},
+ given={Soumith},
+ giveni={S\bibinitperiod}}}%
+ }
+ \list{publisher}{1}{%
+ {Curran Associates, Inc.}%
+ }
+ \strng{namehash}{724e74fc18651eb78eb82fbcd1d9dfb1}
+ \strng{fullhash}{ba1e2da270d08cb8de2856498a028fed}
+ \strng{bibnamehash}{724e74fc18651eb78eb82fbcd1d9dfb1}
+ \strng{authorbibnamehash}{724e74fc18651eb78eb82fbcd1d9dfb1}
+ \strng{authornamehash}{724e74fc18651eb78eb82fbcd1d9dfb1}
+ \strng{authorfullhash}{ba1e2da270d08cb8de2856498a028fed}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{booktitle}{Advances in Neural Information Processing Systems 32}
+ \field{title}{PyTorch: An Imperative Style, High-Performance Deep Learning Library}
+ \field{year}{2019}
+ \field{pages}{8024\bibrangedash 8035}
+ \range{pages}{12}
+ \verb{urlraw}
+ \verb http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf
+ \endverb
+ \verb{url}
+ \verb http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf
+ \endverb
+ \endentry
+ \entry{pytorch-vs-tensorflow-1}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{month}{12}
+ \field{note}{[Online; accessed 14. May 2024]}
+ \field{title}{{PyTorch vs TensorFlow: Deep Learning Frameworks [2024]}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://www.knowledgehut.com/blog/data-science/pytorch-vs-tensorflow
+ \endverb
+ \verb{url}
+ \verb https://www.knowledgehut.com/blog/data-science/pytorch-vs-tensorflow
+ \endverb
+ \endentry
+ \entry{pytorch-vs-tensorflow-2}{article}{}
+ \name{author}{1}{}{%
+ {{hash=5079643d4e5ebf5ceb5dfb40ee8525d4}{%
+ family={O'Connor},
+ familyi={O\bibinitperiod},
+ given={Ryan},
+ giveni={R\bibinitperiod}}}%
+ }
+ \list{publisher}{1}{%
+ {News, Tutorials, AI Research}%
+ }
+ \strng{namehash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \strng{fullhash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \strng{bibnamehash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \strng{authorbibnamehash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \strng{authornamehash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \strng{authorfullhash}{5079643d4e5ebf5ceb5dfb40ee8525d4}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{News, Tutorials, AI Research}
+ \field{month}{4}
+ \field{title}{{PyTorch vs TensorFlow in 2023}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2023
+ \endverb
+ \verb{url}
+ \verb https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2023
+ \endverb
+ \endentry
+ \entry{json-api-usage-stats}{article}{}
+ \name{author}{1}{}{%
+ {{hash=17d848142becf7c7ee4a0e0da00ed40b}{%
+ family={Hnatyuk},
+ familyi={H\bibinitperiod},
+ given={Kolya},
+ giveni={K\bibinitperiod}}}%
+ }
+ \list{publisher}{1}{%
+ {MarketSplash}%
+ }
+ \strng{namehash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \strng{fullhash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \strng{bibnamehash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \strng{authorbibnamehash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \strng{authornamehash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \strng{authorfullhash}{17d848142becf7c7ee4a0e0da00ed40b}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{MarketSplash}
+ \field{month}{10}
+ \field{title}{{130+ API Statistics: Usage, Growth {\&} Security}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://marketsplash.com/api-statistics
+ \endverb
+ \verb{url}
+ \verb https://marketsplash.com/api-statistics
+ \endverb
+ \endentry
+ \entry{nginx}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{NGINX}
+ \field{month}{2}
+ \field{note}{[Online; accessed 12. Mar. 2024]}
+ \field{title}{{Advanced Load Balancer, Web Server, {\&} Reverse Proxy - NGINX}}
+ \field{year}{2024}
+ \verb{urlraw}
+ \verb https://www.nginx.com
+ \endverb
+ \verb{url}
+ \verb https://www.nginx.com
+ \endverb
+ \endentry
+ \entry{postgressql}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{PostgreSQL}
+ \field{month}{5}
+ \field{note}{[Online; accessed 14. May 2024]}
+ \field{title}{{PostgreSQL}}
+ \field{year}{2024}
+ \verb{urlraw}
+ \verb https://www.postgresql.org
+ \endverb
+ \verb{url}
+ \verb https://www.postgresql.org
+ \endverb
+ \endentry
+ \entry{svelte}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{month}{3}
+ \field{note}{[Online; accessed 12. Mar. 2024]}
+ \field{title}{{Svelte {\ifmmode\bullet\elseā¢\fi} Cybernetically enhanced web apps}}
+ \field{year}{2024}
+ \verb{urlraw}
+ \verb https://svelte.dev
+ \endverb
+ \verb{url}
+ \verb https://svelte.dev
+ \endverb
+ \endentry
+ \entry{state-of-js-2022}{misc}{}
+ \field{sortinit}{2}
+ \field{sortinithash}{8b555b3791beccb63322c22f3320aa9a}
+ \field{labeltitlesource}{title}
+ \field{month}{11}
+ \field{note}{[Online; accessed 12. Mar. 2024]}
+ \field{title}{{State of JavaScript 2022: Front-end Frameworks}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://2022.stateofjs.com/en-US/libraries/front-end-frameworks
+ \endverb
+ \verb{url}
+ \verb https://2022.stateofjs.com/en-US/libraries/front-end-frameworks
+ \endverb
+ \endentry
+ \entry{js-frontend-frameworks-performance}{misc}{}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labeltitlesource}{title}
+ \field{month}{3}
+ \field{note}{[Online; accessed 12. Mar. 2024]}
+ \field{title}{{Interactive Results}}
+ \field{year}{2024}
+ \verb{urlraw}
+ \verb https://krausest.github.io/js-framework-benchmark/current.html
+ \endverb
+ \verb{url}
+ \verb https://krausest.github.io/js-framework-benchmark/current.html
+ \endverb
+ \endentry
+ \entry{svelte-kit}{misc}{}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labeltitlesource}{title}
+ \field{month}{3}
+ \field{note}{[Online; accessed 12. Mar. 2024]}
+ \field{title}{{SvelteKit {\ifmmode\bullet\elseā¢\fi} Web development, streamlined}}
+ \field{year}{2024}
+ \verb{urlraw}
+ \verb https://kit.svelte.dev
+ \endverb
+ \verb{url}
+ \verb https://kit.svelte.dev
+ \endverb
+ \endentry
+ \entry{bycrpt}{article}{}
+ \name{author}{2}{}{%
+ {{hash=60b1af3e10b3553c9b4eb4055120912c}{%
+ family={Provos},
+ familyi={P\bibinitperiod},
+ given={Niels},
+ giveni={N\bibinitperiod}}}%
+ {{hash=f74609e259322f9c7b6a2c775a7d8e29}{%
+ family={Mazieres},
+ familyi={M\bibinitperiod},
+ given={David},
+ giveni={D\bibinitperiod}}}%
+ }
+ \strng{namehash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \strng{fullhash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \strng{bibnamehash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \strng{authorbibnamehash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \strng{authornamehash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \strng{authorfullhash}{cf7a7a797d9c87cf2ba4c4ca7ff0c17c}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{month}{03}
+ \field{title}{A Future-Adaptable Password Scheme}
+ \field{year}{2001}
+ \endentry
+ \entry{go}{misc}{}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labeltitlesource}{title}
+ \field{month}{11}
+ \field{note}{[Online; accessed 1. Nov. 2023]}
+ \field{title}{{The Go Programming Language}}
+ \field{year}{2023}
+ \verb{urlraw}
+ \verb https://go.dev
+ \endverb
+ \verb{url}
+ \verb https://go.dev
+ \endverb
+ \endentry
+ \entry{cifar10}{report}{}
+ \name{author}{1}{}{%
+ {{hash=c5e3a676e2ac1164b3afcd539c131fc9}{%
+ family={Krizhevsky},
+ familyi={K\bibinitperiod},
+ given={Alex},
+ giveni={A\bibinitperiod}}}%
+ }
+ \strng{namehash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \strng{fullhash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \strng{bibnamehash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \strng{authorbibnamehash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \strng{authornamehash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \strng{authorfullhash}{c5e3a676e2ac1164b3afcd539c131fc9}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{title}{Learning multiple layers of features from tiny images}
+ \field{type}{techreport}
+ \field{year}{2009}
+ \endentry
+ \entry{stl10}{misc}{}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labeltitlesource}{title}
+ \field{month}{11}
+ \field{note}{[Online; accessed 11. May 2024]}
+ \field{title}{{STL-10 dataset}}
+ \field{year}{2015}
+ \verb{urlraw}
+ \verb https://cs.stanford.edu/~acoates/stl10
+ \endverb
+ \verb{url}
+ \verb https://cs.stanford.edu/~acoates/stl10
+ \endverb
+ \endentry
+ \entry{artbench}{article}{}
+ \name{author}{4}{}{%
+ {{hash=7bba0fa6a38a6bcb53161bc627f0e8f0}{%
+ family={Liao},
+ familyi={L\bibinitperiod},
+ given={Peiyuan},
+ giveni={P\bibinitperiod}}}%
+ {{hash=bc48cf0ef11559dbf52ea1eeb6a1fe6b}{%
+ family={Li},
+ familyi={L\bibinitperiod},
+ given={Xiuyu},
+ giveni={X\bibinitperiod}}}%
+ {{hash=6c9b1bbb5e6bfa9eb298cf2adaf2fe6e}{%
+ family={Liu},
+ familyi={L\bibinitperiod},
+ given={Xihui},
+ giveni={X\bibinitperiod}}}%
+ {{hash=994177386e5a63171a7be588e97df7e5}{%
+ family={Keutzer},
+ familyi={K\bibinitperiod},
+ given={Kurt},
+ giveni={K\bibinitperiod}}}%
+ }
+ \strng{namehash}{08a0f45af14d162ba0122f203b8ad810}
+ \strng{fullhash}{112b6926c6bf43c13d55af2ea8fde5e2}
+ \strng{bibnamehash}{112b6926c6bf43c13d55af2ea8fde5e2}
+ \strng{authorbibnamehash}{112b6926c6bf43c13d55af2ea8fde5e2}
+ \strng{authornamehash}{08a0f45af14d162ba0122f203b8ad810}
+ \strng{authorfullhash}{112b6926c6bf43c13d55af2ea8fde5e2}
+ \field{sortinit}{3}
+ \field{sortinithash}{ad6fe7482ffbd7b9f99c9e8b5dccd3d7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{arXiv preprint arXiv:2206.11404}
+ \field{title}{The ArtBench Dataset: Benchmarking Generative Models with Artworks}
+ \field{year}{2022}
+ \endentry
+ \entry{caltech256}{misc}{}
+ \name{author}{3}{}{%
+ {{hash=0c276668bd6739ab142e84d4de9000da}{%
+ family={Griffin},
+ familyi={G\bibinitperiod},
+ given={Gregory},
+ giveni={G\bibinitperiod}}}%
+ {{hash=9f4b2bda38961146065556b1d28f38ab}{%
+ family={Holub},
+ familyi={H\bibinitperiod},
+ given={Alex},
+ giveni={A\bibinitperiod}}}%
+ {{hash=e52876f830a8a20786ff3e4d7dd6f083}{%
+ family={Perona},
+ familyi={P\bibinitperiod},
+ given={Pietro},
+ giveni={P\bibinitperiod}}}%
+ }
+ \list{publisher}{1}{%
+ {CaltechDATA}%
+ }
+ \strng{namehash}{5983e22273ff6957ba259a0f7d1141bc}
+ \strng{fullhash}{1b5a4898a33fca8d61b05ac9d9fd8c0a}
+ \strng{bibnamehash}{1b5a4898a33fca8d61b05ac9d9fd8c0a}
+ \strng{authorbibnamehash}{1b5a4898a33fca8d61b05ac9d9fd8c0a}
+ \strng{authornamehash}{5983e22273ff6957ba259a0f7d1141bc}
+ \strng{authorfullhash}{1b5a4898a33fca8d61b05ac9d9fd8c0a}
+ \field{sortinit}{4}
+ \field{sortinithash}{9381316451d1b9788675a07e972a12a7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{month}{4}
+ \field{title}{Caltech 256}
+ \field{year}{2022}
+ \verb{doi}
+ \verb 10.22002/D1.20087
+ \endverb
+ \endentry
+ \entry{fgvca}{report}{}
+ \name{author}{5}{}{%
+ {{hash=b1fb544937854da3ec8ec4f8109e846d}{%
+ family={Maji},
+ familyi={M\bibinitperiod},
+ given={S.},
+ giveni={S\bibinitperiod}}}%
+ {{hash=66af1ecffa9fdc06f0e4ac2c3f2e4124}{%
+ family={Kannala},
+ familyi={K\bibinitperiod},
+ given={J.},
+ giveni={J\bibinitperiod}}}%
+ {{hash=912e9620e6b1bb780e26082faac6a619}{%
+ family={Rahtu},
+ familyi={R\bibinitperiod},
+ given={E.},
+ giveni={E\bibinitperiod}}}%
+ {{hash=7b37b33590b6ced45c1ce1cddab2ef7a}{%
+ family={Blaschko},
+ familyi={B\bibinitperiod},
+ given={M.},
+ giveni={M\bibinitperiod}}}%
+ {{hash=0ec8712a7d032edd8ddc33c250c0784f}{%
+ family={Vedaldi},
+ familyi={V\bibinitperiod},
+ given={A.},
+ giveni={A\bibinitperiod}}}%
+ }
+ \strng{namehash}{4734aac49e2aa46b278616fcc2d66280}
+ \strng{fullhash}{4929dc1922cbfb934b74d90436a4ded6}
+ \strng{bibnamehash}{4929dc1922cbfb934b74d90436a4ded6}
+ \strng{authorbibnamehash}{4929dc1922cbfb934b74d90436a4ded6}
+ \strng{authornamehash}{4734aac49e2aa46b278616fcc2d66280}
+ \strng{authorfullhash}{4929dc1922cbfb934b74d90436a4ded6}
+ \field{sortinit}{4}
+ \field{sortinithash}{9381316451d1b9788675a07e972a12a7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{eprintclass}{cs-cv}
+ \field{eprinttype}{arXiv}
+ \field{title}{Fine-Grained Visual Classification of Aircraft}
+ \field{type}{techreport}
+ \field{year}{2013}
+ \verb{eprint}
+ \verb 1306.5151
+ \endverb
+ \endentry
+ \entry{fooddataset}{article}{}
+ \name{author}{5}{}{%
+ {{hash=9745e23b5afda022dab01c159a454bb2}{%
+ family={Kaur},
+ familyi={K\bibinitperiod},
+ given={Parneet},
+ giveni={P\bibinitperiod}}}%
+ {{hash=5582bf1be9db7a164fe4a89365a4420b}{%
+ family={Sikka},
+ familyi={S\bibinitperiod},
+ given={Karan},
+ giveni={K\bibinitperiod}}}%
+ {{hash=47ad65c82b1de7d642988df185d7d8ea}{%
+ family={Wang},
+ familyi={W\bibinitperiod},
+ given={Weijun},
+ giveni={W\bibinitperiod}}}%
+ {{hash=4d59c02623535d66620842635043b3b7}{%
+ family={Belongie},
+ familyi={B\bibinitperiod},
+ given={serge},
+ giveni={s\bibinitperiod}}}%
+ {{hash=2478c221c08a4d32c950a414c383fb08}{%
+ family={Divakaran},
+ familyi={D\bibinitperiod},
+ given={Ajay},
+ giveni={A\bibinitperiod}}}%
+ }
+ \strng{namehash}{9bbebb9e370b0e4d2c8850515561bfbd}
+ \strng{fullhash}{ee23ed313476a626198320adcc67e276}
+ \strng{bibnamehash}{ee23ed313476a626198320adcc67e276}
+ \strng{authorbibnamehash}{ee23ed313476a626198320adcc67e276}
+ \strng{authornamehash}{9bbebb9e370b0e4d2c8850515561bfbd}
+ \strng{authorfullhash}{ee23ed313476a626198320adcc67e276}
+ \field{sortinit}{4}
+ \field{sortinithash}{9381316451d1b9788675a07e972a12a7}
+ \field{labelnamesource}{author}
+ \field{labeltitlesource}{title}
+ \field{journaltitle}{arXiv preprint arXiv:1907.06167}
+ \field{title}{{FoodX-251: A Dataset for Fine-grained Food Classification}}
+ \field{year}{2019}
+ \endentry
+ \enddatalist
+\endrefsection
+\endinput
+
diff --git a/report/report.bcf b/report/report.bcf
new file mode 100644
index 0000000..9cc4bae
--- /dev/null
+++ b/report/report.bcf
@@ -0,0 +1,2429 @@
+
+
+
+
+
+ output_encoding
+ utf8
+
+
+ input_encoding
+ utf8
+
+
+ debug
+ 0
+
+
+ mincrossrefs
+ 2
+
+
+ minxrefs
+ 2
+
+
+ sortcase
+ 1
+
+
+ sortupper
+ 1
+
+
+
+
+
+
+ alphaothers
+ +
+
+
+ extradatecontext
+ labelname
+ labeltitle
+
+
+ labelalpha
+ 0
+
+
+ labelnamespec
+ shortauthor
+ author
+ shorteditor
+ editor
+ translator
+
+
+ labeltitle
+ 0
+
+
+ labeltitlespec
+ shorttitle
+ title
+ maintitle
+
+
+ labeltitleyear
+ 0
+
+
+ labeldateparts
+ 0
+
+
+ labeldatespec
+ date
+ year
+ eventdate
+ origdate
+ urldate
+ nodate
+
+
+ julian
+ 0
+
+
+ gregorianstart
+ 1582-10-15
+
+
+ maxalphanames
+ 3
+
+
+ maxbibnames
+ 6
+
+
+ maxcitenames
+ 2
+
+
+ maxsortnames
+ 6
+
+
+ maxitems
+ 3
+
+
+ minalphanames
+ 1
+
+
+ minbibnames
+ 1
+
+
+ mincitenames
+ 1
+
+
+ minsortnames
+ 1
+
+
+ minitems
+ 1
+
+
+ nohashothers
+ 0
+
+
+ noroman
+ 0
+
+
+ nosortothers
+ 0
+
+
+ pluralothers
+ 0
+
+
+ singletitle
+ 0
+
+
+ skipbib
+ 0
+
+
+ skipbiblist
+ 0
+
+
+ skiplab
+ 0
+
+
+ sortalphaothers
+ +
+
+
+ sortlocale
+ english
+
+
+ sortingtemplatename
+ none
+
+
+ sortsets
+ 0
+
+
+ uniquelist
+ false
+
+
+ uniquename
+ false
+
+
+ uniqueprimaryauthor
+ 0
+
+
+ uniquetitle
+ 0
+
+
+ uniquebaretitle
+ 0
+
+
+ uniquework
+ 0
+
+
+ useprefix
+ 0
+
+
+ useafterword
+ 1
+
+
+ useannotator
+ 1
+
+
+ useauthor
+ 1
+
+
+ usebookauthor
+ 1
+
+
+ usecommentator
+ 1
+
+
+ useeditor
+ 1
+
+
+ useeditora
+ 1
+
+
+ useeditorb
+ 1
+
+
+ useeditorc
+ 1
+
+
+ useforeword
+ 1
+
+
+ useholder
+ 1
+
+
+ useintroduction
+ 1
+
+
+ usenamea
+ 1
+
+
+ usenameb
+ 1
+
+
+ usenamec
+ 1
+
+
+ usetranslator
+ 0
+
+
+ useshortauthor
+ 1
+
+
+ useshorteditor
+ 1
+
+
+
+
+
+ extradatecontext
+ labelname
+ labeltitle
+
+
+ labelalpha
+ 0
+
+
+ labelnamespec
+ shortauthor
+ author
+ shorteditor
+ editor
+ translator
+
+
+ labeltitle
+ 0
+
+
+ labeltitlespec
+ shorttitle
+ title
+ maintitle
+
+
+ labeltitleyear
+ 0
+
+
+ labeldateparts
+ 0
+
+
+ labeldatespec
+ date
+ year
+ eventdate
+ origdate
+ urldate
+ nodate
+
+
+ maxalphanames
+ 3
+
+
+ maxbibnames
+ 6
+
+
+ maxcitenames
+ 2
+
+
+ maxsortnames
+ 6
+
+
+ maxitems
+ 3
+
+
+ minalphanames
+ 1
+
+
+ minbibnames
+ 1
+
+
+ mincitenames
+ 1
+
+
+ minsortnames
+ 1
+
+
+ minitems
+ 1
+
+
+ nohashothers
+ 0
+
+
+ noroman
+ 0
+
+
+ nosortothers
+ 0
+
+
+ singletitle
+ 0
+
+
+ skipbib
+ 0
+
+
+ skipbiblist
+ 0
+
+
+ skiplab
+ 0
+
+
+ uniquelist
+ false
+
+
+ uniquename
+ false
+
+
+ uniqueprimaryauthor
+ 0
+
+
+ uniquetitle
+ 0
+
+
+ uniquebaretitle
+ 0
+
+
+ uniquework
+ 0
+
+
+ useprefix
+ 0
+
+
+ useafterword
+ 1
+
+
+ useannotator
+ 1
+
+
+ useauthor
+ 1
+
+
+ usebookauthor
+ 1
+
+
+ usecommentator
+ 1
+
+
+ useeditor
+ 1
+
+
+ useeditora
+ 1
+
+
+ useeditorb
+ 1
+
+
+ useeditorc
+ 1
+
+
+ useforeword
+ 1
+
+
+ useholder
+ 1
+
+
+ useintroduction
+ 1
+
+
+ usenamea
+ 1
+
+
+ usenameb
+ 1
+
+
+ usenamec
+ 1
+
+
+ usetranslator
+ 0
+
+
+ useshortauthor
+ 1
+
+
+ useshorteditor
+ 1
+
+
+
+
+ datamodel
+ labelalphanametemplate
+ labelalphatemplate
+ inheritance
+ translit
+ uniquenametemplate
+ sortingnamekeytemplate
+ sortingtemplate
+ extradatespec
+ extradatecontext
+ labelnamespec
+ labeltitlespec
+ labeldatespec
+ controlversion
+ alphaothers
+ sortalphaothers
+ presort
+ texencoding
+ bibencoding
+ sortingtemplatename
+ sortlocale
+ language
+ autolang
+ langhook
+ indexing
+ hyperref
+ backrefsetstyle
+ block
+ pagetracker
+ citecounter
+ citetracker
+ ibidtracker
+ idemtracker
+ opcittracker
+ loccittracker
+ labeldate
+ labeltime
+ dateera
+ date
+ time
+ eventdate
+ eventtime
+ origdate
+ origtime
+ urldate
+ urltime
+ alldatesusetime
+ alldates
+ alltimes
+ gregorianstart
+ autocite
+ notetype
+ uniquelist
+ uniquename
+ refsection
+ refsegment
+ citereset
+ sortlos
+ babel
+ datelabel
+ backrefstyle
+ arxiv
+ familyinits
+ giveninits
+ prefixinits
+ suffixinits
+ useafterword
+ useannotator
+ useauthor
+ usebookauthor
+ usecommentator
+ useeditor
+ useeditora
+ useeditorb
+ useeditorc
+ useforeword
+ useholder
+ useintroduction
+ usenamea
+ usenameb
+ usenamec
+ usetranslator
+ useshortauthor
+ useshorteditor
+ debug
+ loadfiles
+ safeinputenc
+ sortcase
+ sortupper
+ terseinits
+ abbreviate
+ dateabbrev
+ clearlang
+ sortcites
+ sortsets
+ backref
+ backreffloats
+ trackfloats
+ parentracker
+ labeldateusetime
+ datecirca
+ dateuncertain
+ dateusetime
+ eventdateusetime
+ origdateusetime
+ urldateusetime
+ julian
+ datezeros
+ timezeros
+ timezones
+ seconds
+ autopunct
+ punctfont
+ labelnumber
+ labelalpha
+ labeltitle
+ labeltitleyear
+ labeldateparts
+ pluralothers
+ nohashothers
+ nosortothers
+ noroman
+ singletitle
+ uniquetitle
+ uniquebaretitle
+ uniquework
+ uniqueprimaryauthor
+ defernumbers
+ locallabelwidth
+ bibwarn
+ useprefix
+ skipbib
+ skipbiblist
+ skiplab
+ dataonly
+ defernums
+ firstinits
+ sortfirstinits
+ sortgiveninits
+ labelyear
+ isbn
+ url
+ doi
+ eprint
+ related
+ subentry
+ dashed
+ bibtexcaseprotection
+ mincrossrefs
+ minxrefs
+ maxnames
+ minnames
+ maxbibnames
+ minbibnames
+ maxcitenames
+ mincitenames
+ maxsortnames
+ minsortnames
+ maxitems
+ minitems
+ maxalphanames
+ minalphanames
+ maxparens
+ dateeraauto
+
+
+ alphaothers
+ sortalphaothers
+ presort
+ indexing
+ citetracker
+ ibidtracker
+ idemtracker
+ opcittracker
+ loccittracker
+ uniquelist
+ uniquename
+ familyinits
+ giveninits
+ prefixinits
+ suffixinits
+ useafterword
+ useannotator
+ useauthor
+ usebookauthor
+ usecommentator
+ useeditor
+ useeditora
+ useeditorb
+ useeditorc
+ useforeword
+ useholder
+ useintroduction
+ usenamea
+ usenameb
+ usenamec
+ usetranslator
+ useshortauthor
+ useshorteditor
+ terseinits
+ abbreviate
+ dateabbrev
+ clearlang
+ labelnumber
+ labelalpha
+ labeltitle
+ labeltitleyear
+ labeldateparts
+ nohashothers
+ nosortothers
+ noroman
+ singletitle
+ uniquetitle
+ uniquebaretitle
+ uniquework
+ uniqueprimaryauthor
+ useprefix
+ skipbib
+ skipbiblist
+ skiplab
+ dataonly
+ skiplos
+ labelyear
+ isbn
+ url
+ doi
+ eprint
+ related
+ subentry
+ bibtexcaseprotection
+ labelalphatemplate
+ translit
+ sortexclusion
+ sortinclusion
+ extradatecontext
+ labelnamespec
+ labeltitlespec
+ labeldatespec
+ maxnames
+ minnames
+ maxbibnames
+ minbibnames
+ maxcitenames
+ mincitenames
+ maxsortnames
+ minsortnames
+ maxitems
+ minitems
+ maxalphanames
+ minalphanames
+
+
+ noinherit
+ nametemplates
+ labelalphanametemplatename
+ uniquenametemplatename
+ sortingnamekeytemplatename
+ presort
+ indexing
+ citetracker
+ ibidtracker
+ idemtracker
+ opcittracker
+ loccittracker
+ uniquelist
+ uniquename
+ familyinits
+ giveninits
+ prefixinits
+ suffixinits
+ useafterword
+ useannotator
+ useauthor
+ usebookauthor
+ usecommentator
+ useeditor
+ useeditora
+ useeditorb
+ useeditorc
+ useforeword
+ useholder
+ useintroduction
+ usenamea
+ usenameb
+ usenamec
+ usetranslator
+ useshortauthor
+ useshorteditor
+ terseinits
+ abbreviate
+ dateabbrev
+ clearlang
+ labelnumber
+ labelalpha
+ labeltitle
+ labeltitleyear
+ labeldateparts
+ nohashothers
+ nosortothers
+ noroman
+ singletitle
+ uniquetitle
+ uniquebaretitle
+ uniquework
+ uniqueprimaryauthor
+ useprefix
+ skipbib
+ skipbiblist
+ skiplab
+ dataonly
+ skiplos
+ isbn
+ url
+ doi
+ eprint
+ related
+ subentry
+ bibtexcaseprotection
+ maxnames
+ minnames
+ maxbibnames
+ minbibnames
+ maxcitenames
+ mincitenames
+ maxsortnames
+ minsortnames
+ maxitems
+ minitems
+ maxalphanames
+ minalphanames
+
+
+ nametemplates
+ labelalphanametemplatename
+ uniquenametemplatename
+ sortingnamekeytemplatename
+ uniquelist
+ uniquename
+ familyinits
+ giveninits
+ prefixinits
+ suffixinits
+ terseinits
+ nohashothers
+ nosortothers
+ useprefix
+
+
+ nametemplates
+ labelalphanametemplatename
+ uniquenametemplatename
+ sortingnamekeytemplatename
+ uniquename
+ familyinits
+ giveninits
+ prefixinits
+ suffixinits
+ terseinits
+ useprefix
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ prefix
+ family
+
+
+
+
+ shorthand
+ label
+ labelname
+ labelname
+
+
+ year
+
+
+
+
+
+ labelyear
+ year
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ prefix
+ family
+ given
+
+
+
+
+ prefix
+ family
+
+
+ given
+
+
+ suffix
+
+
+ prefix
+
+
+ mm
+
+
+
+ sf,sm,sn,pf,pm,pn,pp
+ family,given,prefix,suffix
+ boolean,integer,string,xml
+ default,transliteration,transcription,translation
+
+
+ article
+ artwork
+ audio
+ bibnote
+ book
+ bookinbook
+ booklet
+ collection
+ commentary
+ customa
+ customb
+ customc
+ customd
+ custome
+ customf
+ dataset
+ inbook
+ incollection
+ inproceedings
+ inreference
+ image
+ jurisdiction
+ legal
+ legislation
+ letter
+ manual
+ misc
+ movie
+ music
+ mvcollection
+ mvreference
+ mvproceedings
+ mvbook
+ online
+ patent
+ performance
+ periodical
+ proceedings
+ reference
+ report
+ review
+ set
+ software
+ standard
+ suppbook
+ suppcollection
+ suppperiodical
+ thesis
+ unpublished
+ video
+ xdata
+
+
+ sortyear
+ volume
+ volumes
+ abstract
+ addendum
+ annotation
+ booksubtitle
+ booktitle
+ booktitleaddon
+ chapter
+ edition
+ eid
+ entrysubtype
+ eprintclass
+ eprinttype
+ eventtitle
+ eventtitleaddon
+ gender
+ howpublished
+ indexsorttitle
+ indextitle
+ isan
+ isbn
+ ismn
+ isrn
+ issn
+ issue
+ issuesubtitle
+ issuetitle
+ issuetitleaddon
+ iswc
+ journalsubtitle
+ journaltitle
+ journaltitleaddon
+ label
+ langid
+ langidopts
+ library
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ nameaddon
+ note
+ number
+ origtitle
+ pagetotal
+ part
+ relatedstring
+ relatedtype
+ reprinttitle
+ series
+ shorthandintro
+ subtitle
+ title
+ titleaddon
+ usera
+ userb
+ userc
+ userd
+ usere
+ userf
+ venue
+ version
+ shorthand
+ shortjournal
+ shortseries
+ shorttitle
+ sorttitle
+ sortshorthand
+ sortkey
+ presort
+ institution
+ lista
+ listb
+ listc
+ listd
+ liste
+ listf
+ location
+ organization
+ origlocation
+ origpublisher
+ publisher
+ afterword
+ annotator
+ author
+ bookauthor
+ commentator
+ editor
+ editora
+ editorb
+ editorc
+ foreword
+ holder
+ introduction
+ namea
+ nameb
+ namec
+ translator
+ shortauthor
+ shorteditor
+ sortname
+ authortype
+ editoratype
+ editorbtype
+ editorctype
+ editortype
+ bookpagination
+ nameatype
+ namebtype
+ namectype
+ pagination
+ pubstate
+ type
+ language
+ origlanguage
+ crossref
+ xref
+ date
+ endyear
+ year
+ month
+ day
+ hour
+ minute
+ second
+ timezone
+ yeardivision
+ endmonth
+ endday
+ endhour
+ endminute
+ endsecond
+ endtimezone
+ endyeardivision
+ eventdate
+ eventendyear
+ eventyear
+ eventmonth
+ eventday
+ eventhour
+ eventminute
+ eventsecond
+ eventtimezone
+ eventyeardivision
+ eventendmonth
+ eventendday
+ eventendhour
+ eventendminute
+ eventendsecond
+ eventendtimezone
+ eventendyeardivision
+ origdate
+ origendyear
+ origyear
+ origmonth
+ origday
+ orighour
+ origminute
+ origsecond
+ origtimezone
+ origyeardivision
+ origendmonth
+ origendday
+ origendhour
+ origendminute
+ origendsecond
+ origendtimezone
+ origendyeardivision
+ urldate
+ urlendyear
+ urlyear
+ urlmonth
+ urlday
+ urlhour
+ urlminute
+ urlsecond
+ urltimezone
+ urlyeardivision
+ urlendmonth
+ urlendday
+ urlendhour
+ urlendminute
+ urlendsecond
+ urlendtimezone
+ urlendyeardivision
+ doi
+ eprint
+ file
+ verba
+ verbb
+ verbc
+ url
+ xdata
+ ids
+ entryset
+ related
+ keywords
+ options
+ relatedoptions
+ pages
+ execute
+
+
+ abstract
+ annotation
+ authortype
+ bookpagination
+ crossref
+ day
+ doi
+ eprint
+ eprintclass
+ eprinttype
+ endday
+ endhour
+ endminute
+ endmonth
+ endsecond
+ endtimezone
+ endyear
+ endyeardivision
+ entryset
+ entrysubtype
+ execute
+ file
+ gender
+ hour
+ ids
+ indextitle
+ indexsorttitle
+ isan
+ ismn
+ iswc
+ keywords
+ label
+ langid
+ langidopts
+ library
+ lista
+ listb
+ listc
+ listd
+ liste
+ listf
+ minute
+ month
+ namea
+ nameb
+ namec
+ nameatype
+ namebtype
+ namectype
+ nameaddon
+ options
+ origday
+ origendday
+ origendhour
+ origendminute
+ origendmonth
+ origendsecond
+ origendtimezone
+ origendyear
+ origendyeardivision
+ orighour
+ origminute
+ origmonth
+ origsecond
+ origtimezone
+ origyear
+ origyeardivision
+ origlocation
+ origpublisher
+ origtitle
+ pagination
+ presort
+ related
+ relatedoptions
+ relatedstring
+ relatedtype
+ second
+ shortauthor
+ shorteditor
+ shorthand
+ shorthandintro
+ shortjournal
+ shortseries
+ shorttitle
+ sortkey
+ sortname
+ sortshorthand
+ sorttitle
+ sortyear
+ timezone
+ url
+ urlday
+ urlendday
+ urlendhour
+ urlendminute
+ urlendmonth
+ urlendsecond
+ urlendtimezone
+ urlendyear
+ urlhour
+ urlminute
+ urlmonth
+ urlsecond
+ urltimezone
+ urlyear
+ usera
+ userb
+ userc
+ userd
+ usere
+ userf
+ verba
+ verbb
+ verbc
+ xdata
+ xref
+ year
+ yeardivision
+
+
+ set
+ entryset
+
+
+ article
+ addendum
+ annotator
+ author
+ commentator
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ eid
+ issn
+ issue
+ issuetitle
+ issuesubtitle
+ issuetitleaddon
+ journalsubtitle
+ journaltitle
+ journaltitleaddon
+ language
+ note
+ number
+ origlanguage
+ pages
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ version
+ volume
+
+
+ bibnote
+ note
+
+
+ book
+ author
+ addendum
+ afterword
+ annotator
+ chapter
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ eid
+ foreword
+ introduction
+ isbn
+ language
+ location
+ maintitle
+ maintitleaddon
+ mainsubtitle
+ note
+ number
+ origlanguage
+ pages
+ pagetotal
+ part
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ mvbook
+ addendum
+ afterword
+ annotator
+ author
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ foreword
+ introduction
+ isbn
+ language
+ location
+ note
+ number
+ origlanguage
+ pagetotal
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ inbook
+ bookinbook
+ suppbook
+ addendum
+ afterword
+ annotator
+ author
+ booktitle
+ bookauthor
+ booksubtitle
+ booktitleaddon
+ chapter
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ eid
+ foreword
+ introduction
+ isbn
+ language
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ note
+ number
+ origlanguage
+ part
+ publisher
+ pages
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ booklet
+ addendum
+ author
+ chapter
+ editor
+ editortype
+ eid
+ howpublished
+ language
+ location
+ note
+ pages
+ pagetotal
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+
+
+ collection
+ reference
+ addendum
+ afterword
+ annotator
+ chapter
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ eid
+ foreword
+ introduction
+ isbn
+ language
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ note
+ number
+ origlanguage
+ pages
+ pagetotal
+ part
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ mvcollection
+ mvreference
+ addendum
+ afterword
+ annotator
+ author
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ foreword
+ introduction
+ isbn
+ language
+ location
+ note
+ number
+ origlanguage
+ publisher
+ pubstate
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ incollection
+ suppcollection
+ inreference
+ addendum
+ afterword
+ annotator
+ author
+ booksubtitle
+ booktitle
+ booktitleaddon
+ chapter
+ commentator
+ edition
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ eid
+ foreword
+ introduction
+ isbn
+ language
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ note
+ number
+ origlanguage
+ pages
+ part
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ translator
+ volume
+ volumes
+
+
+ dataset
+ addendum
+ author
+ edition
+ editor
+ editortype
+ language
+ location
+ note
+ number
+ organization
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ type
+ version
+
+
+ manual
+ addendum
+ author
+ chapter
+ edition
+ editor
+ editortype
+ eid
+ isbn
+ language
+ location
+ note
+ number
+ organization
+ pages
+ pagetotal
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ type
+ version
+
+
+ misc
+ software
+ addendum
+ author
+ editor
+ editortype
+ howpublished
+ language
+ location
+ note
+ organization
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+ version
+
+
+ online
+ addendum
+ author
+ editor
+ editortype
+ language
+ note
+ organization
+ pubstate
+ subtitle
+ title
+ titleaddon
+ version
+
+
+ patent
+ addendum
+ author
+ holder
+ location
+ note
+ number
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+ version
+
+
+ periodical
+ addendum
+ editor
+ editora
+ editorb
+ editorc
+ editortype
+ editoratype
+ editorbtype
+ editorctype
+ issn
+ issue
+ issuesubtitle
+ issuetitle
+ issuetitleaddon
+ language
+ note
+ number
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ volume
+ yeardivision
+
+
+ mvproceedings
+ addendum
+ editor
+ editortype
+ eventday
+ eventendday
+ eventendhour
+ eventendminute
+ eventendmonth
+ eventendsecond
+ eventendtimezone
+ eventendyear
+ eventendyeardivision
+ eventhour
+ eventminute
+ eventmonth
+ eventsecond
+ eventtimezone
+ eventyear
+ eventyeardivision
+ eventtitle
+ eventtitleaddon
+ isbn
+ language
+ location
+ note
+ number
+ organization
+ pagetotal
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ venue
+ volumes
+
+
+ proceedings
+ addendum
+ chapter
+ editor
+ editortype
+ eid
+ eventday
+ eventendday
+ eventendhour
+ eventendminute
+ eventendmonth
+ eventendsecond
+ eventendtimezone
+ eventendyear
+ eventendyeardivision
+ eventhour
+ eventminute
+ eventmonth
+ eventsecond
+ eventtimezone
+ eventyear
+ eventyeardivision
+ eventtitle
+ eventtitleaddon
+ isbn
+ language
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ note
+ number
+ organization
+ pages
+ pagetotal
+ part
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ venue
+ volume
+ volumes
+
+
+ inproceedings
+ addendum
+ author
+ booksubtitle
+ booktitle
+ booktitleaddon
+ chapter
+ editor
+ editortype
+ eid
+ eventday
+ eventendday
+ eventendhour
+ eventendminute
+ eventendmonth
+ eventendsecond
+ eventendtimezone
+ eventendyear
+ eventendyeardivision
+ eventhour
+ eventminute
+ eventmonth
+ eventsecond
+ eventtimezone
+ eventyear
+ eventyeardivision
+ eventtitle
+ eventtitleaddon
+ isbn
+ language
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ note
+ number
+ organization
+ pages
+ part
+ publisher
+ pubstate
+ series
+ subtitle
+ title
+ titleaddon
+ venue
+ volume
+ volumes
+
+
+ report
+ addendum
+ author
+ chapter
+ eid
+ institution
+ isrn
+ language
+ location
+ note
+ number
+ pages
+ pagetotal
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+ version
+
+
+ thesis
+ addendum
+ author
+ chapter
+ eid
+ institution
+ language
+ location
+ note
+ pages
+ pagetotal
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+
+
+ unpublished
+ addendum
+ author
+ eventday
+ eventendday
+ eventendhour
+ eventendminute
+ eventendmonth
+ eventendsecond
+ eventendtimezone
+ eventendyear
+ eventendyeardivision
+ eventhour
+ eventminute
+ eventmonth
+ eventsecond
+ eventtimezone
+ eventyear
+ eventyeardivision
+ eventtitle
+ eventtitleaddon
+ howpublished
+ language
+ location
+ note
+ pubstate
+ subtitle
+ title
+ titleaddon
+ type
+ venue
+
+
+ abstract
+ addendum
+ afterword
+ annotator
+ author
+ bookauthor
+ booksubtitle
+ booktitle
+ booktitleaddon
+ chapter
+ commentator
+ editor
+ editora
+ editorb
+ editorc
+ foreword
+ holder
+ institution
+ introduction
+ issuesubtitle
+ issuetitle
+ issuetitleaddon
+ journalsubtitle
+ journaltitle
+ journaltitleaddon
+ location
+ mainsubtitle
+ maintitle
+ maintitleaddon
+ nameaddon
+ note
+ organization
+ origlanguage
+ origlocation
+ origpublisher
+ origtitle
+ part
+ publisher
+ relatedstring
+ series
+ shortauthor
+ shorteditor
+ shorthand
+ shortjournal
+ shortseries
+ shorttitle
+ sortname
+ sortshorthand
+ sorttitle
+ subtitle
+ title
+ titleaddon
+ translator
+ venue
+
+
+ article
+ book
+ inbook
+ bookinbook
+ suppbook
+ booklet
+ collection
+ incollection
+ suppcollection
+ manual
+ misc
+ mvbook
+ mvcollection
+ online
+ patent
+ periodical
+ suppperiodical
+ proceedings
+ inproceedings
+ reference
+ inreference
+ report
+ set
+ thesis
+ unpublished
+
+
+ date
+ year
+
+
+
+
+ set
+
+ entryset
+
+
+
+ article
+
+ author
+ journaltitle
+ title
+
+
+
+ book
+ mvbook
+
+ author
+ title
+
+
+
+ inbook
+ bookinbook
+ suppbook
+
+ author
+ title
+ booktitle
+
+
+
+ booklet
+
+
+ author
+ editor
+
+ title
+
+
+
+ collection
+ reference
+ mvcollection
+ mvreference
+
+ editor
+ title
+
+
+
+ incollection
+ suppcollection
+ inreference
+
+ author
+ editor
+ title
+ booktitle
+
+
+
+ dataset
+
+ title
+
+
+
+ manual
+
+ title
+
+
+
+ misc
+ software
+
+ title
+
+
+
+ online
+
+ title
+
+ url
+ doi
+ eprint
+
+
+
+
+ patent
+
+ author
+ title
+ number
+
+
+
+ periodical
+
+ editor
+ title
+
+
+
+ proceedings
+ mvproceedings
+
+ title
+
+
+
+ inproceedings
+
+ author
+ title
+ booktitle
+
+
+
+ report
+
+ author
+ title
+ type
+ institution
+
+
+
+ thesis
+
+ author
+ title
+ type
+ institution
+
+
+
+ unpublished
+
+ author
+ title
+
+
+
+
+ isbn
+
+
+ issn
+
+
+ ismn
+
+
+ gender
+
+
+
+
+
+
+ ../main.bib
+
+
+ amazon-rekognition
+ amazon-rekognition-custom-labels
+ amazon-rekognition-custom-labels-training
+ google-vision-api
+ google-vision-price-sheet
+ google-vision-product-recognizer-guide
+ mnist
+ mist-high-accuracy
+ lecun-98
+ mist-high-accuracy
+ imagenet
+ krizhevsky2012imagenet
+ resnet-152
+ efficientnet
+ krizhevsky2012imagenet
+ resnet
+ going-deeper-with-convolutions
+ very-deep-convolution-networks-for-large-scale-image-recognition
+ efficient-net
+ efficient-net
+ inverted-bottleneck-mobilenet
+ tensorflow2015-whitepaper
+ pytorch
+ pytorch-vs-tensorflow-1
+ pytorch-vs-tensorflow-1
+ pytorch-vs-tensorflow-2
+ json-api-usage-stats
+ nginx
+ postgressql
+ svelte
+ state-of-js-2022
+ js-frontend-frameworks-performance
+ svelte-kit
+ bycrpt
+ go
+ imagenet
+ mnist
+ cifar10
+ stl10
+ cifar10
+ artbench
+ caltech256
+ fgvca
+ fooddataset
+
+
+
+
+ citeorder
+
+
+ intciteorder
+
+
+
+
+
+
diff --git a/report/report.blg b/report/report.blg
new file mode 100644
index 0000000..2f7f38d
--- /dev/null
+++ b/report/report.blg
@@ -0,0 +1,19 @@
+[0] Config.pm:307> INFO - This is Biber 2.19
+[0] Config.pm:310> INFO - Logfile is 'report.blg'
+[28] biber:340> INFO - === Wed May 15, 2024, 05:17:32
+[34] Biber.pm:419> INFO - Reading 'report.bcf'
+[59] Biber.pm:979> INFO - Found 37 citekeys in bib section 0
+[65] Biber.pm:4419> INFO - Processing section 0
+[69] Biber.pm:4610> INFO - Looking for bibtex file '../main.bib' for section 0
+[70] bibtex.pm:1713> INFO - LaTeX decoding ...
+[82] bibtex.pm:1519> INFO - Found BibTeX data source '../main.bib'
+[181] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'variable = shifted' with 'variable = non-ignorable'
+[181] UCollate.pm:68> INFO - Overriding locale 'en-US' defaults 'normalization = NFD' with 'normalization = prenormalized'
+[181] Biber.pm:4239> INFO - Sorting list 'none/global//global/global' of type 'entry' with template 'none' and locale 'en-US'
+[181] Biber.pm:4245> INFO - No sort tailoring available for locale 'en-US'
+[190] bbl.pm:660> INFO - Writing 'report.bbl' with encoding 'UTF-8'
+[195] bbl.pm:763> INFO - Output to report.bbl
+[195] Biber.pm:131> WARN - legacy month field 'November' in entry 'lecun-98' is not an integer - this will probably not sort properly.
+[195] Biber.pm:131> WARN - legacy month field 'Apr' in entry 'caltech256' is not an integer - this will probably not sort properly.
+[195] Biber.pm:131> WARN - BibTeX subsystem: /tmp/biber_tmp_nCQC/3b92c3a43883b50258e417775889aed6_1391351.utf8, line 387, warning: 130 characters of junk seen at toplevel
+[195] Biber.pm:133> INFO - WARNINGS: 3
diff --git a/report/report.out b/report/report.out
new file mode 100644
index 0000000..2c2ff3a
--- /dev/null
+++ b/report/report.out
@@ -0,0 +1,55 @@
+\BOOKMARK [1][-]{section.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 1
+\BOOKMARK [2][-]{subsection.1.1}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000B\000a\000c\000k\000g\000r\000o\000u\000n\000d}{section.1}% 2
+\BOOKMARK [2][-]{subsection.1.2}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000M\000o\000t\000i\000v\000a\000t\000i\000o\000n\000s}{section.1}% 3
+\BOOKMARK [2][-]{subsection.1.3}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000A\000i\000m}{section.1}% 4
+\BOOKMARK [2][-]{subsection.1.4}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000O\000b\000j\000e\000c\000t\000i\000v\000e\000s}{section.1}% 5
+\BOOKMARK [2][-]{subsection.1.5}{\376\377\000S\000u\000c\000c\000e\000s\000s\000\040\000C\000r\000i\000t\000e\000r\000i\000a}{section.1}% 6
+\BOOKMARK [2][-]{subsection.1.6}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000S\000t\000r\000u\000c\000t\000u\000r\000e}{section.1}% 7
+\BOOKMARK [1][-]{section.2}{\376\377\000L\000i\000t\000e\000r\000a\000t\000u\000r\000e\000\040\000a\000n\000d\000\040\000T\000e\000c\000h\000n\000i\000c\000a\000l\000\040\000R\000e\000v\000i\000e\000w}{}% 8
+\BOOKMARK [2][-]{subsection.2.1}{\376\377\000E\000x\000i\000s\000t\000i\000n\000g\000\040\000C\000l\000a\000s\000s\000i\000f\000i\000c\000a\000t\000i\000o\000n\000\040\000P\000l\000a\000t\000f\000o\000r\000m\000s}{section.2}% 9
+\BOOKMARK [2][-]{subsection.2.2}{\376\377\000R\000e\000q\000u\000i\000r\000e\000m\000e\000n\000t\000s\000\040\000o\000f\000\040\000I\000m\000a\000g\000e\000\040\000C\000l\000a\000s\000s\000i\000f\000i\000c\000a\000t\000i\000o\000n\000\040\000M\000o\000d\000e\000l\000s}{section.2}% 10
+\BOOKMARK [2][-]{subsection.2.3}{\376\377\000M\000e\000t\000h\000o\000d\000\040\000o\000f\000\040\000I\000m\000a\000g\000e\000\040\000C\000l\000a\000s\000s\000i\000f\000i\000c\000a\000t\000i\000o\000n\000\040\000M\000o\000d\000e\000l\000s}{section.2}% 11
+\BOOKMARK [2][-]{subsection.2.4}{\376\377\000W\000e\000l\000l\000-\000k\000n\000o\000w\000n\000\040\000m\000o\000d\000e\000l\000s}{section.2}% 12
+\BOOKMARK [2][-]{subsection.2.5}{\376\377\000M\000a\000c\000h\000i\000n\000e\000\040\000l\000e\000a\000r\000n\000i\000n\000g\000\040\000l\000i\000b\000r\000a\000r\000i\000e\000s}{section.2}% 13
+\BOOKMARK [2][-]{subsection.2.6}{\376\377\000S\000u\000m\000m\000a\000r\000y}{section.2}% 14
+\BOOKMARK [1][-]{section.3}{\376\377\000S\000e\000r\000v\000i\000c\000e\000\040\000A\000n\000a\000l\000y\000s\000i\000s\000\040\000a\000n\000d\000\040\000R\000e\000q\000u\000i\000r\000e\000m\000e\000n\000t\000s}{}% 15
+\BOOKMARK [2][-]{subsection.3.1}{\376\377\000S\000e\000r\000v\000i\000c\000e\000\040\000S\000t\000r\000u\000c\000t\000u\000r\000e}{section.3}% 16
+\BOOKMARK [2][-]{subsection.3.2}{\376\377\000R\000e\000s\000o\000u\000r\000c\000e\000s}{section.3}% 17
+\BOOKMARK [3][-]{subsubsection.3.2.1}{\376\377\000C\000o\000m\000p\000u\000t\000e\000\040\000R\000e\000s\000o\000u\000r\000c\000e\000s}{subsection.3.2}% 18
+\BOOKMARK [3][-]{subsubsection.3.2.2}{\376\377\000S\000t\000o\000r\000a\000g\000e}{subsection.3.2}% 19
+\BOOKMARK [2][-]{subsection.3.3}{\376\377\000U\000s\000e\000r\000\040\000i\000n\000t\000e\000r\000f\000a\000c\000e}{section.3}% 20
+\BOOKMARK [2][-]{subsection.3.4}{\376\377\000A\000P\000I}{section.3}% 21
+\BOOKMARK [2][-]{subsection.3.5}{\376\377\000D\000a\000t\000a\000\040\000M\000a\000n\000a\000g\000e\000m\000e\000n\000t}{section.3}% 22
+\BOOKMARK [2][-]{subsection.3.6}{\376\377\000S\000u\000m\000m\000a\000r\000y}{section.3}% 23
+\BOOKMARK [1][-]{section.4}{\376\377\000S\000e\000r\000v\000i\000c\000e\000\040\000D\000e\000s\000i\000g\000n}{}% 24
+\BOOKMARK [2][-]{subsection.4.1}{\376\377\000S\000t\000r\000u\000c\000t\000u\000r\000e\000\040\000o\000f\000\040\000t\000h\000e\000\040\000S\000e\000r\000v\000i\000c\000e}{section.4}% 25
+\BOOKMARK [2][-]{subsection.4.2}{\376\377\000I\000n\000t\000e\000r\000a\000c\000t\000i\000n\000g\000\040\000w\000i\000t\000h\000\040\000t\000h\000e\000\040\000s\000e\000r\000v\000i\000c\000e}{section.4}% 26
+\BOOKMARK [2][-]{subsection.4.3}{\376\377\000A\000P\000I}{section.4}% 27
+\BOOKMARK [2][-]{subsection.4.4}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000o\000n\000\040\000o\000f\000\040\000M\000o\000d\000e\000l\000s}{section.4}% 28
+\BOOKMARK [2][-]{subsection.4.5}{\376\377\000M\000o\000d\000e\000l\000s\000\040\000T\000r\000a\000i\000n\000i\000n\000g}{section.4}% 29
+\BOOKMARK [2][-]{subsection.4.6}{\376\377\000S\000u\000m\000m\000a\000r\000y}{section.4}% 30
+\BOOKMARK [1][-]{section.5}{\376\377\000S\000e\000r\000v\000i\000c\000e\000\040\000I\000m\000p\000l\000e\000m\000e\000n\000t\000a\000t\000i\000o\000n}{}% 31
+\BOOKMARK [2][-]{subsection.5.1}{\376\377\000S\000t\000r\000u\000c\000t\000u\000r\000e\000\040\000o\000f\000\040\000t\000h\000e\000\040\000S\000e\000r\000v\000i\000c\000e}{section.5}% 32
+\BOOKMARK [2][-]{subsection.5.2}{\376\377\000W\000e\000b\000\040\000A\000p\000p\000l\000i\000c\000a\000t\000i\000o\000n}{section.5}% 33
+\BOOKMARK [2][-]{subsection.5.3}{\376\377\000A\000P\000I}{section.5}% 34
+\BOOKMARK [2][-]{subsection.5.4}{\376\377\000G\000e\000n\000e\000r\000a\000t\000i\000o\000n\000\040\000a\000n\000d\000\040\000T\000r\000a\000i\000n\000i\000n\000g\000\040\000o\000f\000\040\000M\000o\000d\000e\000l\000s}{section.5}% 35
+\BOOKMARK [2][-]{subsection.5.5}{\376\377\000M\000o\000d\000e\000l\000\040\000I\000n\000f\000e\000r\000e\000n\000c\000e}{section.5}% 36
+\BOOKMARK [2][-]{subsection.5.6}{\376\377\000R\000u\000n\000n\000e\000r}{section.5}% 37
+\BOOKMARK [2][-]{subsection.5.7}{\376\377\000S\000u\000m\000m\000a\000r\000y}{section.5}% 38
+\BOOKMARK [1][-]{section.6}{\376\377\000L\000e\000g\000a\000l\000,\000\040\000S\000o\000c\000i\000e\000t\000a\000l\000,\000\040\000E\000t\000h\000i\000c\000a\000l\000\040\000a\000n\000d\000\040\000P\000r\000o\000f\000e\000s\000s\000i\000o\000n\000a\000l\000\040\000C\000o\000n\000s\000i\000d\000e\000r\000a\000t\000i\000o\000n\000s}{}% 39
+\BOOKMARK [2][-]{subsection.6.1}{\376\377\000L\000e\000g\000a\000l\000\040\000I\000s\000s\000u\000e\000s}{section.6}% 40
+\BOOKMARK [2][-]{subsection.6.2}{\376\377\000S\000o\000c\000i\000a\000l\000\040\000I\000s\000s\000u\000e\000s}{section.6}% 41
+\BOOKMARK [2][-]{subsection.6.3}{\376\377\000E\000t\000h\000i\000c\000a\000l\000\040\000I\000s\000s\000u\000e\000s}{section.6}% 42
+\BOOKMARK [2][-]{subsection.6.4}{\376\377\000P\000r\000o\000f\000e\000s\000s\000i\000o\000n\000a\000l\000\040\000I\000s\000s\000u\000e\000s}{section.6}% 43
+\BOOKMARK [1][-]{section.7}{\376\377\000S\000e\000r\000v\000i\000c\000e\000\040\000E\000v\000a\000l\000u\000a\000t\000i\000o\000n}{}% 44
+\BOOKMARK [2][-]{subsection.7.1}{\376\377\000T\000e\000s\000t\000i\000n\000g\000\040\000t\000h\000e\000\040\000m\000o\000d\000e\000l\000\040\000c\000r\000e\000a\000t\000i\000o\000n}{section.7}% 45
+\BOOKMARK [2][-]{subsection.7.2}{\376\377\000A\000P\000I\000\040\000P\000e\000r\000f\000o\000r\000m\000a\000n\000c\000e\000\040\000T\000e\000s\000t\000i\000n\000g}{section.7}% 46
+\BOOKMARK [2][-]{subsection.7.3}{\376\377\000U\000s\000a\000b\000i\000l\000i\000t\000y}{section.7}% 47
+\BOOKMARK [2][-]{subsection.7.4}{\376\377\000S\000u\000m\000m\000a\000r\000y}{section.7}% 48
+\BOOKMARK [1][-]{section.8}{\376\377\000C\000r\000i\000t\000i\000c\000a\000l\000\040\000R\000e\000v\000i\000e\000w\000\040\000o\000f\000\040\000P\000r\000o\000j\000e\000c\000t\000\040\000O\000u\000t\000c\000o\000m\000e\000s}{}% 49
+\BOOKMARK [2][-]{subsection.8.1}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000O\000b\000j\000e\000c\000t\000i\000v\000e\000s}{section.8}% 50
+\BOOKMARK [2][-]{subsection.8.2}{\376\377\000A\000\040\000r\000e\000t\000r\000o\000s\000p\000e\000c\000t\000i\000v\000e\000\040\000a\000n\000a\000l\000y\000s\000i\000s\000\040\000o\000f\000\040\000t\000h\000e\000\040\000d\000e\000v\000e\000l\000o\000p\000m\000e\000n\000t\000\040\000p\000r\000o\000c\000e\000s\000s}{section.8}% 51
+\BOOKMARK [2][-]{subsection.8.3}{\376\377\000P\000r\000o\000j\000e\000c\000t\000\040\000S\000h\000o\000r\000t\000c\000o\000m\000i\000n\000g\000s\000\040\000a\000n\000d\000\040\000I\000m\000p\000r\000o\000v\000e\000m\000e\000n\000t\000s}{section.8}% 52
+\BOOKMARK [2][-]{subsection.8.4}{\376\377\000F\000u\000t\000u\000r\000e\000\040\000W\000o\000r\000k}{section.8}% 53
+\BOOKMARK [2][-]{subsection.8.5}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{section.8}% 54
+\BOOKMARK [1][-]{section.9}{\376\377\000R\000e\000f\000e\000r\000e\000n\000c\000e\000s}{}% 55
diff --git a/report/report.run.xml b/report/report.run.xml
new file mode 100644
index 0000000..c7ed311
--- /dev/null
+++ b/report/report.run.xml
@@ -0,0 +1,87 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+]>
+
+
+ latex
+
+ report.bcf
+
+
+ report.bbl
+
+
+ blx-dm.def
+ blx-compat.def
+ biblatex.def
+ standard.bbx
+ numeric.bbx
+ numeric-comp.bbx
+ ieee.bbx
+ numeric.cbx
+ biblatex.cfg
+ english.lbx
+
+
+
+ biber
+
+ biber
+ report
+
+
+ report.bcf
+
+
+
+ report.bbl
+
+
+ report.bcf
+
+
+ ../main.bib
+
+
+
diff --git a/report/report.tex b/report/report.tex
index 073e775..2440866 100644
--- a/report/report.tex
+++ b/report/report.tex
@@ -8,7 +8,7 @@
\title{Image Classification as a Software Platform}
% Write your full name, as in University records
-\author{Andre Henriques\\Univerity of surrey}
+\author{Andre Henriques}
\date{}
@@ -22,12 +22,12 @@
\section{Service Implementation} \label{sec:si}
- This section will discuss how the service followed some possible designs to achieve a working system.
+ This chapter will discuss how the service followed some possible designs to achieve a working system.
The design path that was decided matches what made sense for the scale and needs of the project.
\subsection{Structure of the Service}
- The structure of the service matches the designed structure, as it can be seen \ref{fig:simplified_service_diagram}.
+ The structure of the service matches the designed structure, as it can be seen in Figure \ref{fig:simplified_service_diagram}.
\begin{figure}[h!]
\centering
@@ -36,11 +36,14 @@
\label{fig:simplified_service_diagram}
\end{figure}
- The implementation contains: Web App; Web server, which serves the Web App; API; Training Runners; Model Runners;
+ The implementation contains: Web App; Web server, which serves the Web App; API; Training Runners; Model Runners.
+ This differs from the designed solution, as it contains an extra nginx reverse proxy server \cite{nginx}.
+ The reverse proxy is required as it allows for the API and the webpage to be accessible from the same domain.
- The implementation contains an extra nginx reverse proxy server that allows for the API and the webpage to be accessible from them same domain.
-
- The rest of this section will go into details on how every tier of the structure was implemented.
+ The selected database was PostgreSQL \cite{postgressql} as it is one of the most advanced open source databases available.
+ The database stores all data required for the system to work, with the exception of uploaded images and model files.
+
+ The rest of this chapter will discuss how each individual part of the system was implemented.
\subsection{Web Application} \label{web-app-design}
@@ -60,18 +63,18 @@
There exist currently many frameworks to create SPAs.
I selected Svelte \cite{svelte} for this project.
- I selected Svelte because it's been one of the most liked frameworks to work with in the last years, accordingly to the State of JS survey \cite{state-of-js-2022}.
- It's also one of the best performant frameworks that is currently available that has extremity good performance \cite{js-frontend-frameworks-performance}.
+ I selected Svelte because it has been one of the most liked frameworks to work with in recent years, according to the State of JS survey \cite{state-of-js-2022}.
+ It is also one of the most performant frameworks currently available \cite{js-frontend-frameworks-performance}.
I also already have experience with Svelte.
- I will be using Svelte with the SvelteKit framework \cite{svelte-kit} which greatly improves the developer experience.
-
+ I will be using Svelte with the SvelteKit framework \cite{svelte-kit} which greatly improves the developer experience.
SvelteKit allows for the easy creation of SPAs with a good default web router.
-
- The static adapter will be used to generate a static HTML and JavaScript files, and they will be hosted by an NGINX proxy \cite{nginx}.
+ When deploying into a production environment, the static adapter can be used to generate static HTML and JavaScript files.
+ These static files can then be hosted on a more efficient HTTP server than the one running on NodeJS.
The web application uses the API to control the functionality of the service.
This implementation allows users of the application to do everything that the application does with the API, which is ideal in a SaaS project.
+ The communication with the API, when correctly configured, uses HTTPS to make this communication encrypted and safe.
\subsubsection*{Service authentication} \label{sec:impl:service-auth}
@@ -82,18 +85,17 @@
\label{fig:simplified_auth_diagram}
\end{figure}
+ %TODO task about the above image
+
The user uses an email and password to Sign In or Register with the application.
This is sent to the server and stored in a user account.
-
The Password is stored hashed using bcrypt \cite{bycrpt}.
- In the future, other methods of authentication might be provided; like using Googles' OAuth.
+ In the future, other methods of authentication might be provided; like using Google's OAuth.
Once logged In, the user will be able to use the application and manage tokens that were emitted for this application.
This allows the user to manage what services have access to the account. % and the usage that those services have used.
-
On the web app, the user can manage existing tokens.
Guaranteeing that only the clients that should be accessing the information are.
-
- In the management screen, which can be seen in Fig. \ref{fig:token_page}, the user can remove, and create tokens.
+ In the management screen, which can be seen in Figure \ref{fig:token_page}, the user can remove, and create tokens.
\begin{figure}[H]
\centering
@@ -107,16 +109,16 @@
\begin{figure}[h!]
\centering
\includegraphics[width=\textwidth]{models_flow}
- \caption{Simplified Diagram of Model management}
+ \caption{Simplified Diagram of Model management.}
\label{fig:simplified_model_diagram}
\end{figure}
- The diagram \ref{fig:simplified_model_diagram} shows the steps that the user takes to use a model.
+ Figure \ref{fig:simplified_model_diagram} shows the steps that the user takes to use a model.
First, the user creates the model.
In this step, the user uploads a sample image of what the model will be handling.
This image is used to define what the kinds of images the model will be able to intake.
- This is done in the page shown in Fig. \ref{fig:create_model}, the user provides a name for the model and an image and then presses the button create.
+ This is done in the page shown in Figure \ref{fig:create_model}, the user provides a name for the model and an image and then presses the button create.
\begin{figure}[H]
\centering
@@ -125,19 +127,19 @@
\label{fig:create_model}
\end{figure}
- The user is then shown the model page, which contains all the information about a model, which can be seen in Fig. \ref{fig:model_page}.
+ The user is then shown the model page, which contains all the information about a model, which can be seen in Figure \ref{fig:model_page}.
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{base_model_page}
- \caption{Screenshot of web application on the page shows basic information about the model.}
+ \caption{Screenshot of the web application on the page shows basic information about the model.}
\label{fig:model_page}
\end{figure}
- This page contains a set of tabs a the top.
- Each tab gives different insight abou the model.
- The ``Model'' tab, contains only relevent actions ot the most pressing action that the user take.
- In Fig. \ref{fig:model_page}, the user has created a model but has not added training data so the page shows a section where the user can input training data.
+ This page contains a set of tabs at the top.
+ Each tab gives different insight about the model.
+ The ``Model'' tab, contains only relevant actions that the user can take.
+ In Figure \ref{fig:model_page}, the user has created a model but has not added training data, so the page shows a section where the user can input training data.
The ``Model Data'' tab contains a more detailed view about data that has been updated.
Currently, the system does not support resizing of images that are different from the one uploaded at the creation step.
@@ -153,19 +155,16 @@
\begin{figure}[H]
\centering
\includegraphics[width=0.9\textwidth]{model_data_tab}
- \caption{Screenshot of web application part of the ``Model Data'' tab}
+ \caption{Screenshot of web application part of the ``Model Data'' tab.}
\label{fig:model_data_tab}
\end{figure}
- This information can be useful to more advanced users that might decide to gather more data to balance the dataset.
-
+ This information can be useful to more advanced users that might decide to gather more data to balance the dataset.
To upload the rest of the data set, the user can upload a zip file that contains a set of classes and images corresponding to that class.
That zip file is processed and images and classes are created.
- The user is given instruction on how create the zip file so that the system can esaly process the data, the upload set can be seen in \ref{fig:upload_data_section}.
-
+ The user is given instructions on how to create the zip file so that the system can easily process the data; the upload section can be seen in Figure \ref{fig:upload_data_section}.
This process was originally slow as the system did not have the capabilities to parallelize the process of importing the images, but this was implemented, and the import process was improved.
- The improved process now takes a few seconds to process and verify the entirety of the dataset, making the experience for the end user better.
-
+ The improved process now takes a few seconds to process and verify the entirety of the dataset, making the experience for the end user better.
Alternatively, the user can use the API to create new classes and upload images.
\begin{figure}[H]
@@ -183,7 +182,7 @@
When the model is finished training, the user can use the model to run inference tasks on images.
To achieve this, the user can either use the API to submit a classification task or use the tasks tab in the web platform.
- In the tasks tab, which can be seen in Fig. \ref{fig:update_data_section}, the user can see current and previous tasks.
+ In the tasks tab, which can be seen in Figure \ref{fig:upload_data_section}, the user can see current and previous tasks.
The users can see what tasks were performed and their results.
The user can also inform the service if the task that was performed did return the correct results.
This information can be used to keep track of the real accuracy of the model.
@@ -192,7 +191,7 @@
\begin{figure}[H]
\centering
- \includegraphics[width=0.6\textwidth]{model_task_tab}
+ \includegraphics[height=0.95\textheight]{model_task_tab}
\caption{Screenshot of web application on the tasks tab.}
\label{fig:upload_data_section}
\end{figure}
@@ -202,17 +201,17 @@
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{models_advanced_flow}
- \caption{Simplified Diagram of Advanced Model management}
+ \caption{Simplified Diagram of Advanced Model management.}
\label{fig:simplified_model_advanced_diagram}
\end{figure}
- The diagram \ref{fig:simplified_model_advanced_diagram} shows the steps that the user takes to use a model.
+ Figure \ref{fig:simplified_model_advanced_diagram} shows the steps that the user takes to use a model.
The steps are very similar to the normal model management.
The user would follow all the steps that are required for normal model creation and training.
At the end of the process, the user will be able to add new data to the model and retrain it.
- To achieve that, the user would simply go to the data tab and create a new class, which the Fig. \ref{expand_class_part} shows.
+ To achieve that, the user would simply go to the data tab and create a new class, which Figure \ref{fig:expand_class_part} shows.
Once a new class is added, the webpage will inform the user that the model can be retrained.
The user might choose to retrain the model now or add more new classes and retrain later.
@@ -231,38 +230,37 @@
Users in this tab can see the progress and results of their tasks.
The webpage also provides nice, easy to see statistics on the task results, allowing the user to see how the model is performing.
- Which is shown on Fig. \ref{fig:upload_data_section}
+ This is shown in Figure \ref{fig:upload_data_section}.
On the administrator, users should be able to change the status of tasks as well as see a more comprehensive view on how the tasks are being performed.
- Administrator users can see the current status of runners, as well as which task the runners are doing.
+ Administrator users can see the current status of runners, as well as which task the runners are doing; Figure \ref{fig:runner_page} shows the runner visualisation page.
\begin{figure}[H]
\centering
\includegraphics[width=0.6\textwidth]{runner_page}
- \caption{Screenshot of web application on the runners administrator Page.}
+ \caption{Screenshot of web application on the runner administration page.}
\label{fig:runner_page}
\end{figure}
\subsection{API}
- The API was implemented as a multithreaded go \cite{go} server.
+ The API was implemented as a multithreaded Go \cite{go} server.
The application, on launch, loads a configuration file and connects to the database.
After connecting to the database, the application performs pre-startup checks to make sure that tasks which were interrupted by a server restart were not left in an unrecoverable state.
Once the checks are done, the application creates workers, which will be explained in section \ref{impl:runner}; when this is completed, the API server is finally started up.
Information about the API is shown around the web page so that the user can see information about the API right next to where the user would normally do the action, providing a good user interface.
- As the user can get information about right where they would normally do the action, as it can be seen in Fig. \ref{fig:code_demo}.
+ As shown in Figure \ref{fig:code_demo}, the user can get this information right where they would normally perform the action.
\begin{figure}[H]
\centering
\includegraphics[width=0.6\textwidth]{code_demo}
- \caption{Screenshot of web application that shows the explanation of the API call}
+ \caption{Screenshot of the web application that shows the explanation of the API call.}
\label{fig:code_demo}
\end{figure}
- This server will take JSON and multipart form data requests, the requests are processed, and answered with a JSON response.
-
+ This server will take JSON and multipart form data requests, the requests are processed, and answered with a JSON response.
The multipart requests are required due to JSON's inability to transmit binary data, which would make the uploading of images extremely inefficient.
Those images would have to be transformed into binary data and then uploaded as a byte array or encoded as base64 and uploaded.
Either of those options is extremely inefficient.
@@ -284,7 +282,7 @@
During the login process, the service checks to see if the user is registered and if the password provided during the login matches the stored hash.
Upon verifying the user, a token is emitted.
- Once a user is logged in they can then create more tokens as seen in the section \ref{sec:impl:service-auth}.
+ Once a user is logged in they can then create more tokens as seen in section \ref{sec:impl:service-auth}.
While using the API the user should only use created tokens in the settings page as those tokens are named, and have controllable expiration dates.
This is advantageous from a security perspective, as the user can manage who has access to the API.
If the token gets leaked, the user can then delete the named token, to guarantee the safety of his access.
@@ -296,7 +294,7 @@
Model generation happens on the API server; the API server analyses the image that was provided and generates several model candidates accordingly.
The number of model candidates is user defined.
- The model generation subsystem decides the structure of the model candidates based on the image size, it prioritizes the smaller models for smaller images and convolution networks with bigger images.
+ The model generation subsystem decides the structure of the model candidates based on the image size, it prioritises the smaller models for smaller images and convolution networks with bigger images.
The depth is controlled both by the image size and the number of outputs; model candidates that need to be expanded are generated with bigger values to account for possible new values.
It tries to generate the optimal size if only one model is requested.
If more than one is requested, then the generator tries to generate models of various types and sizes, so if there is a possible smaller model it will also be tested.
@@ -390,11 +388,11 @@
% TODO talk about how the runner loads images
- \subsection{Conclusion}
- This section went into the details of how the designed was implemented.
+ \subsection{Summary}
+ This chapter went into the details of how the design was implemented.
The design was envisioned to be the best possible version of this service, but scope was restrained to the necessities of the system while it was being developed.
Possible features that would make the implemented application closer to the ideal design could have been implemented if there had been a higher need during the development timeline.
- This will be more discussed in the section about a critical review of the work.
+ This will be more discussed in chapter \ref{sec:crpo}.
\pagebreak
@@ -427,7 +425,7 @@
Legal issues might occur due to uploaded images. For example, those images could be copyrighted, or the images could be confidential. The service is designed to provide ways to allow users to host their images without having to host the images itself, moving the legal requirement for the management of the data to the user of the system.
- \subsubsection{GDPR}
+ \subsubsection*{GDPR}
The General Data Protection Regulation (GDPR) (GDPR, 2018) is a data protection and privacy law in the European Union and the European Economic Area, that has also been implemented into British law.
The main objective of the GDPR is to minimise the data collected by the application for purposes that are not used in the application, as well as giving users the right to be forgotten.
@@ -459,7 +457,7 @@
\subsubsection*{Professional Competence and Integrity}
This project has been an enormous undertaking that pushed the limits of my capabilities.
I am glad that I was able to use this opportunity to learn about distributed systems, image classification, go, and Svelte.
- During this project, I also followed the best practices of software development such as using source control software and having an audit to tasks and issues.
+ During this project, I also followed the best practices of software development, such as using source control software and having an audit to tasks and issues.
\subsubsection*{Duty to Relevant Authority}
For the duration of the project, all the guidelines provided by the University of Surrey were followed.
@@ -470,217 +468,28 @@
\pagebreak
-
-
-
-
-
-
-
-
-
-
- \section{Service Evaluation} \label{sec:se}
- This section will discuss how the service can be evaluated from a technical standpoint and its results.
-
- With the goals of the project, there are two kinds of tests that need to be accounted for.
- User testing tests that relate to the experience of the user while using the project and tests that quantitive test the project.
-
- Such as accuracy of the generated models, response time to queries.
-
- \subsection{Testing the model}
- To test the system, a few datasets were selected.
- The datasets were selected to represent different possible sizes of models, and sizes of output labels.
-
- The ImageNet\cite{imagenet} was not selected as one of the datasets that will be tested, as it does not represent the target problem that this project is trying to tackle.
-
- The tests will measure:
- \begin{itemize}
- \item Time to process and validate the entire dataset upon upload
- \item Time to train the dataset
- \item Time to classify the image once the dataset has been trained
- \item Time to extend the model
- \item Accuracy of the newly created model
- \end{itemize}
-
- The results will be placed in the results table.
-
- \subsubsection{MNIST}
-
- The MNIST \cite{mnist} dataset was selected due to its size. It's a small dataset that can be trained quickly and can be used to verify other internal systems of the service.
-
- During testing only the 9 out 10 classes are trainged and the 10th is added during the retraining process.
-
- \subsubsection{CIFAR-10}
-
- The MNIST \cite{mnist} dataset was selected due to its size. It's a small dataset that can be trained quickly and can be used to verify other internal systems of the service.
-
- During testing only the 9 out 10 classes are trainged and the 10th is added during the retraining process.
-
- \textbf{TODO add image}
-
- \textbf{TODO add more datasets}
-
- \subsubsection{Results}
-
-
- \textbf{TODO add more data}
-
-
- \begin{longtable}{ | c | c | c | c | c | c |}
- \hline
- Dataset & Import Time & Train Time & Classification Time & Extend Time & Accuracy \\ \hline
- MNIST & $8s$ & $2m$ & $1s$ & $50s$ & $98\%$ \\ \hline
- CIFAR-10 & $6s$ & $41m 38s$ & $1s$ & $1m 11s$ & $95.2\%$ \\ \hline
- \caption{Evaluation Results}
- \label{tab:eval-results}
- \end{longtable}
-
- \subsubsection{Conclusions}
- The service can create models that represent what the users want in a reasonable amount of time without much interaction from the user.
- The models created have the target accuracy required by the users, and the amount of time it takes for the models to train and expand is reasonable and within the margins that meet the success criteria for the project.
-
- \pagebreak
-
-
-
-
-
-
-
-
-
-
-
-
- \section{Critical Review of Project Outcomes} \label{sec:crpo}
-
- This section will go into details to see if the project was able to achieve the goals set forth in the introduction.
-
- The section will be analysing if the goals of the project were met, then shortcomings and improvements off the implementation will be discussed. After analysing shortcomings and improvements, possible future work that be done to the project will be discussed.
- The section will end with a general statement about the state of the project.
-
- \subsection{Project Objectives}
-
- In the introduction section of this project, some objectives were set for this project.
-
- By the end of the project, the developed solution can achieve the goals set forth.
-
- \subsubsection*{A system to upload images that will be assigned to a model}
-
- This goal was achieved.
- One of the abilities of both the API and the webpage are to be able to upload images to the service.
- Which means that a system was created that allows users to upload images that will be linked with a model.
-
- \subsubsection*{A system to automatically train and create models}
-
- This goal was achieved.
- The designed server can create models based only using the data provided by the user without any human interaction.
- The model creation system is not as efficient, this inefficient will be discussed more in a future subsection it could be but can still achieve the desired goal.
-
- \subsubsection*{Platform where users can manage their models}
-
- This goal was achieved.
- A web-based platform was developed where users can manage all the data related to machine learning models that were created.
- The platform that was implemented allows users to create models, upload images related to the model, and then manage the submitted classification tasks.
-
- The platform allows managing any models easily they create with within, meaning that the developed solution can achieve the first goal of the project.
-
- \subsubsection{A system to automatically expand models without fully retraining the models}
-
- This goal was achieved.
- A system was created that allows users to add more images and classes to models that were previously created.
- And this is done without having to fully retrain the model.
-
- \subsubsection*{An API that users can interact programmatically}
-
- This goal was achieved.
- The API implementation allows users to programmatically access the system.
- The efficacy of the API is proved by its use in the front end application.
- The front end application uses the API to fully control the service.
- This means that everything that can be done in the frontend can be done via the API.
- Which means that the API can satisfy every need that a possible user might have; therefore this goal was accomplished.
-
- \subsection{Project Shortcomings and Improvements}
-
- Although the project was able to achieve the desired goals, the project has some shortcomings that can be improved upon in future iterations.
- This section will analyse some of those shortcoming and ways to improve the service.
-
- \subsubsection*{Model Generation}
- The model generation system is a complex, and due to all the moving parts that make the system work, it requires a large amount of to work to maintain.
- It is also very inefficient due to the having to generate custom tailored python scripts, that cause the data to be reloaded every time a new a round-robin round needs to happen.
-
- A way more efficient way is to perform all the training directly in go server.
- Running the training directly in go would allow the service to be able to keep track of memory and GPU usage, move data from the GPU and CPU effortlessly between runs, and would remove uncertainty from the training system.
-
- The model generation was originally implemented with TensorFlow, this ended up limiting the generation of the models in go as the bindings for TensorFlow were lacking in the tools used to train the model.
- Using Lib Torch libraries would allow more control over data, and allow that control to be done in go, which would improve both control and speed of the process.
- Unfortunately, when a version of the service was attempted to be implemented using Lib Torch, the system was too unstable.
- Problems were encountered with the go bindings for the Lib Torch library or, the Lib Torch library was causing inconsistent behaviour with between runs.
- That compounded with time limitations make it impossible for a Lib Torch implementation to come to fruition.
-
- Having a full go implementation would make the system more maintainable and fast.
-
-
- \subsubsection*{Image storage}
-
- The image storage is all local, while this does not currently affect how remote runner works.
- %TODO improve this
- This is less problematic when the runner is on the same network as the main server, but if a possible user would like to provide their runners.
- This would require a lot of bandwidth for the images to be transferred over the network every time the model needs to run.
-
- A better solution for image storage would allow user provided runners to store images locally.
- During the upload time, the API, instead of storing the images locally, would instruct the users' runner to store the images locally, therefore when the runner would need to perform any training tasks with local data instead of remote data.
-
- This would not also not require modification of the current system.
- The system was designed and implemented to be expanded.
- The dataset system was designed to be able to handle different kinds of storage methods in the future, such as remote storage and Object Buckets, like Amazon S3.
-
- \subsection{Future Work}
- This section will consider possible future work that can be built upon this project.
-
- \subsubsection*{Image Processing Pipelines}
- The current system does not allow for images of different sizes to be uploaded to the system, an interesting project would be to create a new subsystem that would allow the user to create image processing pipelines.
-
- This new system would allow users to create a set of instructions that images would go through to be added to the system.
- For example, automatically cropping, scaling, or padding the image.
-
- A system like this would add versatility to the system and remove more work from the users of the service as they don't have to worry about handling the image processing on their side.
-
- \subsubsection*{Different Kinds of Models}
- The runner system could be used to train and manage different kinds of models, not just image classification models.
-
- If the system was modified to have different kinds of models, it would allow the users to run different kinds of models.
- Such as Natural Language Processing Models, or Multi Model Models.
- This would increase the versatility of the service, and it would allow users to automate more tasks.
-
-
- \subsection{Conclusion}
-
- With the increase in automation recently, having a system that allows users to quickly build classification models for their tasks, would be incredibly useful.
- This project provides exactly that, a simple-to-use system that allows the user to create models with ease.
-
- There are more features to be added to the service, that would improve the quality of the project.
- The service is in a state that it would be possible to run it in a production environment, making this project successful.
-
+ \include{eval}
+ \include{review}
\pagebreak
- \section{Appendix}
- \begin{figure}[h!]
- \begin{center}
- \includegraphics[height=0.8\textheight]{expandable_models_simple}
- \end{center}
- \caption{Contains an overall view of the entire system}\label{fig:expandable_models_simple}
- \end{figure}
+ %\section{appendix}
+ % \begin{figure}[h!]
+ % \begin{center}
+ % \includegraphics[height=0.8\textheight]{expandable_models_simple}
+ % \end{center}
+ % \caption{contains an overall view of the entire system}
+ % \label{fig:expandable_models_simple}
+ % \end{figure}
- \begin{figure}
- \begin{center}
- \includegraphics[height=0.8\textheight]{expandable_models_generator}
- \end{center}
- \caption{Contains an overall view of the model generation system}\label{fig:expandable_models_generator}
- \end{figure}
+ % \begin{figure}
+ % \begin{center}
+ % \includegraphics[height=0.8\textheight]{expandable_models_generator}
+ % \end{center}
+ % \caption{contains an overall view of the model generation system}
+ % \label{fig:expandable_models_generator}
+ % \end{figure}
diff --git a/report/review.tex b/report/review.tex
new file mode 100644
index 0000000..acb8b49
--- /dev/null
+++ b/report/review.tex
@@ -0,0 +1,136 @@
+\section{Critical Review of Project Outcomes} \label{sec:crpo}
+
+ This chapter will go into details to see if the project was able to achieve the goals set forth in the introduction.
+ The chapter will be analysing if the goals of the project were met, then shortcomings and improvements of the implementation will be discussed. After analysing shortcomings and improvements, possible future work that can be done on the project will be discussed.
+ The chapter will end with a general statement about the state of the project.
+
+ \subsection{Project Objectives}
+
+ In the introduction section of this project, some objectives were set for this project.
+
+ By the end of the project, the developed solution can achieve the goals set forth.
+
+ \subsubsection*{A system to upload images that will be assigned to a model}
+
+ This goal was achieved.
+ One of the abilities of both the API and the webpage is to upload images to the service.
+ Which means that a system was created that allows users to upload images that will be linked with a model.
+
+ \subsubsection*{A system to automatically train and create models}
+
+ This goal was achieved.
+ The designed server can create models based only using the data provided by the user without any human interaction.
+ The model creation system is not as efficient as it could be; this inefficiency will be discussed further in a later subsection, but the system can still achieve the desired goal.
+
+ \subsubsection*{Platform where users can manage their models}
+
+ This goal was achieved.
+ A web-based platform was developed where users can manage all the data related to machine learning models that were created.
+ The platform that was implemented allows users to create models, upload images related to the model, and then manage the submitted classification tasks.
+
+ The platform allows users to easily manage any models they create within it, meaning that the developed solution can achieve this goal of the project.
+
+ \subsubsection*{A system to automatically expand models without fully retraining the models}
+
+ This goal was achieved.
+ A system was created that allows users to add more images and classes to models that were previously created.
+ And this is done without having to fully retrain the model.
+
+ \subsubsection*{An API that users can interact programmatically}
+
+ This goal was achieved.
+ The API implementation allows users to programmatically access the system.
+ The efficacy of the API is proved by its use in the front end application.
+ The front end application uses the API to fully control the service.
+ This means that everything that can be done in the frontend can be done via the API.
+ Which means that the API can satisfy every need that a possible user might have; therefore this goal was accomplished.
+
+ \subsection{A retrospective analysis of the development process}
+
+ This project was complex to implement, with many interconnected systems working together to achieve the goals of the project.
+ This complexity was a result of open-ended design and scope expansion.
+ If the scope of the project had been more limited, the project could have achieved higher overall results.
+
+ While there were no major technical setbacks during the development process,
+ there were times when software updates of libraries made the implementation unusable, which slowed the development velocity considerably, as those issues required fixing.
+ If actions such as creating OCI containers were done in the earlier stages of development, issues such as this could have been prevented.
+ One of these software updates made it so that images were no longer able to be classified.
+ This then prompted me to try a different library to train and classify the images, but this ended up not being achievable, and the original library problem was fixed instead.
+ While the time used to try to integrate the different machine learning library helped the project improve, most of the effort put into this possible transition was spent inefficiently.
+
+ As far as tools aiding the development, this project followed industry norms by having the source code tracked in Git and issues tracked in an issue tracker.
+ This greatly helped in the development process, by having one centralised repository of both the code and the known issues of that code.
+
+
+ \subsection{Project Shortcomings and Improvements}
+
+ Although the project was able to achieve the desired goals, the project has some shortcomings that can be improved upon in future iterations.
+ This section will analyse some of those shortcomings and ways to improve the service.
+
+ \subsubsection*{Model Generation}
+ The model generation system is complex, and due to all the moving parts that make the system work, it requires a large amount of work to maintain.
+ It is also very inefficient due to having to generate custom-tailored Python scripts, which cause the data to be reloaded every time a new round-robin round needs to happen.
+
+ A more efficient way would be to perform all the training directly on the Go server.
+ Running the training directly in Go would allow the service to keep track of memory and GPU usage, move data between the GPU and CPU effortlessly between runs, and would remove uncertainty from the training system.
+
+ The model generation was originally implemented with TensorFlow; this ended up limiting the generation of the models in Go, as the bindings for TensorFlow were lacking in the tools used to train the model.
+ Using Lib Torch libraries would allow more control over data, and allow that control to be done in Go, which would improve both the control and the speed of the process.
+ Unfortunately, when a version of the service was attempted to be implemented using Lib Torch, the system was too unstable.
+ Problems were encountered with the Go bindings for the Lib Torch library, or the Lib Torch library was causing inconsistent behaviour between runs.
+ That, compounded with time limitations, made it impossible for a Lib Torch implementation to come to fruition.
+ Having a full Go implementation would make the system more maintainable and fast.
+
+
+ \subsubsection*{Image storage}
+
+ The image storage is all local; this does not currently affect how the remote runner works.
+ %TODO improve this
+ This is less problematic when the runner is on the same network as the main server, but it would be an issue if a user would like to provide their own runners,
+ as this would require a lot of bandwidth for the images to be transferred over the network every time the model needs to run.
+
+ A better solution for image storage would allow user provided runners to store images locally.
+ During the upload time, the API, instead of storing the images locally, would instruct the users' runner to store the images locally, therefore when the runner would need to perform any training tasks with local data instead of remote data.
+
+ This would also not require modification of the current system.
+ The system was designed and implemented to be expanded.
+ The dataset system was designed to be able to handle different kinds of storage methods in the future, such as remote storage and Object Buckets, like Amazon S3.
+
+ \subsubsection*{User Interface}
+
+ The user interface is simplistic; this helps new users use the program but limits what advanced users might want to do.
+ The user interface also might need an overhaul, as it is not visually appealing.
+ A future improvement for this project is definitely getting a professional graphical designer who can create a better-looking and recognisable application.
+
+ \subsection{Future Work}
+ This section will consider possible future work that can be built upon this project.
+
+ \subsubsection*{Image Processing Pipelines}
+ The current system does not allow for images of different sizes to be uploaded to the system, an interesting project would be to create a new subsystem that would allow the user to create image processing pipelines.
+
+ This new system would allow users to create a set of instructions that images would go through to be added to the system.
+ For example, automatically cropping, scaling, or padding the image.
+
+ A system like this would add versatility to the system and remove more work from the users of the service as they don't have to worry about handling the image processing on their side.
+
+ \subsubsection*{Different Kinds of Models}
+ The runner system could be used to train and manage different kinds of models, not just image classification models.
+
+ If the system was modified to have different kinds of models, it would allow the users to run different kinds of models.
+ Such as Natural Language Processing Models, or Multi Model Models.
+ This would increase the versatility of the service, and it would allow users to automate more tasks.
+
+
+ \subsection{Conclusion}
+
+ With the increase in automation recently, having a system that allows users to quickly build classification models for their tasks, would be incredibly useful.
+ This project provides exactly that, a simple-to-use system that allows the user to create models with ease.
+
+ The implemented system is able to accept images provided by the user, create and train a model, and then allow the user to classify images with that created model.
+ To achieve this, the developed software is large and complex.
+ Developing such large and complex systems comes with compromises.
+ In this case, the model generation, training, and classification systems, and the API systems, were prioritised over other systems such as file management systems.
+
+ While there are still improvements that can be made, and more features that can be added to the service to make it even better, such as image processing pipelines and different kinds of models,
+ the service is in a state where it could be deployed in a production environment and work.
+ Therefore, this project is successful.
diff --git a/report/sanr.tex b/report/sanr.tex
index f51b587..8112dab 100644
--- a/report/sanr.tex
+++ b/report/sanr.tex
@@ -41,7 +41,6 @@
As mentioned before, the service needs to be able to manage its compute resources.
This is required because, for example, if the system starts training a new model and that training uses all the GPU resources, it would impact the ability of the service to be able to evaluate images for other users.
As this example demonstrated, the system needs to keep track of the amount of GPU power available, so it can manage the actions it has to take accordingly.
-
Therefore, for optimal functionality, the service requires the management of various compute resources.
There should be a separation of the different kinds of compute power.
@@ -52,9 +51,9 @@
As a result, the service needs a system to distribute these compute tasks.
The tasks have to be distributed between the application that is running the API and the various other places where that compute can happen.
- An ideal system would distribute the tasks intelligently, to allow the maximization of resources.
+ An ideal system would distribute the tasks intelligently, to allow the maximisation of resources.
An example of this would be running image classification, on the same model, on the same place twice, this would allow the model to stay in memory and not need to be reloaded again from disk.
- These kinds of optimizations would help the system to be more efficient and less wasteful.
+ These kinds of optimisations would help the system to be more efficient and less wasteful.
Another way to reduce the load that the system goes through is to allow users to add their own compute power to the system.
That compute power would only use images and models that are owned by the user.
@@ -102,23 +101,23 @@
The application should also allow administrators of the service to control the resources that are available to the system, to see if there is any requirement to add more resources.
\subsection{API} \label{sec:anal-api}
- As a software as a service platform, most of the requests made to the service would be made via the API, not the user interface.
+ As a SaaS platform, most of the requests made to the service would be made via the API, not the user interface.
This is the case because the users that would need this service would set up the model using the web interface and then do the image classifications requests via the API.
While there are no hard requirements for the user interface, that is not the case for the API.
The API must be implemented as an HTTPS REST API, this is because the most of the APIs that currently exist online are HTTPS REST APIs \cite{json-api-usage-stats}.
If the service wants to be easy to use, it needs to be implemented in away such that it has the lowest barrier to entry.
Making the type of the API a requirement would guarantee that the application would be the most compatible with other systems that already exist.
-
- The API needs to be able to do all the tasks that the application can do.
-
- The API also requires authentication.
- This is needed to prevent users from:
+ The API would also need to be able to do all the tasks that the application can do.
+ This would give users who want to interact with the service exclusively via the API the ability to do so.
+ The API also requires authentication because without authentication it would allow users who might have malicious intent to:
\begin{itemize}
\item{Modifying systems settings}
\item{Accessing other users' data}
\end{itemize}
- The API must implement authentication methods to prevent those kinds of actions from happening.
+
+ Allowing such actions would be incredibly damaging for the system.
+ Therefore, the API must implement authentication methods to prevent those kinds of actions from happening.
\subsection{Data Management}
The service will store a large amount of user data.
@@ -141,16 +140,13 @@
The last kind of data that the service has to keep track of is model data.
Once the model is trained, it has to be saved on disk.
The service should implement a system that manages where the models are stored.
- This is similar to the image situation, where the model should be as close as possible to the compute resource that is going to utilize it, even if this requires copying the model.
-
- \subsection{Conclusion}
- This section shows that there are requirements that need to be met for the system to work as indented. These requirements range from usability requirements, implementation details, to system-level resource management requirements.
+ This is similar to the image situation, where the model should be as close as possible to the compute resource that is going to utilise it, even if this requires copying the model.
+ \subsection{Summary}
+ This section shows that there are requirements that need to be met for the system to work as intended. These requirements range from usability requirements and implementation details to system-level resource management requirements.
The most important requirement is for the system to be easy to use by the user.
- As if it's difficult to use, then the service already fails in one of its objectives.
-
+ This is because if the service is difficult to use, then it already fails in one of its objectives.
The other requirements are significant as well, as without them, the quality of the service would be very degraded.
- And even if the service was effortless to use, it is as bad as being difficult to use if it could not process the images quickly in a reasonable amount of time.
-
- The next section will describe a design that matches a subset of the requirements.
+ And even if the service was effortless to use, it is as bad as being difficult to use if it could not process the images quickly in a reasonable amount of time.
+ The next chapter will describe a design that matches a subset of the requirements.
\pagebreak
diff --git a/report/settings.tex b/report/settings.tex
index f648325..56cc554 100644
--- a/report/settings.tex
+++ b/report/settings.tex
@@ -5,6 +5,7 @@
\usepackage{float}
\usepackage{longtable}
\usepackage{multicol}
+\usepackage{subfig}
\usepackage{graphicx}
\usepackage{svg}
@@ -61,10 +62,54 @@
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt}
+
\newcommand*\NewPage{\newpage\null\thispagestyle{empty}\newpage}
+\newcommand*\mydate{\monthyeardate\today}
+
% numeric
\usepackage[bibstyle=ieee, citestyle=numeric, sorting=none,backend=biber]{biblatex}
\addbibresource{../main.bib}
\raggedbottom
+
+\makeatletter
+\renewcommand{\maketitle}{
+\begin{center}
+
+\pagestyle{my_empty}
+\phantom{.} %necessary to add space on top before the title
+\vspace{3cm}
+
+{\huge \bf \@title\par}
+\vspace{1cm}
+
+{by}
+
+\vspace{1cm}
+
+{\LARGE Andre Goncalves Henriques}\\
+{\large URN: 6644818}\\[1cm]
+
+{\normalsize A dissertation submitted in partial fulfilment of the}\\
+{\normalsize requirements for the award of}\\[1cm]
+
+{\Large BACHELOR OF SCIENCE IN COMPUTER SCIENCE}\\[1cm]
+
+{\normalsize\mydate}\\
+
+\begin{center}
+ \includegraphics[height=0.3\textheight]{uni_surrey}
+\end{center}
+
+{\normalsize Department of Computer Science}\\
+{\normalsize University of Surrey}\\
+{\normalsize Guildford GU2 7XH}\\[2cm]
+
+{\normalsize Supervised by: Dr. Rizwan Asghar}
+
+\end{center}
+}\makeatother
+
+
+
diff --git a/report/start.tex b/report/start.tex
index 033854b..78710f3 100644
--- a/report/start.tex
+++ b/report/start.tex
@@ -1,16 +1,5 @@
\pagenumbering{gobble}
-
\maketitle
-\pagestyle{my_empty}
-
-\begin{center}
- \includegraphics[height=0.5\textheight]{uni_surrey}
-\end{center}
-
-\begin{center}
- \monthyeardate\today
-\end{center}
-
\NewPage
\pagenumbering{arabic}
@@ -24,7 +13,12 @@
unpublished) using the referencing system set out in the programme handbook. I agree that the
University may submit my work to means of checking this, such as the plagiarism detection service
TurnitinĀ® UK. I confirm that I understand that assessed work that has been shown to have been
- plagiarised will be penalised.
+ plagiarised will be penalised.\\
+ \vspace*{\fill}
+ Andre Goncalves Henriques\\
+ \mydate\\
+ University of Surrey\\
+ Guildford GU2 7XH\\
\vspace*{\fill}
\end{center}
\NewPage
@@ -32,26 +26,33 @@
\begin{center}
\vspace*{\fill}
\section*{Acknowledgements}
- I would like to take this opportunity to thank my supervisor, Rizwan Asghar that helped me with this project from the start of the until the end.
- His help with the report was incredibly useful.
-
- I would like to thank my family and friends for their support and encouragement from the beginning.
+ I would like to take this opportunity to thank my supervisor, Rizwan Asghar, who helped me with this project from the start until the end.
+ His help with the report was incredibly useful.\\
+ I would like to thank my family and friends for their support and encouragement.\\
+ \vspace*{\fill}
+ Andre Goncalves Henriques\\
+ \mydate\\
+ University of Surrey\\
+ Guildford GU2 7XH\\
\vspace*{\fill}
\end{center}
\NewPage
-\begin{center}
- \vspace*{\fill}
- \section*{Abstract}
- Currently there is a log of man-hours used performing tasks that can be done by automated systems.
- If a user, without any knowledge of image classification, can create an image classification model with ease, it would allow those man-hours to be used for more productive.
+\section*{Abstract}
+ There are many automatable tasks that are currently being done manually.
+ If those tasks were done automatically, a lot of productivity could be gained by having computers perform them, freeing humans to perform tasks that only humans can do.
+ One such set of tasks is image classification.
+ Many image classification tasks are currently performed by humans when they could be performed by computers.\\
- This project aims to develop a classification platform where users can create image classification models with as few clicks as possible.
- The project will create multiple systems that allow: model creation, model raining, and model inference.
+ This project aims to develop an image classification platform where users can create image classification models with as few clicks as possible.
+ This allows users who do not have any knowledge about image classification to use the system.
+ This makes it possible for more of the manually classified tasks to be done by machines instead of humans, increasing the productivity of potential users.\\
- This report will guide the reader through the ideas and designs that were implemented.
- \vspace*{\fill}
-\end{center}
+ This dissertation evaluates the feasibility of such a system, existing similar systems, current techniques for image classification, and possible requirements, designs, and implementations of such a system.
+ The dissertation focuses mainly on the implemented software, and the implementation choices that were made to achieve this project.
+ The dissertation ends with a critical evaluation of the results of this project, to ensure that the goals set forth were achieved.
+
+
\NewPage
\tableofcontents