try adding diagrams
Some checks reported errors
continuous-integration/drone/push Build encountered an error
This commit is contained in:
parent 9c5e178e05
commit 8bfecc95d9
@@ -8,7 +8,14 @@ steps:
  commands:
  - bash linting.sh

- name: Build UPDS-1
- name: Build diagrams
  commands:
  - cd diagrams
  - ls | grep d2 | xargs -I{} d2 --layout=elk {}
  - mv *.svg '../images for report'
  - cd -

- name: Build Project synopsis
  commands:
  - cd projectsynopsis
  - pdflatex project-synopsis.tex
diagrams/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
*.svg
diagrams/expandable_models.d2 (new file, 133 lines)
@@ -0,0 +1,133 @@
indata: "Input data" {
  shape: cylinder
}

model_generation: Model Generation {

  hidden_layers_generator: Hidden Layers Model Generator

  hidden_layers_generator <-> generator: Request/Accept Model

  head_generator: Head Models Generator {

    _.head_generator -> model1: Create
    _.head_generator -> model2: Create
    _.head_generator -> modeln: Create

    model1
    model2
    modeln: "Model..."

    model1 <-> _.generator: Request/Accept Model
    model2 <-> _.generator: Request/Accept Model
    modeln <-> _.generator: Request/Accept Model

    model1 -> _.head_generator: Accept Model
    model2 -> _.head_generator: Accept Model
    modeln -> _.head_generator: Accept Model
  }

  generator: Generator {

    _.generator -> model_search: Start

    model_search: Model Search {
      database_search: Database search
      autoML: AutoML
    }

    model_search -> model_training: Propose model
    model_training -> model_search: Reject model and request new one

    model_training: Model Training {
      _.model_training -> node: Start Training
      node: Node
      _.model_training <- node: End Training
    }

    model_training -> _.generator: Accept Model
  }
}

node_manager: Node Manager {
  node1
  node2
  noden: "node..."

  node_manager -> node1: Manage
  node_manager -> node2: Manage
  node_manager -> noden: Manage
}

model_generation.generator.model_training.node <-> node_manager: Request/Gives node to train

model_database: Model database {
  shape: cylinder
}

model_runner: Model Runner {

  node: Node

  headless: Obtain Headless Model

  _.model_runner -> headless: Start

  headless <-> _.model_database: Request/Get Model
  headless <-> node: Run/Result
  headless -> model_search: Results

  model_search: Model Search {
  }

  model_search <-> _.model_database: Request Head Models

  head_model: Head Model

  model_search -> head_model: Obtain

  head_model <-> node: Run/Result

  head_model -> model_search: Unsatisfactory results, request new model
  head_model -> results_cache: Unsatisfactory results, save results

  results_cache: Results Cache

  combine: Combine

  head_model -> combine: Satisfactory Results/No more options

  combine <- results_cache: Cached Results

  combine -> _.model_runner: Results
}

model_runner.node <-> node_manager: Request/Gives node to run model

User.shape: Person

User -> indata: Uploads data
User -> model_generation: Requests Model
User -> model_database: Manages Models
User -> model_runner: Request image for classification
model_runner -> User: Give class of image

model_generation.generator <-> indata: Requests Data

model_generation -> model_generation.hidden_layers_generator: Start
model_generation.hidden_layers_generator -> model_generation.head_generator: Strip head and give base model to Head Generator

model_generation.head_generator -> model_database: Save hidden layers model and heads
diagrams/simple_models.d2 (new file, 69 lines)
@@ -0,0 +1,69 @@
indata: "Input data" {
  shape: cylinder
}

model_generation: Model Generation {
  _.generator -> model_search: Start

  model_search: Model Search {
    database_search: Database search
    autoML: AutoML
  }

  model_search -> model_training: Propose model
  model_training -> model_search: Reject model and request new one

  model_training: Model Training {
    _.model_training -> node: Start Training
    node: Node
    _.model_training <- node: End Training
  }

  model_training -> _.generator: Accept Model
}

node_manager: Node Manager {
  node1
  node2
  noden: "node..."

  node_manager -> node1: Manage
  node_manager -> node2: Manage
  node_manager -> noden: Manage
}

model_generation.generator.model_training.node <-> node_manager: Request/Gives node to train

model_database: Model database {
  shape: cylinder
}

model_runner: Model Runner {
  node: Node

  model: Model

  model <-> _.model_database: Request/Get Model
  model <-> node: Run/Result
  node -> _.model_runner: Results
}

model_runner.node <-> node_manager: Request/Gives node to run model

User.shape: Person

User -> indata: Uploads data
User -> model_generation: Requests Model
User -> model_database: Manages Models
User -> model_runner: Request image for classification
model_runner -> User: Give class of image

model_generation <-> indata: Requests Data
main.bib (4 changes)
@@ -1,13 +1,13 @@
@online{google-vision-api,
  author = {Google},
  title  = {Vision AI | Google Cloud},
  title  = {Vision {AI} | Google Cloud},
  year   = {2023},
  url    = {https://cloud.google.com/vision?hl=en}
}

@article{amazon-rekognition,
  author = {Amazon},
  title  = {Image Recognition Software - ML Image \& Video Analysis - Amazon Rekognition - AWS},
  title  = {Image Recognition Software - {ML} Image \& Video Analysis - Amazon Rekognition - AWS},
  year   = {2023},
  url    = {https://aws.amazon.com/rekognition/}
}
@@ -58,39 +58,38 @@
\section{Introduction}
% This section should contain an introduction to the problem, aims and objectives (0.5 page)
Currently, many classification tasks are done manually. These tasks could be done more effectively if there were tooling that allowed the easy creation of classification models without knowledge of data analysis or of how machine learning models are created.
The aim of this project is to create a classification service that has 0 requires zero user knowledge about machine learning, image classification or data analysis.
The aim of this project is to create a classification service that requires zero user knowledge about machine learning, image classification or data analysis.
The system should allow the user to create a reasonably accurate model that can satisfy the user's needs.
The system should also allow the user to create expandable models: models where classes can be added after the model has been created.

\subsection{Aims}
\subsection{Project Aim}
The project aims to create a platform where users can create different types of classification models without having any knowledge of image classification.

\subsection{Objectives}
\subsection{Project Objectives}
This project's primary objectives are to:
\begin{itemize}
\item Create a platform where users can create and manage their models.
\item Create a system to automatically create and train.
\item Create a system to automatically create and train models.
\item Create a system to automatically expand and reduce models without fully retraining them.
\item Create an API so that users can interact programmatically with the system.
\end{itemize}
This project's extended objectives are to:
\begin{itemize}
\item Create a system to automatically to merge modules to increase efficiency
\item Create a system to automatically merge modules to increase efficiency.
\item Create a system to distribute the load of training models among multiple services.
\end{itemize}

\section{Literature and Technical Review}
% 1 page of background and literature review. Here you will need to reference things. Gamal et al.~\cite{gamal} introduce the concept of \ldots

\subsection{Alternatives to my Project}
There currently exist systems that do image classification, like Google Vision AI\cite{google-vision-api}, and Amazon's Rekoginition\cite{amazon-rekognition}.
There currently exist systems that do image classification, like Google Vision AI \cite{google-vision-api} and Amazon's Rekognition \cite{amazon-rekognition}.
These tools, while providing services similar to what my project is intended to do, mostly focus on general image classification rather than specific image classification, i.e. Car vs Boat rather than Car model X vs Car model Y.

\subsection{Creation Models}
The models that I will be creating will be Convolutional Neural Network(CNN)\cite{lecun1989handwritten,fukushima1980neocognitron}.
The models that I will be creating will be Convolutional Neural Networks (CNNs) \cite{lecun1989handwritten,fukushima1980neocognitron}.
The system will create two types of models: models that cannot be expanded and models that can be expanded. For the models that can be expanded, see the section about expandable models.
The models that cannot be expanded will use a simple convolution blocks, with a similar structure as the AlexNet\cite{krizhevsky2012imagenet} ones, as the basis for the model. The size of the model will be controlled by the size of the input image, where bigger images will generate more deep and complex models.
The models will be created using TensorFlow\cite{tensorflow2015-whitepaper} and Keras\cite{chollet2015keras}. These theologies are chosen since they are both robust and used in industry.
The models that cannot be expanded will use simple convolution blocks, with a structure similar to the AlexNet \cite{krizhevsky2012imagenet} ones, as the basis for the model. The size of the model will be controlled by the size of the input image, where bigger images will generate deeper and more complex models.
The models will be created using TensorFlow \cite{tensorflow2015-whitepaper} and Keras \cite{chollet2015keras}. These technologies are chosen since they are both robust and used in industry.
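To illustrate the kind of non-expandable model described above, a minimal Keras sketch of an AlexNet-style block structure whose depth scales with the input size might look as follows; the function name, the scaling rule and the hyper-parameters are assumptions for illustration, not the project's actual code:

# Hypothetical sketch: a small AlexNet-style CNN whose depth grows with the input size.
import tensorflow as tf
from tensorflow.keras import layers

def build_simple_cnn(input_size: int, num_classes: int) -> tf.keras.Model:
    model = tf.keras.Sequential()
    model.add(tf.keras.Input(shape=(input_size, input_size, 3)))
    # Assumed scaling rule: larger input images get more convolution blocks (capped at 6).
    num_blocks = min(max(2, input_size // 64), 6)
    filters = 32
    for _ in range(num_blocks):
        model.add(layers.Conv2D(filters, 3, padding="same", activation="relu"))
        model.add(layers.MaxPooling2D())
        filters = min(filters * 2, 256)
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation="relu"))
    model.add(layers.Dense(num_classes, activation="softmax"))
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model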
\subsection{Expandable Models}
The most common approach for expanding a CNN model is to retrain the model. This is done by recreating an entirely new model that does the new task, using the older model as a base for the new model\cite{amazon-rekognition}, or by using a pretrained model as a base and training only the last few layers.
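For reference, that conventional approach (reusing an existing model as a frozen base and training only a new head) can be sketched in Keras roughly as follows; the function name and the assumption that the base model ends in a flat feature vector are illustrative, not part of the project:

# Hypothetical sketch of the conventional head-retraining approach.
import tensorflow as tf
from tensorflow.keras import layers

def retrain_head(base_model: tf.keras.Model, num_new_classes: int) -> tf.keras.Model:
    base_model.trainable = False  # freeze the existing hidden layers
    inputs = tf.keras.Input(shape=base_model.input_shape[1:])
    features = base_model(inputs, training=False)  # assumed to output a flat feature vector
    outputs = layers.Dense(num_new_classes, activation="softmax")(features)
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model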
@@ -99,18 +98,18 @@

\section{Technical overview}
% 1 page of overview. My approach is shown in Figure~\ref{fig:sample}. You can draw the diagram in powerpoint and save the picture
% technology-free overview
\subsection{Web Interface}
The user will interact with the platform form via a web portal.
The web platform will be designed using HTML and a JavaScript library called HTMX\cite{htmx} for the reactivity that the pagers requires.
The web server that will act as controller will be implemented using go\cite{go}, due to its ease of use.
The user will interact with the platform via a web portal. % why the web portal
The web platform will be designed using HTML and a JavaScript library called HTMX \cite{htmx} for the reactivity that the pages require.
The web server that will act as the controller will be implemented using Go \cite{go}, due to its ease of use.
Go was chosen as the programming language for the server due to its performance (see e.g. \cite{node-to-go}) and ease of implementation. As a compiled language, Go outperforms other server technologies such as Node.js.
Go also has easy support for the C ABI, which might be needed if there is a need to interact with other tools that are implemented using C.
The web server will also interact with python to create models. Then to run the models, it will use the libraries that are available to run TensorFlow\cite{tensorflow2015-whitepaper} models for that in go.
The web server will also interact with Python to create models. To run the models, it will use the Go libraries that are available for running TensorFlow \cite{tensorflow2015-whitepaper} models.

\subsection{Creating Models}
The models will be created using TensorFlow\cite{tensorflow2015-whitepaper}.
TensorFlow was chosen because, when using frameworks like Keras\cite{chollet2015keras}, it allows the easy development of machine learning models with little code. While tools like PyTorch might provide more advanced control options for the model, like dynamic graphs, it comes at the cost of more complex python code. Since that code is generated by the go code, the more python that needs to be written, the more complex the overall program gets, which is not desirable.
The models will be created using TensorFlow \cite{tensorflow2015-whitepaper}.
TensorFlow was chosen because, when using frameworks like Keras \cite{chollet2015keras}, it allows the easy development of machine learning models with little code. While tools like PyTorch might provide more advanced control over the model, such as dynamic graphs, this comes at the cost of more complex Python code. Since that code is generated by the Go code, the more Python that needs to be written, the more complex the overall program gets, which is not desirable.
The original plan was to use Go and TensorFlow directly, but the Go library lacks the ability to create models. Therefore, I chose to use Python to create the models.
The Go server starts a new process, running Python, that creates and trains the TensorFlow model. Once training is done, the model is saved to disk, where it can then be loaded by the Go TensorFlow library.
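As a sketch of what the Python side of that hand-off could look like, the server might spawn a script along these lines and later load the exported SavedModel; the script name, arguments, dataset layout and hyper-parameters here are assumptions, not the project's actual code:

# create_model.py (hypothetical): invoked by the Go server as a separate process.
# Assumed usage: python create_model.py <data_dir> <export_dir>
import sys
import tensorflow as tf

def main(data_dir: str, export_dir: str) -> None:
    # Assumed directory-per-class layout for the user's labelled images.
    train_ds = tf.keras.utils.image_dataset_from_directory(
        data_dir, image_size=(128, 128), batch_size=32)
    num_classes = len(train_ds.class_names)

    model = tf.keras.Sequential([
        tf.keras.Input(shape=(128, 128, 3)),
        tf.keras.layers.Rescaling(1.0 / 255),
        tf.keras.layers.Conv2D(32, 3, activation="relu"),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation="relu"),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(train_ds, epochs=5)

    # Save as a TensorFlow SavedModel so it can be loaded from another language
    # (model.save(export_dir) on Keras 2; model.export(export_dir) on Keras 3).
    model.save(export_dir)

if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])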
@@ -122,6 +121,7 @@

\section{Workplan}
\subsection{Timeline}
% bold the headers
% The work plan I will be using for the project is shown in Figure~\ref{fig:sample2}.
\begin{tabular}{ |m{0.5\textwidth}|m{0.5\textwidth}| }
\hline

@@ -56,6 +56,7 @@
\newpage

\section{Introduction}
\subsection{Motivation}

\newpage
\section{References}