more work on go-runner

commit 29846012e7
parent b1e4211e6a

go.mod (4 changes)
@@ -9,10 +9,11 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/lib/pq v1.10.9
 	golang.org/x/crypto v0.19.0
+	github.com/BurntSushi/toml v1.3.2
+	github.com/goccy/go-json v0.10.2
 )
 
 require (
-	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
 	github.com/charmbracelet/lipgloss v0.9.1 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
@@ -20,7 +21,6 @@ require (
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.19.0 // indirect
-	github.com/goccy/go-json v0.10.2 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
 	github.com/jackc/pgx v3.6.2+incompatible // indirect
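Promoting github.com/BurntSushi/toml and github.com/goccy/go-json from indirect to direct requirements suggests the Go side now parses the runner's TOML files itself. As a hedged sketch only: loading a config shaped like the runner's deleted config.toml/ConfigFile (the Config struct and Load function names are assumptions; the field names come from the files removed below):

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Config mirrors the fields of the deleted Rust ConfigFile further down
    // in this commit (hostname, token, config_path, data_path). The struct
    // and function names are illustrative, not code from this commit.
    type Config struct {
        Hostname   string  `toml:"hostname"`
        Token      string  `toml:"token"`
        ConfigPath *string `toml:"config_path"` // optional; the runner defaulted it to ./data.toml
        DataPath   string  `toml:"data_path"`
    }

    func Load(path string) (*Config, error) {
        var cfg Config
        if _, err := toml.DecodeFile(path, &cfg); err != nil {
            return nil, fmt.Errorf("decode %s: %w", path, err)
        }
        if cfg.ConfigPath == nil {
            def := "./data.toml"
            cfg.ConfigPath = &def
        }
        return &cfg, nil
    }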
@@ -87,9 +87,9 @@ func (d Definition) GetLayers(db db.Db, filter string, args ...any) (layer []*La
 	return GetDbMultitple[Layer](db, "model_definition_layer as mdl where mdl.def_id=$1 "+filter, args...)
 }
 
-func (d *Definition) UpdateAfterEpoch(db db.Db, accuracy float64) (err error) {
+func (d *Definition) UpdateAfterEpoch(db db.Db, accuracy float64, epoch int) (err error) {
 	d.Accuracy = accuracy
-	d.Epoch += 1
+	d.Epoch += epoch
 	_, err = db.Exec("update model_definition set epoch=$1, accuracy=$2 where id=$3", d.Epoch, d.Accuracy, d.Id)
 	return
 }
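UpdateAfterEpoch now advances the epoch counter by a caller-supplied delta instead of always by one, so a runner can batch several epochs into a single report. A hedged usage sketch with invented values:

    // Record 5 newly completed epochs at 82.5% accuracy in one round trip;
    // previously this required five UpdateAfterEpoch calls.
    if err := def.UpdateAfterEpoch(db, 82.5, 5); err != nil {
        return err
    }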
@@ -1,6 +1,8 @@
 package tasks
 
 import (
+	"os"
+	"path"
 	"sync"
 	"time"
 
@@ -383,4 +385,149 @@ func handleRemoteRunner(x *Handle) {
 			Training: training_points,
 		})
 	})
+
+	type RunnerTrainDefEpoch struct {
+		Id       string  `json:"id" validate:"required"`
+		TaskId   string  `json:"taskId" validate:"required"`
+		DefId    string  `json:"defId" validate:"required"`
+		Epoch    int     `json:"epoch" validate:"required"`
+		Accuracy float64 `json:"accuracy" validate:"required"`
+	}
+	PostAuthJson(x, "/tasks/runner/train/epoch", User_Normal, func(c *Context, dat *RunnerTrainDefEpoch) *Error {
+		_, error := verifyRunner(c, &JustId{Id: dat.Id})
+		if error != nil {
+			return error
+		}
+
+		task, error := verifyTask(x, c, &VerifyTask{
+			Id:     dat.Id,
+			TaskId: dat.TaskId,
+		})
+		if error != nil {
+			return error
+		}
+
+		if task.TaskType != int(TASK_TYPE_TRAINING) {
+			c.Logger.Error("Task is not the right type to get the definitions", "task type", task.TaskType)
+			return c.JsonBadRequest("Task is not the right type to get the definitions")
+		}
+
+		def, err := GetDefinition(c, dat.DefId)
+		if err != nil {
+			return c.E500M("Failed to get definition information", err)
+		}
+
+		err = def.UpdateAfterEpoch(c, dat.Accuracy, dat.Epoch)
+		if err != nil {
+			return c.E500M("Failed to update model", err)
+		}
+
+		return c.SendJSON("Ok")
+	})
+
+	PostAuthJson(x, "/task/runner/train/mark-failed", User_Normal, func(c *Context, dat *VerifyTask) *Error {
+		_, error := verifyRunner(c, &JustId{Id: dat.Id})
+		if error != nil {
+			return error
+		}
+
+		task, error := verifyTask(x, c, &VerifyTask{
+			Id:     dat.Id,
+			TaskId: dat.TaskId,
+		})
+		if error != nil {
+			return error
+		}
+
+		if task.TaskType != int(TASK_TYPE_TRAINING) {
+			c.Logger.Error("Task is not the right type to get the definitions", "task type", task.TaskType)
+			return c.JsonBadRequest("Task is not the right type to get the definitions")
+		}
+
+		_, err := c.Exec(
+			"update model_definition set status=$1 "+
+				"where model_id=$2 and status in ($3, $4)",
+			MODEL_DEFINITION_STATUS_CANCELD_TRAINING,
+			task.ModelId,
+			MODEL_DEFINITION_STATUS_TRAINING,
+			MODEL_DEFINITION_STATUS_PAUSED_TRAINING,
+		)
+		if err != nil {
+			return c.E500M("Failed to mark definition as failed", err)
+		}
+
+		return c.SendJSON("Ok")
+	})
+
+	PostAuthJson(x, "/task/runner/train/done", User_Normal, func(c *Context, dat *VerifyTask) *Error {
+		_, error := verifyRunner(c, &JustId{Id: dat.Id})
+		if error != nil {
+			return error
+		}
+
+		task, error := verifyTask(x, c, dat)
+		if error != nil {
+			return error
+		}
+
+		if task.TaskType != int(TASK_TYPE_TRAINING) {
+			c.Logger.Error("Task is not the right type to get the definitions", "task type", task.TaskType)
+			return c.JsonBadRequest("Task is not the right type to get the definitions")
+		}
+
+		model, err := GetBaseModel(c, *task.ModelId)
+		if err != nil {
+			c.Logger.Error("Failed to get model", "err", err)
+			return c.E500M("Failed to get model", err)
+		}
+
+		var def Definition
+		err = GetDBOnce(c, &def, "from model_definition as md where model_id=$1 and status=$2 order by accuracy desc limit 1;", task.ModelId, DEFINITION_STATUS_TRANIED)
+		if err == NotFoundError {
+			// TODO Make the Model status have a message
+			c.Logger.Error("All definitions failed to train!")
+			model.UpdateStatus(c, FAILED_TRAINING)
+			task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "All definitions failed to train!")
+			return c.SendJSON("Ok")
+		} else if err != nil {
+			model.UpdateStatus(c, FAILED_TRAINING)
+			task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "Failed to get model definition")
+			return c.E500M("Failed to get model definition", err)
+		}
+
+		if err = def.UpdateStatus(c, DEFINITION_STATUS_READY); err != nil {
+			model.UpdateStatus(c, FAILED_TRAINING)
+			task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "Failed to update model definition")
+			return c.E500M("Failed to update model definition", err)
+		}
+
+		to_delete, err := c.Query("select id from model_definition where status != $1 and model_id=$2", MODEL_DEFINITION_STATUS_READY, model.Id)
+		if err != nil {
+			model.UpdateStatus(c, FAILED_TRAINING)
+			task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "Failed to delete unused definitions")
+			return c.E500M("Failed to delete unused definitions", err)
+		}
+		defer to_delete.Close()
+
+		for to_delete.Next() {
+			var id string
+			if err = to_delete.Scan(&id); err != nil {
+				model.UpdateStatus(c, FAILED_TRAINING)
+				task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "Failed to delete unused definitions")
+				return c.E500M("Failed to delete unused definitions", err)
+			}
+			os.RemoveAll(path.Join("savedData", model.Id, "defs", id))
+		}
+
+		// TODO Check if returning also works here
+		if _, err = c.Exec("delete from model_definition where status!=$1 and model_id=$2;", MODEL_DEFINITION_STATUS_READY, model.Id); err != nil {
+			model.UpdateStatus(c, FAILED_TRAINING)
+			task.UpdateStatusLog(c, TASK_FAILED_RUNNING, "Failed to delete unused definitions")
+			return c.E500M("Failed to delete unused definitions", err)
+		}
+
+		model.UpdateStatus(c, READY)
+
+		return c.SendJSON("Ok")
+	})
 }
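The deleted Rust runner talks to these routes with token-authenticated JSON POSTs (see the reqwest calls in the removed files below). For illustration, a minimal sketch of how a Go port might report progress to the new /tasks/runner/train/epoch route; the Config type (from the sketch above), helper name and error handling are assumptions, while the route, "token" header and payload fields come from this commit:

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    // reportEpoch posts one progress update; the payload mirrors the
    // RunnerTrainDefEpoch struct accepted by the handler above.
    func reportEpoch(cfg *Config, runnerID, taskID, defID string, epoch int, accuracy float64) error {
        body, err := json.Marshal(map[string]any{
            "id":       runnerID,
            "taskId":   taskID,
            "defId":    defID,
            "epoch":    epoch,
            "accuracy": accuracy,
        })
        if err != nil {
            return err
        }
        req, err := http.NewRequest(http.MethodPost, cfg.Hostname+"/tasks/runner/train/epoch", bytes.NewReader(body))
        if err != nil {
            return err
        }
        req.Header.Set("token", cfg.Token)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf("epoch report failed: %s", resp.Status)
        }
        return nil
    }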
runner/.gitignore (vendored, 1 change)
@@ -1 +0,0 @@
-target/
runner/Cargo.lock (generated, 1936 changes)
File diff suppressed because it is too large.
runner/Cargo.toml
@@ -1,17 +0,0 @@
-[package]
-name = "runner"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-anyhow = "1.0.82"
-serde = { version = "1.0.200", features = ["derive"] }
-toml = "0.8.12"
-reqwest = { version = "0.12", features = ["json"] }
-tokio = { version = "1", features = ["full"] }
-serde_json = "1.0.116"
-serde_repr = "0.1"
-tch = { version = "0.16.0", features = ["download-libtorch"] }
-rand = "0.8.5"
runner/Dockerfile
@@ -1,12 +0,0 @@
-FROM docker.io/nvidia/cuda:11.7.1-devel-ubuntu22.04
-
-RUN apt-get update
-RUN apt-get install -y curl
-
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-ENV PATH="$PATH:/root/.cargo/bin"
-RUN rustup toolchain install stable
-
-RUN apt-get install -y pkg-config libssl-dev
-
-WORKDIR /app
runner/config.toml
@@ -1,3 +0,0 @@
-hostname = "https://testing.andr3h3nriqu3s.com/api"
-token = "d2bc41e8293937bcd9397870c98f97acc9603f742924b518e193cd1013e45d57897aa302b364001c72b458afcfb34239dfaf38a66b318e5cbc973eea"
-data_path = "/home/andr3/Documents/my-repos/fyp"
runner/data.toml
@@ -1 +0,0 @@
-id = "a7cec9e9-1d05-4633-8bc5-6faabe4fd5a3"
runner/run.sh
@@ -1,2 +0,0 @@
-#!/bin/bash
-podman run --rm --network host --gpus all -ti -v $(pwd):/app -e "TERM=xterm-256color" fyp-runner bash
runner/src/dataloader.rs
@@ -1,115 +0,0 @@
-use crate::{model::DataPoint, settings::ConfigFile};
-use std::{path::Path, sync::Arc};
-use tch::Tensor;
-
-pub struct DataLoader {
-    pub batch_size: i64,
-    pub len: usize,
-    pub inputs: Vec<Tensor>,
-    pub labels: Vec<Tensor>,
-    pub pos: usize,
-}
-
-fn import_image(
-    item: &DataPoint,
-    base_path: &Path,
-    classes_len: i64,
-    inputs: &mut Vec<Tensor>,
-    labels: &mut Vec<Tensor>,
-) {
-    inputs.push(
-        tch::vision::image::load(base_path.join(&item.path))
-            .ok()
-            .unwrap()
-            .unsqueeze(0),
-    );
-
-    if item.class >= 0 {
-        let t = tch::Tensor::from_slice(&[item.class]).onehot(classes_len as i64);
-        labels.push(t);
-    } else {
-        labels.push(tch::Tensor::zeros(
-            [1, classes_len as i64],
-            (tch::Kind::Float, tch::Device::Cpu),
-        ))
-    }
-}
-
-impl DataLoader {
-    pub fn new(
-        config: Arc<ConfigFile>,
-        data: Vec<DataPoint>,
-        classes_len: i64,
-        batch_size: i64,
-    ) -> DataLoader {
-        let len: f64 = (data.len() as f64) / (batch_size as f64);
-        let min_len: i64 = len.floor() as i64;
-        let max_len: i64 = len.ceil() as i64;
-
-        println!(
-            "Creating dataloader data len: {} len: {} min_len: {} max_len:{}",
-            data.len(),
-            len,
-            min_len,
-            max_len
-        );
-
-        let base_path = Path::new(&config.data_path);
-
-        let mut inputs: Vec<Tensor> = Vec::new();
-        let mut all_labels: Vec<Tensor> = Vec::new();
-
-        for batch in 0..min_len {
-            let mut batch_acc: Vec<Tensor> = Vec::new();
-            let mut labels: Vec<Tensor> = Vec::new();
-            for image in 0..batch_size {
-                let i: usize = (batch * batch_size + image).try_into().unwrap();
-                let item = &data[i];
-                import_image(item, base_path, classes_len, &mut batch_acc, &mut labels)
-            }
-            inputs.push(tch::Tensor::cat(&batch_acc[0..], 0));
-            all_labels.push(tch::Tensor::cat(&labels[0..], 0));
-        }
-
-        // Import the last batch that has irregular sizing
-        if min_len != max_len {
-            let mut batch_acc: Vec<Tensor> = Vec::new();
-            let mut labels: Vec<Tensor> = Vec::new();
-            for image in 0..(data.len() - (batch_size * min_len) as usize) {
-                let i: usize = (min_len * batch_size + (image as i64)) as usize;
-                let item = &data[i];
-                import_image(item, base_path, classes_len, &mut batch_acc, &mut labels);
-            }
-            inputs.push(tch::Tensor::cat(&batch_acc[0..], 0));
-            all_labels.push(tch::Tensor::cat(&labels[0..], 0));
-        }
-
-        println!("ins shape: {:?}", inputs[0].size());
-
-        return DataLoader {
-            batch_size,
-            inputs,
-            labels: all_labels,
-            len: max_len as usize,
-            pos: 0,
-        };
-    }
-
-    pub fn restart(self: &mut DataLoader) {
-        self.pos = 0;
-    }
-
-    pub fn next(self: &mut DataLoader) -> Option<(Tensor, Tensor)> {
-        if self.pos >= self.len {
-            return None;
-        }
-        let input = self.inputs[self.pos].empty_like();
-        self.inputs[self.pos] = self.inputs[self.pos].clone(&input);
-        let label = self.labels[self.pos].empty_like();
-        self.labels[self.pos] = self.labels[self.pos].clone(&label);
-
-        self.pos += 1;
-
-        return Some((input, label));
-    }
-}
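The deleted DataLoader eagerly materialises the dataset into ceil(len/batch_size) batch tensors: min_len full batches plus one irregular tail batch when the item count is not a multiple of batch_size. That index arithmetic is easy to get off by one when porting; a small Go sketch of just that bookkeeping (the function name is invented for illustration):

    // batchBounds returns the [start, end) index pairs the deleted Rust
    // DataLoader::new iterates over: full batches first, then one
    // irregular tail batch when dataLen is not a multiple of batchSize.
    func batchBounds(dataLen, batchSize int) [][2]int {
        bounds := make([][2]int, 0, (dataLen+batchSize-1)/batchSize)
        minLen := dataLen / batchSize // number of full batches (floor)
        for b := 0; b < minLen; b++ {
            bounds = append(bounds, [2]int{b * batchSize, (b + 1) * batchSize})
        }
        if dataLen%batchSize != 0 { // irregular last batch (ceil)
            bounds = append(bounds, [2]int{minLen * batchSize, dataLen})
        }
        return bounds
    }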
runner/src/main.rs
@@ -1,206 +0,0 @@
-mod dataloader;
-mod model;
-mod settings;
-mod tasks;
-mod training;
-mod types;
-
-use crate::settings::*;
-use crate::tasks::{fail_task, Task, TaskType};
-use crate::training::handle_train;
-use anyhow::{bail, Result};
-use reqwest::StatusCode;
-use serde_json::json;
-use std::{fs, process::exit, sync::Arc, time::Duration};
-
-enum ResultAlive {
-    Ok,
-    Error,
-    NotInit,
-}
-
-async fn send_keep_alive_message(
-    config: Arc<ConfigFile>,
-    runner_data: Arc<RunnerData>,
-) -> ResultAlive {
-    let client = reqwest::Client::new();
-
-    let to_send = json!({
-        "id": runner_data.id,
-    });
-
-    let resp = client
-        .post(format!("{}/tasks/runner/beat", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await;
-
-    if resp.is_err() {
-        return ResultAlive::Error;
-    }
-
-    let resp = resp.ok();
-
-    if resp.is_none() {
-        return ResultAlive::Error;
-    }
-
-    let resp = resp.unwrap();
-
-    // TODO see if the message is related to not being inited
-    if resp.status() != 200 {
-        println!("Could not connect with the status");
-        return ResultAlive::Error;
-    }
-
-    ResultAlive::Ok
-}
-
-async fn keep_alive(config: Arc<ConfigFile>, runner_data: Arc<RunnerData>) -> Result<()> {
-    let mut failed = 0;
-    loop {
-        match send_keep_alive_message(config.clone(), runner_data.clone()).await {
-            ResultAlive::Error => failed += 1,
-            ResultAlive::NotInit => {
-                println!("Runner not inited! Restarting!");
-                exit(1)
-            }
-            ResultAlive::Ok => failed = 0,
-        }
-
-        // TODO move to config
-        if failed > 20 {
-            println!("Failed to connect to API! More than 20 times in a row stoping");
-            exit(1)
-        }
-
-        tokio::time::sleep(Duration::from_secs(1)).await;
-    }
-}
-
-async fn handle_task(
-    task: Task,
-    config: Arc<ConfigFile>,
-    runner_data: Arc<RunnerData>,
-) -> Result<()> {
-    let res = match task.task_type {
-        TaskType::Training => handle_train(&task, config.clone(), runner_data.clone()).await,
-        _ => {
-            println!("Do not know how to handle this task #{:?}", task);
-            bail!("Failed")
-        }
-    };
-
-    if res.is_err() {
-        println!("task failed #{:?}", res);
-        fail_task(
-            &task,
-            config,
-            runner_data,
-            "Do not know how to handle this kind of task",
-        )
-        .await?
-    }
-
-    Ok(())
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    // Load config file
-    let config_data = fs::read_to_string("./config.toml")?;
-    let mut config: ConfigFile = toml::from_str(&config_data)?;
-
-    let client = reqwest::Client::new();
-    if config.config_path == None {
-        config.config_path = Some(String::from("./data.toml"))
-    }
-
-    let runner_data: RunnerData = load_runner_data(&config).await?;
-
-    let to_send = json!({
-        "id": runner_data.id,
-    });
-
-    // Inform the server that the runner is available
-    let resp = client
-        .post(format!("{}/tasks/runner/init", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await?;
-
-    if resp.status() != 200 {
-        println!(
-            "Could not connect with the api: status {} body {}",
-            resp.status(),
-            resp.text().await?
-        );
-        return Ok(());
-    }
-
-    let res = resp.json::<String>().await?;
-    if res != "Ok" {
-        print!("Unexpected problem: {}", res);
-        return Ok(());
-    }
-
-    let config = Arc::new(config);
-    let runner_data = Arc::new(runner_data);
-
-    let config_alive = config.clone();
-    let runner_data_alive = runner_data.clone();
-    std::thread::spawn(move || keep_alive(config_alive.clone(), runner_data_alive.clone()));
-
-    println!("Started main loop");
-    loop {
-        //TODO move time to config
-        tokio::time::sleep(Duration::from_secs(1)).await;
-
-        let to_send = json!({ "id": runner_data.id });
-
-        let resp = client
-            .post(format!("{}/tasks/runner/active", config.hostname))
-            .header("token", &config.token)
-            .body(to_send.to_string())
-            .send()
-            .await;
-
-        if resp.is_err() || resp.as_ref().ok().is_none() {
-            println!("Failed to get info from server {:?}", resp);
-            continue;
-        }
-
-        let resp = resp?;
-
-        match resp.status() {
-            // No active task
-            StatusCode::NOT_FOUND => (),
-            StatusCode::OK => {
-                println!("Found task!");
-
-                let task: Result<Task, reqwest::Error> = resp.json().await;
-                if task.is_err() || task.as_ref().ok().is_none() {
-                    println!("Failed to resolve the json {:?}", task);
-                    continue;
-                }
-
-                let task = task?;
-
-                let res = handle_task(task, config.clone(), runner_data.clone()).await;
-
-                if res.is_err() || res.as_ref().ok().is_none() {
-                    println!("Failed to run the task");
-                }
-
-                _ = res;
-                ()
-            }
-            _ => {
-                println!("Unexpected error #{:?}", resp);
-                exit(1)
-            }
-        }
-    }
-}
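For reference while reading the deletion above: the Rust main loop polls /tasks/runner/active once per second and branches on the HTTP status (404 = no active task, 200 = a task as JSON, anything else = fatal). A hedged Go sketch of the same control flow; the Task struct is trimmed to one field and handleTask is a placeholder, neither is code from this commit:

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "log"
        "net/http"
        "time"
    )

    // Task is trimmed to the one field this sketch needs; the deleted
    // Rust struct also carries status, type, result and more.
    type Task struct {
        Id string `json:"id"`
    }

    func pollLoop(cfg *Config, runnerID string, handleTask func(Task) error) error {
        for {
            time.Sleep(time.Second) // the deleted code has a TODO to move this interval to config

            body, _ := json.Marshal(map[string]any{"id": runnerID})
            req, err := http.NewRequest(http.MethodPost, cfg.Hostname+"/tasks/runner/active", bytes.NewReader(body))
            if err != nil {
                return err
            }
            req.Header.Set("token", cfg.Token)

            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                log.Println("failed to get info from server:", err)
                continue
            }

            switch resp.StatusCode {
            case http.StatusNotFound:
                // no active task
            case http.StatusOK:
                var task Task
                if err := json.NewDecoder(resp.Body).Decode(&task); err != nil {
                    log.Println("failed to resolve the json:", err)
                } else if err := handleTask(task); err != nil {
                    log.Println("failed to run the task:", err)
                }
            default:
                resp.Body.Close()
                return fmt.Errorf("unexpected response: %s", resp.Status)
            }
            resp.Body.Close()
        }
    }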
runner/src/model.rs
@@ -1,117 +0,0 @@
-use anyhow::bail;
-use serde::{Deserialize, Serialize};
-use serde_repr::{Deserialize_repr, Serialize_repr};
-use tch::{
-    nn::{self, Module},
-    Device,
-};
-
-#[derive(Debug)]
-pub struct Model {
-    pub vs: nn::VarStore,
-    pub seq: nn::Sequential,
-    pub layers: Vec<Layer>,
-}
-
-#[derive(Debug, Clone, Copy, Serialize_repr, Deserialize_repr)]
-#[repr(i8)]
-pub enum LayerType {
-    Input = 1,
-    Dense = 2,
-    Flatten = 3,
-    SimpleBlock = 4,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct Layer {
-    pub id: String,
-    pub definition_id: String,
-    pub layer_order: String,
-    pub layer_type: LayerType,
-    pub shape: String,
-    pub exp_type: String,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct DataPoint {
-    pub class: i64,
-    pub path: String,
-}
-
-pub fn build_model(layers: Vec<Layer>, last_linear_size: i64, add_sigmoid: bool) -> Model {
-    let vs = nn::VarStore::new(Device::Cuda(0));
-
-    let mut seq = nn::seq();
-
-    let mut last_linear_size = last_linear_size;
-    let mut last_linear_conv: Vec<i64> = Vec::new();
-
-    for layer in layers.iter() {
-        match layer.layer_type {
-            LayerType::Input => {
-                last_linear_conv = serde_json::from_str(&layer.shape).ok().unwrap();
-                println!("Layer: Input, In: {:?}", last_linear_conv);
-            }
-            LayerType::Dense => {
-                let shape: Vec<i64> = serde_json::from_str(&layer.shape).ok().unwrap();
-                println!("Layer: Dense, In: {}, Out: {}", last_linear_size, shape[0]);
-                seq = seq
-                    .add(nn::linear(
-                        &vs.root(),
-                        last_linear_size,
-                        shape[0],
-                        Default::default(),
-                    ))
-                    .add_fn(|xs| xs.relu());
-                last_linear_size = shape[0];
-            }
-            LayerType::Flatten => {
-                seq = seq.add_fn(|xs| xs.flatten(1, -1));
-                last_linear_size = 1;
-                for i in &last_linear_conv {
-                    last_linear_size *= i;
-                }
-                println!(
-                    "Layer: flatten, In: {:?}, Out: {}",
-                    last_linear_conv, last_linear_size
-                )
-            }
-            LayerType::SimpleBlock => {
-                let new_last_linear_conv =
-                    vec![128, last_linear_conv[1] / 2, last_linear_conv[2] / 2];
-                println!(
-                    "Layer: block, In: {:?}, Put: {:?}",
-                    last_linear_conv, new_last_linear_conv,
-                );
-                let out_size = vec![new_last_linear_conv[1], new_last_linear_conv[2]];
-                seq = seq
-                    .add(nn::conv2d(
-                        &vs.root(),
-                        last_linear_conv[0],
-                        128,
-                        3,
-                        nn::ConvConfig::default(),
-                    ))
-                    .add_fn(|xs| xs.relu())
-                    .add(nn::conv2d(
-                        &vs.root(),
-                        128,
-                        128,
-                        3,
-                        nn::ConvConfig::default(),
-                    ))
-                    .add_fn(|xs| xs.relu())
-                    .add_fn(move |xs| xs.adaptive_avg_pool2d([out_size[1], out_size[1]]))
-                    .add_fn(|xs| xs.leaky_relu());
-                //m_layers = append(m_layers, NewSimpleBlock(vs, lastLinearConv[0]))
-                last_linear_conv = new_last_linear_conv;
-            }
-        }
-    }
-
-    if add_sigmoid {
-        seq = seq.add_fn(|xs| xs.sigmoid());
-    }
-
-    return Model { vs, layers, seq };
-}
runner/src/settings.rs
@@ -1,57 +0,0 @@
-use anyhow::{bail, Result};
-use serde::{Deserialize, Serialize};
-use serde_json::json;
-use std::{fs, path};
-
-#[derive(Deserialize)]
-pub struct ConfigFile {
-    // Hostname to connect with the api
-    pub hostname: String,
-    // Token used in the api to authenticate
-    pub token: String,
-    // Path to where to store some generated configuration values
-    // defaults to ./data.toml
-    pub config_path: Option<String>,
-    // Data Path
-    // Path to where the data is mounted
-    pub data_path: String,
-}
-
-#[derive(Deserialize, Serialize)]
-pub struct RunnerData {
-    pub id: String,
-}
-
-pub async fn load_runner_data(config: &ConfigFile) -> Result<RunnerData> {
-    let data_path = config.config_path.as_ref().unwrap();
-    let data_path = path::Path::new(&*data_path);
-
-    if data_path.exists() {
-        let runner_data = fs::read_to_string(data_path)?;
-        Ok(toml::from_str(&runner_data)?)
-    } else {
-        let client = reqwest::Client::new();
-        let to_send = json!({
-            "token": config.token,
-            "type": 1,
-        });
-
-        let register_resp = client
-            .post(format!("{}/tasks/runner/register", config.hostname))
-            .header("token", &config.token)
-            .body(to_send.to_string())
-            .send()
-            .await?;
-
-        if register_resp.status() != 200 {
-            bail!(format!("Could not create runner {:#?}", register_resp));
-        }
-
-        let runner_data: RunnerData = register_resp.json().await?;
-
-        fs::write(data_path, toml::to_string(&runner_data)?)
-            .expect("Faield to save data for runner");
-
-        Ok(runner_data)
-    }
-}
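settings.rs is the piece a Go port most directly needs to reproduce: reuse the id persisted in data.toml if it exists, otherwise register against /tasks/runner/register and persist the response. A hedged sketch using the newly promoted BurntSushi/toml dependency; the RunnerData tags and function shape are assumptions, while the endpoint, payload and TOML persistence come from the deleted code:

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
        "os"

        "github.com/BurntSushi/toml"
    )

    // RunnerData mirrors the deleted Rust struct of the same name.
    type RunnerData struct {
        Id string `toml:"id" json:"id"`
    }

    func loadRunnerData(cfg *Config) (*RunnerData, error) {
        // Reuse a previously persisted id if data.toml already exists.
        if _, err := os.Stat(*cfg.ConfigPath); err == nil {
            var rd RunnerData
            if _, err := toml.DecodeFile(*cfg.ConfigPath, &rd); err != nil {
                return nil, err
            }
            return &rd, nil
        }

        // Otherwise register this runner; payload fields come from the
        // deleted load_runner_data.
        body, _ := json.Marshal(map[string]any{"token": cfg.Token, "type": 1})
        req, err := http.NewRequest(http.MethodPost, cfg.Hostname+"/tasks/runner/register", bytes.NewReader(body))
        if err != nil {
            return nil, err
        }
        req.Header.Set("token", cfg.Token)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return nil, fmt.Errorf("could not create runner: %s", resp.Status)
        }

        var rd RunnerData
        if err := json.NewDecoder(resp.Body).Decode(&rd); err != nil {
            return nil, err
        }

        // Persist the id for the next start, as the Rust code did.
        buf := new(bytes.Buffer)
        if err := toml.NewEncoder(buf).Encode(rd); err != nil {
            return nil, err
        }
        if err := os.WriteFile(*cfg.ConfigPath, buf.Bytes(), 0o644); err != nil {
            return nil, err
        }
        return &rd, nil
    }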
runner/src/tasks.rs
@@ -1,90 +0,0 @@
-use std::sync::Arc;
-
-use anyhow::{bail, Result};
-use serde::Deserialize;
-use serde_json::json;
-use serde_repr::Deserialize_repr;
-
-use crate::{ConfigFile, RunnerData};
-
-#[derive(Clone, Copy, Deserialize_repr, Debug)]
-#[repr(i8)]
-pub enum TaskStatus {
-    FailedRunning = -2,
-    FailedCreation = -1,
-    Preparing = 0,
-    Todo = 1,
-    PickedUp = 2,
-    Running = 3,
-    Done = 4,
-}
-
-#[derive(Clone, Copy, Deserialize_repr, Debug)]
-#[repr(i8)]
-pub enum TaskType {
-    Classification = 1,
-    Training = 2,
-    Retraining = 3,
-    DeleteUser = 4,
-}
-
-#[derive(Deserialize, Debug)]
-pub struct Task {
-    pub id: String,
-    pub user_id: String,
-    pub model_id: String,
-    pub status: TaskStatus,
-    pub status_message: String,
-    pub user_confirmed: i8,
-    pub compacted: i8,
-    #[serde(alias = "type")]
-    pub task_type: TaskType,
-    pub extra_task_info: String,
-    pub result: String,
-    pub created: String,
-}
-
-pub async fn fail_task(
-    task: &Task,
-    config: Arc<ConfigFile>,
-    runner_data: Arc<RunnerData>,
-    reason: &str,
-) -> Result<()> {
-    println!("Marking Task as failed");
-
-    let client = reqwest::Client::new();
-
-    let to_send = json!({
-        "id": runner_data.id,
-        "taskId": task.id,
-        "reason": reason
-    });
-
-    let resp = client
-        .post(format!("{}/tasks/runner/fail", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await?;
-
-    if resp.status() != 200 {
-        println!("Failed to update status of task");
-        bail!("Failed to update status of task");
-    }
-
-    Ok(())
-}
-
-impl Task {
-    pub async fn fail(
-        self: &mut Task,
-        config: Arc<ConfigFile>,
-        runner_data: Arc<RunnerData>,
-        reason: &str,
-    ) -> Result<()> {
-        fail_task(self, config, runner_data, reason).await?;
-        self.status = TaskStatus::FailedRunning;
-        self.status_message = reason.to_string();
-        Ok(())
-    }
-}
runner/src/training.rs
@@ -1,599 +0,0 @@
-use crate::{
-    dataloader::DataLoader,
-    model::{self, build_model},
-    settings::{ConfigFile, RunnerData},
-    tasks::{fail_task, Task},
-    types::{DataPointRequest, Definition, ModelClass},
-};
-use std::{
-    io::{self, Write},
-    sync::Arc,
-};
-
-use anyhow::Result;
-use rand::{seq::SliceRandom, thread_rng};
-use serde_json::json;
-use tch::{
-    nn::{self, Module, OptimizerConfig},
-    Cuda, Tensor,
-};
-
-pub async fn handle_train(
-    task: &Task,
-    config: Arc<ConfigFile>,
-    runner_data: Arc<RunnerData>,
-) -> Result<()> {
-    let client = reqwest::Client::new();
-    println!("Preparing to train a model");
-
-    let to_send = json!({
-        "id": runner_data.id,
-        "taskId": task.id,
-    });
-
-    let mut defs: Vec<Definition> = client
-        .post(format!("{}/tasks/runner/train/defs", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await?
-        .json()
-        .await?;
-
-    if defs.len() == 0 {
-        println!("No defs found");
-        fail_task(task, config, runner_data, "No definitions found").await?;
-        return Ok(());
-    }
-
-    let classes: Vec<ModelClass> = client
-        .post(format!("{}/tasks/runner/train/classes", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await?
-        .json()
-        .await?;
-
-    let data: DataPointRequest = client
-        .post(format!("{}/tasks/runner/train/datapoints", config.hostname))
-        .header("token", &config.token)
-        .body(to_send.to_string())
-        .send()
-        .await?
-        .json()
-        .await?;
-
-    let mut testing = data.testing;
-
-    testing.shuffle(&mut thread_rng());
-
-    let mut data_loader = DataLoader::new(config.clone(), testing, classes.len() as i64, 64);
-
-    // TODO make this a vec
-    let mut model: Option<model::Model> = None;
-
-    loop {
-        let config = config.clone();
-        let runner_data = runner_data.clone();
-        let mut to_remove: Vec<usize> = Vec::new();
-
-        let mut def_iter = defs.iter_mut();
-
-        let mut i: usize = 0;
-        while let Some(def) = def_iter.next() {
-            def.updateStatus(
-                task,
-                config.clone(),
-                runner_data.clone(),
-                crate::types::DefinitionStatus::Training,
-            )
-            .await?;
-
-            let model_err = train_definition(
-                def,
-                &mut data_loader,
-                model,
-                config.clone(),
-                runner_data.clone(),
-                &task,
-            )
-            .await;
-
-            if model_err.is_err() {
-                println!("Failed to create model {:?}", model_err);
-                model = None;
-                to_remove.push(i);
-                continue;
-            }
-
-            model = model_err?;
-
-            i += 1;
-        }
-
-        defs = defs
-            .into_iter()
-            .enumerate()
-            .filter(|&(i, _)| to_remove.iter().any(|b| *b == i))
-            .map(|(_, e)| e)
-            .collect();
-
-        break;
-    }
-
-    fail_task(task, config, runner_data, "TODO").await?;
-    Ok(())
-
-    /*
-    for {
-        // Keep track of definitions that did not train fast enough
-        var toRemove ToRemoveList = []int{}
-
-        for i, def := range definitions {
-
-            accuracy, ml_model, err := trainDefinition(c, model, def, models[def.Id], classes)
-            if err != nil {
-                log.Error("Failed to train definition!Err:", "err", err)
-                def.UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
-                toRemove = append(toRemove, i)
-                continue
-            }
-            models[def.Id] = ml_model
-
-            if accuracy >= float64(def.TargetAccuracy) {
-                log.Info("Found a definition that reaches target_accuracy!")
-                _, err = db.Exec("update model_definition set accuracy=$1, status=$2, epoch=$3 where id=$4", accuracy, DEFINITION_STATUS_TRANIED, def.Epoch, def.Id)
-                if err != nil {
-                    log.Error("Failed to train definition!Err:\n", "err", err)
-                    ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-                    return err
-                }
-
-                _, err = db.Exec("update model_definition set status=$1 where id!=$2 and model_id=$3 and status!=$4", DEFINITION_STATUS_CANCELD_TRAINING, def.Id, model.Id, DEFINITION_STATUS_FAILED_TRAINING)
-                if err != nil {
-                    log.Error("Failed to train definition!Err:\n", "err", err)
-                    ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-                    return err
-                }
-
-                finished = true
-                break
-            }
-
-            if def.Epoch > MAX_EPOCH {
-                fmt.Printf("Failed to train definition! Accuracy less %f < %d\n", accuracy, def.TargetAccuracy)
-                def.UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
-                toRemove = append(toRemove, i)
-                continue
-            }
-
-            _, err = db.Exec("update model_definition set accuracy=$1, epoch=$2, status=$3 where id=$4", accuracy, def.Epoch, DEFINITION_STATUS_PAUSED_TRAINING, def.Id)
-            if err != nil {
-                log.Error("Failed to train definition!Err:\n", "err", err)
-                ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-                return err
-            }
-        }
-
-        if finished {
-            break
-        }
-
-        sort.Sort(sort.Reverse(toRemove))
-
-        log.Info("Round done", "toRemove", toRemove)
-
-        for _, n := range toRemove {
-            // Clean up unsed models
-            models[definitions[n].Id] = nil
-            definitions = remove(definitions, n)
-        }
-
-        len_def := len(definitions)
-
-        if len_def == 0 {
-            break
-        }
-
-        if len_def == 1 {
-            continue
-        }
-
-        sort.Sort(sort.Reverse(definitions))
-
-        acc := definitions[0].Accuracy - 20.0
-
-        log.Info("Training models, Highest acc", "acc", definitions[0].Accuracy, "mod_acc", acc)
-
-        toRemove = []int{}
-        for i, def := range definitions {
-            if def.Accuracy < acc {
-                toRemove = append(toRemove, i)
-            }
-        }
-
-        log.Info("Removing due to accuracy", "toRemove", toRemove)
-
-        sort.Sort(sort.Reverse(toRemove))
-        for _, n := range toRemove {
-            log.Warn("Removing definition not fast enough learning", "n", n)
-            definitions[n].UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
-            models[definitions[n].Id] = nil
-            definitions = remove(definitions, n)
-        }
-    }
-
-    var def Definition
-    err = GetDBOnce(c, &def, "model_definition as md where md.model_id=$1 and md.status=$2 order by md.accuracy desc limit 1;", model.Id, DEFINITION_STATUS_TRANIED)
-    if err != nil {
-        if err == NotFoundError {
-            log.Error("All definitions failed to train!")
-        } else {
-            log.Error("DB: failed to read definition", "err", err)
-        }
-        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-        return
-    }
-
-    if err = def.UpdateStatus(c, DEFINITION_STATUS_READY); err != nil {
-        log.Error("Failed to update model definition", "err", err)
-        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-        return
-    }
-
-    to_delete, err := db.Query("select id from model_definition where status != $1 and model_id=$2", DEFINITION_STATUS_READY, model.Id)
-    if err != nil {
-        log.Error("Failed to select model_definition to delete")
-        log.Error(err)
-        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-        return
-    }
-    defer to_delete.Close()
-
-    for to_delete.Next() {
-        var id string
-        if err = to_delete.Scan(&id); err != nil {
-            log.Error("Failed to scan the id of a model_definition to delete", "err", err)
-            ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-            return
-        }
-        os.RemoveAll(path.Join("savedData", model.Id, "defs", id))
-    }
-
-    // TODO Check if returning also works here
-    if _, err = db.Exec("delete from model_definition where status!=$1 and model_id=$2;", DEFINITION_STATUS_READY, model.Id); err != nil {
-        log.Error("Failed to delete model_definition")
-        log.Error(err)
-        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
-        return
-    }
-
-    ModelUpdateStatus(c, model.Id, READY)
-
-    return
-    */
-}
-
-async fn train_definition(
-    def: &Definition,
-    data_loader: &mut DataLoader,
-    model: Option<model::Model>,
-    config: Arc<ConfigFile>,
-    runner_data: Arc<RunnerData>,
-    task: &Task,
-) -> Result<Option<model::Model>> {
-    let client = reqwest::Client::new();
-    println!("About to start training definition");
-
-    let mut accuracy = 0;
-
-    let model = model.unwrap_or({
-        let layers: Vec<model::Layer> = client
-            .post(format!("{}/tasks/runner/train/def/layers", config.hostname))
-            .header("token", &config.token)
-            .body(
-                json!({
-                    "id": runner_data.id,
-                    "taskId": task.id,
-                    "defId": def.id,
-                })
-                .to_string(),
-            )
-            .send()
-            .await?
-            .json()
-            .await?;
-
-        build_model(layers, 0, true)
-    });
-
-    // TODO CUDA
-    // get device
-    // Move model to cuda
-
-    let mut opt = nn::Adam::default().build(&model.vs, 1e-3)?;
-
-    let mut last_acc = 0.0;
-
-    for epoch in 1..40 {
-        data_loader.restart();
-        let mut mean_loss: f64 = 0.0;
-        let mut mean_acc: f64 = 0.0;
-        while let Some((inputs, labels)) = data_loader.next() {
-            let inputs = inputs
-                .to_kind(tch::Kind::Float)
-                .to_device(tch::Device::Cuda(0));
-            let labels = labels
-                .to_kind(tch::Kind::Float)
-                .to_device(tch::Device::Cuda(0));
-            let out = model.seq.forward(&inputs);
-            let weight: Option<Tensor> = None;
-            let loss = out.binary_cross_entropy(&labels, weight, tch::Reduction::Mean);
-            opt.backward_step(&loss);
-            mean_loss += loss
-                .to_device(tch::Device::Cpu)
-                .unsqueeze(0)
-                .double_value(&[0]);
-
-            let out = out.to_device(tch::Device::Cpu);
-
-            let test = out.empty_like();
-            _ = out.clone(&test);
-
-            let out = test.argmax(1, true);
-
-            let mut labels = labels.to_device(tch::Device::Cpu);
-
-            labels = labels.unsqueeze(-1);
-
-            let size = out.size()[0];
-
-            let mut acc = 0;
-            for i in 0..size {
-                let res = out.double_value(&[i]);
-                let exp = labels.double_value(&[i, res as i64]);
-                if exp == 1.0 {
-                    acc += 1;
-                }
-            }
-
-            mean_acc += acc as f64 / size as f64;
-            last_acc = acc as f64 / size as f64;
-        }
-        print!(
-            "\repoch: {} loss: {} acc: {} l acc: {} ",
-            epoch,
-            mean_loss / data_loader.len as f64,
-            mean_acc / data_loader.len as f64,
-            last_acc
-        );
-        io::stdout().flush().expect("Unable to flush stdout");
-    }
-
-    println!("\nlast acc: {}", last_acc);
-
-    return Ok(Some(model));
-    /*
-
-    opt, err := my_nn.DefaultAdamConfig().Build(model.Vs, 0.001)
-    if err != nil {
-        return
-    }
-
-    for epoch := 0; epoch < EPOCH_PER_RUN; epoch++ {
-        var trainIter *torch.Iter2
-        trainIter, err = ds.TrainIter(32)
-        if err != nil {
-            return
-        }
-        // trainIter.ToDevice(device)
-
-        log.Info("epoch", "epoch", epoch)
-
-        var trainLoss float64 = 0
-        var trainCorrect float64 = 0
-        ok := true
-        for ok {
-            var item torch.Iter2Item
-            var loss *torch.Tensor
-            item, ok = trainIter.Next()
-            if !ok {
-                continue
-            }
-
-            data := item.Data
-            data, err = data.ToDevice(device, gotch.Float, false, true, false)
-            if err != nil {
-                return
-            }
-
-            var size []int64
-            size, err = data.Size()
-            if err != nil {
-                return
-            }
-
-            var zeros *torch.Tensor
-            zeros, err = torch.Zeros(size, gotch.Float, device)
-            if err != nil {
-                return
-            }
-
-            data, err = zeros.Add(data, true)
-            if err != nil {
-                return
-            }
-
-            log.Info("\n\nhere 1, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())
-
-            data, err = data.SetRequiresGrad(true, false)
-            if err != nil {
-                return
-            }
-
-            log.Info("\n\nhere 2, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())
-
-            err = data.RetainGrad(false)
-            if err != nil {
-                return
-            }
-
-            log.Info("\n\nhere 2, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())
-
-            pred := model.ForwardT(data, true)
-            pred, err = pred.SetRequiresGrad(true, true)
-            if err != nil {
-                return
-            }
-
-            err = pred.RetainGrad(false)
-            if err != nil {
-                return
-            }
-
-            label := item.Label
-            label, err = label.ToDevice(device, gotch.Float, false, true, false)
-            if err != nil {
-                return
-            }
-            label, err = label.SetRequiresGrad(true, true)
-            if err != nil {
-                return
-            }
-            err = label.RetainGrad(false)
-            if err != nil {
-                return
-            }
-
-            // Calculate loss
-            loss, err = pred.BinaryCrossEntropyWithLogits(label, &torch.Tensor{}, &torch.Tensor{}, 2, false)
-            if err != nil {
-                return
-            }
-            loss, err = loss.SetRequiresGrad(true, false)
-            if err != nil {
-                return
-            }
-            err = loss.RetainGrad(false)
-            if err != nil {
-                return
-            }
-
-            err = opt.ZeroGrad()
-            if err != nil {
-                return
-            }
-
-            err = loss.Backward()
-            if err != nil {
-                return
-            }
-
-            log.Info("pred grad", "pred", pred.MustGrad(false).MustMax(false).Float64Values())
-            log.Info("pred grad", "outs", label.MustGrad(false).MustMax(false).Float64Values())
-            log.Info("pred grad", "data", data.MustGrad(false).MustMax(false).Float64Values(), "lol", data.MustRetainsGrad(false))
-
-            vars := model.Vs.Variables()
-
-            for k, v := range vars {
-                log.Info("[grad check]", "k", k, "grad", v.MustGrad(false).MustMax(false).Float64Values(), "lol", v.MustRetainsGrad(false))
-            }
-
-            model.Debug()
-
-            err = opt.Step()
-            if err != nil {
-                return
-            }
-
-            trainLoss = loss.Float64Values()[0]
-
-            // Calculate accuracy
-            / *var p_pred, p_labels *torch.Tensor
-            p_pred, err = pred.Argmax([]int64{1}, true, false)
-            if err != nil {
-                return
-            }
-
-            p_labels, err = item.Label.Argmax([]int64{1}, true, false)
-            if err != nil {
-                return
-            }
-
-            floats := p_pred.Float64Values()
-            floats_labels := p_labels.Float64Values()
-
-            for i := range floats {
-                if floats[i] == floats_labels[i] {
-                    trainCorrect += 1
-                }
-            } * /
-
-            panic("fornow")
-        }
-
-        //v := []float64{}
-
-        log.Info("model training epoch done loss", "loss", trainLoss, "correct", trainCorrect, "out", ds.TrainImagesSize, "accuracy", trainCorrect/float64(ds.TrainImagesSize))
-
-        / *correct := int64(0)
-        //torch.NoGrad(func() {
-        ok = true
-        testIter := ds.TestIter(64)
-        for ok {
-            var item torch.Iter2Item
-            item, ok = testIter.Next()
-            if !ok {
-                continue
-            }
-
-            output := model.Forward(item.Data)
-
-            var pred, labels *torch.Tensor
-            pred, err = output.Argmax([]int64{1}, true, false)
-            if err != nil {
-                return
-            }
-
-            labels, err = item.Label.Argmax([]int64{1}, true, false)
-            if err != nil {
-                return
-            }
-
-            floats := pred.Float64Values()
-            floats_labels := labels.Float64Values()
-
-            for i := range floats {
-                if floats[i] == floats_labels[i] {
-                    correct += 1
-                }
-            }
-        }
-
-        accuracy = float64(correct) / float64(ds.TestImagesSize)
-
-        log.Info("Eval accuracy", "accuracy", accuracy)
-
-        err = def.UpdateAfterEpoch(db, accuracy*100)
-        if err != nil {
-            return
-        }* /
-        //})
-    }
-
-    result_path := path.Join(getDir(), "savedData", m.Id, "defs", def.Id)
-    err = os.MkdirAll(result_path, os.ModePerm)
-    if err != nil {
-        return
-    }
-
-    err = my_torch.SaveModel(model, path.Join(result_path, "model.dat"))
-    if err != nil {
-        return
-    }
-
-    log.Info("Model finished training!", "accuracy", accuracy)
-    return
-    */
-}
runner/src/types.rs
@@ -1,89 +0,0 @@
-use crate::{model, tasks::Task, ConfigFile, RunnerData};
-use anyhow::{bail, Result};
-use serde::Deserialize;
-use serde_json::json;
-use serde_repr::{Deserialize_repr, Serialize_repr};
-use std::sync::Arc;
-
-#[derive(Clone, Copy, Deserialize_repr, Serialize_repr, Debug)]
-#[repr(i8)]
-pub enum DefinitionStatus {
-    CanceldTraining = -4,
-    FailedTraining = -3,
-    PreInit = 1,
-    Init = 2,
-    Training = 3,
-    PausedTraining = 6,
-    Tranied = 4,
-    Ready = 5,
-}
-
-#[derive(Deserialize, Debug)]
-pub struct Definition {
-    pub id: String,
-    pub model_id: String,
-    pub accuracy: f64,
-    pub target_accuracy: i64,
-    pub epoch: i64,
-    pub status: i64,
-    pub created: String,
-    pub epoch_progress: i64,
-}
-
-impl Definition {
-    pub async fn updateStatus(
-        self: &mut Definition,
-        task: &Task,
-        config: Arc<ConfigFile>,
-        runner_data: Arc<RunnerData>,
-        status: DefinitionStatus,
-    ) -> Result<()> {
-        println!("Marking Task as faield");
-
-        let client = reqwest::Client::new();
-
-        let to_send = json!({
-            "id": runner_data.id,
-            "taskId": task.id,
-            "defId": self.id,
-            "status": status,
-        });
-
-        let resp = client
-            .post(format!("{}/tasks/runner/train/def/status", config.hostname))
-            .header("token", &config.token)
-            .body(to_send.to_string())
-            .send()
-            .await?;
-
-        if resp.status() != 200 {
-            println!("Failed to update status of task");
-            bail!("Failed to update status of task");
-        }
-
-        Ok(())
-    }
-}
-
-#[derive(Clone, Copy, Deserialize_repr, Debug)]
-#[repr(i8)]
-pub enum ModelClassStatus {
-    ToTrain = 1,
-    Training = 2,
-    Trained = 3,
-}
-
-#[derive(Deserialize, Debug)]
-pub struct ModelClass {
-    pub id: String,
-    pub model_id: String,
-    pub name: String,
-    pub class_order: i64,
-    pub status: ModelClassStatus,
-}
-
-#[derive(Deserialize, Debug)]
-pub struct DataPointRequest {
-    pub testing: Vec<model::DataPoint>,
-    pub training: Vec<model::DataPoint>,
-}