Compare commits

2 commits:

- b1e4211e6a
- e22df8adc9
DockerfileServer (new file, +37 lines)

```Dockerfile
FROM docker.io/nvidia/cuda:12.3.2-devel-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

# Sometimes the package lists are incomplete on the first fetch, so update twice
RUN apt-get update
RUN apt-get update

RUN apt-get install -y wget unzip python3-pip vim python3 curl

RUN wget https://go.dev/dl/go1.22.2.linux-amd64.tar.gz
RUN tar -xvf go1.22.2.linux-amd64.tar.gz -C /usr/local
ENV PATH=$PATH:/usr/local/go/bin
ENV GOPATH=/go

RUN bash -c 'curl -L "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-2.9.1.tar.gz" | tar -C /usr/local -xz'
# RUN bash -c 'curl -L "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-2.13.1.tar.gz" | tar -C /usr/local -xz'
RUN ldconfig

RUN ln -s /usr/bin/python3 /usr/bin/python
RUN python -m pip install nvidia-pyindex
ADD requirements.txt .
RUN python -m pip install -r requirements.txt

ENV CUDNN_PATH=/usr/local/lib/python3.10/dist-packages/nvidia/cudnn
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib/python3.10/dist-packages/nvidia/cudnn/lib

WORKDIR /app

ADD go.mod .
ADD go.sum .
ADD main.go .
ADD logic logic

RUN go install || true

CMD ["go", "run", "."]
```
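For context: the image compiles nothing ahead of time; `go install || true` merely warms the module cache and tolerates failure, and the entrypoint is `go run .`, so the server is rebuilt on container start. Building it presumably looks like `docker build -f DockerfileServer -t fyp-server .` from the repository root (the tag is an assumption; only the file name is fixed by this commit).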
```diff
@@ -6,3 +6,19 @@ const (
 	DATA_POINT_MODE_TRAINING DATA_POINT_MODE = 1
 	DATA_POINT_MODE_TESTING                  = 2
 )
+
+type ModelClassStatus int
+
+const (
+	CLASS_STATUS_TO_TRAIN ModelClassStatus = iota + 1
+	CLASS_STATUS_TRAINING
+	CLASS_STATUS_TRAINED
+)
+
+type ModelClass struct {
+	Id         string `db:"mc.id" json:"id"`
+	ModelId    string `db:"mc.model_id" json:"model_id"`
+	Name       string `db:"mc.name" json:"name"`
+	ClassOrder int    `db:"mc.class_order" json:"class_order"`
+	Status     int    `db:"mc.status" json:"status"`
+}
```
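For reference, `iota + 1` makes these statuses consecutive integers starting at 1, matching the numeric values of the `MODEL_CLASS_STATUS_*` constants removed further down, so existing rows keep their meaning. A minimal sketch:

```go
package main

import "fmt"

type ModelClassStatus int

const (
	CLASS_STATUS_TO_TRAIN ModelClassStatus = iota + 1 // 1
	CLASS_STATUS_TRAINING                             // 2
	CLASS_STATUS_TRAINED                              // 3
)

func main() {
	fmt.Println(CLASS_STATUS_TO_TRAIN, CLASS_STATUS_TRAINING, CLASS_STATUS_TRAINED) // 1 2 3
}
```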
logic/db_types/definitions.go (new file, +95 lines)

```go
package dbtypes

import (
	"time"

	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
)

type DefinitionStatus int

const (
	DEFINITION_STATUS_CANCELD_TRAINING DefinitionStatus = -4
	DEFINITION_STATUS_FAILED_TRAINING                   = -3
	DEFINITION_STATUS_PRE_INIT                          = 1
	DEFINITION_STATUS_INIT                              = 2
	DEFINITION_STATUS_TRAINING                          = 3
	DEFINITION_STATUS_PAUSED_TRAINING                   = 6
	DEFINITION_STATUS_TRANIED                           = 4
	DEFINITION_STATUS_READY                             = 5
)

type Definition struct {
	Id             string    `db:"md.id" json:"id"`
	ModelId        string    `db:"md.model_id" json:"model_id"`
	Accuracy       float64   `db:"md.accuracy" json:"accuracy"`
	TargetAccuracy int       `db:"md.target_accuracy" json:"target_accuracy"`
	Epoch          int       `db:"md.epoch" json:"epoch"`
	Status         int       `db:"md.status" json:"status"`
	CreatedOn      time.Time `db:"md.created_on" json:"created"`
	EpochProgress  int       `db:"md.epoch_progress" json:"epoch_progress"`
}

type SortByAccuracyDefinitions []*Definition

func (nf SortByAccuracyDefinitions) Len() int      { return len(nf) }
func (nf SortByAccuracyDefinitions) Swap(i, j int) { nf[i], nf[j] = nf[j], nf[i] }
func (nf SortByAccuracyDefinitions) Less(i, j int) bool {
	return nf[i].Accuracy < nf[j].Accuracy
}

func GetDefinition(db db.Db, definition_id string) (definition Definition, err error) {
	err = GetDBOnce(db, &definition, "model_definition as md where id=$1;", definition_id)
	return
}

func MakeDefenition(db db.Db, model_id string, target_accuracy int) (definition Definition, err error) {
	var NewDefinition = struct {
		ModelId        string `db:"model_id"`
		TargetAccuracy int    `db:"target_accuracy"`
	}{ModelId: model_id, TargetAccuracy: target_accuracy}

	id, err := InsertReturnId(db, &NewDefinition, "model_definition", "id")
	if err != nil {
		return
	}
	return GetDefinition(db, id)
}

func (d Definition) UpdateStatus(db db.Db, status DefinitionStatus) (err error) {
	_, err = db.Exec("update model_definition set status=$1 where id=$2", status, d.Id)
	return
}

func (d Definition) MakeLayer(db db.Db, layer_order int, layer_type LayerType, shape string) (layer Layer, err error) {
	var NewLayer = struct {
		DefinitionId string    `db:"def_id"`
		LayerOrder   int       `db:"layer_order"`
		LayerType    LayerType `db:"layer_type"`
		Shape        string    `db:"shape"`
	}{
		DefinitionId: d.Id,
		LayerOrder:   layer_order,
		LayerType:    layer_type,
		Shape:        shape,
	}

	id, err := InsertReturnId(db, &NewLayer, "model_definition_layer", "id")
	if err != nil {
		return
	}

	return GetLayer(db, id)
}

func (d Definition) GetLayers(db db.Db, filter string, args ...any) (layer []*Layer, err error) {
	args = append(args, d.Id)
	return GetDbMultitple[Layer](db, "model_definition_layer as mdl where mdl.def_id=$1 "+filter, args...)
}

func (d *Definition) UpdateAfterEpoch(db db.Db, accuracy float64) (err error) {
	d.Accuracy = accuracy
	d.Epoch += 1
	_, err = db.Exec("update model_definition set epoch=$1, accuracy=$2 where id=$3", d.Epoch, d.Accuracy, d.Id)
	return
}
```
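A minimal usage sketch of this API, assuming a `db.Db` handle and a `BaseModel` are in scope (the target accuracy of 95 and the helper name are illustrative); it mirrors the calls `generateDefinition` makes further down:

```go
// Sketch: create a definition with an input and a flatten layer,
// then mark it as initialised so a runner can pick it up.
func makeFlatDefinition(conn db.Db, model BaseModel) error {
	def, err := MakeDefenition(conn, model.Id, 95)
	if err != nil {
		return err
	}
	if _, err := def.MakeLayer(conn, 1, LAYER_INPUT, ShapeToString(3, model.Width, model.Height)); err != nil {
		return err
	}
	if _, err := def.MakeLayer(conn, 2, LAYER_FLATTEN, ""); err != nil {
		return err
	}
	return def.UpdateStatus(conn, DEFINITION_STATUS_INIT)
}
```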
logic/db_types/layer.go (new file, +50 lines)

```go
package dbtypes

import (
	"encoding/json"

	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
)

type LayerType int

const (
	LAYER_INPUT        LayerType = 1
	LAYER_DENSE                  = 2
	LAYER_FLATTEN                = 3
	LAYER_SIMPLE_BLOCK           = 4
)

type Layer struct {
	Id           string    `db:"mdl.id" json:"id"`
	DefinitionId string    `db:"mdl.def_id" json:"definition_id"`
	LayerOrder   string    `db:"mdl.layer_order" json:"layer_order"`
	LayerType    LayerType `db:"mdl.layer_type" json:"layer_type"`
	Shape        string    `db:"mdl.shape" json:"shape"`
	ExpType      string    `db:"mdl.exp_type" json:"exp_type"`
}

func ShapeToString(args ...int) string {
	text, err := json.Marshal(args)
	if err != nil {
		panic("Could not generate Shape")
	}
	return string(text)
}

func StringToShape(str string) (shape []int64) {
	err := json.Unmarshal([]byte(str), &shape)
	if err != nil {
		panic("Could not parse Shape")
	}
	return
}

func (l Layer) GetShape() []int64 {
	return StringToShape(l.Shape)
}

func GetLayer(db db.Db, layer_id string) (layer Layer, err error) {
	err = GetDBOnce(db, &layer, "model_definition_layer as mdl where mdl.id=$1", layer_id)
	return
}
```
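The shape helpers are a plain JSON round trip; note the slight asymmetry that marshalling takes `...int` while parsing returns `[]int64`. A quick sketch:

```go
s := ShapeToString(3, 28, 28) // "[3,28,28]"
shape := StringToShape(s)     // []int64{3, 28, 28}
```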
```diff
@@ -2,23 +2,26 @@ package dbtypes

 import (
 	"errors"
+	"fmt"
+	"path"

 	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
 )

-const (
-	FAILED_TRAINING           = -4
-	FAILED_PREPARING_TRAINING = -3
-	FAILED_PREPARING_ZIP_FILE = -2
-	FAILED_PREPARING          = -1
+type ModelStatus int

-	PREPARING               = 1
-	CONFIRM_PRE_TRAINING    = 2
-	PREPARING_ZIP_FILE      = 3
-	TRAINING                = 4
-	READY                   = 5
-	READY_ALTERATION        = 6
-	READY_ALTERATION_FAILED = -6
+const (
+	FAILED_TRAINING           ModelStatus = -4
+	FAILED_PREPARING_TRAINING             = -3
+	FAILED_PREPARING_ZIP_FILE             = -2
+	FAILED_PREPARING                      = -1
+	PREPARING                             = 1
+	CONFIRM_PRE_TRAINING                  = 2
+	PREPARING_ZIP_FILE                    = 3
+	TRAINING                              = 4
+	READY                                 = 5
+	READY_ALTERATION                      = 6
+	READY_ALTERATION_FAILED               = -6

 	READY_RETRAIN        = 7
 	READY_RETRAIN_FAILED = -7
@@ -26,15 +29,6 @@ const (

 type ModelDefinitionStatus int

-type LayerType int
-
-const (
-	LAYER_INPUT        LayerType = 1
-	LAYER_DENSE                  = 2
-	LAYER_FLATTEN                = 3
-	LAYER_SIMPLE_BLOCK           = 4
-)
-
 const (
 	MODEL_DEFINITION_STATUS_CANCELD_TRAINING ModelDefinitionStatus = -4
 	MODEL_DEFINITION_STATUS_FAILED_TRAINING                        = -3
@@ -46,14 +40,6 @@ const (
 	MODEL_DEFINITION_STATUS_READY = 5
 )

-type ModelClassStatus int
-
-const (
-	MODEL_CLASS_STATUS_TO_TRAIN ModelClassStatus = 1
-	MODEL_CLASS_STATUS_TRAINING                  = 2
-	MODEL_CLASS_STATUS_TRAINED                   = 3
-)
-
 type ModelHeadStatus int

 const (
@@ -97,6 +83,61 @@ func (m BaseModel) CanEval() bool {
 	return true
 }

+// DO NOT pass unfiltered data in filters
+func (m BaseModel) GetDefinitions(db db.Db, filters string, args ...any) ([]*Definition, error) {
+	n_args := []any{m.Id}
+	n_args = append(n_args, args...)
+	return GetDbMultitple[Definition](db, fmt.Sprintf("model_definition as md where md.model_id=$1 %s", filters), n_args...)
+}
+
+func (m BaseModel) GetClasses(db db.Db, filters string, args ...any) ([]*ModelClass, error) {
+	n_args := []any{m.Id}
+	n_args = append(n_args, args...)
+	return GetDbMultitple[ModelClass](db, fmt.Sprintf("model_classes as mc where mc.model_id=$1 %s", filters), n_args...)
+}
+
+func (m *BaseModel) UpdateStatus(db db.Db, status ModelStatus) (err error) {
+	_, err = db.Exec("update models set status=$1 where id=$2", status, m.Id)
+	return
+}
+
+type DataPoint struct {
+	Class int    `json:"class"`
+	Path  string `json:"path"`
+}
+
+func (m BaseModel) DataPoints(db db.Db, mode DATA_POINT_MODE) (data []DataPoint, err error) {
+	rows, err := db.Query(
+		"select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner "+
+			"join model_classes as mc on mc.id = mdp.class_id "+
+			"where mc.model_id = $1 and mdp.model_mode=$2;",
+		m.Id, mode)
+	if err != nil {
+		return
+	}
+	defer rows.Close()
+
+	data = []DataPoint{}
+
+	for rows.Next() {
+		var id string
+		var class_order int
+		var file_path string
+		if err = rows.Scan(&id, &class_order, &file_path); err != nil {
+			return
+		}
+		if file_path == "id://" {
+			data = append(data, DataPoint{
+				Path:  path.Join("./savedData", m.Id, "data", id+"."+m.Format),
+				Class: class_order,
+			})
+		} else {
+			panic("TODO remote file path")
+		}
+	}
+	return
+}
+
 func StringToImageMode(colorMode string) int {
 	switch colorMode {
 	case "greyscale":
```
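With `ModelStatus` now a distinct type, `BaseModel.Status` itself apparently stays an `int`, which is why the call sites below gain explicit `int(FAILED_TRAINING)` and `ModelStatus(model.Status)` conversions. Fetching a split through the new accessor looks roughly like this (assuming `conn` and `model` in scope):

```go
points, err := model.DataPoints(conn, DATA_POINT_MODE_TRAINING)
if err != nil {
	return err
}
for _, p := range points {
	fmt.Println(p.Class, p.Path) // class_order label and ./savedData/<model>/data/<id>.<format>
}
```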
```diff
@@ -7,15 +7,15 @@ import (
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
 )

-type ModelClass struct {
+type ModelClassJSON struct {
 	Id      string `json:"id"`
 	ModelId string `json:"model_id" db:"model_id"`
 	Name    string `json:"name"`
 	Status  int    `json:"status"`
 }

-func ListClasses(c BasePack, model_id string) (cls []*ModelClass, err error) {
-	return GetDbMultitple[ModelClass](c.GetDb(), "model_classes where model_id=$1", model_id)
+func ListClasses(c BasePack, model_id string) (cls []*ModelClassJSON, err error) {
+	return GetDbMultitple[ModelClassJSON](c.GetDb(), "model_classes where model_id=$1", model_id)
 }

 func ModelHasDataPoints(db db.Db, model_id string) (result bool, err error) {
```
```diff
@@ -495,7 +495,7 @@ func handleDataUpload(handle *Handle) {
 		return c.E500M("Could not create class", err)
 	}

-	var modelClass model_classes.ModelClass
+	var modelClass model_classes.ModelClassJSON
 	err = GetDBOnce(c, &modelClass, "model_classes where id=$1;", id)
 	if err != nil {
 		return c.E500M("Failed to get class information but class was created", err)
@@ -704,7 +704,7 @@ func handleDataUpload(handle *Handle) {
 			return c.Error500(err)
 		}
 	} else {
-		_, err = handle.Db.Exec("delete from model_classes where model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TO_TRAIN)
+		_, err = handle.Db.Exec("delete from model_classes where model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TO_TRAIN)
 		if err != nil {
 			return c.Error500(err)
 		}
```
```diff
@@ -51,7 +51,7 @@ func handleDelete(handle *Handle) {
 		return c.E500M("Failed to get model", err)
 	}

-	switch model.Status {
+	switch ModelStatus(model.Status) {
 	case FAILED_TRAINING:
 		fallthrough
 	case FAILED_PREPARING_ZIP_FILE:
```
```diff
@@ -35,9 +35,9 @@ func handleEdit(handle *Handle) {
 	}

 	type ReturnType struct {
-		Classes               []*model_classes.ModelClass `json:"classes"`
+		Classes               []*model_classes.ModelClassJSON `json:"classes"`
 		HasData               bool                        `json:"has_data"`
 		NumberOfInvalidImages int                         `json:"number_of_invalid_images"`
 	}

 	c.ShowMessage = false
```
logic/models/train/remote_train.go (new file, +79 lines)

```go
package models_train

import (
	"errors"

	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/tasks/utils"
	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/utils"
	"github.com/goccy/go-json"
)

func PrepareTraining(handler *Handle, b BasePack, task Task, runner_id string) (err error) {
	l := b.GetLogger()

	model, err := GetBaseModel(b.GetDb(), *task.ModelId)
	if err != nil {
		task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Failed to get model information")
		l.Error("Failed to get model information", "err", err)
		return err
	}

	if model.Status != TRAINING {
		task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Model not in the correct status for training")
		return errors.New("Model not in the right status")
	}

	// TODO do this when the runner says it's OK
	//task.UpdateStatusLog(b, TASK_RUNNING, "Training model")

	// TODO move this to the runner part as well
	var dat struct {
		NumberOfModels int
		Accuracy       int
	}

	err = json.Unmarshal([]byte(task.ExtraTaskInfo), &dat)
	if err != nil {
		task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Failed to get model extra information")
	}

	if model.ModelType == 2 {
		panic("TODO")
		full_error := generateExpandableDefinitions(b, model, dat.Accuracy, dat.NumberOfModels)
		if full_error != nil {
			l.Error("Failed to generate definitions", "err", full_error)
			task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Failed generate model")
			return errors.New("Failed to generate definitions")
		}
	} else {
		error := generateDefinitions(b, model, dat.Accuracy, dat.NumberOfModels)
		if error != nil {
			task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Failed generate model")
			return errors.New("Failed to generate definitions")
		}
	}

	runners := handler.DataMap["runners"].(map[string]interface{})
	runner := runners[runner_id].(map[string]interface{})
	runner["task"] = &task

	runners[runner_id] = runner
	handler.DataMap["runners"] = runners

	return
}

func CleanUpFailed(b BasePack, task *Task) {
	db := b.GetDb()
	l := b.GetLogger()
	model, err := GetBaseModel(db, *task.ModelId)
	if err != nil {
		l.Error("Failed to get model", "err", err)
	} else {
		err = model.UpdateStatus(db, FAILED_TRAINING)
		if err != nil {
			l.Error("Failed to get status", err)
		}
	}
}
```
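Two things worth noting from the code above: `task.ExtraTaskInfo` is expected to carry a JSON object with `NumberOfModels` and `Accuracy` fields, and on success `PrepareTraining` stashes a pointer to the task in the shared `handler.DataMap["runners"]` entry for that runner, which is what the `/tasks/runner/active` endpoint later hands back to the runner.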
```diff
@@ -17,7 +17,7 @@ func handleRest(handle *Handle) {
 		return c.E500M("Failed to get model", err)
 	}

-	if model.Status != FAILED_PREPARING_TRAINING && model.Status != FAILED_TRAINING {
+	if model.Status != FAILED_PREPARING_TRAINING && model.Status != int(FAILED_TRAINING) {
 		return c.JsonBadRequest("Model is not in a status that can be reset")
 	}

```
```diff
@@ -39,16 +39,6 @@ func getDir() string {
 	return dir
 }

-// This function creates a new model_definition
-func MakeDefenition(db db.Db, model_id string, target_accuracy int) (id string, err error) {
-	var NewDefinition = struct {
-		ModelId        string `db:"model_id"`
-		TargetAccuracy int    `db:"target_accuracy"`
-	}{ModelId: model_id, TargetAccuracy: target_accuracy}
-
-	return InsertReturnId(db, &NewDefinition, "model_definition", "id")
-}
-
 func ModelDefinitionUpdateStatus(c BasePack, id string, status ModelDefinitionStatus) (err error) {
 	_, err = c.GetDb().Exec("update model_definition set status = $1 where id = $2", status, id)
 	return
```
```diff
@@ -118,14 +108,14 @@ func generateCvsExp(c BasePack, run_path string, model_id string, doPanic bool)
 	var co struct {
 		Count int `db:"count(*)"`
 	}
-	err = GetDBOnce(db, &co, "model_classes where model_id=$1 and status=$2;", model_id, MODEL_CLASS_STATUS_TRAINING)
+	err = GetDBOnce(db, &co, "model_classes where model_id=$1 and status=$2;", model_id, CLASS_STATUS_TRAINING)
 	if err != nil {
 		return
 	}
 	count = co.Count

 	if count == 0 {
-		err = setModelClassStatus(c, MODEL_CLASS_STATUS_TRAINING, "model_id=$1 and status=$2;", model_id, MODEL_CLASS_STATUS_TO_TRAIN)
+		err = setModelClassStatus(c, CLASS_STATUS_TRAINING, "model_id=$1 and status=$2;", model_id, CLASS_STATUS_TO_TRAIN)
 		if err != nil {
 			return
 		}
@@ -137,7 +127,7 @@ func generateCvsExp(c BasePack, run_path string, model_id string, doPanic bool)
 		return generateCvsExp(c, run_path, model_id, true)
 	}

-	data, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3;", model_id, DATA_POINT_MODE_TRAINING, MODEL_CLASS_STATUS_TRAINING)
+	data, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3;", model_id, DATA_POINT_MODE_TRAINING, CLASS_STATUS_TRAINING)
 	if err != nil {
 		return
 	}
@@ -287,7 +277,7 @@ func generateCvsExpandExp(c BasePack, run_path string, model_id string, offset i
 	var co struct {
 		Count int `db:"count(*)"`
 	}
-	err = GetDBOnce(db, &co, "model_classes where model_id=$1 and status=$2;", model_id, MODEL_CLASS_STATUS_TRAINING)
+	err = GetDBOnce(db, &co, "model_classes where model_id=$1 and status=$2;", model_id, CLASS_STATUS_TRAINING)
 	if err != nil {
 		return
 	}
@@ -296,7 +286,7 @@ func generateCvsExpandExp(c BasePack, run_path string, model_id string, offset i
 	count := co.Count

 	if count == 0 {
-		err = setModelClassStatus(c, MODEL_CLASS_STATUS_TRAINING, "model_id=$1 and status=$2;", model_id, MODEL_CLASS_STATUS_TO_TRAIN)
+		err = setModelClassStatus(c, CLASS_STATUS_TRAINING, "model_id=$1 and status=$2;", model_id, CLASS_STATUS_TO_TRAIN)
 		if err != nil {
 			return
 		} else if doPanic {
@@ -305,7 +295,7 @@ func generateCvsExpandExp(c BasePack, run_path string, model_id string, offset i
 		return generateCvsExpandExp(c, run_path, model_id, offset, true)
 	}

-	data, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3;", model_id, DATA_POINT_MODE_TRAINING, MODEL_CLASS_STATUS_TRAINING)
+	data, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3;", model_id, DATA_POINT_MODE_TRAINING, CLASS_STATUS_TRAINING)
 	if err != nil {
 		return
 	}
@@ -339,7 +329,7 @@ func generateCvsExpandExp(c BasePack, run_path string, model_id string, offset i
 	// This is to load some extra data so that the model has more things to train on
 	//

-	data_other, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3 limit $4;", model_id, DATA_POINT_MODE_TRAINING, MODEL_CLASS_STATUS_TRAINED, count*10)
+	data_other, err := db.Query("select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner join model_classes as mc on mc.id = mdp.class_id where mc.model_id = $1 and mdp.model_mode=$2 and mc.status=$3 limit $4;", model_id, DATA_POINT_MODE_TRAINING, CLASS_STATUS_TRAINED, count*10)
 	if err != nil {
 		return
 	}
```
```diff
@@ -737,7 +727,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err != nil {
 		l.Error("Failed to train Model! Err:")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}
 	defer definitionsRows.Close()
@@ -750,7 +740,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err = definitionsRows.Scan(&rowv.id, &rowv.target_accuracy, &rowv.epoch); err != nil {
 		l.Error("Failed to train Model Could not read definition from db!Err:")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}
 	definitions = append(definitions, rowv)
@@ -758,7 +748,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {

 	if len(definitions) == 0 {
 		l.Error("No Definitions defined!")
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}

@@ -788,14 +778,14 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	_, err = db.Exec("update model_definition set accuracy=$1, status=$2, epoch=$3 where id=$4", accuracy, MODEL_DEFINITION_STATUS_TRANIED, def.epoch, def.id)
 	if err != nil {
 		l.Error("Failed to train definition!Err:\n", "err", err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return err
 	}

 	_, err = db.Exec("update model_definition set status=$1 where id!=$2 and model_id=$3 and status!=$4", MODEL_DEFINITION_STATUS_CANCELD_TRAINING, def.id, model.Id, MODEL_DEFINITION_STATUS_FAILED_TRAINING)
 	if err != nil {
 		l.Error("Failed to train definition!Err:\n", "err", err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return err
 	}

@@ -813,7 +803,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	_, err = db.Exec("update model_definition set accuracy=$1, epoch=$2, status=$3 where id=$4", accuracy, def.epoch, MODEL_DEFINITION_STATUS_PAUSED_TRAINING, def.id)
 	if err != nil {
 		l.Error("Failed to train definition!Err:\n", "err", err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return err
 	}
 }
@@ -868,7 +858,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err != nil {
 		l.Error("DB: failed to read definition")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}
 	defer rows.Close()
@@ -876,7 +866,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if !rows.Next() {
 		// TODO Make the Model status have a message
 		l.Error("All definitions failed to train!")
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}

@@ -884,14 +874,14 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err = rows.Scan(&id); err != nil {
 		l.Error("Failed to read id:")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}

 	if _, err = db.Exec("update model_definition set status=$1 where id=$2;", MODEL_DEFINITION_STATUS_READY, id); err != nil {
 		l.Error("Failed to update model definition")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}

@@ -899,7 +889,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err != nil {
 		l.Error("Failed to select model_definition to delete")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}
 	defer to_delete.Close()
@@ -909,7 +899,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if err = to_delete.Scan(&id); err != nil {
 		l.Error("Failed to scan the id of a model_definition to delete")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}
 	os.RemoveAll(path.Join("savedData", model.Id, "defs", id))
@@ -919,7 +909,7 @@ func trainModel(c BasePack, model *BaseModel) (err error) {
 	if _, err = db.Exec("delete from model_definition where status!=$1 and model_id=$2;", MODEL_DEFINITION_STATUS_READY, model.Id); err != nil {
 		l.Error("Failed to delete model_definition")
 		l.Error(err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		return
 	}

@@ -1066,7 +1056,7 @@ func trainModelExp(c BasePack, model *BaseModel) (err error) {
 	err = GetDBOnce(db, &dat, "model_definition where model_id=$1 and status=$2 order by accuracy desc limit 1;", model.Id, MODEL_DEFINITION_STATUS_TRANIED)
 	if err == NotFoundError {
 		// Set the class status to trained
-		err = setModelClassStatus(c, MODEL_CLASS_STATUS_TO_TRAIN, "model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TRAINING)
+		err = setModelClassStatus(c, CLASS_STATUS_TO_TRAIN, "model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TRAINING)
 		if err != nil {
 			l.Error("All definitions failed to train! And Failed to set class status")
 			return err
@@ -1101,7 +1091,7 @@ func trainModelExp(c BasePack, model *BaseModel) (err error) {
 	}

 	if err = splitModel(c, model); err != nil {
-		err = setModelClassStatus(c, MODEL_CLASS_STATUS_TO_TRAIN, "model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TRAINING)
+		err = setModelClassStatus(c, CLASS_STATUS_TO_TRAIN, "model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TRAINING)
 		if err != nil {
 			l.Error("Failed to split the model! And Failed to set class status")
 			return err
@@ -1112,7 +1102,7 @@ func trainModelExp(c BasePack, model *BaseModel) (err error) {
 	}

 	// Set the class status to trained
-	err = setModelClassStatus(c, MODEL_CLASS_STATUS_TRAINED, "model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TRAINING)
+	err = setModelClassStatus(c, CLASS_STATUS_TRAINED, "model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TRAINING)
 	if err != nil {
 		l.Error("Failed to set class status")
 		return err
```
```diff
@@ -1272,7 +1262,7 @@ func generateDefinition(c BasePack, model *BaseModel, target_accuracy int, numbe
 	db := c.GetDb()
 	l := c.GetLogger()

-	def_id, err := MakeDefenition(db, model.Id, target_accuracy)
+	def, err := MakeDefenition(db, model.Id, target_accuracy)
 	if err != nil {
 		failed()
 		return
@@ -1281,60 +1271,77 @@ func generateDefinition(c BasePack, model *BaseModel, target_accuracy int, numbe
 	order := 1

 	// Note the shape of the first layer defines the import size
-	if complexity == 2 {
-		width := int(math.Pow(2, math.Floor(math.Log(float64(model.Width))/math.Log(2.0))))
-		height := int(math.Pow(2, math.Floor(math.Log(float64(model.Height))/math.Log(2.0))))
-		l.Warn("Complexity 2 creating model with smaller size", "width", width, "height", height)
-		err = MakeLayer(db, def_id, order, LAYER_INPUT, fmt.Sprintf("%d,%d,1", width, height))
-		if err != nil {
-			failed()
-			return
-		}
-		order++
-	} else {
-		err = MakeLayer(db, def_id, order, LAYER_INPUT, fmt.Sprintf("%d,%d,1", model.Width, model.Height))
-		if err != nil {
-			failed()
-			return
-		}
-		order++
-	}
-
-	loop := max(int((math.Log(float64(model.Width)) / math.Log(float64(10)))), 1)
-	for i := 0; i < loop; i++ {
-		err = MakeLayer(db, def_id, order, LAYER_SIMPLE_BLOCK, "")
-		order++
-		if err != nil {
-			failed()
-			return
-		}
-	}
-
-	err = MakeLayer(db, def_id, order, LAYER_FLATTEN, "")
+	//_, err = def.MakeLayer(db, order, LAYER_INPUT, ShapeToString(model.Width, model.Height, model.ImageMode))
+	// Note the shape for now is not used
+	_, err = def.MakeLayer(db, order, LAYER_INPUT, ShapeToString(3, model.Width, model.Height))
 	if err != nil {
 		failed()
 		return
 	}
 	order++

-	loop = max(int((math.Log(float64(number_of_classes))/math.Log(float64(10)))/2), 1)
-	for i := 0; i < loop; i++ {
-		err = MakeLayer(db, def_id, order, LAYER_DENSE, fmt.Sprintf("%d,1", number_of_classes*(loop-i)))
-		order++
-		if err != nil {
-			failed()
-			return
-		}
-	}
-
-	err = ModelDefinitionUpdateStatus(c, def_id, MODEL_DEFINITION_STATUS_INIT)
-	if err != nil {
+	if complexity == 0 {
+		/*
+			_, err = def.MakeLayer(db, order, LAYER_SIMPLE_BLOCK, "")
+			if err != nil {
+				failed()
+				return
+			}
+			order++
+		*/
+
+		_, err = def.MakeLayer(db, order, LAYER_FLATTEN, "")
+		if err != nil {
+			failed()
+			return
+		}
+		order++
+
+		loop := int(math.Log2(float64(number_of_classes)))
+		for i := 0; i < loop; i++ {
+			_, err = def.MakeLayer(db, order, LAYER_DENSE, ShapeToString(number_of_classes*(loop-i)))
+			order++
+			if err != nil {
+				ModelUpdateStatus(c, model.Id, FAILED_PREPARING_TRAINING)
+				return
+			}
+		}
+	} else if complexity == 1 || complexity == 2 {
+		loop := max(1, int((math.Log(float64(model.Width)) / math.Log(float64(10)))))
+		for i := 0; i < loop; i++ {
+			_, err = def.MakeLayer(db, order, LAYER_SIMPLE_BLOCK, "")
+			order++
+			if err != nil {
+				failed()
+				return
+			}
+		}
+
+		_, err = def.MakeLayer(db, order, LAYER_FLATTEN, "")
+		if err != nil {
+			failed()
+			return
+		}
+		order++
+
+		loop = int((math.Log(float64(number_of_classes)) / math.Log(float64(10))) / 2)
+		if loop == 0 {
+			loop = 1
+		}
+		for i := 0; i < loop; i++ {
+			_, err = def.MakeLayer(db, order, LAYER_DENSE, ShapeToString(number_of_classes*(loop-i)))
+			order++
+			if err != nil {
+				failed()
+				return
+			}
+		}
+	} else {
+		l.Error("Unknown complexity", "complexity", complexity)
 		failed()
 		return
 	}

-	return nil
+	return def.UpdateStatus(db, DEFINITION_STATUS_INIT)
 }

 func generateDefinitions(c BasePack, model *BaseModel, target_accuracy int, number_of_models int) (err error) {
```
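To make the dense tail concrete: for `complexity == 0` with, say, `number_of_classes = 10`, `loop = int(math.Log2(10)) = 3`, so three dense layers of widths 30, 20 and 10 are appended; the `complexity == 1 || complexity == 2` branch instead sizes the loop from `log10(classes)/2`, clamped to at least one layer. A sketch of the width sequence (the class count is illustrative):

```go
numberOfClasses := 10
loop := int(math.Log2(float64(numberOfClasses))) // 3
for i := 0; i < loop; i++ {
	fmt.Println(numberOfClasses * (loop - i)) // 30, 20, 10
}
```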
```diff
@@ -1393,12 +1400,14 @@ func generateExpandableDefinition(c BasePack, model *BaseModel, target_accuracy
 		return
 	}

-	def_id, err := MakeDefenition(c.GetDb(), model.Id, target_accuracy)
+	def, err := MakeDefenition(c.GetDb(), model.Id, target_accuracy)
 	if err != nil {
 		failed()
 		return
 	}

+	def_id := def.Id
+
 	order := 1

 	width := model.Width
```
```diff
@@ -1533,7 +1542,7 @@ func generateExpandableDefinitions(c BasePack, model *BaseModel, target_accuracy
 }

 func ResetClasses(c BasePack, model *BaseModel) {
-	_, err := c.GetDb().Exec("update model_classes set status=$1 where status=$2 and model_id=$3", MODEL_CLASS_STATUS_TO_TRAIN, MODEL_CLASS_STATUS_TRAINING, model.Id)
+	_, err := c.GetDb().Exec("update model_classes set status=$1 where status=$2 and model_id=$3", CLASS_STATUS_TO_TRAIN, CLASS_STATUS_TRAINING, model.Id)
 	if err != nil {
 		c.GetLogger().Error("Error while resetting the classes", "error", err)
 	}
```
```diff
@@ -1544,7 +1553,7 @@ func trainExpandable(c *Context, model *BaseModel) {

 	failed := func(msg string) {
 		c.Logger.Error(msg, "err", err)
-		ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(c, model.Id, int(FAILED_TRAINING))
 		ResetClasses(c, model)
 	}

```
```diff
@@ -1588,7 +1597,7 @@ func trainExpandable(c *Context, model *BaseModel) {
 	}

 	// Set the class status to trained
-	err = setModelClassStatus(c, MODEL_CLASS_STATUS_TRAINED, "model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TRAINING)
+	err = setModelClassStatus(c, CLASS_STATUS_TRAINED, "model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TRAINING)
 	if err != nil {
 		failed("Failed to set class status")
 		return
```
```diff
@@ -1648,7 +1657,7 @@ func RunTaskTrain(b BasePack, task Task) (err error) {
 	if err != nil {
 		l.Error("Failed to train model", "err", err)
 		task.UpdateStatusLog(b, TASK_FAILED_RUNNING, "Failed generate model")
-		ModelUpdateStatus(b, model.Id, FAILED_TRAINING)
+		ModelUpdateStatus(b, model.Id, int(FAILED_TRAINING))
 		return
 	}

```
```diff
@@ -1731,7 +1740,7 @@ func RunTaskRetrain(b BasePack, task Task) (err error) {

 	l.Info("Model updated")

-	_, err = db.Exec("update model_classes set status=$1 where status=$2 and model_id=$3", MODEL_CLASS_STATUS_TRAINED, MODEL_CLASS_STATUS_TRAINING, model.Id)
+	_, err = db.Exec("update model_classes set status=$1 where status=$2 and model_id=$3", CLASS_STATUS_TRAINED, CLASS_STATUS_TRAINING, model.Id)
 	if err != nil {
 		l.Error("Error while updating the classes", "error", err)
 		failed()
```
```diff
@@ -1861,7 +1870,7 @@ func handleTrain(handle *Handle) {
 		c,
 		"model_classes where model_id=$1 and status=$2 order by class_order asc",
 		model.Id,
-		MODEL_CLASS_STATUS_TO_TRAIN,
+		CLASS_STATUS_TO_TRAIN,
 	)
 	if err != nil {
 		_err := c.RollbackTx()
```
```diff
@@ -1882,7 +1891,7 @@ func handleTrain(handle *Handle) {

 	//Update the classes
 	{
-		_, err = c.Exec("update model_classes set status=$1 where status=$2 and model_id=$3", MODEL_CLASS_STATUS_TRAINING, MODEL_CLASS_STATUS_TO_TRAIN, model.Id)
+		_, err = c.Exec("update model_classes set status=$1 where status=$2 and model_id=$3", CLASS_STATUS_TRAINING, CLASS_STATUS_TO_TRAIN, model.Id)
 		if err != nil {
 			_err := c.RollbackTx()
 			if _err != nil {
```
logic/tasks/README.md (new file, +7 lines)

# Runner Protocol

```
 /----\
 \/   |
Register -> Init -> Active ---> Ready -> Info
```
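A minimal sketch of the runner's side of this loop, against the endpoints added in `logic/tasks/runner.go` below; the base URL, the runner id and the omitted auth handling are assumptions (the real handlers go through `PostAuthJson` and check the runner's token):

```go
package main

import (
	"bytes"
	"encoding/json"
	"net/http"
	"time"
)

const base = "http://localhost:8080" // assumed server address

func post(path string, body any) (*http.Response, error) {
	buf, _ := json.Marshal(body)
	return http.Post(base+path, "application/json", bytes.NewReader(buf))
}

func main() {
	id := "<runner id from /tasks/runner/register>"

	// Init once, then poll /active until a task is assigned
	// (the Register -> Init -> Active loop in the diagram).
	post("/tasks/runner/init", map[string]string{"id": id})
	for {
		resp, err := post("/tasks/runner/active", map[string]string{"id": id})
		if err == nil && resp.StatusCode == http.StatusOK {
			var task struct {
				Id string `json:"id"`
			}
			json.NewDecoder(resp.Body).Decode(&task)
			resp.Body.Close()
			// Acknowledge the task so the server marks it TASK_RUNNING.
			post("/tasks/runner/ready", map[string]string{"id": id, "taskId": task.Id})
			return
		}
		if resp != nil {
			resp.Body.Close()
		}
		time.Sleep(5 * time.Second) // a 404 just means no task yet
	}
}
```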
```diff
@@ -8,4 +8,5 @@ func HandleTasks(handle *Handle) {
 	handleUpload(handle)
 	handleList(handle)
 	handleRequests(handle)
+	handleRemoteRunner(handle)
 }
```
386
logic/tasks/runner.go
Normal file
386
logic/tasks/runner.go
Normal file
@ -0,0 +1,386 @@
|
|||||||
|
package tasks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
. "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
|
||||||
|
. "git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train"
|
||||||
|
. "git.andr3h3nriqu3s.com/andr3/fyp/logic/tasks/utils"
|
||||||
|
. "git.andr3h3nriqu3s.com/andr3/fyp/logic/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
func verifyRunner(c *Context, dat *JustId) (runner *Runner, e *Error) {
|
||||||
|
runner, err := GetRunner(c, dat.Id)
|
||||||
|
if err == NotFoundError {
|
||||||
|
e = c.JsonBadRequest("Could not find runner, please register runner first")
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
e = c.E500M("Failed to get information about the runner", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if runner.Token != *c.Token {
|
||||||
|
return nil, c.SendJSONStatus(401, "Only runners can use this funcion")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type VerifyTask struct {
|
||||||
|
Id string `json:"id" validate:"required"`
|
||||||
|
TaskId string `json:"taskId" validate:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyTask(x *Handle, c *Context, dat *VerifyTask) (task *Task, error *Error) {
|
||||||
|
mutex := x.DataMap["runners_mutex"].(*sync.Mutex)
|
||||||
|
mutex.Lock()
|
||||||
|
defer mutex.Unlock()
|
||||||
|
|
||||||
|
var runners map[string]interface{} = x.DataMap["runners"].(map[string]interface{})
|
||||||
|
if runners[dat.Id] == nil {
|
||||||
|
return nil, c.JsonBadRequest("Runner not active")
|
||||||
|
}
|
||||||
|
|
||||||
|
var runner_data map[string]interface{} = runners[dat.Id].(map[string]interface{})
|
||||||
|
|
||||||
|
if runner_data["task"] == nil {
|
||||||
|
return nil, c.SendJSONStatus(404, "No active task")
|
||||||
|
}
|
||||||
|
|
||||||
|
return runner_data["task"].(*Task), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleRemoteRunner(x *Handle) {
|
||||||
|
|
||||||
|
type RegisterRunner struct {
|
||||||
|
Token string `json:"token" validate:"required"`
|
||||||
|
Type RunnerType `json:"type" validate:"required"`
|
||||||
|
}
|
||||||
|
PostAuthJson(x, "/tasks/runner/register", User_Normal, func(c *Context, dat *RegisterRunner) *Error {
|
||||||
|
if *c.Token != dat.Token {
|
||||||
|
// TODO do admin
|
||||||
|
return c.E500M("Please make sure that the token is the same that is being registered", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Logger.Info("test", "dat", dat)
|
||||||
|
|
||||||
|
var runner Runner
|
||||||
|
err := GetDBOnce(c, &runner, "remote_runner as ru where token=$1", dat.Token)
|
||||||
|
if err != NotFoundError && err != nil {
|
||||||
|
return c.E500M("Failed to get information remote runners", err)
|
||||||
|
}
|
||||||
|
if err != NotFoundError {
|
||||||
|
return c.JsonBadRequest("Token is already registered by a runner")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO get id from token passed by when doing admin
|
||||||
|
var userId = c.User.Id
|
||||||
|
|
||||||
|
var new_runner = struct {
|
||||||
|
Type RunnerType
|
||||||
|
UserId string `db:"user_id"`
|
||||||
|
Token string
|
||||||
|
}{
|
||||||
|
Type: dat.Type,
|
||||||
|
Token: dat.Token,
|
||||||
|
UserId: userId,
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := InsertReturnId(c, &new_runner, "remote_runner", "id")
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to create remote runner", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendJSON(struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
}{
|
||||||
|
Id: id,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
// TODO remove runner
|
||||||
|
|
||||||
|
PostAuthJson(x, "/tasks/runner/init", User_Normal, func(c *Context, dat *JustId) *Error {
|
||||||
|
runner, error := verifyRunner(c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex := x.DataMap["runners_mutex"].(*sync.Mutex)
|
||||||
|
mutex.Lock()
|
||||||
|
defer mutex.Unlock()
|
||||||
|
|
||||||
|
var runners map[string]interface{} = x.DataMap["runners"].(map[string]interface{})
|
||||||
|
if runners[dat.Id] != nil {
|
||||||
|
c.Logger.Info("Logger trying to register but already registerd")
|
||||||
|
c.ShowMessage = false
|
||||||
|
return c.SendJSON("Ok")
|
||||||
|
}
|
||||||
|
|
||||||
|
var new_runner = map[string]interface{}{}
|
||||||
|
new_runner["last_time_check"] = time.Now()
|
||||||
|
new_runner["runner_info"] = runner
|
||||||
|
|
||||||
|
runners[dat.Id] = new_runner
|
||||||
|
|
||||||
|
x.DataMap["runners"] = runners
|
||||||
|
|
||||||
|
return c.SendJSON("Ok")
|
||||||
|
})
|
||||||
|
|
||||||
|
PostAuthJson(x, "/tasks/runner/active", User_Normal, func(c *Context, dat *JustId) *Error {
|
||||||
|
_, error := verifyRunner(c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex := x.DataMap["runners_mutex"].(*sync.Mutex)
|
||||||
|
mutex.Lock()
|
||||||
|
defer mutex.Unlock()
|
||||||
|
|
||||||
|
var runners map[string]interface{} = x.DataMap["runners"].(map[string]interface{})
|
||||||
|
if runners[dat.Id] == nil {
|
||||||
|
return c.JsonBadRequest("Runner not active")
|
||||||
|
}
|
||||||
|
|
||||||
|
var runner_data map[string]interface{} = runners[dat.Id].(map[string]interface{})
|
||||||
|
|
||||||
|
if runner_data["task"] == nil {
|
||||||
|
c.ShowMessage = false
|
||||||
|
return c.SendJSONStatus(404, "No active task")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.ShowMessage = false
|
||||||
|
// This should be a task obj
|
||||||
|
return c.SendJSON(runner_data["task"])
|
||||||
|
})
|
||||||
|
|
||||||
|
PostAuthJson(x, "/tasks/runner/ready", User_Normal, func(c *Context, dat *VerifyTask) *Error {
|
||||||
|
_, error := verifyRunner(c, &JustId{Id: dat.Id})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
task, error := verifyTask(x, c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
err := task.UpdateStatus(c, TASK_RUNNING, "Task Running on Runner")
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to set task status", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendJSON("Ok")
|
||||||
|
})
|
||||||
|
|
||||||
|
type TaskFail struct {
|
||||||
|
Id string `json:"id" validate:"required"`
|
||||||
|
TaskId string `json:"taskId" validate:"required"`
|
||||||
|
Reason string `json:"reason" validate:"required"`
|
||||||
|
}
|
||||||
|
PostAuthJson(x, "/tasks/runner/fail", User_Normal, func(c *Context, dat *TaskFail) *Error {
|
||||||
|
_, error := verifyRunner(c, &JustId{Id: dat.Id})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
task, error := verifyTask(x, c, &VerifyTask{Id: dat.Id, TaskId: dat.TaskId})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
err := task.UpdateStatus(c, TASK_FAILED_RUNNING, dat.Reason)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to set task status", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do extra clean up on tasks
|
||||||
|
switch task.TaskType {
|
||||||
|
case int(TASK_TYPE_TRAINING):
|
||||||
|
CleanUpFailed(c, task)
|
||||||
|
default:
|
||||||
|
panic("Do not know how to handle this")
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex := x.DataMap["runners_mutex"].(*sync.Mutex)
|
||||||
|
mutex.Lock()
|
||||||
|
defer mutex.Unlock()
|
||||||
|
|
||||||
|
var runners map[string]interface{} = x.DataMap["runners"].(map[string]interface{})
|
||||||
|
var runner_data map[string]interface{} = runners[dat.Id].(map[string]interface{})
|
||||||
|
runner_data["task"] = nil
|
||||||
|
|
||||||
|
runners[dat.Id] = runner_data
|
||||||
|
x.DataMap["runners"] = runners
|
||||||
|
|
||||||
|
return c.SendJSON("Ok")
|
||||||
|
})
|
||||||
|
|
||||||
|
PostAuthJson(x, "/tasks/runner/train/defs", User_Normal, func(c *Context, dat *VerifyTask) *Error {
|
||||||
|
_, error := verifyRunner(c, &JustId{Id: dat.Id})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
task, error := verifyTask(x, c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
if task.TaskType != int(TASK_TYPE_TRAINING) {
|
||||||
|
c.Logger.Error("Task not is not the right type to get the definitions", "task type", task.TaskType)
|
||||||
|
return c.JsonBadRequest("Task is not the right type go get the definitions")
|
||||||
|
}
|
||||||
|
|
||||||
|
model, err := GetBaseModel(c, *task.ModelId)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get model information", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
defs, err := model.GetDefinitions(c, "and md.status=$2", DEFINITION_STATUS_INIT)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get the model definitions", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendJSON(defs)
|
||||||
|
})
|
||||||
|
|
||||||
|
PostAuthJson(x, "/tasks/runner/train/classes", User_Normal, func(c *Context, dat *VerifyTask) *Error {
|
||||||
|
_, error := verifyRunner(c, &JustId{Id: dat.Id})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
task, error := verifyTask(x, c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
if task.TaskType != int(TASK_TYPE_TRAINING) {
|
||||||
|
c.Logger.Error("Task not is not the right type to get the definitions", "task type", task.TaskType)
|
||||||
|
return c.JsonBadRequest("Task is not the right type go get the definitions")
|
||||||
|
}
|
||||||
|
|
||||||
|
model, err := GetBaseModel(c, *task.ModelId)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get model information", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
classes, err := model.GetClasses(c, "and status=$2 order by mc.class_order asc", CLASS_STATUS_TO_TRAIN)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get the model classes", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendJSON(classes)
|
||||||
|
})
|
||||||
|
|
||||||
|
	type RunnerTrainDefStatus struct {
		Id     string           `json:"id" validate:"required"`
		TaskId string           `json:"taskId" validate:"required"`
		DefId  string           `json:"defId" validate:"required"`
		Status DefinitionStatus `json:"status" validate:"required"`
	}
	PostAuthJson(x, "/tasks/runner/train/def/status", User_Normal, func(c *Context, dat *RunnerTrainDefStatus) *Error {
		_, error := verifyRunner(c, &JustId{Id: dat.Id})
		if error != nil {
			return error
		}

		task, error := verifyTask(x, c, &VerifyTask{Id: dat.Id, TaskId: dat.TaskId})
		if error != nil {
			return error
		}

		if task.TaskType != int(TASK_TYPE_TRAINING) {
			c.Logger.Error("Task is not the right type to get the definitions", "task type", task.TaskType)
			return c.JsonBadRequest("Task is not the right type to get the definitions")
		}

		def, err := GetDefinition(c, dat.DefId)
		if err != nil {
			return c.E500M("Failed to get definition information", err)
		}

		err = def.UpdateStatus(c, dat.Status)
		if err != nil {
			return c.E500M("Failed to update the definition status", err)
		}

		return c.SendJSON("Ok")
	})
	type RunnerTrainDefLayers struct {
		Id     string `json:"id" validate:"required"`
		TaskId string `json:"taskId" validate:"required"`
		DefId  string `json:"defId" validate:"required"`
	}
	PostAuthJson(x, "/tasks/runner/train/def/layers", User_Normal, func(c *Context, dat *RunnerTrainDefLayers) *Error {
		_, error := verifyRunner(c, &JustId{Id: dat.Id})
		if error != nil {
			return error
		}

		task, error := verifyTask(x, c, &VerifyTask{Id: dat.Id, TaskId: dat.TaskId})
		if error != nil {
			return error
		}

		if task.TaskType != int(TASK_TYPE_TRAINING) {
			c.Logger.Error("Task is not the right type to get the layers", "task type", task.TaskType)
			return c.JsonBadRequest("Task is not the right type to get the layers")
		}

		def, err := GetDefinition(c, dat.DefId)
		if err != nil {
			return c.E500M("Failed to get definition information", err)
		}

		layers, err := def.GetLayers(c, " order by layer_order asc")
		if err != nil {
			return c.E500M("Failed to get layers", err)
		}

		return c.SendJSON(layers)
	})
PostAuthJson(x, "/tasks/runner/train/datapoints", User_Normal, func(c *Context, dat *VerifyTask) *Error {
|
||||||
|
_, error := verifyRunner(c, &JustId{Id: dat.Id})
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
task, error := verifyTask(x, c, dat)
|
||||||
|
if error != nil {
|
||||||
|
return error
|
||||||
|
}
|
||||||
|
|
||||||
|
if task.TaskType != int(TASK_TYPE_TRAINING) {
|
||||||
|
c.Logger.Error("Task not is not the right type to get the definitions", "task type", task.TaskType)
|
||||||
|
return c.JsonBadRequest("Task is not the right type go get the definitions")
|
||||||
|
}
|
||||||
|
|
||||||
|
model, err := GetBaseModel(c, *task.ModelId)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get model information", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
training_points, err := model.DataPoints(c, DATA_POINT_MODE_TRAINING)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get the model classes", err)
|
||||||
|
}
|
||||||
|
testing_points, err := model.DataPoints(c, DATA_POINT_MODE_TRAINING)
|
||||||
|
if err != nil {
|
||||||
|
return c.E500M("Failed to get the model classes", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendJSON(struct {
|
||||||
|
Testing []DataPoint `json:"testing"`
|
||||||
|
Training []DataPoint `json:"training"`
|
||||||
|
}{
|
||||||
|
Testing: testing_points,
|
||||||
|
Training: training_points,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
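// Illustrative request shapes for the runner endpoints above, inferred from
// the handler structs and the Rust client further down in this diff (bodies
// are JSON; the "token" header carries the runner token):
//
//   POST /tasks/runner/train/defs        {"id": "<runner id>", "taskId": "<task id>"}
//   POST /tasks/runner/train/classes     {"id": "<runner id>", "taskId": "<task id>"}
//   POST /tasks/runner/train/def/status  {"id": "...", "taskId": "...", "defId": "...", "status": 3}
//   POST /tasks/runner/train/def/layers  {"id": "...", "taskId": "...", "defId": "..."}
//   POST /tasks/runner/train/datapoints  {"id": "<runner id>", "taskId": "<task id>"}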
@ -5,6 +5,7 @@ import (
	"math"
	"os"
	"runtime/debug"
	"sync"
	"time"

	"github.com/charmbracelet/log"
@ -90,6 +91,45 @@ func runner(config Config, db db.Db, task_channel chan Task, index int, back_cha
	}
}

/**
 * Handle remote runner
 */
func handleRemoteTask(handler *Handle, base BasePack, runner_id string, task Task) {
	logger := log.NewWithOptions(os.Stdout, log.Options{
		ReportCaller:    true,
		ReportTimestamp: true,
		TimeFormat:      time.Kitchen,
		Prefix:          fmt.Sprintf("Runner pre %s", runner_id),
	})
	defer func() {
		if r := recover(); r != nil {
			logger.Error("Runner failed to setup for runner", "due to", r, "stack", string(debug.Stack()))
			// TODO maybe create better failed task
			task.UpdateStatusLog(base, TASK_FAILED_RUNNING, "Failed to setup task for runner")
		}
	}()

	err := task.UpdateStatus(base, TASK_PICKED_UP, "Failed to setup task for runner")
	if err != nil {
		logger.Error("Failed to mark task as PICKED UP")
		return
	}

	mutex := handler.DataMap["runners_mutex"].(*sync.Mutex)
	mutex.Lock()
	defer mutex.Unlock()

	switch task.TaskType {
	case int(TASK_TYPE_TRAINING):
		if err := PrepareTraining(handler, base, task, runner_id); err != nil {
			logger.Error("Failed to prepare for training", "err", err)
		}
	default:
		logger.Error("Not sure what to do, panicking", "taskType", task.TaskType)
		panic("not sure what to do")
	}
}

/**
 * Tells the orchestrator to look at the task list from time to time
 */
@ -125,7 +165,7 @@ func attentionSeeker(config Config, back_channel chan int) {
/**
 * Manages which worker should do work
 */
func RunnerOrchestrator(db db.Db, config Config, handler *Handle) {
	logger := log.NewWithOptions(os.Stdout, log.Options{
		ReportCaller:    true,
		ReportTimestamp: true,
@ -133,6 +173,16 @@ func RunnerOrchestrator(db db.Db, config Config) {
		Prefix: "Runner Orchestrator Logger",
	})

	// Setup vars
	handler.DataMap["runners"] = map[string]interface{}{}
	handler.DataMap["runners_mutex"] = &sync.Mutex{}

	base := BasePackStruct{
		Db:     db,
		Logger: logger,
		Host:   config.Hostname,
	}

	gpu_workers := config.GpuWorker.NumberOfWorkers

	logger.Info("Starting runners")
@ -149,7 +199,7 @@ func RunnerOrchestrator(db db.Db, config Config) {
			close(task_runners[x])
		}
		close(back_channel)
		go RunnerOrchestrator(db, config, handler)
	}
}()
@ -198,19 +248,45 @@ func RunnerOrchestrator(db db.Db, config Config) {
		}

		if task_to_dispatch != nil {
			// Only let CPU tasks be done by the local users
			if task_to_dispatch.TaskType == int(TASK_TYPE_DELETE_USER) {
				for i := 0; i < len(task_runners_used); i += 1 {
					if !task_runners_used[i] {
						task_runners[i] <- *task_to_dispatch
						task_runners_used[i] = true
						task_to_dispatch = nil
						break
					}
				}
				continue
			}

			mutex := handler.DataMap["runners_mutex"].(*sync.Mutex)
			mutex.Lock()
			remote_runners := handler.DataMap["runners"].(map[string]interface{})

			for k, v := range remote_runners {
				runner_data := v.(map[string]interface{})
				runner_info := runner_data["runner_info"].(*Runner)

				if runner_data["task"] != nil {
					continue
				}

				if runner_info.UserId == task_to_dispatch.UserId {
					go handleRemoteTask(handler, base, k, *task_to_dispatch)
					task_to_dispatch = nil
					break
				}
			}

			mutex.Unlock()
		}
	}
}
func StartRunners(db db.Db, config Config, handler *Handle) {
	go RunnerOrchestrator(db, config, handler)
}
29
logic/tasks/utils/runner.go
Normal file
@ -0,0 +1,29 @@
package tasks_utils

import (
	"time"

	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
	dbtypes "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
)

type RunnerType int64

const (
	RUNNER_TYPE_GPU RunnerType = iota + 1
)

type Runner struct {
	Id       string     `json:"id" db:"ru.id"`
	UserId   string     `json:"user_id" db:"ru.user_id"`
	Token    string     `json:"token" db:"ru.token"`
	Type     RunnerType `json:"type" db:"ru.type"`
	CreateOn time.Time  `json:"createOn" db:"ru.created_on"`
}

func GetRunner(db db.Db, id string) (ru *Runner, err error) {
	var runner Runner
	err = dbtypes.GetDBOnce(db, &runner, "remote_runner as ru where ru.id=$1", id)
	ru = &runner
	return
}
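// Usage sketch for GetRunner (illustrative; assumes a live db handle and a
// registered runner id, error handling shortened):
//
//	runner, err := tasks_utils.GetRunner(db, "<runner uuid>")
//	if err == nil {
//		fmt.Println(runner.UserId, runner.Type)
//	}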
@ -374,7 +374,7 @@ func (c Context) JsonBadRequest(dat any) *Error {
	c.SetReportCaller(true)
	c.Logger.Warn("Request failed with a bad request", "dat", dat)
	c.SetReportCaller(false)
	return c.SendJSONStatus(http.StatusBadRequest, dat)
}

func (c Context) JsonErrorBadRequest(err error, dat any) *Error {
4
main.go
@ -36,11 +36,11 @@ func main() {
	log.Info("Config loaded!", "config", config)
	config.GenerateToken(db)

	//TODO check if file structure exists to save data
	handle := NewHandler(db, config)

	StartRunners(db, config, handle)

	config.Cleanup(db)

	// TODO Handle this in other way
@ -13,7 +13,7 @@ http {
server {
	listen 8000;

	client_max_body_size 5G;

	location / {
		proxy_http_version 1.1;
6
requirements.txt
Normal file
@ -0,0 +1,6 @@
# tensorflow[and-cuda] == 2.15.1
tensorflow[and-cuda] == 2.9.1
pandas
# Make sure to install the nvidia pyindex first
# nvidia-pyindex
nvidia-cudnn
2
run.sh
Executable file
@ -0,0 +1,2 @@
#!/bin/bash
podman run --rm --network host --gpus all -ti -v $(pwd):/app -e "TERM=xterm-256color" fyp-server bash
1
runner/.gitignore
vendored
Normal file
@ -0,0 +1 @@
target/
1936
runner/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
17
runner/Cargo.toml
Normal file
@ -0,0 +1,17 @@
[package]
name = "runner"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
anyhow = "1.0.82"
serde = { version = "1.0.200", features = ["derive"] }
toml = "0.8.12"
reqwest = { version = "0.12", features = ["json"] }
tokio = { version = "1", features = ["full"] }
serde_json = "1.0.116"
serde_repr = "0.1"
tch = { version = "0.16.0", features = ["download-libtorch"] }
rand = "0.8.5"
12
runner/Dockerfile
Normal file
@ -0,0 +1,12 @@
FROM docker.io/nvidia/cuda:11.7.1-devel-ubuntu22.04

RUN apt-get update
RUN apt-get install -y curl

RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="$PATH:/root/.cargo/bin"
RUN rustup toolchain install stable

RUN apt-get install -y pkg-config libssl-dev

WORKDIR /app
3
runner/config.toml
Normal file
@ -0,0 +1,3 @@
hostname = "https://testing.andr3h3nriqu3s.com/api"
token = "d2bc41e8293937bcd9397870c98f97acc9603f742924b518e193cd1013e45d57897aa302b364001c72b458afcfb34239dfaf38a66b318e5cbc973eea"
data_path = "/home/andr3/Documents/my-repos/fyp"
1
runner/data.toml
Normal file
@ -0,0 +1 @@
id = "a7cec9e9-1d05-4633-8bc5-6faabe4fd5a3"
2
runner/run.sh
Executable file
@ -0,0 +1,2 @@
#!/bin/bash
podman run --rm --network host --gpus all -ti -v $(pwd):/app -e "TERM=xterm-256color" fyp-runner bash
115
runner/src/dataloader.rs
Normal file
@ -0,0 +1,115 @@
use crate::{model::DataPoint, settings::ConfigFile};
use std::{path::Path, sync::Arc};
use tch::Tensor;

pub struct DataLoader {
    pub batch_size: i64,
    pub len: usize,
    pub inputs: Vec<Tensor>,
    pub labels: Vec<Tensor>,
    pub pos: usize,
}

fn import_image(
    item: &DataPoint,
    base_path: &Path,
    classes_len: i64,
    inputs: &mut Vec<Tensor>,
    labels: &mut Vec<Tensor>,
) {
    inputs.push(
        tch::vision::image::load(base_path.join(&item.path))
            .ok()
            .unwrap()
            .unsqueeze(0),
    );

    if item.class >= 0 {
        let t = tch::Tensor::from_slice(&[item.class]).onehot(classes_len);
        labels.push(t);
    } else {
        labels.push(tch::Tensor::zeros(
            [1, classes_len],
            (tch::Kind::Float, tch::Device::Cpu),
        ))
    }
}

impl DataLoader {
    pub fn new(
        config: Arc<ConfigFile>,
        data: Vec<DataPoint>,
        classes_len: i64,
        batch_size: i64,
    ) -> DataLoader {
        let len: f64 = (data.len() as f64) / (batch_size as f64);
        let min_len: i64 = len.floor() as i64;
        let max_len: i64 = len.ceil() as i64;

        println!(
            "Creating dataloader data len: {} len: {} min_len: {} max_len: {}",
            data.len(),
            len,
            min_len,
            max_len
        );

        let base_path = Path::new(&config.data_path);

        let mut inputs: Vec<Tensor> = Vec::new();
        let mut all_labels: Vec<Tensor> = Vec::new();

        for batch in 0..min_len {
            let mut batch_acc: Vec<Tensor> = Vec::new();
            let mut labels: Vec<Tensor> = Vec::new();
            for image in 0..batch_size {
                let i: usize = (batch * batch_size + image).try_into().unwrap();
                let item = &data[i];
                import_image(item, base_path, classes_len, &mut batch_acc, &mut labels)
            }
            inputs.push(tch::Tensor::cat(&batch_acc[0..], 0));
            all_labels.push(tch::Tensor::cat(&labels[0..], 0));
        }

        // Import the last batch that has irregular sizing
        if min_len != max_len {
            let mut batch_acc: Vec<Tensor> = Vec::new();
            let mut labels: Vec<Tensor> = Vec::new();
            for image in 0..(data.len() - (batch_size * min_len) as usize) {
                let i: usize = (min_len * batch_size + (image as i64)) as usize;
                let item = &data[i];
                import_image(item, base_path, classes_len, &mut batch_acc, &mut labels);
            }
            inputs.push(tch::Tensor::cat(&batch_acc[0..], 0));
            all_labels.push(tch::Tensor::cat(&labels[0..], 0));
        }

        println!("ins shape: {:?}", inputs[0].size());

        return DataLoader {
            batch_size,
            inputs,
            labels: all_labels,
            len: max_len as usize,
            pos: 0,
        };
    }

    pub fn restart(self: &mut DataLoader) {
        self.pos = 0;
    }

    pub fn next(self: &mut DataLoader) -> Option<(Tensor, Tensor)> {
        if self.pos >= self.len {
            return None;
        }
        let input = self.inputs[self.pos].empty_like();
        self.inputs[self.pos] = self.inputs[self.pos].clone(&input);
        let label = self.labels[self.pos].empty_like();
        self.labels[self.pos] = self.labels[self.pos].clone(&label);

        self.pos += 1;

        return Some((input, label));
    }
}
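// Usage sketch for DataLoader (illustrative; assumes `config` is an
// Arc<ConfigFile>, `points` is a Vec<DataPoint> and the model has 2 classes):
//
//     let mut dl = DataLoader::new(config.clone(), points, 2, 64);
//     while let Some((inputs, labels)) = dl.next() {
//         // inputs: [batch, ...image dims], labels: [batch, 2] one-hot rows
//     }
//     dl.restart(); // rewind before the next epoch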
206
runner/src/main.rs
Normal file
@ -0,0 +1,206 @@
mod dataloader;
mod model;
mod settings;
mod tasks;
mod training;
mod types;

use crate::settings::*;
use crate::tasks::{fail_task, Task, TaskType};
use crate::training::handle_train;
use anyhow::{bail, Result};
use reqwest::StatusCode;
use serde_json::json;
use std::{fs, process::exit, sync::Arc, time::Duration};

enum ResultAlive {
    Ok,
    Error,
    NotInit,
}

async fn send_keep_alive_message(
    config: Arc<ConfigFile>,
    runner_data: Arc<RunnerData>,
) -> ResultAlive {
    let client = reqwest::Client::new();

    let to_send = json!({
        "id": runner_data.id,
    });

    let resp = client
        .post(format!("{}/tasks/runner/beat", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await;

    if resp.is_err() {
        return ResultAlive::Error;
    }

    let resp = resp.ok();

    if resp.is_none() {
        return ResultAlive::Error;
    }

    let resp = resp.unwrap();

    // TODO see if the message is related to not being inited
    if resp.status() != 200 {
        println!("Could not connect with the status");
        return ResultAlive::Error;
    }

    ResultAlive::Ok
}

async fn keep_alive(config: Arc<ConfigFile>, runner_data: Arc<RunnerData>) -> Result<()> {
    let mut failed = 0;
    loop {
        match send_keep_alive_message(config.clone(), runner_data.clone()).await {
            ResultAlive::Error => failed += 1,
            ResultAlive::NotInit => {
                println!("Runner not inited! Restarting!");
                exit(1)
            }
            ResultAlive::Ok => failed = 0,
        }

        // TODO move to config
        if failed > 20 {
            println!("Failed to connect to API more than 20 times in a row, stopping");
            exit(1)
        }

        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}

async fn handle_task(
    task: Task,
    config: Arc<ConfigFile>,
    runner_data: Arc<RunnerData>,
) -> Result<()> {
    let res = match task.task_type {
        TaskType::Training => handle_train(&task, config.clone(), runner_data.clone()).await,
        _ => {
            println!("Do not know how to handle this task #{:?}", task);
            bail!("Failed")
        }
    };

    if res.is_err() {
        println!("task failed #{:?}", res);
        fail_task(
            &task,
            config,
            runner_data,
            "Do not know how to handle this kind of task",
        )
        .await?
    }

    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    // Load config file
    let config_data = fs::read_to_string("./config.toml")?;
    let mut config: ConfigFile = toml::from_str(&config_data)?;

    let client = reqwest::Client::new();
    if config.config_path == None {
        config.config_path = Some(String::from("./data.toml"))
    }

    let runner_data: RunnerData = load_runner_data(&config).await?;

    let to_send = json!({
        "id": runner_data.id,
    });

    // Inform the server that the runner is available
    let resp = client
        .post(format!("{}/tasks/runner/init", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await?;

    if resp.status() != 200 {
        println!(
            "Could not connect with the api: status {} body {}",
            resp.status(),
            resp.text().await?
        );
        return Ok(());
    }

    let res = resp.json::<String>().await?;
    if res != "Ok" {
        println!("Unexpected problem: {}", res);
        return Ok(());
    }

    let config = Arc::new(config);
    let runner_data = Arc::new(runner_data);

    let config_alive = config.clone();
    let runner_data_alive = runner_data.clone();
    // Spawn the keep-alive loop onto the tokio runtime so the future is
    // actually polled alongside the main loop.
    tokio::spawn(keep_alive(config_alive, runner_data_alive));

    println!("Started main loop");
    loop {
        //TODO move time to config
        tokio::time::sleep(Duration::from_secs(1)).await;

        let to_send = json!({ "id": runner_data.id });

        let resp = client
            .post(format!("{}/tasks/runner/active", config.hostname))
            .header("token", &config.token)
            .body(to_send.to_string())
            .send()
            .await;

        if resp.is_err() || resp.as_ref().ok().is_none() {
            println!("Failed to get info from server {:?}", resp);
            continue;
        }

        let resp = resp?;

        match resp.status() {
            // No active task
            StatusCode::NOT_FOUND => (),
            StatusCode::OK => {
                println!("Found task!");

                let task: Result<Task, reqwest::Error> = resp.json().await;
                if task.is_err() || task.as_ref().ok().is_none() {
                    println!("Failed to resolve the json {:?}", task);
                    continue;
                }

                let task = task?;

                let res = handle_task(task, config.clone(), runner_data.clone()).await;

                if res.is_err() || res.as_ref().ok().is_none() {
                    println!("Failed to run the task");
                }
            }
            _ => {
                println!("Unexpected error #{:?}", resp);
                exit(1)
            }
        }
    }
}
117
runner/src/model/mod.rs
Normal file
@ -0,0 +1,117 @@
use anyhow::bail;
use serde::{Deserialize, Serialize};
use serde_repr::{Deserialize_repr, Serialize_repr};
use tch::{
    nn::{self, Module},
    Device,
};

#[derive(Debug)]
pub struct Model {
    pub vs: nn::VarStore,
    pub seq: nn::Sequential,
    pub layers: Vec<Layer>,
}

#[derive(Debug, Clone, Copy, Serialize_repr, Deserialize_repr)]
#[repr(i8)]
pub enum LayerType {
    Input = 1,
    Dense = 2,
    Flatten = 3,
    SimpleBlock = 4,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Layer {
    pub id: String,
    pub definition_id: String,
    pub layer_order: String,
    pub layer_type: LayerType,
    pub shape: String,
    pub exp_type: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct DataPoint {
    pub class: i64,
    pub path: String,
}

pub fn build_model(layers: Vec<Layer>, last_linear_size: i64, add_sigmoid: bool) -> Model {
    let vs = nn::VarStore::new(Device::Cuda(0));

    let mut seq = nn::seq();

    let mut last_linear_size = last_linear_size;
    let mut last_linear_conv: Vec<i64> = Vec::new();

    for layer in layers.iter() {
        match layer.layer_type {
            LayerType::Input => {
                last_linear_conv = serde_json::from_str(&layer.shape).ok().unwrap();
                println!("Layer: Input, In: {:?}", last_linear_conv);
            }
            LayerType::Dense => {
                let shape: Vec<i64> = serde_json::from_str(&layer.shape).ok().unwrap();
                println!("Layer: Dense, In: {}, Out: {}", last_linear_size, shape[0]);
                seq = seq
                    .add(nn::linear(
                        &vs.root(),
                        last_linear_size,
                        shape[0],
                        Default::default(),
                    ))
                    .add_fn(|xs| xs.relu());
                last_linear_size = shape[0];
            }
            LayerType::Flatten => {
                seq = seq.add_fn(|xs| xs.flatten(1, -1));
                last_linear_size = 1;
                for i in &last_linear_conv {
                    last_linear_size *= i;
                }
                println!(
                    "Layer: flatten, In: {:?}, Out: {}",
                    last_linear_conv, last_linear_size
                )
            }
            LayerType::SimpleBlock => {
                let new_last_linear_conv =
                    vec![128, last_linear_conv[1] / 2, last_linear_conv[2] / 2];
                println!(
                    "Layer: block, In: {:?}, Out: {:?}",
                    last_linear_conv, new_last_linear_conv,
                );
                let out_size = vec![new_last_linear_conv[1], new_last_linear_conv[2]];
                seq = seq
                    .add(nn::conv2d(
                        &vs.root(),
                        last_linear_conv[0],
                        128,
                        3,
                        nn::ConvConfig::default(),
                    ))
                    .add_fn(|xs| xs.relu())
                    .add(nn::conv2d(
                        &vs.root(),
                        128,
                        128,
                        3,
                        nn::ConvConfig::default(),
                    ))
                    .add_fn(|xs| xs.relu())
                    .add_fn(move |xs| xs.adaptive_avg_pool2d([out_size[1], out_size[1]]))
                    .add_fn(|xs| xs.leaky_relu());
                //m_layers = append(m_layers, NewSimpleBlock(vs, lastLinearConv[0]))
                last_linear_conv = new_last_linear_conv;
            }
        }
    }

    if add_sigmoid {
        seq = seq.add_fn(|xs| xs.sigmoid());
    }

    return Model { vs, layers, seq };
}
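// Usage sketch for build_model (illustrative; `layers` would normally come
// from the /tasks/runner/train/def/layers endpoint, with shapes encoded as
// JSON strings):
//
//     let layers = vec![/* Input [3,28,28] -> SimpleBlock -> Flatten -> Dense [10] */];
//     let model = build_model(layers, 0, true);
//     // model.seq.forward(&batch) then yields [batch, 10] sigmoid activations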
57
runner/src/settings.rs
Normal file
@ -0,0 +1,57 @@
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{fs, path};

#[derive(Deserialize)]
pub struct ConfigFile {
    // Hostname to connect with the api
    pub hostname: String,
    // Token used in the api to authenticate
    pub token: String,
    // Path to where to store some generated configuration values
    // defaults to ./data.toml
    pub config_path: Option<String>,
    // Data Path
    // Path to where the data is mounted
    pub data_path: String,
}

#[derive(Deserialize, Serialize)]
pub struct RunnerData {
    pub id: String,
}

pub async fn load_runner_data(config: &ConfigFile) -> Result<RunnerData> {
    let data_path = config.config_path.as_ref().unwrap();
    let data_path = path::Path::new(&*data_path);

    if data_path.exists() {
        let runner_data = fs::read_to_string(data_path)?;
        Ok(toml::from_str(&runner_data)?)
    } else {
        let client = reqwest::Client::new();
        let to_send = json!({
            "token": config.token,
            "type": 1,
        });

        let register_resp = client
            .post(format!("{}/tasks/runner/register", config.hostname))
            .header("token", &config.token)
            .body(to_send.to_string())
            .send()
            .await?;

        if register_resp.status() != 200 {
            bail!(format!("Could not create runner {:#?}", register_resp));
        }

        let runner_data: RunnerData = register_resp.json().await?;

        fs::write(data_path, toml::to_string(&runner_data)?)
            .expect("Failed to save data for runner");

        Ok(runner_data)
    }
}
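// Behavior note: on first start load_runner_data registers the runner
// (type 1 = GPU, per the remote_runner table later in this diff) and caches
// the returned id in config_path (./data.toml by default); later starts
// reuse that file instead of registering again.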
90
runner/src/tasks.rs
Normal file
@ -0,0 +1,90 @@
use std::sync::Arc;

use anyhow::{bail, Result};
use serde::Deserialize;
use serde_json::json;
use serde_repr::Deserialize_repr;

use crate::{ConfigFile, RunnerData};

#[derive(Clone, Copy, Deserialize_repr, Debug)]
#[repr(i8)]
pub enum TaskStatus {
    FailedRunning = -2,
    FailedCreation = -1,
    Preparing = 0,
    Todo = 1,
    PickedUp = 2,
    Running = 3,
    Done = 4,
}

#[derive(Clone, Copy, Deserialize_repr, Debug)]
#[repr(i8)]
pub enum TaskType {
    Classification = 1,
    Training = 2,
    Retraining = 3,
    DeleteUser = 4,
}

#[derive(Deserialize, Debug)]
pub struct Task {
    pub id: String,
    pub user_id: String,
    pub model_id: String,
    pub status: TaskStatus,
    pub status_message: String,
    pub user_confirmed: i8,
    pub compacted: i8,
    #[serde(alias = "type")]
    pub task_type: TaskType,
    pub extra_task_info: String,
    pub result: String,
    pub created: String,
}

pub async fn fail_task(
    task: &Task,
    config: Arc<ConfigFile>,
    runner_data: Arc<RunnerData>,
    reason: &str,
) -> Result<()> {
    println!("Marking Task as failed");

    let client = reqwest::Client::new();

    let to_send = json!({
        "id": runner_data.id,
        "taskId": task.id,
        "reason": reason
    });

    let resp = client
        .post(format!("{}/tasks/runner/fail", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await?;

    if resp.status() != 200 {
        println!("Failed to update status of task");
        bail!("Failed to update status of task");
    }

    Ok(())
}

impl Task {
    pub async fn fail(
        self: &mut Task,
        config: Arc<ConfigFile>,
        runner_data: Arc<RunnerData>,
        reason: &str,
    ) -> Result<()> {
        fail_task(self, config, runner_data, reason).await?;
        self.status = TaskStatus::FailedRunning;
        self.status_message = reason.to_string();
        Ok(())
    }
}
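// Usage sketch for Task::fail (illustrative; `task`, `config` and
// `runner_data` as set up in main.rs):
//
//     task.fail(config.clone(), runner_data.clone(), "ran out of memory").await?;
//     // POSTs /tasks/runner/fail and mirrors the new status locally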
599
runner/src/training.rs
Normal file
@ -0,0 +1,599 @@
use crate::{
    dataloader::DataLoader,
    model::{self, build_model},
    settings::{ConfigFile, RunnerData},
    tasks::{fail_task, Task},
    types::{DataPointRequest, Definition, ModelClass},
};
use std::{
    io::{self, Write},
    sync::Arc,
};

use anyhow::Result;
use rand::{seq::SliceRandom, thread_rng};
use serde_json::json;
use tch::{
    nn::{self, Module, OptimizerConfig},
    Cuda, Tensor,
};

pub async fn handle_train(
    task: &Task,
    config: Arc<ConfigFile>,
    runner_data: Arc<RunnerData>,
) -> Result<()> {
    let client = reqwest::Client::new();
    println!("Preparing to train a model");

    let to_send = json!({
        "id": runner_data.id,
        "taskId": task.id,
    });

    let mut defs: Vec<Definition> = client
        .post(format!("{}/tasks/runner/train/defs", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await?
        .json()
        .await?;

    if defs.len() == 0 {
        println!("No defs found");
        fail_task(task, config, runner_data, "No definitions found").await?;
        return Ok(());
    }

    let classes: Vec<ModelClass> = client
        .post(format!("{}/tasks/runner/train/classes", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await?
        .json()
        .await?;

    let data: DataPointRequest = client
        .post(format!("{}/tasks/runner/train/datapoints", config.hostname))
        .header("token", &config.token)
        .body(to_send.to_string())
        .send()
        .await?
        .json()
        .await?;

    let mut testing = data.testing;

    testing.shuffle(&mut thread_rng());

    let mut data_loader = DataLoader::new(config.clone(), testing, classes.len() as i64, 64);

    // TODO make this a vec
    let mut model: Option<model::Model> = None;

    loop {
        let config = config.clone();
        let runner_data = runner_data.clone();
        let mut to_remove: Vec<usize> = Vec::new();

        for (i, def) in defs.iter_mut().enumerate() {
            def.updateStatus(
                task,
                config.clone(),
                runner_data.clone(),
                crate::types::DefinitionStatus::Training,
            )
            .await?;

            let model_err = train_definition(
                def,
                &mut data_loader,
                model,
                config.clone(),
                runner_data.clone(),
                task,
            )
            .await;

            if model_err.is_err() {
                println!("Failed to create model {:?}", model_err);
                model = None;
                to_remove.push(i);
                continue;
            }

            model = model_err?;
        }

        // Keep only the definitions whose training attempt did not fail
        defs = defs
            .into_iter()
            .enumerate()
            .filter(|(i, _)| !to_remove.contains(i))
            .map(|(_, e)| e)
            .collect();

        break;
    }

    fail_task(task, config, runner_data, "TODO").await?;
    Ok(())

    /*
    for {
        // Keep track of definitions that did not train fast enough
        var toRemove ToRemoveList = []int{}

        for i, def := range definitions {

            accuracy, ml_model, err := trainDefinition(c, model, def, models[def.Id], classes)
            if err != nil {
                log.Error("Failed to train definition!Err:", "err", err)
                def.UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
                toRemove = append(toRemove, i)
                continue
            }
            models[def.Id] = ml_model

            if accuracy >= float64(def.TargetAccuracy) {
                log.Info("Found a definition that reaches target_accuracy!")
                _, err = db.Exec("update model_definition set accuracy=$1, status=$2, epoch=$3 where id=$4", accuracy, DEFINITION_STATUS_TRANIED, def.Epoch, def.Id)
                if err != nil {
                    log.Error("Failed to train definition!Err:\n", "err", err)
                    ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
                    return err
                }

                _, err = db.Exec("update model_definition set status=$1 where id!=$2 and model_id=$3 and status!=$4", DEFINITION_STATUS_CANCELD_TRAINING, def.Id, model.Id, DEFINITION_STATUS_FAILED_TRAINING)
                if err != nil {
                    log.Error("Failed to train definition!Err:\n", "err", err)
                    ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
                    return err
                }

                finished = true
                break
            }

            if def.Epoch > MAX_EPOCH {
                fmt.Printf("Failed to train definition! Accuracy less %f < %d\n", accuracy, def.TargetAccuracy)
                def.UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
                toRemove = append(toRemove, i)
                continue
            }

            _, err = db.Exec("update model_definition set accuracy=$1, epoch=$2, status=$3 where id=$4", accuracy, def.Epoch, DEFINITION_STATUS_PAUSED_TRAINING, def.Id)
            if err != nil {
                log.Error("Failed to train definition!Err:\n", "err", err)
                ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
                return err
            }
        }

        if finished {
            break
        }

        sort.Sort(sort.Reverse(toRemove))

        log.Info("Round done", "toRemove", toRemove)

        for _, n := range toRemove {
            // Clean up unused models
            models[definitions[n].Id] = nil
            definitions = remove(definitions, n)
        }

        len_def := len(definitions)

        if len_def == 0 {
            break
        }

        if len_def == 1 {
            continue
        }

        sort.Sort(sort.Reverse(definitions))

        acc := definitions[0].Accuracy - 20.0

        log.Info("Training models, Highest acc", "acc", definitions[0].Accuracy, "mod_acc", acc)

        toRemove = []int{}
        for i, def := range definitions {
            if def.Accuracy < acc {
                toRemove = append(toRemove, i)
            }
        }

        log.Info("Removing due to accuracy", "toRemove", toRemove)

        sort.Sort(sort.Reverse(toRemove))
        for _, n := range toRemove {
            log.Warn("Removing definition not fast enough learning", "n", n)
            definitions[n].UpdateStatus(c, DEFINITION_STATUS_FAILED_TRAINING)
            models[definitions[n].Id] = nil
            definitions = remove(definitions, n)
        }
    }

    var def Definition
    err = GetDBOnce(c, &def, "model_definition as md where md.model_id=$1 and md.status=$2 order by md.accuracy desc limit 1;", model.Id, DEFINITION_STATUS_TRANIED)
    if err != nil {
        if err == NotFoundError {
            log.Error("All definitions failed to train!")
        } else {
            log.Error("DB: failed to read definition", "err", err)
        }
        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
        return
    }

    if err = def.UpdateStatus(c, DEFINITION_STATUS_READY); err != nil {
        log.Error("Failed to update model definition", "err", err)
        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
        return
    }

    to_delete, err := db.Query("select id from model_definition where status != $1 and model_id=$2", DEFINITION_STATUS_READY, model.Id)
    if err != nil {
        log.Error("Failed to select model_definition to delete")
        log.Error(err)
        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
        return
    }
    defer to_delete.Close()

    for to_delete.Next() {
        var id string
        if err = to_delete.Scan(&id); err != nil {
            log.Error("Failed to scan the id of a model_definition to delete", "err", err)
            ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
            return
        }
        os.RemoveAll(path.Join("savedData", model.Id, "defs", id))
    }

    // TODO Check if returning also works here
    if _, err = db.Exec("delete from model_definition where status!=$1 and model_id=$2;", DEFINITION_STATUS_READY, model.Id); err != nil {
        log.Error("Failed to delete model_definition")
        log.Error(err)
        ModelUpdateStatus(c, model.Id, FAILED_TRAINING)
        return
    }

    ModelUpdateStatus(c, model.Id, READY)

    return
    */
}

async fn train_definition(
    def: &Definition,
    data_loader: &mut DataLoader,
    model: Option<model::Model>,
    config: Arc<ConfigFile>,
    runner_data: Arc<RunnerData>,
    task: &Task,
) -> Result<Option<model::Model>> {
    let client = reqwest::Client::new();
    println!("About to start training definition");

    let mut accuracy = 0;

    // Build the model lazily: only fetch the layers when no model was
    // carried over from the previous definition.
    let model = match model {
        Some(m) => m,
        None => {
            let layers: Vec<model::Layer> = client
                .post(format!("{}/tasks/runner/train/def/layers", config.hostname))
                .header("token", &config.token)
                .body(
                    json!({
                        "id": runner_data.id,
                        "taskId": task.id,
                        "defId": def.id,
                    })
                    .to_string(),
                )
                .send()
                .await?
                .json()
                .await?;

            build_model(layers, 0, true)
        }
    };

    // TODO CUDA
    // get device
    // Move model to cuda

    let mut opt = nn::Adam::default().build(&model.vs, 1e-3)?;

    let mut last_acc = 0.0;

    for epoch in 1..40 {
        data_loader.restart();
        let mut mean_loss: f64 = 0.0;
        let mut mean_acc: f64 = 0.0;
        while let Some((inputs, labels)) = data_loader.next() {
            let inputs = inputs
                .to_kind(tch::Kind::Float)
                .to_device(tch::Device::Cuda(0));
            let labels = labels
                .to_kind(tch::Kind::Float)
                .to_device(tch::Device::Cuda(0));
            let out = model.seq.forward(&inputs);
            let weight: Option<Tensor> = None;
            let loss = out.binary_cross_entropy(&labels, weight, tch::Reduction::Mean);
            opt.backward_step(&loss);
            mean_loss += loss
                .to_device(tch::Device::Cpu)
                .unsqueeze(0)
                .double_value(&[0]);

            let out = out.to_device(tch::Device::Cpu);

            let test = out.empty_like();
            _ = out.clone(&test);

            let out = test.argmax(1, true);

            let mut labels = labels.to_device(tch::Device::Cpu);

            labels = labels.unsqueeze(-1);

            let size = out.size()[0];

            let mut acc = 0;
            for i in 0..size {
                let res = out.double_value(&[i]);
                let exp = labels.double_value(&[i, res as i64]);
                if exp == 1.0 {
                    acc += 1;
                }
            }

            mean_acc += acc as f64 / size as f64;
            last_acc = acc as f64 / size as f64;
        }
        print!(
            "\repoch: {} loss: {} acc: {} l acc: {} ",
            epoch,
            mean_loss / data_loader.len as f64,
            mean_acc / data_loader.len as f64,
            last_acc
        );
        io::stdout().flush().expect("Unable to flush stdout");
    }

    println!("\nlast acc: {}", last_acc);

    return Ok(Some(model));
    /*

    opt, err := my_nn.DefaultAdamConfig().Build(model.Vs, 0.001)
    if err != nil {
        return
    }

    for epoch := 0; epoch < EPOCH_PER_RUN; epoch++ {
        var trainIter *torch.Iter2
        trainIter, err = ds.TrainIter(32)
        if err != nil {
            return
        }
        // trainIter.ToDevice(device)

        log.Info("epoch", "epoch", epoch)

        var trainLoss float64 = 0
        var trainCorrect float64 = 0
        ok := true
        for ok {
            var item torch.Iter2Item
            var loss *torch.Tensor
            item, ok = trainIter.Next()
            if !ok {
                continue
            }

            data := item.Data
            data, err = data.ToDevice(device, gotch.Float, false, true, false)
            if err != nil {
                return
            }

            var size []int64
            size, err = data.Size()
            if err != nil {
                return
            }

            var zeros *torch.Tensor
            zeros, err = torch.Zeros(size, gotch.Float, device)
            if err != nil {
                return
            }

            data, err = zeros.Add(data, true)
            if err != nil {
                return
            }

            log.Info("\n\nhere 1, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())

            data, err = data.SetRequiresGrad(true, false)
            if err != nil {
                return
            }

            log.Info("\n\nhere 2, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())

            err = data.RetainGrad(false)
            if err != nil {
                return
            }

            log.Info("\n\nhere 2, data\n\n", "retains", data.MustRetainsGrad(false), "requires", data.MustRequiresGrad())

            pred := model.ForwardT(data, true)
            pred, err = pred.SetRequiresGrad(true, true)
            if err != nil {
                return
            }

            err = pred.RetainGrad(false)
            if err != nil {
                return
            }

            label := item.Label
            label, err = label.ToDevice(device, gotch.Float, false, true, false)
            if err != nil {
                return
            }
            label, err = label.SetRequiresGrad(true, true)
            if err != nil {
                return
            }
            err = label.RetainGrad(false)
            if err != nil {
                return
            }

            // Calculate loss
            loss, err = pred.BinaryCrossEntropyWithLogits(label, &torch.Tensor{}, &torch.Tensor{}, 2, false)
            if err != nil {
                return
            }
            loss, err = loss.SetRequiresGrad(true, false)
            if err != nil {
                return
            }
            err = loss.RetainGrad(false)
            if err != nil {
                return
            }

            err = opt.ZeroGrad()
            if err != nil {
                return
            }

            err = loss.Backward()
            if err != nil {
                return
            }

            log.Info("pred grad", "pred", pred.MustGrad(false).MustMax(false).Float64Values())
            log.Info("pred grad", "outs", label.MustGrad(false).MustMax(false).Float64Values())
            log.Info("pred grad", "data", data.MustGrad(false).MustMax(false).Float64Values(), "lol", data.MustRetainsGrad(false))

            vars := model.Vs.Variables()

            for k, v := range vars {
                log.Info("[grad check]", "k", k, "grad", v.MustGrad(false).MustMax(false).Float64Values(), "lol", v.MustRetainsGrad(false))
            }

            model.Debug()

            err = opt.Step()
            if err != nil {
                return
            }

            trainLoss = loss.Float64Values()[0]

            // Calculate accuracy
            / *var p_pred, p_labels *torch.Tensor
            p_pred, err = pred.Argmax([]int64{1}, true, false)
            if err != nil {
                return
            }

            p_labels, err = item.Label.Argmax([]int64{1}, true, false)
            if err != nil {
                return
            }

            floats := p_pred.Float64Values()
            floats_labels := p_labels.Float64Values()

            for i := range floats {
                if floats[i] == floats_labels[i] {
                    trainCorrect += 1
                }
            } * /

            panic("fornow")
        }

        //v := []float64{}

        log.Info("model training epoch done loss", "loss", trainLoss, "correct", trainCorrect, "out", ds.TrainImagesSize, "accuracy", trainCorrect/float64(ds.TrainImagesSize))

        / *correct := int64(0)
        //torch.NoGrad(func() {
        ok = true
        testIter := ds.TestIter(64)
        for ok {
            var item torch.Iter2Item
            item, ok = testIter.Next()
            if !ok {
                continue
            }

            output := model.Forward(item.Data)

            var pred, labels *torch.Tensor
            pred, err = output.Argmax([]int64{1}, true, false)
            if err != nil {
                return
            }

            labels, err = item.Label.Argmax([]int64{1}, true, false)
            if err != nil {
                return
            }

            floats := pred.Float64Values()
            floats_labels := labels.Float64Values()

            for i := range floats {
                if floats[i] == floats_labels[i] {
                    correct += 1
                }
            }
        }

        accuracy = float64(correct) / float64(ds.TestImagesSize)

        log.Info("Eval accuracy", "accuracy", accuracy)

        err = def.UpdateAfterEpoch(db, accuracy*100)
        if err != nil {
            return
        }* /
        //})
    }

    result_path := path.Join(getDir(), "savedData", m.Id, "defs", def.Id)
    err = os.MkdirAll(result_path, os.ModePerm)
    if err != nil {
        return
    }

    err = my_torch.SaveModel(model, path.Join(result_path, "model.dat"))
    if err != nil {
        return
    }

    log.Info("Model finished training!", "accuracy", accuracy)
    return
    */
}
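// Worked example of the accuracy check in the training loop above: with a
// batch of 2 and one-hot labels [[0,1],[1,0]], argmax(out) == [1,0] indexes
// the label rows; labels[0][1] == 1.0 and labels[1][0] == 1.0, so acc == 2
// and the batch accuracy is 2/2 = 1.0.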
89
runner/src/types.rs
Normal file
@ -0,0 +1,89 @@
use crate::{model, tasks::Task, ConfigFile, RunnerData};
use anyhow::{bail, Result};
use serde::Deserialize;
use serde_json::json;
use serde_repr::{Deserialize_repr, Serialize_repr};
use std::sync::Arc;

#[derive(Clone, Copy, Deserialize_repr, Serialize_repr, Debug)]
#[repr(i8)]
pub enum DefinitionStatus {
    CanceldTraining = -4,
    FailedTraining = -3,
    PreInit = 1,
    Init = 2,
    Training = 3,
    PausedTraining = 6,
    Tranied = 4,
    Ready = 5,
}

#[derive(Deserialize, Debug)]
pub struct Definition {
    pub id: String,
    pub model_id: String,
    pub accuracy: f64,
    pub target_accuracy: i64,
    pub epoch: i64,
    pub status: i64,
    pub created: String,
    pub epoch_progress: i64,
}

impl Definition {
    pub async fn updateStatus(
        self: &mut Definition,
        task: &Task,
        config: Arc<ConfigFile>,
        runner_data: Arc<RunnerData>,
        status: DefinitionStatus,
    ) -> Result<()> {
        println!("Updating definition status");

        let client = reqwest::Client::new();

        let to_send = json!({
            "id": runner_data.id,
            "taskId": task.id,
            "defId": self.id,
            "status": status,
        });

        let resp = client
            .post(format!("{}/tasks/runner/train/def/status", config.hostname))
            .header("token", &config.token)
            .body(to_send.to_string())
            .send()
            .await?;

        if resp.status() != 200 {
            println!("Failed to update status of definition");
            bail!("Failed to update status of definition");
        }

        Ok(())
    }
}

#[derive(Clone, Copy, Deserialize_repr, Debug)]
#[repr(i8)]
pub enum ModelClassStatus {
    ToTrain = 1,
    Training = 2,
    Trained = 3,
}

#[derive(Deserialize, Debug)]
pub struct ModelClass {
    pub id: String,
    pub model_id: String,
    pub name: String,
    pub class_order: i64,
    pub status: ModelClassStatus,
}

#[derive(Deserialize, Debug)]
pub struct DataPointRequest {
    pub testing: Vec<model::DataPoint>,
    pub training: Vec<model::DataPoint>,
}
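// Usage sketch for Definition::updateStatus (illustrative; mirrors the call
// in training.rs):
//
//     def.updateStatus(task, config.clone(), runner_data.clone(),
//         DefinitionStatus::Training).await?;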
@ -38,3 +38,14 @@ create table if not exists tasks_dependencies (
	main_id uuid references tasks (id) on delete cascade not null,
	dependent_id uuid references tasks (id) on delete cascade not null
);

create table if not exists remote_runner (
	id uuid primary key default gen_random_uuid(),
	user_id uuid references users (id) on delete cascade not null,
	token text not null,

	-- 1: GPU
	type integer,

	created_on timestamp default current_timestamp
);
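-- Illustrative registration row (placeholder values; in practice the Rust
-- runner's /tasks/runner/register call creates this record):
-- insert into remote_runner (user_id, token, type)
--     values ('<user uuid>', '<runner token>', 1);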
@ -135,9 +135,9 @@ def addBlock(
        model.add(layers.ReLU())
    if top:
        if pooling_same:
            model.add(pool_func(pool_size=(2,2), padding="same", strides=(1, 1)))
        else:
            model.add(pool_func(pool_size=(2,2)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())
        model.add(layers.Dropout(0.4))
@ -172,7 +172,7 @@ model.compile(

his = model.fit(dataset, validation_data= dataset_validation, epochs={{.EPOCH_PER_RUN}}, callbacks=[
    NotifyServerCallback(),
    tf.keras.callbacks.EarlyStopping("loss", mode="min", patience=5)])

acc = his.history["accuracy"]
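# Shape note for the pooling change above (a sketch, not part of the template):
# with pool_size=(2,2) and default strides a 64x64 feature map halves to
# 32x32, while padding="same" with strides=(1,1) keeps it at 64x64.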