Compare commits


5 Commits
main...torch

34 changed files with 3495 additions and 432 deletions

.dockerignore Normal file

@@ -0,0 +1,5 @@
tmp/
testData/
savedData/
!savedData/.keep
fyp

DockerfileServer Normal file

@@ -0,0 +1,60 @@
FROM docker.io/nvidia/cuda:11.8.0-devel-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y wget sudo pkg-config libopencv-dev unzip python3-pip vim
RUN pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0
RUN mkdir /go
ENV GOPATH=/go
RUN wget https://go.dev/dl/go1.22.2.linux-amd64.tar.gz
RUN tar -xvf go1.22.2.linux-amd64.tar.gz -C /usr/local
ENV PATH=$PATH:/usr/local/go/bin
RUN mkdir /app
WORKDIR /app
ADD go.mod .
ADD go.sum .
ADD main.go .
ADD logic logic
RUN go install || true
WORKDIR /root
RUN wget https://git.andr3h3nriqu3s.com/andr3/gotch/raw/commit/22e75becf0432cda41a7c055a4d60ea435f76599/setup-libtorch.sh
RUN chmod +x setup-libtorch.sh
ENV CUDA_VER=11.8
ENV GOTCH_VER=v0.9.2
RUN bash setup-libtorch.sh
ENV GOTCH_LIBTORCH="/usr/local/lib/libtorch"
ENV REFRESH_SETUP=0
ENV LIBRARY_PATH="$LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
ENV CPATH="$CPATH:$GOTCH_LIBTORCH/lib:$GOTCH_LIBTORCH/include:$GOTCH_LIBTORCH/include/torch/csrc/api/include"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GOTCH_LIBTORCH/lib:/usr/lib64-nvidia:/usr/local/cuda-${CUDA_VERSION}/lib64"
RUN wget https://git.andr3h3nriqu3s.com/andr3/gotch/raw/branch/master/setup-gotch.sh
RUN chmod +x setup-gotch.sh
RUN echo 'root ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
RUN bash setup-gotch.sh
RUN ln -s /usr/local/lib/libtorch/include/torch/csrc /usr/local/lib/libtorch/include/torch/csrc/api/include/torch
RUN mkdir -p /go/pkg/mod/git.andr3h3nriqu3s.com/andr3/gotch@v0.9.2/libtch/libtorch/include/torch/csrc/api
RUN find /usr/local/lib/libtorch/include -maxdepth 1 -type d | tail -n +2 | grep -ve 'torch$' | xargs -I{} ln -s {} /go/pkg/mod/git.andr3h3nriqu3s.com/andr3/gotch@v0.9.2/libtch/libtorch/include
RUN ln -s /usr/local/lib/libtorch/include/torch/csrc/api/include /go/pkg/mod/git.andr3h3nriqu3s.com/andr3/gotch@v0.9.2/libtch/libtorch/include/torch/csrc/api/include
RUN find /usr/local/lib/libtorch/include/torch -maxdepth 1 -type f | xargs -I{} ln -s {} /go/pkg/mod/git.andr3h3nriqu3s.com/andr3/gotch@v0.9.2/libtch/libtorch/include/torch
RUN ln -s /usr/local/lib/libtorch/lib/libcudnn.so.8 /usr/local/lib/libcudnn.so
WORKDIR /app
ENV CGO_CXXFLAGS="-I/usr/local/lib/libtorch/include/torch/csrc/api/include/ -I/usr/local/lib/libtorch/include"
ENV CGO_CFLAGS="-I/usr/local/lib/libtorch/include/torch/csrc/api/include/ -I/usr/local/lib/libtorch/include"
ADD . .
RUN go build -x || true
CMD ["bash", "-c", "go run ."]

go.mod

@@ -4,11 +4,10 @@ go 1.21
 require (
 	github.com/charmbracelet/log v0.3.1
-	github.com/galeone/tensorflow/tensorflow/go v0.0.0-20240119075110-6ad3cf65adfe
-	github.com/galeone/tfgo v0.0.0-20230715013254-16113111dc99
 	github.com/google/uuid v1.6.0
 	github.com/lib/pq v1.10.9
 	golang.org/x/crypto v0.19.0
+	git.andr3h3nriqu3s.com/andr3/gotch v0.9.2
 )
 require (

go.sum

@@ -1,3 +1,7 @@
+git.andr3h3nriqu3s.com/andr3/gotch v0.9.1 h1:1q34JKV8cX80n7LXbJswlXCiRtNbzcvJ/vbgb6an1tA=
+git.andr3h3nriqu3s.com/andr3/gotch v0.9.1/go.mod h1:FXusE3CHt8NLf5wynUGaHtIbToRuYifsZaC5EZH0pJY=
+git.andr3h3nriqu3s.com/andr3/gotch v0.9.2 h1:aZcsPgDVGVhrEFoer0upSkzPqJWNMxdUHRktP4s6MSc=
+git.andr3h3nriqu3s.com/andr3/gotch v0.9.2/go.mod h1:FXusE3CHt8NLf5wynUGaHtIbToRuYifsZaC5EZH0pJY=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
@@ -13,12 +17,6 @@ github.com/charmbracelet/log v0.3.1/go.mod h1:OR4E1hutLsax3ZKpXbgUqPtTjQfrh1pG3z
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
 github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
-github.com/galeone/tensorflow/tensorflow/go v0.0.0-20221023090153-6b7fa0680c3e h1:9+2AEFZymTi25FIIcDwuzcOPH04z9+fV6XeLiGORPDI=
-github.com/galeone/tensorflow/tensorflow/go v0.0.0-20221023090153-6b7fa0680c3e/go.mod h1:TelZuq26kz2jysARBwOrTv16629hyUsHmIoj54QqyFo=
-github.com/galeone/tensorflow/tensorflow/go v0.0.0-20240119075110-6ad3cf65adfe h1:7yELf1NFEwECpXMGowkoftcInMlVtLTCdwWLmxKgzNM=
-github.com/galeone/tensorflow/tensorflow/go v0.0.0-20240119075110-6ad3cf65adfe/go.mod h1:TelZuq26kz2jysARBwOrTv16629hyUsHmIoj54QqyFo=
-github.com/galeone/tfgo v0.0.0-20230715013254-16113111dc99 h1:8Bt1P/zy1gb37L4n8CGgp1qmFwBV5729kxVfj0sqhJk=
-github.com/galeone/tfgo v0.0.0-20230715013254-16113111dc99/go.mod h1:3YgYBeIX42t83uP27Bd4bSMxTnQhSbxl0pYSkCDB1tc=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
@@ -74,7 +72,9 @@ github.com/rivo/uniseg v0.4.6 h1:Sovz9sDSwbOz9tgUy8JpT+KgCkPYJEN/oYzlJiYTNLg=
 github.com/rivo/uniseg v0.4.6/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=

lib Symbolic link

@@ -0,0 +1 @@
/usr/local/lib

lib.go.back Normal file

@@ -0,0 +1,10 @@
package libtch
// #cgo LDFLAGS: -lstdc++ -ltorch -lc10 -ltorch_cpu -L${SRCDIR}/libtorch/lib
// #cgo LDFLAGS: -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcudnn -lcaffe2_nvrtc -lnvrtc-builtins -lnvrtc -lnvToolsExt -lc10_cuda -ltorch_cuda
// #cgo CFLAGS: -I${SRCDIR} -O3 -Wall -Wno-unused-variable -Wno-deprecated-declarations -Wno-c++11-narrowing -g -Wno-sign-compare -Wno-unused-function
// #cgo CFLAGS: -D_GLIBCXX_USE_CXX11_ABI=0
// #cgo CFLAGS: -I/usr/local/cuda/include
// #cgo CXXFLAGS: -std=c++17 -I${SRCDIR} -g -O3
// #cgo CXXFLAGS: -I${SRCDIR}/libtorch/lib -I${SRCDIR}/libtorch/include -I${SRCDIR}/libtorch/include/torch/csrc/api/include -I/opt/libtorch/include/torch/csrc/api/include
import "C"

@@ -6,3 +6,19 @@ const (
 	DATA_POINT_MODE_TRAINING DATA_POINT_MODE = 1
 	DATA_POINT_MODE_TESTING                  = 2
 )
+
+type ModelClassStatus int
+
+const (
+	CLASS_STATUS_TO_TRAIN ModelClassStatus = iota + 1
+	CLASS_STATUS_TRAINING
+	CLASS_STATUS_TRAINED
+)
+
+type ModelClass struct {
+	Id         string `db:"mc.id"`
+	ModelId    string `db:"mc.model_id"`
+	Name       string `db:"mc.name"`
+	ClassOrder int    `db:"mc.class_order"`
+	Status     int    `db:"mc.status"`
+}
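
Note on the `iota + 1` pattern above: it yields the same numeric values (1, 2, 3) as the MODEL_CLASS_STATUS_* constants this branch deletes from model.go further down, so existing rows in model_classes keep their meaning. A standalone sketch, just to make the values explicit:

package main

import "fmt"

type ModelClassStatus int

const (
	CLASS_STATUS_TO_TRAIN ModelClassStatus = iota + 1 // 1
	CLASS_STATUS_TRAINING                             // 2
	CLASS_STATUS_TRAINED                              // 3
)

func main() {
	// Prints: 1 2 3, matching the removed MODEL_CLASS_STATUS_* values.
	fmt.Println(CLASS_STATUS_TO_TRAIN, CLASS_STATUS_TRAINING, CLASS_STATUS_TRAINED)
}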

@@ -0,0 +1,95 @@
package dbtypes
import (
"time"
"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
)
type DefinitionStatus int
const (
DEFINITION_STATUS_CANCELD_TRAINING DefinitionStatus = -4
DEFINITION_STATUS_FAILED_TRAINING = -3
DEFINITION_STATUS_PRE_INIT = 1
DEFINITION_STATUS_INIT = 2
DEFINITION_STATUS_TRAINING = 3
DEFINITION_STATUS_PAUSED_TRAINING = 6
DEFINITION_STATUS_TRANIED = 4
DEFINITION_STATUS_READY = 5
)
type Definition struct {
Id string `db:"md.id"`
ModelId string `db:"md.model_id"`
Accuracy float64 `db:"md.accuracy"`
TargetAccuracy int `db:"md.target_accuracy"`
Epoch int `db:"md.epoch"`
Status int `db:"md.status"`
CreatedOn time.Time `db:"md.created_on"`
EpochProgress int `db:"md.epoch_progress"`
}
type SortByAccuracyDefinitions []*Definition
func (nf SortByAccuracyDefinitions) Len() int { return len(nf) }
func (nf SortByAccuracyDefinitions) Swap(i, j int) { nf[i], nf[j] = nf[j], nf[i] }
func (nf SortByAccuracyDefinitions) Less(i, j int) bool {
return nf[i].Accuracy < nf[j].Accuracy
}
func GetDefinition(db db.Db, definition_id string) (definition Definition, err error) {
err = GetDBOnce(db, &definition, "model_definition as md where id=$1;", definition_id)
return
}
func MakeDefenition(db db.Db, model_id string, target_accuracy int) (definition Definition, err error) {
var NewDefinition = struct {
ModelId string `db:"model_id"`
TargetAccuracy int `db:"target_accuracy"`
}{ModelId: model_id, TargetAccuracy: target_accuracy}
id, err := InsertReturnId(db, &NewDefinition, "model_definition", "id")
if err != nil {
return
}
return GetDefinition(db, id)
}
func (d Definition) UpdateStatus(db db.Db, status DefinitionStatus) (err error) {
_, err = db.Exec("update model_definition set status=$1 where id=$2", status, d.Id)
return
}
func (d Definition) MakeLayer(db db.Db, layer_order int, layer_type LayerType, shape string) (layer Layer, err error) {
var NewLayer = struct {
DefinitionId string `db:"def_id"`
LayerOrder int `db:"layer_order"`
LayerType LayerType `db:"layer_type"`
Shape string `db:"shape"`
}{
DefinitionId: d.Id,
LayerOrder: layer_order,
LayerType: layer_type,
Shape: shape,
}
id, err := InsertReturnId(db, &NewLayer, "model_definition_layer", "id")
if err != nil {
return
}
return GetLayer(db, id)
}
func (d Definition) GetLayers(db db.Db, filter string, args ...any) (layer []*Layer, err error) {
args = append(args, d.Id)
return GetDbMultitple[Layer](db, "model_definition_layer as mdl where mdl.def_id=$1 "+filter, args...)
}
func (d *Definition) UpdateAfterEpoch(db db.Db, accuracy float64) (err error) {
d.Accuracy = accuracy
d.Epoch += 1
_, err = db.Exec("update model_definition set epoch=$1, accuracy=$2 where id=$3", d.Epoch, d.Accuracy, d.Id)
return
}
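
Note: the helpers above compose into a create-and-initialize flow. A minimal sketch, assuming it sits in the dbtypes package next to these functions with a live db.Db handle; the target accuracy and layer shapes are made up:

func setupDefinition(d db.Db, modelId string) error {
	// Create the row, attach layers in order, then mark the definition initialized.
	def, err := MakeDefenition(d, modelId, 95)
	if err != nil {
		return err
	}
	if _, err = def.MakeLayer(d, 1, LAYER_INPUT, ShapeToString(3, 28, 28)); err != nil {
		return err
	}
	if _, err = def.MakeLayer(d, 2, LAYER_FLATTEN, ""); err != nil {
		return err
	}
	if _, err = def.MakeLayer(d, 3, LAYER_DENSE, ShapeToString(10)); err != nil {
		return err
	}
	return def.UpdateStatus(d, DEFINITION_STATUS_INIT)
}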

logic/db_types/layer.go Normal file

@@ -0,0 +1,50 @@
package dbtypes
import (
"encoding/json"
"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
)
type LayerType int
const (
LAYER_INPUT LayerType = 1
LAYER_DENSE = 2
LAYER_FLATTEN = 3
LAYER_SIMPLE_BLOCK = 4
)
type Layer struct {
Id string `db:"mdl.id"`
DefinitionId string `db:"mdl.def_id"`
LayerOrder string `db:"mdl.layer_order"`
LayerType LayerType `db:"mdl.layer_type"`
Shape string `db:"mdl.shape"`
ExpType string `db:"mdl.exp_type"`
}
func ShapeToString(args ...int) string {
text, err := json.Marshal(args)
if err != nil {
panic("Could not generate Shape")
}
return string(text)
}
func StringToShape(str string) (shape []int64) {
err := json.Unmarshal([]byte(str), &shape)
if err != nil {
panic("Could not parse Shape")
}
return
}
func (l Layer) GetShape() []int64 {
return StringToShape(l.Shape)
}
func GetLayer(db db.Db, layer_id string) (layer Layer, err error) {
err = GetDBOnce(db, &layer, "model_definition_layer as mdl where mdl.id=$1", layer_id)
return
}
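
Note: the shape helpers just round-trip through JSON, so a shape stored in the text column comes back unchanged; a sketch:

func shapeRoundTrip() []int64 {
	s := ShapeToString(3, 28, 28) // "[3,28,28]"
	return StringToShape(s)       // []int64{3, 28, 28}
}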

@@ -1,9 +1,12 @@
 package dbtypes

 import (
-	"errors"
+	"fmt"
+	"os"
+	"path"

 	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
+	"github.com/jackc/pgx/v5"
 )

 const (
@@ -24,36 +27,6 @@ const (
 	READY_RETRAIN_FAILED = -7
 )

-type ModelDefinitionStatus int
-
-type LayerType int
-
-const (
-	LAYER_INPUT        LayerType = 1
-	LAYER_DENSE                  = 2
-	LAYER_FLATTEN                = 3
-	LAYER_SIMPLE_BLOCK           = 4
-)
-
-const (
-	MODEL_DEFINITION_STATUS_CANCELD_TRAINING ModelDefinitionStatus = -4
-	MODEL_DEFINITION_STATUS_FAILED_TRAINING                        = -3
-	MODEL_DEFINITION_STATUS_PRE_INIT                               = 1
-	MODEL_DEFINITION_STATUS_INIT                                   = 2
-	MODEL_DEFINITION_STATUS_TRAINING                               = 3
-	MODEL_DEFINITION_STATUS_PAUSED_TRAINING                        = 6
-	MODEL_DEFINITION_STATUS_TRANIED                                = 4
-	MODEL_DEFINITION_STATUS_READY                                  = 5
-)
-
-type ModelClassStatus int
-
-const (
-	MODEL_CLASS_STATUS_TO_TRAIN ModelClassStatus = 1
-	MODEL_CLASS_STATUS_TRAINING                  = 2
-	MODEL_CLASS_STATUS_TRAINED                   = 3
-)
-
 type ModelHeadStatus int

 const (
@@ -78,8 +51,6 @@ type BaseModel struct {
 	CanTrain int `db:"can_train"`
 }

-var ModelNotFoundError = errors.New("Model not found error")
-
 func GetBaseModel(db db.Db, id string) (base *BaseModel, err error) {
 	var model BaseModel
 	err = GetDBOnce(db, &model, "models where id=$1", id)
@@ -97,11 +68,104 @@ func (m BaseModel) CanEval() bool {
 	return true
 }

+func (m BaseModel) removeFailedDataPoints(c BasePack) (err error) {
+	rows, err := c.GetDb().Query("select mdp.id from model_data_point as mdp join model_classes as mc on mc.id=mdp.class_id where mc.model_id=$1 and mdp.status=-1;", m.Id)
+	if err != nil {
+		return
+	}
+	defer rows.Close()
+
+	base_path := path.Join("savedData", m.Id, "data")
+
+	for rows.Next() {
+		var dataPointId string
+		err = rows.Scan(&dataPointId)
+		if err != nil {
+			return
+		}
+		p := path.Join(base_path, dataPointId+"."+m.Format)
+		c.GetLogger().Warn("Removing image", "path", p)
+		err = os.RemoveAll(p)
+		if err != nil {
+			return
+		}
+	}
+
+	_, err = c.GetDb().Exec("delete from model_data_point as mdp using model_classes as mc where mdp.class_id = mc.id and mc.model_id=$1 and mdp.status=-1;", m.Id)
+	return
+}
+
+// DO NOT pass unfiltered user data in filters
+func (m BaseModel) GetDefinitions(db db.Db, filters string, args ...any) ([]*Definition, error) {
+	n_args := []any{m.Id}
+	n_args = append(n_args, args...)
+	return GetDbMultitple[Definition](db, fmt.Sprintf("model_definition as md where md.model_id=$1 %s", filters), n_args...)
+}
+
+// DO NOT pass unfiltered user data in filters
+func (m BaseModel) GetClasses(db db.Db, filters string, args ...any) ([]*ModelClass, error) {
+	n_args := []any{m.Id}
+	n_args = append(n_args, args...)
+	return GetDbMultitple[ModelClass](db, fmt.Sprintf("model_classes as mc where mc.model_id=$1 %s", filters), n_args...)
+}
+
+type DataPointIterator struct {
+	rows  pgx.Rows
+	Model BaseModel
+}
+
+type DataPoint struct {
+	Class int
+	Path  string
+}
+
+func (iter DataPointIterator) Close() {
+	iter.rows.Close()
+}
+
+func (m BaseModel) DataPoints(db db.Db, mode DATA_POINT_MODE) (data []DataPoint, err error) {
+	rows, err := db.Query(
+		"select mdp.id, mc.class_order, mdp.file_path from model_data_point as mdp inner "+
+			"join model_classes as mc on mc.id = mdp.class_id "+
+			"where mc.model_id = $1 and mdp.model_mode=$2;",
+		m.Id, mode)
+	if err != nil {
+		return
+	}
+	defer rows.Close()
+
+	data = []DataPoint{}
+
+	for rows.Next() {
+		var id string
+		var class_order int
+		var file_path string
+		if err = rows.Scan(&id, &class_order, &file_path); err != nil {
+			return
+		}
+		if file_path == "id://" {
+			data = append(data, DataPoint{
+				Path:  path.Join("./savedData", m.Id, "data", id+"."+m.Format),
+				Class: class_order,
+			})
+		} else {
+			panic("TODO remote file path")
+		}
+	}
+	return
+}
+
+const RGB string = "rgb"
+const GRAY string = "greyscale"
+
 func StringToImageMode(colorMode string) int {
 	switch colorMode {
-	case "greyscale":
+	case GRAY:
 		return 1
-	case "rgb":
+	case RGB:
 		return 3
 	default:
 		panic("unkown color mode")
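
Note: DataPoints resolves the `id://` convention to a file under savedData/<model-id>/data. A usage sketch (same package, hypothetical helper):

func printTrainingSet(d db.Db, modelId string) error {
	m, err := GetBaseModel(d, modelId)
	if err != nil {
		return err
	}
	points, err := m.DataPoints(d, DATA_POINT_MODE_TRAINING)
	if err != nil {
		return err
	}
	for _, p := range points {
		fmt.Println(p.Class, p.Path) // class_order and on-disk image path
	}
	return nil
}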

@@ -14,11 +14,13 @@ import (
 	"github.com/charmbracelet/log"
 	"github.com/google/uuid"
 	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgconn"

 	db "git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
 )

 type BasePack interface {
+	db.Db
 	GetDb() db.Db
 	GetLogger() *log.Logger
 	GetHost() string
@@ -42,6 +44,18 @@ func (b BasePackStruct) GetLogger() *log.Logger {
 	return b.Logger
 }

+func (c BasePackStruct) Query(query string, args ...any) (pgx.Rows, error) {
+	return c.Db.Query(query, args...)
+}
+
+func (c BasePackStruct) Exec(query string, args ...any) (pgconn.CommandTag, error) {
+	return c.Db.Exec(query, args...)
+}
+
+func (c BasePackStruct) Begin() (pgx.Tx, error) {
+	return c.Db.Begin()
+}
+
 func CheckEmpty(f url.Values, path string) bool {
 	return !f.Has(path) || f.Get(path) == ""
 }
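
Note: embedding db.Db in BasePack, plus the Query/Exec/Begin forwarders on BasePackStruct, is what lets a handler Context be passed straight to helpers such as GetDBOnce that take a db.Db. A compile-time assertion sketch, assuming db.Db's method set is covered by those three forwarders:

// Hypothetical check; it fails to compile if BasePackStruct stops satisfying db.Db.
var _ db.Db = BasePackStruct{}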

@@ -7,15 +7,15 @@ import (
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
 )

-type ModelClass struct {
+type ModelClassJSON struct {
 	Id      string `json:"id"`
 	ModelId string `json:"model_id" db:"model_id"`
 	Name    string `json:"name"`
 	Status  int    `json:"status"`
 }

-func ListClasses(c BasePack, model_id string) (cls []*ModelClass, err error) {
-	return GetDbMultitple[ModelClass](c.GetDb(), "model_classes where model_id=$1", model_id)
+func ListClassesJSON(c BasePack, model_id string) (cls []*ModelClassJSON, err error) {
+	return GetDbMultitple[ModelClassJSON](c.GetDb(), "model_classes where model_id=$1", model_id)
 }

 func ModelHasDataPoints(db db.Db, model_id string) (result bool, err error) {

@@ -435,7 +435,7 @@ func handleDataUpload(handle *Handle) {
 	}
 	model, err := GetBaseModel(handle.Db, id)
-	if err == ModelNotFoundError {
+	if err == NotFoundError {
 		return c.SendJSONStatus(http.StatusNotFound, "Model not found")
 	} else if err != nil {
 		return c.Error500(err)
@@ -468,7 +468,7 @@ func handleDataUpload(handle *Handle) {
 	}
 	PostAuthJson(handle, "/models/data/class/new", User_Normal, func(c *Context, obj *CreateNewEmptyClass) *Error {
 		model, err := GetBaseModel(c.Db, obj.Id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.JsonBadRequest("Model not found")
 		} else if err != nil {
 			return c.E500M("Failed to get model information", err)
@@ -495,7 +495,7 @@ func handleDataUpload(handle *Handle) {
 			return c.E500M("Could not create class", err)
 		}
-		var modelClass model_classes.ModelClass
+		var modelClass model_classes.ModelClassJSON
 		err = GetDBOnce(c, &modelClass, "model_classes where id=$1;", id)
 		if err != nil {
 			return c.E500M("Failed to get class information but class was creted", err)
@@ -518,7 +518,7 @@ func handleDataUpload(handle *Handle) {
 		c.Logger.Info("model", "model", *model_id)
 		model, err := GetBaseModel(c.Db, *model_id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.JsonBadRequest("Could not find the model")
 		} else if err != nil {
 			return c.E500M("Error getting model information", err)
@@ -626,7 +626,7 @@ func handleDataUpload(handle *Handle) {
 		c.Logger.Info("Trying to expand model", "id", id)
 		model, err := GetBaseModel(handle.Db, id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.SendJSONStatus(http.StatusNotFound, "Model not found")
 		} else if err != nil {
 			return c.Error500(err)
@@ -670,7 +670,7 @@ func handleDataUpload(handle *Handle) {
 		}
 		model, err := GetBaseModel(handle.Db, dat.Id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.SendJSONStatus(http.StatusNotFound, "Model not found")
 		} else if err != nil {
 			return c.Error500(err)
@@ -704,7 +704,7 @@ func handleDataUpload(handle *Handle) {
 				return c.Error500(err)
 			}
 		} else {
-			_, err = handle.Db.Exec("delete from model_classes where model_id=$1 and status=$2;", model.Id, MODEL_CLASS_STATUS_TO_TRAIN)
+			_, err = handle.Db.Exec("delete from model_classes where model_id=$1 and status=$2;", model.Id, CLASS_STATUS_TO_TRAIN)
 			if err != nil {
 				return c.Error500(err)
 			}

@@ -24,7 +24,7 @@ func handleEdit(handle *Handle) {
 			return c.Error500(err)
 		}
-		cls, err := model_classes.ListClasses(c, model.Id)
+		cls, err := model_classes.ListClassesJSON(c, model.Id)
 		if err != nil {
 			return c.Error500(err)
 		}
@@ -35,9 +35,9 @@ func handleEdit(handle *Handle) {
 		}
 		type ReturnType struct {
-			Classes               []*model_classes.ModelClass `json:"classes"`
+			Classes               []*model_classes.ModelClassJSON `json:"classes"`
 			HasData               bool `json:"has_data"`
 			NumberOfInvalidImages int  `json:"number_of_invalid_images"`
 		}
 		c.ShowMessage = false
@@ -109,7 +109,7 @@ func handleEdit(handle *Handle) {
 			layers := []layerdef{}
 			for _, def := range defs {
-				if def.Status == MODEL_DEFINITION_STATUS_TRAINING {
+				if def.Status == DEFINITION_STATUS_TRAINING {
 					rows, err := c.Db.Query("select id, layer_type, shape from model_definition_layer where def_id=$1 order by layer_order asc;", def.Id)
 					if err != nil {
 						return c.Error500(err)
@@ -166,7 +166,7 @@ func handleEdit(handle *Handle) {
 			for i, def := range defs {
 				var lay *[]layerdef = nil
-				if def.Status == MODEL_DEFINITION_STATUS_TRAINING && !setLayers {
+				if def.Status == DEFINITION_STATUS_TRAINING && !setLayers {
 					lay = &layers
 					setLayers = true
 				}

@@ -11,7 +11,7 @@ import (
 func handleRest(handle *Handle) {
 	DeleteAuthJson(handle, "/models/train/reset", User_Normal, func(c *Context, dat *JustId) *Error {
 		model, err := GetBaseModel(c.Db, dat.Id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.JsonBadRequest("Model not found")
 		} else if err != nil {
 			return c.E500M("Failed to get model", err)

@@ -0,0 +1,179 @@
package imageloader
import (
"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
types "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
"git.andr3h3nriqu3s.com/andr3/gotch"
torch "git.andr3h3nriqu3s.com/andr3/gotch/ts"
"git.andr3h3nriqu3s.com/andr3/gotch/vision"
)
type Dataset struct {
TrainImages *torch.Tensor
TrainLabels *torch.Tensor
TestImages *torch.Tensor
TestLabels *torch.Tensor
TrainImagesSize int
TestImagesSize int
Device gotch.Device
}
func LoadImagesAndLables(db db.Db, m *types.BaseModel, mode types.DATA_POINT_MODE, classStart int, classEnd int) (imgs, labels *torch.Tensor, count int, err error) {
	train_points, err := m.DataPoints(db, mode)
if err != nil {
return
}
size := int64(classEnd - classStart + 1)
pimgs := []*torch.Tensor{}
plabels := []*torch.Tensor{}
for _, point := range train_points {
var img, label *torch.Tensor
img, err = vision.Load(point.Path)
if err != nil {
return
}
pimgs = append(pimgs, img)
t_label := make([]int, size)
if point.Class <= classEnd && point.Class >= classStart {
t_label[point.Class-classStart] = 1
}
label, err = torch.OfSlice(t_label)
if err != nil {
return
}
plabels = append(plabels, label)
}
	count = len(pimgs)

	// Stack the per-image tensors into {N, C, H, W}; labels into {N, size}.
	imgs, err = torch.Stack(pimgs, 0)
	if err != nil {
		return
	}

	labels, err = torch.Stack(plabels, 0)
	if err != nil {
		return
	}
imgs, err = imgs.ToDtype(gotch.Float, false, false, true)
if err != nil {
return
}
labels, err = labels.ToDtype(gotch.Float, false, false, true)
if err != nil {
return
}
return
}
func NewDataset(db db.Db, m *types.BaseModel, classStart int, classEnd int) (ds *Dataset, err error) {
trainImages, trainLabels, train_count, err := LoadImagesAndLables(db, m, types.DATA_POINT_MODE_TRAINING, classStart, classEnd)
if err != nil {
return
}
testImages, testLabels, test_count, err := LoadImagesAndLables(db, m, types.DATA_POINT_MODE_TESTING, classStart, classEnd)
if err != nil {
return
}
ds = &Dataset{
TrainImages: trainImages,
TrainLabels: trainLabels,
TestImages: testImages,
TestLabels: testLabels,
TrainImagesSize: train_count,
TestImagesSize: test_count,
Device: gotch.CPU,
}
return
}
func (ds *Dataset) To(device gotch.Device) (err error) {
ds.TrainImages, err = ds.TrainImages.ToDevice(device, ds.TrainImages.DType(), device.IsCuda(), true, true)
if err != nil {
return
}
ds.TrainLabels, err = ds.TrainLabels.ToDevice(device, ds.TrainLabels.DType(), device.IsCuda(), true, true)
if err != nil {
return
}
ds.TestImages, err = ds.TestImages.ToDevice(device, ds.TestImages.DType(), device.IsCuda(), true, true)
if err != nil {
return
}
ds.TestLabels, err = ds.TestLabels.ToDevice(device, ds.TestLabels.DType(), device.IsCuda(), true, true)
if err != nil {
return
}
ds.Device = device
return
}
func (ds *Dataset) TestIter(batchSize int64) *torch.Iter2 {
return torch.MustNewIter2(ds.TestImages, ds.TestLabels, batchSize)
}
func (ds *Dataset) TrainIter(batchSize int64) (iter *torch.Iter2, err error) {
// Create a clone of the trainimages
size, err := ds.TrainImages.Size()
if err != nil {
return
}
train_images, err := torch.Zeros(size, gotch.Float, ds.Device)
if err != nil {
return
}
ds.TrainImages, err = ds.TrainImages.Clone(train_images, false)
if err != nil {
return
}
// Create a clone of the labels
size, err = ds.TrainLabels.Size()
if err != nil {
return
}
train_labels, err := torch.Zeros(size, gotch.Float, ds.Device)
if err != nil {
return
}
ds.TrainLabels, err = ds.TrainLabels.Clone(train_labels, false)
if err != nil {
return
}
iter, err = torch.NewIter2(train_images, train_labels, batchSize)
if err != nil {
return
}
return
}
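
Note: a sketch of how the loader is meant to be driven; the batch size and class range are made up, and the Iter2 loop follows the upstream gotch examples:

func runEpoch(d db.Db, m *types.BaseModel) error {
	ds, err := NewDataset(d, m, 0, 9) // classes 0..9
	if err != nil {
		return err
	}
	if err = ds.To(gotch.CudaIfAvailable()); err != nil {
		return err
	}
	iter, err := ds.TrainIter(64)
	if err != nil {
		return err
	}
	for {
		item, ok := iter.Next()
		if !ok {
			break
		}
		// item.Data is the {64, ...} image batch, item.Label the matching labels.
		item.Data.MustDrop()
		item.Label.MustDrop()
	}
	return nil
}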

@@ -0,0 +1,174 @@
package my_nn
// linear is a fully-connected layer
import (
"math"
"git.andr3h3nriqu3s.com/andr3/gotch/nn"
"git.andr3h3nriqu3s.com/andr3/gotch/ts"
"github.com/charmbracelet/log"
)
// LinearConfig is a configuration for a linear layer
type LinearConfig struct {
	WsInit nn.Init // initial weights
BsInit nn.Init // optional initial bias
Bias bool
}
// DefaultLinearConfig creates default LinearConfig with
// weights initiated using KaimingUniform and Bias is set to true
func DefaultLinearConfig() *LinearConfig {
negSlope := math.Sqrt(5)
return &LinearConfig{
// NOTE. KaimingUniform cause mem leak due to ts.Uniform()!!!
// Avoid using it now.
WsInit: nn.NewKaimingUniformInit(nn.WithKaimingNegativeSlope(negSlope)),
BsInit: nil,
Bias: true,
}
}
// Linear is a linear fully-connected layer
type Linear struct {
Ws *ts.Tensor
weight_name string
Bs *ts.Tensor
bias_name string
}
// NewLinear creates a new linear layer
// y = x*wT + b
// inDim - input dimension (x) [input features - columns]
// outDim - output dimension (y) [output features - columns]
// NOTE: w will have shape{outDim, inDim}; b will have shape{outDim}
func NewLinear(vs *Path, inDim, outDim int64, c *LinearConfig) *Linear {
var bias_name string
var bs *ts.Tensor
var err error
if c.Bias {
switch {
case c.BsInit == nil:
shape := []int64{inDim, outDim}
fanIn, _, err := nn.CalculateFans(shape)
or_panic(err)
bound := 0.0
if fanIn > 0 {
bound = 1 / math.Sqrt(float64(fanIn))
}
bsInit := nn.NewUniformInit(-bound, bound)
bs, bias_name, err = vs.NewVarNamed("bias", []int64{outDim}, bsInit)
or_panic(err)
// Find better way to do this
bs, err = bs.T(true)
or_panic(err)
bs, err = bs.T(true)
or_panic(err)
bs, err = bs.SetRequiresGrad(true, true)
or_panic(err)
err = bs.RetainGrad(false)
or_panic(err)
vs.varstore.UpdateVarTensor(bias_name, bs, true)
case c.BsInit != nil:
bs, bias_name, err = vs.NewVarNamed("bias", []int64{outDim}, c.BsInit)
or_panic(err)
}
}
ws, weight_name, err := vs.NewVarNamed("weight", []int64{outDim, inDim}, c.WsInit)
or_panic(err)
ws, err = ws.T(true)
or_panic(err)
ws, err = ws.SetRequiresGrad(true, true)
or_panic(err)
err = ws.RetainGrad(false)
or_panic(err)
vs.varstore.UpdateVarTensor(weight_name, ws, true)
return &Linear{
Ws: ws,
weight_name: weight_name,
Bs: bs,
bias_name: bias_name,
}
}
func (l *Linear) Debug() {
log.Info("Ws", "ws", l.Ws.MustGrad(false).MustMax(false).Float64Values())
log.Info("Bs", "bs", l.Bs.MustGrad(false).MustMax(false).Float64Values())
}
func (l *Linear) ExtractFromVarstore(vs *VarStore) {
l.Ws = vs.GetTensorOfVar(l.weight_name)
l.Bs = vs.GetTensorOfVar(l.bias_name)
}
// Implement `Module` for `Linear` struct:
// =======================================
// Forward proceeds input node through linear layer.
// NOTE:
// - It assumes that node has dimensions of 2 (matrix).
// To make it work for matrix multiplication, input node should
// has same number of **column** as number of **column** in
// `LinearLayer` `Ws` property as weights matrix will be
// transposed before multiplied to input node. (They are all used `inDim`)
// - Input node should have shape of `shape{batch size, input features}`.
// (shape{batchSize, inDim}). The input features is `inDim` while the
// output feature is `outDim` in `LinearConfig` struct.
//
// Example:
//
// inDim := 3
// outDim := 2
// batchSize := 4
// weights: 2x3
// [ 1 1 1
// 1 1 1 ]
//
// input node: 3x4
// [ 1 1 1
// 1 1 1
// 1 1 1
// 1 1 1 ]
func (l *Linear) Forward(xs *ts.Tensor) (retVal *ts.Tensor) {
mul, err := xs.Matmul(l.Ws, false)
or_panic(err)
if l.Bs != nil {
mul, err = mul.Add(l.Bs, false)
or_panic(err)
}
out, err := mul.Relu(false)
or_panic(err)
return out
}
// ForwardT implements ModuleT interface for Linear layer.
//
// NOTE: train param will not be used.
func (l *Linear) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) {
mul, err := xs.Matmul(l.Ws, true)
or_panic(err)
mul, err = mul.Add(l.Bs, true)
or_panic(err)
out, err := mul.Relu(true)
or_panic(err)
return out
}
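
Note: NewLinear stores Ws already transposed to {inDim, outDim}, so Forward is a plain x·W (+ b) followed by ReLU. A shape sanity check with raw tensors, independent of the varstore and assuming a gotch import alongside the ts one:

// {4, 3} x {3, 2} -> {4, 2}: batch of 4, inDim 3, outDim 2.
x := ts.MustOnes([]int64{4, 3}, gotch.Float, gotch.CPU)
w := ts.MustOnes([]int64{3, 2}, gotch.Float, gotch.CPU)
y := x.MustMatmul(w, true)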

@@ -0,0 +1,603 @@
package my_nn
// Optimizers to be used for gradient-descent based training.
import (
"fmt"
"math"
"github.com/charmbracelet/log"
"git.andr3h3nriqu3s.com/andr3/gotch/ts"
)
// Optimizer is a struct object to run gradient descent.
type Optimizer struct {
varstore *VarStore
opt *ts.COptimizer
// variablesInOptimizer uint8
variablesInOptimizer map[string]struct{}
config OptimizerConfig //interface{}
stepCount int
lr float64
}
func (o *Optimizer) Debug() {
	for n := range o.variablesInOptimizer {
v := o.varstore.GetVarOfName(n)
leaf, err := v.Tensor.IsLeaf(false)
or_panic(err)
retains, err := v.Tensor.RetainsGrad(false)
or_panic(err)
log.Info("[opt] var test", "n", n, "leaf", leaf, "retains", retains)
}
}
func (o *Optimizer) RefreshValues() (err error) {
opt, err := o.config.buildCOpt(o.lr)
if err != nil {
return
}
for name := range o.variablesInOptimizer {
v := o.varstore.GetVarOfName(name)
if v.Trainable {
if err = opt.AddParameter(v.Tensor, v.Group); err != nil {
err = fmt.Errorf("Optimizer defaultBuild - AddParameter failed: %w\n", err)
return
}
}
}
o.opt = opt
return
}
// OptimizerConfig defines Optimizer configurations. These configs can be used to build optimizer.
type OptimizerConfig interface {
buildCOpt(lr float64) (*ts.COptimizer, error)
// Build builds an optimizer with the specified learning rate handling variables stored in `vs`.
//
// NOTE: Build is a 'default' method. It can be called by wrapping
// 'DefaultBuild' function
// E.g. AdamOptimizerConfig struct has a method to fulfill the `Build` method of
// OptimizerConfig by wrapping `DefaultBuild` like
// (config AdamOptimizerConfig) Build(vs VarStore, lr float64) (retVal Optimizer, err error){
// return defaultBuild(config, vs, lr)
// }
Build(vs *VarStore, lr float64) (*Optimizer, error)
}
// defaultBuild is `default` Build method for OptimizerConfig interface
func defaultBuild(config OptimizerConfig, vs *VarStore, lr float64) (*Optimizer, error) {
opt, err := config.buildCOpt(lr)
if err != nil {
return nil, err
}
names := make(map[string]struct{})
for name, v := range vs.vars {
if v.Trainable {
log.Info("Adding parameter", "name", name, "g", v.Group)
if err = opt.AddParameter(v.Tensor, v.Group); err != nil {
err = fmt.Errorf("Optimizer defaultBuild - AddParameter failed: %w\n", err)
return nil, err
}
}
names[name] = struct{}{}
}
return &Optimizer{
varstore: vs,
opt: opt,
variablesInOptimizer: names,
config: config,
stepCount: 0,
lr: 0,
}, nil
}
// SGD Optimizer:
//===============
// SGDConfig holds parameters for building the SGD (Stochastic Gradient Descent) optimizer.
type SGDConfig struct {
Momentum float64
Dampening float64
Wd float64
Nesterov bool
}
// DefaultSGDConfig creates SGDConfig with default values.
func DefaultSGDConfig() *SGDConfig {
return &SGDConfig{
Momentum: 0.0,
Dampening: 0.0,
Wd: 0.0,
Nesterov: false,
}
}
// NewSGD creates the configuration for a SGD optimizer with specified values
func NewSGDConfig(momentum, dampening, wd float64, nesterov bool) *SGDConfig {
return &SGDConfig{
Momentum: momentum,
Dampening: dampening,
Wd: wd,
Nesterov: nesterov,
}
}
// Implement OptimizerConfig interface for SGDConfig
func (c *SGDConfig) buildCOpt(lr float64) (*ts.COptimizer, error) {
return ts.Sgd(lr, c.Momentum, c.Dampening, c.Wd, c.Nesterov)
}
func (c *SGDConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) {
return defaultBuild(c, vs, lr)
}
// Adam optimizer:
// ===============
type AdamConfig struct {
Beta1 float64
Beta2 float64
Wd float64
}
// DefaultAdamConfig creates AdamConfig with default values
func DefaultAdamConfig() *AdamConfig {
return &AdamConfig{
Beta1: 0.9,
Beta2: 0.999,
Wd: 0.0,
}
}
// NewAdamConfig creates AdamConfig with specified values
func NewAdamConfig(beta1, beta2, wd float64) *AdamConfig {
return &AdamConfig{
Beta1: beta1,
Beta2: beta2,
Wd: wd,
}
}
// Implement OptimizerConfig interface for AdamConfig
func (c *AdamConfig) buildCOpt(lr float64) (*ts.COptimizer, error) {
return ts.Adam(lr, c.Beta1, c.Beta2, c.Wd)
}
func (c *AdamConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) {
return defaultBuild(c, vs, lr)
}
// AdamW optimizer:
// ===============
type AdamWConfig struct {
Beta1 float64
Beta2 float64
Wd float64
}
// DefaultAdamWConfig creates AdamWConfig with default values
func DefaultAdamWConfig() *AdamWConfig {
return &AdamWConfig{
Beta1: 0.9,
Beta2: 0.999,
Wd: 0.01,
}
}
// NewAdamWConfig creates AdamWConfig with specified values
func NewAdamWConfig(beta1, beta2, wd float64) *AdamWConfig {
return &AdamWConfig{
Beta1: beta1,
Beta2: beta2,
Wd: wd,
}
}
// Implement OptimizerConfig interface for AdamWConfig
func (c *AdamWConfig) buildCOpt(lr float64) (*ts.COptimizer, error) {
return ts.AdamW(lr, c.Beta1, c.Beta2, c.Wd)
}
// Build builds AdamW optimizer
func (c *AdamWConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) {
return defaultBuild(c, vs, lr)
}
// RMSProp optimizer:
// ===============
type RMSPropConfig struct {
Alpha float64
Eps float64
Wd float64
Momentum float64
Centered bool
}
// DefaultRMSPropConfig creates RMSPropConfig with default values
func DefaultRMSPropConfig() *RMSPropConfig {
return &RMSPropConfig{
Alpha: 0.99,
Eps: 1e-8,
Wd: 0.0,
Momentum: 0.0,
Centered: false,
}
}
// NewRMSPropConfig creates RMSPropConfig with specified values
func NewRMSPropConfig(alpha, eps, wd, momentum float64, centered bool) *RMSPropConfig {
return &RMSPropConfig{
Alpha: alpha,
Eps: eps,
Wd: wd,
Momentum: momentum,
Centered: centered,
}
}
// Implement OptimizerConfig interface for RMSPropConfig
func (c *RMSPropConfig) buildCOpt(lr float64) (*ts.COptimizer, error) {
return ts.RmsProp(lr, c.Alpha, c.Eps, c.Wd, c.Momentum, c.Centered)
}
func (c *RMSPropConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) {
return defaultBuild(c, vs, lr)
}
// Optimizer methods:
// ==================
func (opt *Optimizer) addMissingVariables() {
type param struct {
tensor *ts.Tensor
group uint
}
trainables := make(map[string]param)
for name, v := range opt.varstore.vars {
if v.Trainable {
trainables[name] = param{tensor: v.Tensor, group: v.Group}
}
}
missingVariables := len(trainables) - len(opt.variablesInOptimizer)
if missingVariables > 0 {
log.Info("INFO: Optimizer.addMissingVariables()...")
for name, x := range trainables {
if _, ok := opt.variablesInOptimizer[name]; !ok {
opt.opt.AddParameter(x.tensor, x.group)
opt.variablesInOptimizer[name] = struct{}{}
}
}
}
}
// ZeroGrad zeroes the gradient for the tensors tracked by this optimizer.
func (opt *Optimizer) ZeroGrad() error {
if err := opt.opt.ZeroGrad(); err != nil {
err = fmt.Errorf("Optimizer.ZeroGrad() failed: %w\n", err)
return err
}
return nil
}
// MustZeroGrad zeroes the gradient for the tensors tracked by this optimizer.
func (opt *Optimizer) MustZeroGrad() {
err := opt.ZeroGrad()
if err != nil {
log.Fatal(err)
}
}
// Clips gradient value at some specified maximum value.
func (opt *Optimizer) ClipGradValue(max float64) {
opt.varstore.Lock()
defer opt.varstore.Unlock()
for _, v := range opt.varstore.vars {
if v.Trainable {
// v.Tensor.MustGrad().Clamp_(ts.FloatScalar(-max), ts.FloatScalar(max))
gradTs := v.Tensor.MustGrad(false)
gradTs.Clamp_(ts.FloatScalar(-max), ts.FloatScalar(max))
}
}
}
// Step performs an optimization step, updating the tracked tensors based on their gradients.
func (opt *Optimizer) Step() error {
err := opt.opt.Step()
if err != nil {
err = fmt.Errorf("Optimizer.Step() failed: %w\n", err)
return err
}
opt.stepCount += 1
return nil
}
// MustStep performs an optimization step, updating the tracked tensors based on their gradients.
func (opt *Optimizer) MustStep() {
err := opt.Step()
if err != nil {
log.Fatal(err)
}
}
// ResetStepCount set step count to zero.
func (opt *Optimizer) ResetStepCount() {
opt.stepCount = 0
}
// StepCount get current step count.
func (opt *Optimizer) StepCount() int {
return opt.stepCount
}
// BackwardStep applies a backward step pass, update the gradients, and performs an optimization step.
func (opt *Optimizer) BackwardStep(loss *ts.Tensor) error {
err := opt.opt.ZeroGrad()
if err != nil {
err = fmt.Errorf("Optimizer.BackwardStep() failed: %w\n", err)
return err
}
loss.MustBackward()
err = opt.opt.Step()
if err != nil {
err = fmt.Errorf("Optimizer.BackwardStep() failed: %w\n", err)
return err
}
return nil
}
// MustBackwardStep applies a backward step pass, update the gradients, and performs an optimization step.
func (opt *Optimizer) MustBackwardStep(loss *ts.Tensor) {
err := opt.BackwardStep(loss)
if err != nil {
log.Fatal(err)
}
}
// BackwardStepClip applies a backward step pass, update the gradients, and performs an optimization step.
//
// The gradients are clipped based on `max` before being applied.
func (opt *Optimizer) BackwardStepClip(loss *ts.Tensor, max float64) error {
err := opt.opt.ZeroGrad()
if err != nil {
err = fmt.Errorf("Optimizer.BackwardStepClip() failed: %w\n", err)
return err
}
loss.MustBackward()
opt.ClipGradValue(max)
err = opt.opt.Step()
if err != nil {
err = fmt.Errorf("Optimizer.BackwardStepClip() failed: %w\n", err)
return err
}
return nil
}
// MustBackwardStepClip applies a backward step pass, update the gradients, and performs an optimization step.
//
// The gradients are clipped based on `max` before being applied.
func (opt *Optimizer) MustBackwardStepClip(loss *ts.Tensor, max float64) {
err := opt.BackwardStepClip(loss, max)
if err != nil {
log.Fatal(err)
}
}
type ClipOpts struct {
NormType float64
ErrorIfNonFinite bool
}
type ClipOpt func(*ClipOpts)
func defaultClipOpts() *ClipOpts {
return &ClipOpts{
NormType: 2.0,
ErrorIfNonFinite: false, // will switch to "true" in the future.
}
}
func WithNormType(v float64) ClipOpt {
return func(o *ClipOpts) {
o.NormType = v
}
}
func WithErrorIfNonFinite(v bool) ClipOpt {
return func(o *ClipOpts) {
o.ErrorIfNonFinite = v
}
}
// / Clips gradient L2 norm over all trainable parameters.
//
// The norm is computed over all gradients together, as if they were
// concatenated into a single vector.
//
// / Args:
// - max: max norm of the gradient
// - o.NormType. Type of the used p-norm, can be "inf" for infinity norm. Default= 2.0
// - o.ErrorIfNonFinite bool. If true, throw error if total norm of the gradients from paramters is "nan", "inf" or "-inf". Default=false
// Returns: total norm of the parameters (viewed as a single vector)
// ref. https://github.com/pytorch/pytorch/blob/cb4aeff7d8e4c70bb638cf159878c5204d0cc2da/torch/nn/utils/clip_grad.py#L59
func (opt *Optimizer) ClipGradNorm(max float64, opts ...ClipOpt) error {
o := defaultClipOpts()
for _, option := range opts {
option(o)
}
opt.varstore.Lock()
defer opt.varstore.Unlock()
parameters := opt.varstore.TrainableVariables()
if len(parameters) == 0 {
// return ts.MustOfSlice([]float64{0.0}), nil
return nil
}
var (
norms []*ts.Tensor
totalNorm *ts.Tensor
)
device := opt.varstore.device
// FIXME. What about mixed-precision?
dtype := parameters[0].DType()
if o.NormType == math.Inf(1) {
for _, v := range opt.varstore.vars {
n := v.Tensor.MustGrad(false).MustDetach(true).MustAbs(true).MustMax(true).MustTo(device, true)
norms = append(norms, n)
}
// total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
totalNorm = ts.MustStack(norms, 0).MustMax(true)
} else {
for _, v := range opt.varstore.vars {
// x := v.Tensor.MustGrad(false).MustNorm(true)
// NOTE. tensor.Norm() is going to be deprecated. So use linalg_norm
// Ref. https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm
x := v.Tensor.MustGrad(false).MustDetach(true).MustLinalgNorm(ts.FloatScalar(o.NormType), nil, false, dtype, true)
norms = append(norms, x)
}
}
// totalNorm = ts.MustStack(norms, 0).MustNorm(true).MustAddScalar(ts.FloatScalar(1e-6), true)
// total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
totalNorm = ts.MustStack(norms, 0).MustLinalgNorm(ts.FloatScalar(o.NormType), nil, false, dtype, true)
for _, x := range norms {
x.MustDrop()
}
totalNormVal := totalNorm.Float64Values(true)[0]
// if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
if o.ErrorIfNonFinite && (math.IsNaN(totalNormVal) || math.IsInf(totalNormVal, 1)) {
err := fmt.Errorf("The total norm of order (%v) for gradients from 'parameters' is non-finite, so it cannot be clipped. To disable this error and scale the gradients by the non-finite norm anyway, set option.ErrorIfNonFinite= false", o.NormType)
return err
}
// clip_coef = max_norm / (total_norm + 1e-6)
// clipCoefTs := ts.TensorFrom([]float64{max}).MustDiv(totalNorm, true)
clipCoef := max / (totalNormVal + 1e-6)
// NOTE: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so
// avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization
// when the gradients do not reside in CPU memory.
// clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
if clipCoef > 1.0 {
clipCoef = 1.0
}
for _, v := range opt.varstore.vars {
if v.Trainable {
// p.grad.detach().mul_(clip_coef_clamped.to(p.grad.device))
// v.Tensor.MustGrad(false).MustDetach(true).MustMulScalar_(ts.FloatScalar(clipCoef))
v.Tensor.MustGrad(false).MustMulScalar_(ts.FloatScalar(clipCoef))
}
}
return nil
}
// BackwardStepClipNorm applies a backward step pass, update the gradients, and performs an optimization step.
//
// The gradients L2 norm is clipped based on `max`.
func (opt *Optimizer) BackwardStepClipNorm(loss *ts.Tensor, max float64, opts ...ClipOpt) error {
err := opt.opt.ZeroGrad()
if err != nil {
err := fmt.Errorf("Optimizer.BackwardStepClipNorm() failed: %w\n", err)
return err
}
err = loss.Backward()
if err != nil {
err := fmt.Errorf("Optimizer.BackwardStepClipNorm() failed: %w\n", err)
return err
}
err = opt.ClipGradNorm(max, opts...)
if err != nil {
err := fmt.Errorf("Optimizer.BackwardStepClipNorm() failed: %w\n", err)
return err
}
err = opt.Step()
if err != nil {
err := fmt.Errorf("Optimizer.BackwardStepClipNorm() failed: %w\n", err)
return err
}
return nil
}
// MustBackwardStepClipNorm applies a backward step pass, update the gradients, and performs an optimization step.
//
// The gradients L2 norm is clipped based on `max`.
func (opt *Optimizer) MustBackwardStepClipNorm(loss *ts.Tensor, max float64, opts ...ClipOpt) {
err := opt.BackwardStepClipNorm(loss, max, opts...)
if err != nil {
log.Fatal(err)
}
}
// SetLR sets the optimizer learning rate.
//
// NOTE. it sets a SINGLE value of learning rate for all parameter groups.
// Most of the time, there's one parameter group.
func (opt *Optimizer) SetLR(lr float64) {
err := opt.opt.SetLearningRate(lr)
if err != nil {
log.Fatalf("Optimizer - SetLR method call error: %v\n", err)
}
}
func (opt *Optimizer) GetLRs() []float64 {
lrs, err := opt.opt.GetLearningRates()
if err != nil {
log.Fatalf("Optimizer - GetLRs method call error: %v\n", err)
}
return lrs
}
// SetLRs sets learning rates for ALL parameter groups respectively.
func (opt *Optimizer) SetLRs(lrs []float64) {
err := opt.opt.SetLearningRates(lrs)
if err != nil {
log.Fatalf("Optimizer - SetLRs method call error: %v\n", err)
}
}
// SetMomentum sets the optimizer momentum.
func (opt *Optimizer) SetMomentum(m float64) {
err := opt.opt.SetMomentum(m)
if err != nil {
log.Fatalf("Optimizer - SetMomentum method call error: %v\n", err)
}
}
func (opt *Optimizer) ParamGroupNum() int {
ngroup, err := opt.opt.ParamGroupNum()
if err != nil {
log.Fatalf("Optimizer - ParamGroupNum method call error: %v\n", err)
}
return int(ngroup)
}
func (opt *Optimizer) AddParamGroup(tensors []*ts.Tensor) {
err := opt.opt.AddParamGroup(tensors)
if err != nil {
log.Fatalf("Optimizer - ParamGroupNum method call error: %v\n", err)
}
}
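
Note: intended usage mirrors the commented-out calls in test.go at the bottom of this diff; a hedged sketch assuming a ContainerModel with a populated VarStore and a scalar loss tensor:

opt, err := DefaultAdamConfig().Build(model.Vs, 1e-3)
if err != nil {
	log.Fatal(err)
}
// One training step: zero the gradients, backprop the loss, apply the update.
opt.MustZeroGrad()
loss.MustBackward()
opt.MustStep()
// Equivalent one-liner with value clipping: opt.MustBackwardStepClip(loss, 0.5)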

@@ -0,0 +1,18 @@
package my_nn
import (
torch "git.andr3h3nriqu3s.com/andr3/gotch/ts"
)
func or_panic(err error) {
if err != nil {
panic(err)
}
}
type MyLayer interface {
torch.ModuleT
ExtractFromVarstore(vs *VarStore)
Debug()
}
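
Note: anything satisfies MyLayer by providing gotch's ModuleT plus the two bookkeeping hooks; a hypothetical stateless layer as illustration:

// Identity has no parameters, so both hooks are no-ops.
type Identity struct{}

func (Identity) ForwardT(x *torch.Tensor, train bool) *torch.Tensor {
	return x.MustShallowClone()
}
func (Identity) ExtractFromVarstore(vs *VarStore) {}
func (Identity) Debug()                           {}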

File diff suppressed because it is too large

@@ -0,0 +1,120 @@
package train
import (
types "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
my_nn "git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train/torch/nn"
"git.andr3h3nriqu3s.com/andr3/gotch"
"github.com/charmbracelet/log"
torch "git.andr3h3nriqu3s.com/andr3/gotch/ts"
)
type IForwardable interface {
Forward(xs *torch.Tensor) *torch.Tensor
}
// Container for a model
type ContainerModel struct {
Layers []my_nn.MyLayer
Vs *my_nn.VarStore
path *my_nn.Path
}
func (n *ContainerModel) ForwardT(x *torch.Tensor, train bool) *torch.Tensor {
if len(n.Layers) == 0 {
return x.MustShallowClone()
}
if len(n.Layers) == 1 {
log.Info("here")
return n.Layers[0].ForwardT(x, train)
}
// forward sequentially
outs := make([]*torch.Tensor, len(n.Layers))
for i := 0; i < len(n.Layers); i++ {
if i == 0 {
outs[0] = n.Layers[i].ForwardT(x, train)
//defer outs[0].MustDrop()
} else if i == len(n.Layers)-1 {
return n.Layers[i].ForwardT(outs[i-1], train)
} else {
outs[i] = n.Layers[i].ForwardT(outs[i-1], train)
//defer outs[i].MustDrop()
}
}
panic("Do not reach here")
}
func (n *ContainerModel) To(device gotch.Device) {
n.Vs.ToDevice(device)
for _, layer := range n.Layers {
layer.ExtractFromVarstore(n.Vs)
}
}
func (n *ContainerModel) Refresh() {
for _, layer := range n.Layers {
layer.ExtractFromVarstore(n.Vs)
}
}
func BuildModel(layers []*types.Layer, _lastLinearSize int64, addSigmoid bool) *ContainerModel {
base_vs := my_nn.NewVarStore(gotch.CPU)
vs := base_vs.Root()
m_layers := []my_nn.MyLayer{}
var lastLinearSize int64 = _lastLinearSize
lastLinearConv := []int64{}
for _, layer := range layers {
if layer.LayerType == types.LAYER_INPUT {
lastLinearConv = layer.GetShape()
log.Info("Input: ", "In:", lastLinearConv)
} else if layer.LayerType == types.LAYER_DENSE {
shape := layer.GetShape()
log.Info("New Dense: ", "In:", lastLinearSize, "out:", shape[0])
m_layers = append(m_layers, NewLinear(vs, lastLinearSize, shape[0]))
lastLinearSize = shape[0]
} else if layer.LayerType == types.LAYER_FLATTEN {
m_layers = append(m_layers, NewFlatten())
lastLinearSize = 1
for _, i := range lastLinearConv {
lastLinearSize *= i
}
log.Info("Flatten: ", "In:", lastLinearConv, "out:", lastLinearSize)
} else if layer.LayerType == types.LAYER_SIMPLE_BLOCK {
panic("TODO")
log.Info("New Block: ", "In:", lastLinearConv, "out:", []int64{lastLinearConv[1] / 2, lastLinearConv[2] / 2, 128})
//m_layers = append(m_layers, NewSimpleBlock(vs, lastLinearConv[0]))
lastLinearConv[0] = 128
lastLinearConv[1] /= 2
lastLinearConv[2] /= 2
}
}
if addSigmoid {
m_layers = append(m_layers, NewSigmoid())
}
b := &ContainerModel{
Layers: m_layers,
Vs: base_vs,
path: vs,
}
return b
}
func (model *ContainerModel) Debug() {
for _, v := range model.Layers {
v.Debug()
}
}
func SaveModel(model *ContainerModel, modelFn string) (err error) {
model.Vs.ToDevice(gotch.CPU)
return model.Vs.Save(modelFn)
}

@@ -0,0 +1,152 @@
package train
import (
"unsafe"
my_nn "git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train/torch/nn"
"github.com/charmbracelet/log"
"git.andr3h3nriqu3s.com/andr3/gotch/nn"
torch "git.andr3h3nriqu3s.com/andr3/gotch/ts"
)
func or_panic(err error) {
if err != nil {
log.Fatal(err)
}
}
type SimpleBlock struct {
C1, C2 *nn.Conv2D
BN1 *nn.BatchNorm
}
// BasicBlock returns a BasicBlockModule instance
func NewSimpleBlock(_vs *my_nn.Path, inplanes int64) *SimpleBlock {
vs := (*nn.Path)(unsafe.Pointer(_vs))
conf1 := nn.DefaultConv2DConfig()
conf1.Stride = []int64{2, 2}
conf2 := nn.DefaultConv2DConfig()
conf2.Padding = []int64{2, 2}
b := &SimpleBlock{
C1: nn.NewConv2D(vs, inplanes, 128, 3, conf1),
C2: nn.NewConv2D(vs, 128, 128, 3, conf2),
BN1: nn.NewBatchNorm(vs, 2, 128, nn.DefaultBatchNormConfig()),
}
return b
}
// Forward method
func (b *SimpleBlock) Forward(x *torch.Tensor) *torch.Tensor {
identity := x
out := b.C1.Forward(x)
out = out.MustRelu(false)
out = b.C2.Forward(out)
out = out.MustRelu(false)
shape, err := out.Size()
or_panic(err)
out, err = out.AdaptiveAvgPool2d(shape, false)
or_panic(err)
out = b.BN1.Forward(out)
out, err = out.LeakyRelu(false)
or_panic(err)
out = out.MustAdd(identity, false)
out = out.MustRelu(false)
return out
}
func (b *SimpleBlock) ForwardT(x *torch.Tensor, train bool) *torch.Tensor {
identity := x
out := b.C1.ForwardT(x, train)
out = out.MustRelu(false)
out = b.C2.ForwardT(out, train)
out = out.MustRelu(false)
shape, err := out.Size()
or_panic(err)
out, err = out.AdaptiveAvgPool2d(shape, false)
or_panic(err)
out = b.BN1.ForwardT(out, train)
out, err = out.LeakyRelu(false)
or_panic(err)
out = out.MustAdd(identity, false)
out = out.MustRelu(false)
return out
}
// NewLinear creates a linear layer using the default LinearConfig
func NewLinear(vs *my_nn.Path, in, out int64) *my_nn.Linear {
config := my_nn.DefaultLinearConfig()
return my_nn.NewLinear(vs, in, out, config)
}
type Flatten struct{}
// NewFlatten returns a Flatten layer instance
func NewFlatten() *Flatten {
return &Flatten{}
}
// The flatten layer does not need to move anything to the device
func (b *Flatten) ExtractFromVarstore(vs *my_nn.VarStore) {}
func (b *Flatten) Debug() {}
// Forward method
func (b *Flatten) Forward(x *torch.Tensor) *torch.Tensor {
out, err := x.Flatten(1, -1, false)
or_panic(err)
return out
}
func (b *Flatten) ForwardT(x *torch.Tensor, train bool) *torch.Tensor {
out, err := x.Flatten(1, -1, false)
or_panic(err)
return out
}
type Sigmoid struct{}
func NewSigmoid() *Sigmoid {
return &Sigmoid{}
}
// The sigmoid layer does not need to move anything to another device
func (b *Sigmoid) ExtractFromVarstore(vs *my_nn.VarStore) {}
func (b *Sigmoid) Debug() {}
func (b *Sigmoid) Forward(x *torch.Tensor) *torch.Tensor {
out, err := x.Sigmoid(false)
or_panic(err)
return out
}
func (b *Sigmoid) ForwardT(x *torch.Tensor, train bool) *torch.Tensor {
out, err := x.Sigmoid(false)
or_panic(err)
return out
}
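
Note: Flatten keeps the batch dimension and collapses everything else, which is what the lastLinearSize bookkeeping in BuildModel relies on; a shape sketch using the same torch alias (and assuming a gotch import):

x := torch.MustOnes([]int64{2, 3, 28, 28}, gotch.Float, gotch.CPU)
y, err := x.Flatten(1, -1, true) // shape {2, 2352}: 3*28*28 collapsed
or_panic(err)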

@@ -14,7 +14,7 @@ func handleTasksStats(handle *Handle) {
 	}
 	PostAuthJson(handle, "/stats/task/model/day", User_Normal, func(c *Context, dat *ModelTasksStatsRequest) *Error {
 		model, err := GetBaseModel(c, dat.ModelId)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.JsonBadRequest("Model not found!")
 		} else if err != nil {
 			return c.E500M("Failed to get model", err)

@@ -14,7 +14,7 @@ func handleRequests(x *Handle) {
 	PostAuthJson(x, "/task/agreement", User_Normal, func(c *Context, dat *AgreementRequest) *Error {
 		var task Task
 		err := GetDBOnce(c, &task, "tasks where id=$1", dat.Id)
-		if err == ModelNotFoundError {
+		if err == NotFoundError {
 			return c.JsonBadRequest("Model not found")
 		} else if err != nil {
 			return c.E500M("Failed to get task data", err)

@@ -46,7 +46,7 @@ func handleList(handler *Handle) {
 		if requestData.ModelId != "" {
 			_, err := GetBaseModel(c.Db, requestData.ModelId)
-			if err == ModelNotFoundError {
+			if err == NotFoundError {
 				return c.SendJSONStatus(404, "Model not found!")
 			} else if err != nil {
 				return c.Error500(err)

@@ -11,7 +11,8 @@ import (
 	"git.andr3h3nriqu3s.com/andr3/fyp/logic/db"
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
-	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/models"
+
+	// . "git.andr3h3nriqu3s.com/andr3/fyp/logic/models"
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train"
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/tasks/utils"
 	. "git.andr3h3nriqu3s.com/andr3/fyp/logic/users"
@@ -52,9 +53,10 @@ func runner(config Config, db db.Db, task_channel chan Task, index int, back_cha
 			if task.TaskType == int(TASK_TYPE_CLASSIFICATION) {
 				logger.Info("Classification Task")
-				if err = ClassifyTask(base, task); err != nil {
-					logger.Error("Classification task failed", "error", err)
-				}
+				/*if err = ClassifyTask(base, task); err != nil {
+					logger.Error("Classification task failed", "error", err)
+				}*/
+				task.UpdateStatusLog(base, TASK_FAILED_RUNNING, "TODO move tasks to pytorch")
 				back_channel <- index
 				continue

@@ -392,7 +392,7 @@ func (c *Context) GetModelFromId(id_path string) (*dbtypes.BaseModel, *Error) {
 	}
 	model, err := dbtypes.GetBaseModel(c.Db, id)
-	if err == dbtypes.ModelNotFoundError {
+	if err == dbtypes.NotFoundError {
 		return nil, c.SendJSONStatus(http.StatusNotFound, "Model not found")
 	} else if err != nil {
 		return nil, c.Error500(err)

@@ -23,7 +23,7 @@ const (
 	dbname = "aistuff"
 )

-func main() {
+func main_() {
 	psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
 		"password=%s dbname=%s sslmode=disable",

run.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/fish
podman run --rm --network host --gpus all -ti -v (pwd):/app -e "TERM=xterm-256color" fyp-server bash

test.go Normal file

@@ -0,0 +1,120 @@
package main
import (
"git.andr3h3nriqu3s.com/andr3/gotch"
dbtypes "git.andr3h3nriqu3s.com/andr3/fyp/logic/db_types"
"git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train/torch"
//my_nn "git.andr3h3nriqu3s.com/andr3/fyp/logic/models/train/torch/nn"
torch "git.andr3h3nriqu3s.com/andr3/gotch/ts"
"github.com/charmbracelet/log"
)
func main() {
log.Info("Hello world")
m := train.BuildModel([]*dbtypes.Layer{
&dbtypes.Layer{
LayerType: dbtypes.LAYER_INPUT,
Shape: "[ 3, 28, 28 ]",
},
&dbtypes.Layer{
LayerType: dbtypes.LAYER_FLATTEN,
},
&dbtypes.Layer{
LayerType: dbtypes.LAYER_DENSE,
Shape: "[ 27 ]",
},
&dbtypes.Layer{
LayerType: dbtypes.LAYER_DENSE,
Shape: "[ 18 ]",
},
// &dbtypes.Layer{
// LayerType: dbtypes.LAYER_DENSE,
// Shape: "[ 9 ]",
// },
}, 0, true)
//var err error
d := gotch.CudaIfAvailable()
log.Info("device", "d", d)
m.To(d)
var count = 0
// vars1 := m.Vs.Variables()
//
// for k, v := range vars1 {
// ones := torch.MustOnes(v.MustSize(), gotch.Float, d)
// v := ones.MustSetRequiresGrad(true, false)
// v.MustDrop()
// ones.RetainGrad(false)
//
// m.Vs.UpdateVarTensor(k, ones, true)
// m.Refresh()
// }
//
// opt, err := my_nn.DefaultAdamConfig().Build(m.Vs, 0.001)
// if err != nil {
// return
// }
log.Info("start")
for count < 100 {
ones := torch.MustOnes([]int64{1, 3, 28, 28}, gotch.Float, d)
// ones = ones.MustSetRequiresGrad(true, true)
// ones.RetainGrad(false)
res := m.ForwardT(ones, true)
//res = res.MustSetRequiresGrad(true, true)
//res.RetainGrad(false)
outs := torch.MustZeros([]int64{1, 18}, gotch.Float, d)
loss, err := res.BinaryCrossEntropyWithLogits(outs, &torch.Tensor{}, &torch.Tensor{}, 2, false)
if err != nil {
log.Fatal(err)
}
// loss = loss.MustSetRequiresGrad(true, true)
//opt.ZeroGrad()
log.Info("loss", "loss", loss.Float64Values())
loss.MustBackward()
//opt.Step()
// log.Info(mean.MustGrad(false).Float64Values())
//ones_grad = ones.MustGrad(true).MustMax(true).Float64Values()[0]
// log.Info(res.MustGrad(true).MustMax(true).Float64Values())
// log.Info(ones_grad)
vars := m.Vs.Variables()
for k, v := range vars {
log.Info("[grad check]", "k", k, "grad", v.MustGrad(false).MustMax(true).Float64Values())
}
m.Debug()
outs.MustDrop()
count += 1
log.Fatal("grad zero")
}
log.Warn("out")
}

@@ -215,7 +215,7 @@
 					</div>
 				{:else if m.status == -3 || m.status == -4}
 					<BaseModelInfo model={m} />
-					<form on:submit={resetModel}>
+					<form on:submit|preventDefault={resetModel}>
 						Failed Prepare for training.<br />
 						<div class="spacer"></div>
 						<MessageSimple bind:this={resetMessages} />
<MessageSimple bind:this={resetMessages} /> <MessageSimple bind:this={resetMessages} />