Go to file
2020-11-15 10:32:37 +11:00
docs feat(tensor/index_test.go): added 2020-07-10 15:28:37 +10:00
example updated example code on README and added Colab links 2020-11-15 10:32:37 +11:00
gen added README.md for generating APIs 2020-11-12 17:25:24 +11:00
libtch switched back to lib.ato_add_parameters_old as param group not updated yet 2020-11-03 00:14:56 +11:00
nn more changes to pointer receiver at nn sub-package and clean-up return variable 2020-11-03 11:56:04 +11:00
tensor tensor/print: added padding and precision 2020-11-09 17:16:34 +11:00
vision corrected Int64Option and Float64Option param functions 2020-11-02 22:35:25 +11:00
.gitignore remove .ipynb_checkpoints 2020-11-15 10:03:18 +11:00
.travis.yml fixed travis env 2020-10-13 17:23:11 +11:00
CHANGELOG.md tensor/print: more print options; updated for minor version 2020-11-09 15:59:25 +11:00
device.go fix(nn/rnn): correct LSTM not flatten weights; added Device.IsCuda(); WIP(example/char-rnn): still memory blowup 2020-07-27 17:17:38 +10:00
dtype.go fix(DTypeFromData and FlattenData): fixed type check didn't reach data type is a slice 2020-07-22 15:26:18 +10:00
dune-project initial commit 2020-05-22 23:43:09 +10:00
go.mod example/yolo - added drawing label and added some example images 2020-11-01 13:15:33 +11:00
go.sum more changes to pointer receiver at nn sub-package and clean-up return variable 2020-11-03 11:56:04 +11:00
LICENSE feat(license): added Apache License 2020-07-22 16:42:20 +10:00
README.md updated example code on README and added Colab links 2020-11-15 10:32:37 +11:00
setup-cpu.sh tensor/print: more print options; updated for minor version 2020-11-09 15:59:25 +11:00
setup-gpu.sh tensor/print: more print options; updated for minor version 2020-11-09 15:59:25 +11:00
setup.sh tensor/print: more print options; updated for minor version 2020-11-09 15:59:25 +11:00

GoTch LicenseGo.Dev referenceTravis CIGo Report Card

Overview

  • GoTch is a Go binding for the C++ Libtorch library, for developing and implementing deep learning projects in Go.
  • This package provides a thin wrapper around Libtorch to make use of its tensor APIs and CUDA support while staying as idiomatic in Go as possible.
  • There are about 1404 auto-generated tensor APIs.

Dependencies

  • Libtorch C++ v1.7.0 library of Pytorch

Installation

  • CPU

    Default values: LIBTORCH_VER=1.7.0 and GOTCH_VER=v0.3.2

    go get -u github.com/sugarme/gotch@v0.3.2
    bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.3.2/setup-cpu.sh
    
    
  • GPU

    Default values: LIBTORCH_VER=1.7.0, CUDA_VER=10.1 and GOTCH_VER=v0.3.2

    go get -u github.com/sugarme/gotch@v0.3.2
    bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.3.2/setup-gpu.sh
    
    

Examples

Basic tensor operations


import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

// basicOps demonstrates tensor creation, custom-format printing, matrix
// multiplication, and an in-place scalar addition using the gotch tensor
// API. Expected output is reproduced in the comments below.
func basicOps() {

	// Random 3x5x6 float32 tensor on CPU.
	xs := ts.MustRand([]int64{3, 5, 6}, gotch.Float, gotch.CPU)
	// Width/precision flags (%8.3f) are honoured by the tensor's custom
	// formatter — see the padded output below.
	fmt.Printf("%8.3f\n", xs)
	// %i is a gotch-specific formatting verb that prints tensor metadata
	// (shape, dtype, device) — see the "TENSOR INFO" output below.
	fmt.Printf("%i", xs)

    /*
    (1,.,.) =
       0.391     0.055     0.638     0.514     0.757     0.446  
       0.817     0.075     0.437     0.452     0.077     0.492  
       0.504     0.945     0.863     0.243     0.254     0.640  
       0.850     0.132     0.763     0.572     0.216     0.116  
       0.410     0.660     0.156     0.336     0.885     0.391  

    (2,.,.) =
       0.952     0.731     0.380     0.390     0.374     0.001  
       0.455     0.142     0.088     0.039     0.862     0.939  
       0.621     0.198     0.728     0.914     0.168     0.057  
       0.655     0.231     0.680     0.069     0.803     0.243  
       0.853     0.729     0.983     0.534     0.749     0.624  

    (3,.,.) =
       0.734     0.447     0.914     0.956     0.269     0.000  
       0.427     0.034     0.477     0.535     0.440     0.972  
       0.407     0.945     0.099     0.184     0.778     0.058  
       0.482     0.996     0.085     0.605     0.282     0.671  
       0.887     0.029     0.005     0.216     0.354     0.262  



    TENSOR INFO:
            Shape:          [3 5 6]
            DType:          float32
            Device:         {CPU 1}
            Defined:        true
    */

	// Basic tensor operations
	// ts1: values 0..5 viewed as 2x3. The trailing `true` presumably tells
	// MustView to drop the intermediate arange tensor — TODO confirm
	// against the gotch tensor API.
	ts1 := ts.MustArange(ts.IntScalar(6), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
	defer ts1.MustDrop()
	// ts2: 3x4 tensor of ones.
	ts2 := ts.MustOnes([]int64{3, 4}, gotch.Int64, gotch.CPU)
	defer ts2.MustDrop()

	// (2x3) x (3x4) -> 2x4 result. The `false` flag presumably keeps ts1
	// alive so it can still be printed below — TODO confirm.
	mul := ts1.MustMatmul(ts2, false)
	defer mul.MustDrop()

	fmt.Printf("ts1:\n%2d", ts1)
	fmt.Printf("ts2:\n%2d", ts2)
	fmt.Printf("mul tensor (ts1 x ts2):\n%2d", mul)

    /*
    ts1:
     0   1   2  
     3   4   5  

    ts2:
     1   1   1   1  
     1   1   1   1  
     1   1   1   1  

    mul tensor (ts1 x ts2):
     3   3   3   3  
    12  12  12  12  
    */


	// In-place operation
	ts3 := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	fmt.Printf("Before:\n%v", ts3)
	// MustAdd1_ mutates ts3 in place (trailing underscore marks the
	// in-place variant); no new tensor is allocated.
	ts3.MustAdd1_(ts.FloatScalar(2.0))
	fmt.Printf("After (ts3 + 2.0):\n%v", ts3)

    /*
    Before:
    1  1  1  
    1  1  1  

    After (ts3 + 2.0):
    3  3  3  
    3  3  3  
    */
}

Simplified Convolutional neural network


    import (
        "fmt"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/nn"
        ts "github.com/sugarme/gotch/tensor"
    )

    // Net is a small convolutional network: two conv layers followed by a
    // fully-connected output layer (wired up in newNet, run in ForwardT).
    type Net struct {
        conv1 *nn.Conv2D // first convolution: 1 -> 16 channels, kernel 2
        conv2 *nn.Conv2D // second convolution: 16 -> 10 channels, kernel 2
        fc    *nn.Linear // final linear layer: 10 -> 10
    }

    // newNet builds the three-layer network (conv -> conv -> linear),
    // registering all layer variables under the given var-store path.
    func newNet(vs *nn.Path) *Net {
        net := &Net{
            conv1: nn.NewConv2D(vs, 1, 16, 2, nn.DefaultConv2DConfig()),
            conv2: nn.NewConv2D(vs, 16, 10, 2, nn.DefaultConv2DConfig()),
            fc:    nn.NewLinear(vs, 10, 10, nn.DefaultLinearConfig()),
        }
        return net
    }

    // ForwardT runs the forward pass: view -> conv1 -> maxpool -> conv2 ->
    // maxpool -> view -> linear -> relu. The train flag is not used inside
    // this body — presumably required by the module's forward interface.
    // NOTE(review): value receiver (n Net) copies the struct per call; the
    // fields are pointers, so the layers themselves are shared.
    func (n Net) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor {
        // Reshape input to (batch, 1 channel, 8, 8). The `false` flag
        // presumably keeps the caller's tensor alive — TODO confirm; note
        // the reassigned view itself is never explicitly dropped here.
        xs = xs.MustView([]int64{-1, 1, 8, 8}, false)

        outC1 := xs.Apply(n.conv1)
        // The trailing `true` on the ops below appears to ask each op to
        // consume (drop) its input tensor — NOTE(review): verify against
        // the gotch tensor API before relying on this.
        outMP1 := outC1.MaxPool2DDefault(2, true)
        defer outMP1.MustDrop()

        outC2 := outMP1.Apply(n.conv2)
        outMP2 := outC2.MaxPool2DDefault(2, true)
        outView2 := outMP2.MustView([]int64{-1, 10}, true)
        defer outView2.MustDrop()

        outFC := outView2.Apply(n.fc)
        return outFC.MustRelu(true)
    }

    // main wires up the var store, builds the network, and runs a single
    // forward pass over an 8x8 tensor of ones, printing the logits.
    func main() {
        varStore := nn.NewVarStore(gotch.CPU)
        model := newNet(varStore.Root())

        input := ts.MustOnes([]int64{8, 8}, gotch.Float, gotch.CPU)
        logits := model.ForwardT(input, false)
        fmt.Printf("Logits: %0.3f", logits)
    }

    //Logits: 0.000  0.000  0.000  0.225  0.321  0.147  0.000  0.207  0.000  0.000

Play with GoTch on Google Colab

  1. Tensor Initiation Open In Colab
  2. Tensor Indexing Open In Colab
  3. MNIST Open In Colab
  4. Tokenizer - BPE model Open In Colab
  5. transformer - BERT Mask Language Model Open In Colab
  6. YOLO v3 model inference Open In Colab

More coming soon...

Getting Started

License

GoTch is Apache 2.0 licensed.

Acknowledgement

  • This project was inspired by, and borrows many concepts from, the tch-rs Libtorch Rust binding.