Updated API changes

This commit is contained in:
sugarme 2021-07-23 00:54:41 +10:00
parent 49bb517da3
commit 5d8751bfaf
26 changed files with 383 additions and 593 deletions

View File

@ -35,7 +35,7 @@ func TestDataLoader_Next(t *testing.T) {
if err != nil {
t.Error(err)
}
want := 100
want := []int{100}
if !reflect.DeepEqual(want, got) {
t.Errorf("Want: %v\n", want)

View File

@ -22,9 +22,9 @@ func createTensors(samples int) []ts.Tensor {
s := ts.FloatScalar(float64(0.23))
for i := 0; i < 1; i++ {
t := ts.MustOfSlice(data).MustMul1(s, true)
t := ts.MustOfSlice(data).MustMulScalar(s, true)
tensors = append(tensors, t)
tensors = append(tensors, *t)
}
return tensors
@ -72,7 +72,7 @@ func main() {
tensors := createTensors(10000)
var gpuTensors []ts.Tensor
for _, t := range tensors {
gpuTensors = append(gpuTensors, t.MustTo(gpu, true))
gpuTensors = append(gpuTensors, *t.MustTo(gpu, true))
}
for _, t := range gpuTensors {

View File

@ -40,12 +40,12 @@ func runLinear() {
loss.MustBackward()
ts.NoGrad(func() {
ws.Add_(ws.MustGrad(false).MustMul1(ts.FloatScalar(-1.0), true))
bs.Add_(bs.MustGrad(false).MustMul1(ts.FloatScalar(-1.0), true))
ws.Add_(ws.MustGrad(false).MustMulScalar(ts.FloatScalar(-1.0), true))
bs.Add_(bs.MustGrad(false).MustMulScalar(ts.FloatScalar(-1.0), true))
})
testLogits := ds.TestImages.MustMm(ws, false).MustAdd(bs, true)
testAccuracy := testLogits.MustArgmax([]int64{-1}, false, true).MustEq1(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float, true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0})
testAccuracy := testLogits.MustArgmax([]int64{-1}, false, true).MustEqTensor(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float, true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0})
fmt.Printf("Epoch: %v - Loss: %.3f - Test accuracy: %.2f%%\n", epoch, loss.Float64Values()[0], testAccuracy*100)

View File

@ -814,7 +814,26 @@ let write_wrapper funcs filename =
; "UnsafeChunk"
; "UnsafeSplit"
; "UnsafeSplitWithSizes"
; "AlignTensors" ]
; "AlignTensors"
; "UnflattenDenseTensors"
; "TensorSplit"
; "TensorSplitIndices"
; "TensorSplitTensorIndicesOrSections"
; "QuantizePerTensorTensors"
; "Dsplit"
; "DsplitArray"
; "Hsplit"
; "HsplitArray"
; "Vsplit"
; "VsplitArray"
; "DequantizeTensors"
; "Atleast1dSequence"
; "Atleast2dSequence"
; "Atleast3dSequence"
; "Index"
; "IndexPut"
; "IndexPut_"
; "_IndexPutImpl_" ]
in
if
List.exists excluded_funcs ~f:(fun name ->
@ -982,7 +1001,26 @@ let write_must_wrapper funcs filename =
; "UnsafeChunk"
; "UnsafeSplit"
; "UnsafeSplitWithSizes"
; "AlignTensors" ]
; "AlignTensors"
; "UnflattenDenseTensors"
; "TensorSplit"
; "TensorSplitIndices"
; "TensorSplitTensorIndicesOrSections"
; "QuantizePerTensorTensors"
; "Dsplit"
; "DsplitArray"
; "Hsplit"
; "HsplitArray"
; "Vsplit"
; "VsplitArray"
; "DequantizeTensors"
; "Atleast1dSequence"
; "Atleast2dSequence"
; "Atleast3dSequence"
; "Index"
; "IndexPut"
; "IndexPut_"
; "_IndexPutImpl_" ]
in
if
List.exists excluded_funcs ~f:(fun name ->
@ -992,7 +1030,7 @@ let write_must_wrapper funcs filename =
match func.returns with
| `dynamic ->
pm "\n" ;
if is_method then pm "func(ts *Tensor) %s(" gofunc_name
if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name
else pm "func Must%s(" gofunc_name ;
pm "%s" go_args_list ;
pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ;
@ -1031,7 +1069,7 @@ let write_must_wrapper funcs filename =
pm "} \n"
| `bool ->
pm "\n" ;
if is_method then pm "func(ts *Tensor) %s(" gofunc_name
if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name
else pm "func Must%s(" gofunc_name ;
pm "%s" go_args_list ;
pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ;
@ -1048,7 +1086,7 @@ let write_must_wrapper funcs filename =
pm "} \n"
| `int64_t ->
pm "\n" ;
if is_method then pm "func(ts *Tensor) %s(" gofunc_name
if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name
else pm "func Must%s(" gofunc_name ;
pm "%s" go_args_list ;
pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ;
@ -1065,7 +1103,7 @@ let write_must_wrapper funcs filename =
pm "} \n"
| `double ->
pm "\n" ;
if is_method then pm "func(ts *Tensor) %s(" gofunc_name
if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name
else pm "func Must%s(" gofunc_name ;
pm "%s" go_args_list ;
pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ;
@ -1142,9 +1180,32 @@ let write_ffi funcs filename =
exported_name
(Func.c_go_args_list_notype func)
| `dynamic -> pm ""
| `bool -> pm ""
| `int64_t -> pm ""
| `double -> pm ""
| `bool ->
pm "func Atg%s(%s) bool{%s" ffifunc_name
(Func.c_go_args_list func)
(Func.c_go_args_list_body func) ;
pm "\t cResult := C.atg_%s(%s)" exported_name
(Func.c_go_args_list_notype func) ;
pm "\t cbool := *(*int)(unsafe.Pointer(&cResult))" ;
pm "\t if cbool == 1{return true}" ;
pm "\t return false" ;
pm "}"
| `int64_t ->
pm "func Atg%s(%s) int64{%s" ffifunc_name
(Func.c_go_args_list func)
(Func.c_go_args_list_body func) ;
pm "\t cResult := C.atg_%s(%s)" exported_name
(Func.c_go_args_list_notype func) ;
pm "\t return *(*int64)(unsafe.Pointer(&cResult))" ;
pm "}"
| `double ->
pm "func Atg%s(%s) float64{%s" ffifunc_name
(Func.c_go_args_list func)
(Func.c_go_args_list_body func) ;
pm "\t cResult := C.atg_%s(%s)" exported_name
(Func.c_go_args_list_notype func) ;
pm "\t return *(*float64)(unsafe.Pointer(&cResult))" ;
pm "}"
(* TODO: need more implement here *)
(* pm "func Atg%s(%s)(retValPtr *Ctensor)" *)
(* (Func.go_name exported_name) *)

View File

@ -316,8 +316,16 @@ cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cprojSize, cnumLayers, cbatchFirst, cbidirectional)
}
// Atg_CufftGetPlanCacheMaxSize wraps the C call atg__cufft_get_plan_cache_max_size
// for the given device index and returns its result as an int64.
func Atg_CufftGetPlanCacheMaxSize(deviceIndex int64) int64 {
	cIdx := *(*C.int64_t)(unsafe.Pointer(&deviceIndex))
	res := C.atg__cufft_get_plan_cache_max_size(cIdx)
	return *(*int64)(unsafe.Pointer(&res))
}
// Atg_CufftGetPlanCacheSize wraps the C call atg__cufft_get_plan_cache_size
// for the given device index and returns its result as an int64.
func Atg_CufftGetPlanCacheSize(deviceIndex int64) int64 {
	cIdx := *(*C.int64_t)(unsafe.Pointer(&deviceIndex))
	res := C.atg__cufft_get_plan_cache_size(cIdx)
	return *(*int64)(unsafe.Pointer(&res))
}
func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumprod(ptr, self, cdim)
@ -334,13 +342,22 @@ func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumsum_out(ptr, out, self, cdim)
}
// Atg_DebugHasInternalOverlap wraps the C call atg__debug_has_internal_overlap
// and returns its result as an int64.
func Atg_DebugHasInternalOverlap(self Ctensor) int64 {
	res := C.atg__debug_has_internal_overlap(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// Atg_DimArange wraps the C call atg__dim_arange, writing its tensor result through ptr.
func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64) {
	cDim := *(*C.int64_t)(unsafe.Pointer(&dim))
	C.atg__dim_arange(ptr, like, cDim)
}
// Atg_Dimi wraps the C call atg__dimi and returns its result as an int64.
func Atg_Dimi(self Ctensor) int64 {
	res := C.atg__dimi(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// Atg_Dimv wraps the C call atg__dimv and returns its result as an int64.
func Atg_Dimv(self Ctensor) int64 {
	res := C.atg__dimv(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// Atg_DirichletGrad wraps the C call atg__dirichlet_grad, writing its tensor result through ptr.
func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){
C.atg__dirichlet_grad(ptr, x, alpha, total)
}
@ -499,7 +516,12 @@ cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
C.atg__grid_sampler_2d_cpu_fallback_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
}
// Atg_HasCompatibleShallowCopyType wraps the C call atg__has_compatible_shallow_copy_type
// and reports whether it returned 1.
func Atg_HasCompatibleShallowCopyType(self Ctensor, from Ctensor) bool {
	res := C.atg__has_compatible_shallow_copy_type(self, from)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__index_copy_(ptr, self, cdim, index, source)
@ -583,7 +605,12 @@ cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1)
}
// Atg_NnpackAvailable wraps the C call atg__nnpack_available and reports whether it returned 1.
func Atg_NnpackAvailable() bool {
	res := C.atg__nnpack_available()
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
@ -603,7 +630,10 @@ cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen)
}
// Atg_Nnz wraps the C call atg__nnz and returns its result as an int64.
func Atg_Nnz(self Ctensor) int64 {
	res := C.atg__nnz(self)
	return *(*int64)(unsafe.Pointer(&res))
}
func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32){
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst)
@ -875,12 +905,30 @@ csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen)
}
// Atg_UseCudnnCtcLoss wraps the C call atg__use_cudnn_ctc_loss and reports whether it returned 1.
// NOTE(review): the slice-to-pointer conversions index element 0, so both length slices
// are assumed non-empty — confirm callers guarantee this.
func Atg_UseCudnnCtcLoss(logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64) bool {
	cInPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
	cInLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
	cTgtPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
	cTgtLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
	cBlank := *(*C.int64_t)(unsafe.Pointer(&blank))
	res := C.atg__use_cudnn_ctc_loss(logProbs, targets, cInPtr, cInLen, cTgtPtr, cTgtLen, cBlank)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// Atg_UseCudnnRnnFlattenWeight wraps the C call atg__use_cudnn_rnn_flatten_weight
// and reports whether it returned 1.
func Atg_UseCudnnRnnFlattenWeight() bool {
	res := C.atg__use_cudnn_rnn_flatten_weight()
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// Atg_Values wraps the C call atg__values, writing its tensor result through ptr.
func Atg_Values(ptr *Ctensor, self Ctensor){
C.atg__values(ptr, self)
}
// Atg_Version wraps the C call atg__version and returns its result as an int64.
func Atg_Version(self Ctensor) int64 {
	res := C.atg__version(self)
	return *(*int64)(unsafe.Pointer(&res))
}
func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__weight_norm(ptr, v, g, cdim)
@ -1099,7 +1147,15 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg_all_out(ptr, out, self, cdim, ckeepdim)
}
// AtgAllclose wraps the C call atg_allclose and reports whether it returned 1.
func AtgAllclose(self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32) bool {
	cRtol := *(*C.double)(unsafe.Pointer(&rtol))
	cAtol := *(*C.double)(unsafe.Pointer(&atol))
	cEqualNan := *(*C.int)(unsafe.Pointer(&equalNan))
	res := C.atg_allclose(self, other, cRtol, cAtol, cEqualNan)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){
cp := *(*C.double)(unsafe.Pointer(&p))
ctrain := *(*C.int)(unsafe.Pointer(&train))
@ -1671,7 +1727,14 @@ coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32))
cright := *(*C.int)(unsafe.Pointer(&right))
C.atg_bucketize_tensor_out(ptr, out, self, boundaries, coutInt32, cright)
}
// AtgCanCast wraps the C call atg_can_cast and reports whether it returned 1.
func AtgCanCast(from int32, to int32) bool {
	cFrom := *(*C.int)(unsafe.Pointer(&from))
	cTo := *(*C.int)(unsafe.Pointer(&to))
	res := C.atg_can_cast(cFrom, cTo)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
@ -2359,7 +2422,12 @@ func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){
// AtgCudnnGridSamplerBackward wraps the C call atg_cudnn_grid_sampler_backward,
// writing its tensor result(s) through ptr.
func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor){
C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput)
}
// AtgCudnnIsAcceptable wraps the C call atg_cudnn_is_acceptable and reports whether it returned 1.
func AtgCudnnIsAcceptable(self Ctensor) bool {
	res := C.atg_cudnn_is_acceptable(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func AtgCummax(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_cummax(ptr, self, cdim)
@ -2426,7 +2494,10 @@ func AtgDeg2rad_(ptr *Ctensor, self Ctensor){
// AtgDeg2radOut wraps the C call atg_deg2rad_out, writing its tensor result through ptr.
func AtgDeg2radOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_deg2rad_out(ptr, out, self)
}
// AtgDenseDim wraps the C call atg_dense_dim and returns its result as an int64.
func AtgDenseDim(self Ctensor) int64 {
	res := C.atg_dense_dim(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// AtgDequantize wraps the C call atg_dequantize, writing its tensor result through ptr.
func AtgDequantize(ptr *Ctensor, self Ctensor){
C.atg_dequantize(ptr, self)
}
@ -2740,7 +2811,12 @@ func AtgEqTensor_(ptr *Ctensor, self Ctensor, other Ctensor){
// AtgEqTensorOut wraps the C call atg_eq_tensor_out, writing its tensor result through ptr.
func AtgEqTensorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
C.atg_eq_tensor_out(ptr, out, self, other)
}
// AtgEqual wraps the C call atg_equal and reports whether it returned 1.
func AtgEqual(self Ctensor, other Ctensor) bool {
	res := C.atg_equal(self, other)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgErf wraps the C call atg_erf, writing its tensor result through ptr.
func AtgErf(ptr *Ctensor, self Ctensor){
C.atg_erf(ptr, self)
}
@ -3968,17 +4044,72 @@ func AtgInverse(ptr *Ctensor, self Ctensor){
// AtgInverseOut wraps the C call atg_inverse_out, writing its tensor result through ptr.
func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_inverse_out(ptr, out, self)
}
// AtgIsCoalesced wraps the C call atg_is_coalesced and reports whether it returned 1.
func AtgIsCoalesced(self Ctensor) bool {
	res := C.atg_is_coalesced(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsComplex wraps the C call atg_is_complex and reports whether it returned 1.
func AtgIsComplex(self Ctensor) bool {
	res := C.atg_is_complex(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsDistributed wraps the C call atg_is_distributed and reports whether it returned 1.
func AtgIsDistributed(self Ctensor) bool {
	res := C.atg_is_distributed(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsFloatingPoint wraps the C call atg_is_floating_point and reports whether it returned 1.
func AtgIsFloatingPoint(self Ctensor) bool {
	res := C.atg_is_floating_point(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsLeaf wraps the C call atg_is_leaf and reports whether it returned 1.
func AtgIsLeaf(self Ctensor) bool {
	res := C.atg_is_leaf(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsNonzero wraps the C call atg_is_nonzero and reports whether it returned 1.
func AtgIsNonzero(self Ctensor) bool {
	res := C.atg_is_nonzero(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsPinned wraps the C call atg_is_pinned and reports whether it returned 1.
func AtgIsPinned(self Ctensor) bool {
	res := C.atg_is_pinned(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsSameSize wraps the C call atg_is_same_size and reports whether it returned 1.
func AtgIsSameSize(self Ctensor, other Ctensor) bool {
	res := C.atg_is_same_size(self, other)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsSetTo wraps the C call atg_is_set_to and reports whether it returned 1.
func AtgIsSetTo(self Ctensor, tensor Ctensor) bool {
	res := C.atg_is_set_to(self, tensor)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsSigned wraps the C call atg_is_signed and reports whether it returned 1.
func AtgIsSigned(self Ctensor) bool {
	res := C.atg_is_signed(self)
	return *(*int)(unsafe.Pointer(&res)) == 1
}
// AtgIsVulkanAvailable wraps the C call atg_is_vulkan_available and reports whether it returned 1.
func AtgIsVulkanAvailable() bool {
	res := C.atg_is_vulkan_available()
	return *(*int)(unsafe.Pointer(&res)) == 1
}
func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32){
crtol := *(*C.double)(unsafe.Pointer(&rtol))
catol := *(*C.double)(unsafe.Pointer(&atol))
@ -5968,7 +6099,10 @@ func AtgOuter(ptr *Ctensor, self Ctensor, vec2 Ctensor){
// AtgOuterOut wraps the C call atg_outer_out, writing its tensor result through ptr.
func AtgOuterOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){
C.atg_outer_out(ptr, out, self, vec2)
}
// AtgOutputNr wraps the C call atg_output_nr and returns its result as an int64.
func AtgOutputNr(self Ctensor) int64 {
	res := C.atg_output_nr(self)
	return *(*int64)(unsafe.Pointer(&res))
}
func AtgPadSequence(ptr *Ctensor, sequencesData []Ctensor, sequencesLen int, batchFirst int32, paddingValue float64){
csequencesDataPtr := (*Ctensor)(unsafe.Pointer(&sequencesData[0]))
csequencesLen := *(*C.int)(unsafe.Pointer(&sequencesLen))
@ -6091,15 +6225,24 @@ func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumula
caccumulate := *(*C.int)(unsafe.Pointer(&accumulate))
C.atg_put_(ptr, self, index, source, caccumulate)
}
// AtgQPerChannelAxis wraps the C call atg_q_per_channel_axis and returns its result as an int64.
func AtgQPerChannelAxis(self Ctensor) int64 {
	res := C.atg_q_per_channel_axis(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// AtgQPerChannelScales wraps the C call atg_q_per_channel_scales, writing its tensor result through ptr.
func AtgQPerChannelScales(ptr *Ctensor, self Ctensor){
C.atg_q_per_channel_scales(ptr, self)
}
// AtgQPerChannelZeroPoints wraps the C call atg_q_per_channel_zero_points, writing its tensor result through ptr.
func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor){
C.atg_q_per_channel_zero_points(ptr, self)
}
// AtgQScale wraps the C call atg_q_scale and returns its result as a float64.
func AtgQScale(self Ctensor) float64 {
	res := C.atg_q_scale(self)
	return *(*float64)(unsafe.Pointer(&res))
}
// AtgQZeroPoint wraps the C call atg_q_zero_point and returns its result as an int64.
func AtgQZeroPoint(self Ctensor) int64 {
	res := C.atg_q_zero_point(self)
	return *(*int64)(unsafe.Pointer(&res))
}
func AtgQr(ptr *Ctensor, self Ctensor, some int32){
csome := *(*C.int)(unsafe.Pointer(&some))
C.atg_qr(ptr, self, csome)
@ -7064,7 +7207,10 @@ coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg_sparse_coo_tensor_indices_size(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
}
// AtgSparseDim wraps the C call atg_sparse_dim and returns its result as an int64.
func AtgSparseDim(self Ctensor) int64 {
	res := C.atg_sparse_dim(self)
	return *(*int64)(unsafe.Pointer(&res))
}
// AtgSparseMask wraps the C call atg_sparse_mask, writing its tensor result through ptr.
func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor){
C.atg_sparse_mask(ptr, self, mask)
}

View File

@ -77,7 +77,7 @@ func (r randnInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Ten
}
initTs := ts.MustRandn(dims, gotch.Float, device)
return initTs.MustMul1(ts.FloatScalar(r.stdev), true).MustAdd1(ts.FloatScalar(r.mean), true)
return initTs.MustMulScalar(ts.FloatScalar(r.stdev), true).MustAddScalar(ts.FloatScalar(r.mean), true)
}
func (r randnInit) Set(tensor *ts.Tensor) {

View File

@ -107,9 +107,9 @@ func NewLSTM(vs *Path, inDim, hiddenDim int64, cfg *RNNConfig) *LSTM {
// if vs.Device().IsCuda() && gotch.Cuda.CudnnIsAvailable() {
// TODO: check if Cudnn is available here!!!
if vs.Device().IsCuda() {
// NOTE. 2 is for LSTM
// ref. rnn.cpp in Pytorch
ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 2, hiddenDim, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional)
// 2: for LSTM
// 0: disables projections
ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 2, hiddenDim, 0, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional)
}
return &LSTM{
@ -227,9 +227,9 @@ func NewGRU(vs *Path, inDim, hiddenDim int64, cfg *RNNConfig) (retVal *GRU) {
}
if vs.Device().IsCuda() {
// NOTE. 3 is for GRU
// ref. rnn.cpp in Pytorch
ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 3, hiddenDim, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional)
// 3: for GRU
// 0: disable projections
ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 3, hiddenDim, 0, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional)
}
return &GRU{

View File

@ -68,8 +68,8 @@ func TestSaveLoad(t *testing.T) {
u2, v2 := add(vs2.Root())
ts.NoGrad(func() {
u1.Add1_(ts.FloatScalar(42.0))
v1.Mul1_(ts.FloatScalar(2.0))
u1.AddScalar_(ts.FloatScalar(42.0))
v1.MulScalar_(ts.FloatScalar(2.0))
})
wantU1 := float64(42.0)

View File

@ -7,8 +7,8 @@ import (
ts "github.com/sugarme/gotch/tensor"
)
func ExampleTensor_MustArange1() {
tensor := ts.MustArange1(ts.FloatScalar(0), ts.FloatScalar(12), gotch.Int64, gotch.CPU).MustView([]int64{3, 4}, true)
func ExampleTensor_MustArange() {
tensor := ts.MustArange(ts.FloatScalar(12), gotch.Int64, gotch.CPU).MustView([]int64{3, 4}, true)
fmt.Printf("%v", tensor)
@ -50,12 +50,12 @@ func ExampleTensor_Matmul() {
}
func ExampleTensor_Add1_() {
func ExampleTensor_AddScalar_() {
// In-place operation
ts3 := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
fmt.Println("Before:")
ts3.Print()
ts3.MustAdd1_(ts.FloatScalar(2.0))
ts3.MustAddScalar_(ts.FloatScalar(2.0))
fmt.Printf("After (ts3 + 2.0): \n")
ts3.Print()
ts3.MustDrop()

View File

@ -11,7 +11,7 @@ import (
func TestIntegerIndex(t *testing.T) {
// [ 0 1 2
// 3 4 5 ]
tensor := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
tensor := ts.MustArange(ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
// tensor, err := ts.NewTensorFromData([]bool{true, false, false, false, false, false}, []int64{2, 3})
// if err != nil {
// panic(err)
@ -71,7 +71,7 @@ func TestIntegerIndex(t *testing.T) {
}
func TestNewInsertAxis(t *testing.T) {
tensor := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
tensor := ts.MustArange(ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
var idxs1 []ts.TensorIndexer = []ts.TensorIndexer{
ts.NewInsertNewAxis(),
}
@ -112,7 +112,7 @@ func TestNewInsertAxis(t *testing.T) {
func TestRangeIndex(t *testing.T) {
// Range
tensor1 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
tensor1 := ts.MustArange(ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
idx1 := []ts.TensorIndexer{
ts.NewNarrow(1, 3),
}
@ -131,7 +131,7 @@ func TestRangeIndex(t *testing.T) {
}
// Full range
tensor2 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
tensor2 := ts.MustArange(ts.IntScalar(2*3), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
idx2 := []ts.TensorIndexer{
ts.NewNarrow(0, tensor2.MustSize()[0]),
}
@ -150,7 +150,7 @@ func TestRangeIndex(t *testing.T) {
}
// Range from
tensor3 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
tensor3 := ts.MustArange(ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
idx3 := []ts.TensorIndexer{
ts.NewNarrow(2, tensor3.MustSize()[0]),
}
@ -169,7 +169,7 @@ func TestRangeIndex(t *testing.T) {
}
// Range to
tensor4 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
tensor4 := ts.MustArange(ts.IntScalar(4*3), gotch.Int64, gotch.CPU).MustView([]int64{4, 3}, true)
idx4 := []ts.TensorIndexer{
ts.NewNarrow(0, 2),
}
@ -189,7 +189,7 @@ func TestRangeIndex(t *testing.T) {
}
func TestSliceIndex(t *testing.T) {
tensor1 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(6*2), gotch.Int64, gotch.CPU).MustView([]int64{6, 2}, true)
tensor1 := ts.MustArange(ts.IntScalar(6*2), gotch.Int64, gotch.CPU).MustView([]int64{6, 2}, true)
idx1 := []ts.TensorIndexer{
ts.NewSliceIndex([]int64{1, 3, 5}),
}
@ -207,7 +207,7 @@ func TestSliceIndex(t *testing.T) {
t.Errorf("Got tensor values: %v\n", got1Shape)
}
tensor2 := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(3*4), gotch.Int64, gotch.CPU).MustView([]int64{3, 4}, true)
tensor2 := ts.MustArange(ts.IntScalar(3*4), gotch.Int64, gotch.CPU).MustView([]int64{3, 4}, true)
idx2 := []ts.TensorIndexer{
ts.NewNarrow(0, tensor2.MustSize()[0]),
ts.NewSliceIndex([]int64{3, 0}),
@ -229,7 +229,7 @@ func TestSliceIndex(t *testing.T) {
}
func TestComplexIndex(t *testing.T) {
tensor := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(2*3*5*7), gotch.Int64, gotch.CPU).MustView([]int64{2, 3, 5, 7}, true)
tensor := ts.MustArange(ts.IntScalar(2*3*5*7), gotch.Int64, gotch.CPU).MustView([]int64{2, 3, 5, 7}, true)
idx := []ts.TensorIndexer{
ts.NewSelect(1),
ts.NewNarrow(1, 2),
@ -253,7 +253,7 @@ func TestComplexIndex(t *testing.T) {
}
func TestIndex3D(t *testing.T) {
tensor := ts.MustArange1(ts.IntScalar(0), ts.IntScalar(24), gotch.Int64, gotch.CPU).MustView([]int64{2, 3, 4}, true)
tensor := ts.MustArange(ts.IntScalar(24), gotch.Int64, gotch.CPU).MustView([]int64{2, 3, 4}, true)
idx1 := []ts.TensorIndexer{
ts.NewSelect(0),

View File

@ -513,7 +513,7 @@ func(ts *Tensor) Must_CumsumOut(out *Tensor, dim int64, del bool)(retVal *Tensor
return retVal
}
func(ts *Tensor) _DebugHasInternalOverlap(del bool)(retVal int64) {
func(ts *Tensor) Must_DebugHasInternalOverlap(del bool)(retVal int64) {
retVal, err := ts._DebugHasInternalOverlap(del)
if err != nil { log.Fatal(err) }
@ -529,7 +529,7 @@ func Must_DimArange(like *Tensor, dim int64)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) _Dimi(del bool)(retVal int64) {
func(ts *Tensor) Must_Dimi(del bool)(retVal int64) {
retVal, err := ts._Dimi(del)
if err != nil { log.Fatal(err) }
@ -537,7 +537,7 @@ func(ts *Tensor) _Dimi(del bool)(retVal int64) {
return retVal
}
func(ts *Tensor) _Dimv(del bool)(retVal int64) {
func(ts *Tensor) Must_Dimv(del bool)(retVal int64) {
retVal, err := ts._Dimv(del)
if err != nil { log.Fatal(err) }
@ -697,7 +697,7 @@ func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMod
return retVal
}
func(ts *Tensor) _HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool) {
func(ts *Tensor) Must_HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool) {
retVal, err := ts._HasCompatibleShallowCopyType(from, del)
if err != nil { log.Fatal(err) }
@ -713,14 +713,6 @@ func(ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor)() {
return
}
func(ts *Tensor) Must_IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool)() {
err := ts._IndexPutImpl_(indices, values, accumulate, unsafety)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_Indices(del bool)(retVal *Tensor) {
retVal, err := ts._Indices(del)
@ -873,7 +865,7 @@ func Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int
return retVal
}
func(ts *Tensor) _Nnz(del bool)(retVal int64) {
func(ts *Tensor) Must_Nnz(del bool)(retVal int64) {
retVal, err := ts._Nnz(del)
if err != nil { log.Fatal(err) }
@ -1265,7 +1257,7 @@ func(ts *Tensor) Must_Values(del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) _Version(del bool)(retVal int64) {
func(ts *Tensor) Must_Version(del bool)(retVal int64) {
retVal, err := ts._Version(del)
if err != nil { log.Fatal(err) }
@ -1697,7 +1689,7 @@ func(ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool)(retV
return retVal
}
func(ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool) {
func(ts *Tensor) MustAllclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool) {
retVal, err := ts.Allclose(other, rtol, atol, equalNan, del)
if err != nil { log.Fatal(err) }
@ -2161,14 +2153,6 @@ func(ts *Tensor) MustAtleast1d(del bool)(retVal *Tensor) {
return retVal
}
func MustAtleast1dSequence(tensors []Tensor)(retVal []Tensor) {
retVal, err := Atleast1dSequence(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast2d(del)
@ -2177,14 +2161,6 @@ func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) {
return retVal
}
func MustAtleast2dSequence(tensors []Tensor)(retVal []Tensor) {
retVal, err := Atleast2dSequence(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast3d(del)
@ -2193,14 +2169,6 @@ func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) {
return retVal
}
func MustAtleast3dSequence(tensors []Tensor)(retVal []Tensor) {
retVal, err := Atleast3dSequence(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del)
@ -3577,7 +3545,7 @@ func(ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) CudnnIsAcceptable(del bool)(retVal bool) {
func(ts *Tensor) MustCudnnIsAcceptable(del bool)(retVal bool) {
retVal, err := ts.CudnnIsAcceptable(del)
if err != nil { log.Fatal(err) }
@ -3681,7 +3649,7 @@ func(ts *Tensor) MustDeg2radOut(out *Tensor, del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) DenseDim(del bool)(retVal int64) {
func(ts *Tensor) MustDenseDim(del bool)(retVal int64) {
retVal, err := ts.DenseDim(del)
if err != nil { log.Fatal(err) }
@ -3697,14 +3665,6 @@ func(ts *Tensor) MustDequantize(del bool)(retVal *Tensor) {
return retVal
}
func MustDequantizeTensors(tensors []Tensor)(retVal []Tensor) {
retVal, err := DequantizeTensors(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDet(del bool)(retVal *Tensor) {
retVal, err := ts.Det(del)
@ -4025,22 +3985,6 @@ func(ts *Tensor) MustDropout_(p float64, train bool)() {
return
}
func(ts *Tensor) Dsplit(sections int64, del bool)(retVal []Tensor) {
retVal, err := ts.Dsplit(sections, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) DsplitArray(indices []int64, del bool)(retVal []Tensor) {
retVal, err := ts.DsplitArray(indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDstack(tensors []Tensor)(retVal *Tensor) {
retVal, err := Dstack(tensors)
@ -4225,7 +4169,7 @@ func(ts *Tensor) MustEqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *T
return retVal
}
func(ts *Tensor) Equal(other *Tensor, del bool)(retVal bool) {
func(ts *Tensor) MustEqual(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.Equal(other, del)
if err != nil { log.Fatal(err) }
@ -5809,22 +5753,6 @@ func(ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor)
return retVal
}
func(ts *Tensor) Hsplit(sections int64, del bool)(retVal []Tensor) {
retVal, err := ts.Hsplit(sections, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) HsplitArray(indices []int64, del bool)(retVal []Tensor) {
retVal, err := ts.HsplitArray(indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) {
retVal, err := Hspmm(mat1, mat2)
@ -6025,14 +5953,6 @@ func(ts *Tensor) MustImag(del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) MustIndex(indices []Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Index(indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexAdd(dim, index, source, del)
@ -6113,22 +6033,6 @@ func(ts *Tensor) MustIndexFillIntTensor_(dim int64, index *Tensor, value *Tensor
return
}
func(ts *Tensor) MustIndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts.IndexPut(indices, values, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexPut_(indices []Tensor, values *Tensor, accumulate bool)() {
err := ts.IndexPut_(indices, values, accumulate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexSelect(dim, index, del)
@ -6217,7 +6121,7 @@ func(ts *Tensor) MustInverseOut(out *Tensor, del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) IsCoalesced(del bool)(retVal bool) {
func(ts *Tensor) MustIsCoalesced(del bool)(retVal bool) {
retVal, err := ts.IsCoalesced(del)
if err != nil { log.Fatal(err) }
@ -6225,7 +6129,7 @@ func(ts *Tensor) IsCoalesced(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsComplex(del bool)(retVal bool) {
func(ts *Tensor) MustIsComplex(del bool)(retVal bool) {
retVal, err := ts.IsComplex(del)
if err != nil { log.Fatal(err) }
@ -6233,7 +6137,7 @@ func(ts *Tensor) IsComplex(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsDistributed(del bool)(retVal bool) {
func(ts *Tensor) MustIsDistributed(del bool)(retVal bool) {
retVal, err := ts.IsDistributed(del)
if err != nil { log.Fatal(err) }
@ -6241,7 +6145,7 @@ func(ts *Tensor) IsDistributed(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsFloatingPoint(del bool)(retVal bool) {
func(ts *Tensor) MustIsFloatingPoint(del bool)(retVal bool) {
retVal, err := ts.IsFloatingPoint(del)
if err != nil { log.Fatal(err) }
@ -6249,7 +6153,7 @@ func(ts *Tensor) IsFloatingPoint(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsLeaf(del bool)(retVal bool) {
func(ts *Tensor) MustIsLeaf(del bool)(retVal bool) {
retVal, err := ts.IsLeaf(del)
if err != nil { log.Fatal(err) }
@ -6257,7 +6161,7 @@ func(ts *Tensor) IsLeaf(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsNonzero(del bool)(retVal bool) {
func(ts *Tensor) MustIsNonzero(del bool)(retVal bool) {
retVal, err := ts.IsNonzero(del)
if err != nil { log.Fatal(err) }
@ -6265,7 +6169,7 @@ func(ts *Tensor) IsNonzero(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsPinned(del bool)(retVal bool) {
func(ts *Tensor) MustIsPinned(del bool)(retVal bool) {
retVal, err := ts.IsPinned(del)
if err != nil { log.Fatal(err) }
@ -6273,7 +6177,7 @@ func(ts *Tensor) IsPinned(del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsSameSize(other *Tensor, del bool)(retVal bool) {
func(ts *Tensor) MustIsSameSize(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSameSize(other, del)
if err != nil { log.Fatal(err) }
@ -6281,7 +6185,7 @@ func(ts *Tensor) IsSameSize(other *Tensor, del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsSetTo(tensor *Tensor, del bool)(retVal bool) {
func(ts *Tensor) MustIsSetTo(tensor *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSetTo(tensor, del)
if err != nil { log.Fatal(err) }
@ -6289,7 +6193,7 @@ func(ts *Tensor) IsSetTo(tensor *Tensor, del bool)(retVal bool) {
return retVal
}
func(ts *Tensor) IsSigned(del bool)(retVal bool) {
func(ts *Tensor) MustIsSigned(del bool)(retVal bool) {
retVal, err := ts.IsSigned(del)
if err != nil { log.Fatal(err) }
@ -9113,7 +9017,7 @@ func(ts *Tensor) MustOuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tenso
return retVal
}
func(ts *Tensor) OutputNr(del bool)(retVal int64) {
func(ts *Tensor) MustOutputNr(del bool)(retVal int64) {
retVal, err := ts.OutputNr(del)
if err != nil { log.Fatal(err) }
@ -9361,7 +9265,7 @@ func(ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool)() {
return
}
func(ts *Tensor) QPerChannelAxis(del bool)(retVal int64) {
func(ts *Tensor) MustQPerChannelAxis(del bool)(retVal int64) {
retVal, err := ts.QPerChannelAxis(del)
if err != nil { log.Fatal(err) }
@ -9385,7 +9289,7 @@ func(ts *Tensor) MustQPerChannelZeroPoints(del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) QScale(del bool)(retVal float64) {
func(ts *Tensor) MustQScale(del bool)(retVal float64) {
retVal, err := ts.QScale(del)
if err != nil { log.Fatal(err) }
@ -9393,7 +9297,7 @@ func(ts *Tensor) QScale(del bool)(retVal float64) {
return retVal
}
func(ts *Tensor) QZeroPoint(del bool)(retVal int64) {
func(ts *Tensor) MustQZeroPoint(del bool)(retVal int64) {
retVal, err := ts.QZeroPoint(del)
if err != nil { log.Fatal(err) }
@ -9481,14 +9385,6 @@ func(ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype got
return retVal
}
func MustQuantizePerTensorTensors(tensors []Tensor, scales *Tensor, zeroPoints *Tensor, dtype gotch.DType)(retVal []Tensor) {
retVal, err := QuantizePerTensorTensors(tensors, scales, zeroPoints, dtype)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) {
retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint)
@ -10889,7 +10785,7 @@ func MustSparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int6
return retVal
}
func(ts *Tensor) SparseDim(del bool)(retVal int64) {
func(ts *Tensor) MustSparseDim(del bool)(retVal int64) {
retVal, err := ts.SparseDim(del)
if err != nil { log.Fatal(err) }
@ -11545,30 +11441,6 @@ func(ts *Tensor) MustTanhOut(out *Tensor, del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) TensorSplit(sections int64, dim int64, del bool)(retVal []Tensor) {
retVal, err := ts.TensorSplit(sections, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) TensorSplitIndices(indices []int64, dim int64, del bool)(retVal []Tensor) {
retVal, err := ts.TensorSplitIndices(indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) TensorSplitTensorIndicesOrSections(tensorIndicesOrSections *Tensor, dim int64, del bool)(retVal []Tensor) {
retVal, err := ts.TensorSplitTensorIndicesOrSections(tensorIndicesOrSections, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del)
@ -11929,14 +11801,6 @@ func(ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool)(retVal *Tenso
return retVal
}
func MustUnflattenDenseTensors(flat *Tensor, tensors []Tensor)(retVal []Tensor) {
retVal, err := UnflattenDenseTensors(flat, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unfold(dimension, size, step, del)
@ -12321,22 +12185,6 @@ func(ts *Tensor) MustViewDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
return retVal
}
func(ts *Tensor) Vsplit(sections int64, del bool)(retVal []Tensor) {
retVal, err := ts.Vsplit(sections, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) VsplitArray(indices []int64, del bool)(retVal []Tensor) {
retVal, err := ts.VsplitArray(indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVstack(tensors []Tensor)(retVal *Tensor) {
retVal, err := Vstack(tensors)

View File

@ -20,7 +20,7 @@ func (ts *Tensor) CrossEntropyForLogits(targets *Tensor) (retVal *Tensor) {
// targets represent ground-truth.
func (ts *Tensor) AccuracyForLogits(targets *Tensor) (retVal *Tensor) {
argmax := ts.MustArgmax([]int64{-1}, false, false)
eq1 := argmax.MustEq1(targets, true)
eq1 := argmax.MustEqTensor(targets, true)
return eq1.MustTotype(gotch.Float, true).MustMean(gotch.Float, true)
}

View File

@ -8,7 +8,7 @@ import (
)
func ExampleTensor_Split(t *testing.T) {
tensor := ts.MustArange1(ts.FloatScalar(0), ts.FloatScalar(10), gotch.Float, gotch.CPU).MustView([]int64{5, 2}, true)
tensor := ts.MustArange(ts.FloatScalar(10), gotch.Float, gotch.CPU).MustView([]int64{5, 2}, true)
splitTensors := tensor.MustSplit(2, 0, false)
for _, t := range splitTensors {
@ -27,7 +27,7 @@ func ExampleTensor_Split(t *testing.T) {
}
func ExampleTensorSplitWithSizes(t *testing.T) {
tensor := ts.MustArange1(ts.FloatScalar(0), ts.FloatScalar(10), gotch.Float, gotch.CPU).MustView([]int64{5, 2}, true)
tensor := ts.MustArange(ts.FloatScalar(10), gotch.Float, gotch.CPU).MustView([]int64{5, 2}, true)
splitTensors := tensor.MustSplitWithSizes([]int64{1, 4}, 0, false)
for _, t := range splitTensors {

View File

@ -1168,24 +1168,6 @@ func(ts *Tensor) _IndexCopy_(dim int64, index *Tensor, source *Tensor)(err error
return err
}
func(ts *Tensor) _IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
cunsafety := int32(0)
if unsafety { cunsafety = int32(1) }
lib.Atg_IndexPutImpl_(ptr, ts.ctensor, indices, values.ctensor, caccumulate, cunsafety)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func(ts *Tensor) _Indices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -3530,20 +3512,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func Atleast1dSequence(tensors []Tensor)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgAtleast1dSequence(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) Atleast2d(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -3557,20 +3525,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func Atleast2dSequence(tensors []Tensor)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgAtleast2dSequence(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) Atleast3d(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -3584,20 +3538,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func Atleast3dSequence(tensors []Tensor)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgAtleast3dSequence(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -6165,20 +6105,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func DequantizeTensors(tensors []Tensor)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgDequantizeTensors(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) Det(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -6689,32 +6615,6 @@ lib.AtgDropout_(ptr, ts.ctensor, p, ctrain)
return err
}
func(ts *Tensor) Dsplit(sections int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDsplit(ptr, ts.ctensor, sections)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) DsplitArray(indices []int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDsplitArray(ptr, ts.ctensor, indices, len(indices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func Dstack(tensors []Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -9638,32 +9538,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func(ts *Tensor) Hsplit(sections int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHsplit(ptr, ts.ctensor, sections)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) HsplitArray(indices []int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHsplitArray(ptr, ts.ctensor, indices, len(indices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func Hspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -9983,21 +9857,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func(ts *Tensor) Index(indices []Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
lib.AtgIndex(ptr, ts.ctensor, indices)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -10123,39 +9982,6 @@ func(ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)(er
return err
}
func(ts *Tensor) IndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgIndexPut(ptr, ts.ctensor, indices, values.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) IndexPut_(indices []Tensor, values *Tensor, accumulate bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgIndexPut_(ptr, ts.ctensor, indices, values.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func(ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -15918,20 +15744,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func QuantizePerTensorTensors(tensors []Tensor, scales *Tensor, zeroPoints *Tensor, dtype gotch.DType)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgQuantizePerTensorTensors(ptr, ctensors, len(ctensors), scales.ctensor, zeroPoints.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -19329,45 +19141,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func(ts *Tensor) TensorSplit(sections int64, dim int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTensorSplit(ptr, ts.ctensor, sections, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) TensorSplitIndices(indices []int64, dim int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTensorSplitIndices(ptr, ts.ctensor, indices, len(indices), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) TensorSplitTensorIndicesOrSections(tensorIndicesOrSections *Tensor, dim int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTensorSplitTensorIndicesOrSections(ptr, ts.ctensor, tensorIndicesOrSections.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -19956,20 +19729,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func UnflattenDenseTensors(flat *Tensor, tensors []Tensor)(retVal []Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgUnflattenDenseTensors(ptr, flat.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
@ -20979,32 +20738,6 @@ if del { defer ts.MustDrop() }
return retVal, err
}
func(ts *Tensor) Vsplit(sections int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVsplit(ptr, ts.ctensor, sections)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func(ts *Tensor) VsplitArray(indices []int64, del bool)(retVal []Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVsplitArray(ptr, ts.ctensor, indices, len(indices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
func Vstack(tensors []Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))

View File

@ -1207,7 +1207,7 @@ func (ts *Tensor) Onehot(labels int64) *Tensor {
inputTs := unsqueezeTs.MustTotype(gotch.Int64, true)
zerosTs := MustZeros(dims, gotch.Float, gotch.CPU)
retVal := zerosTs.MustScatter1(-1, inputTs, FloatScalar(1.0), true)
retVal := zerosTs.MustScatterValue(-1, inputTs, FloatScalar(1.0), true)
inputTs.MustDrop()
return retVal

View File

@ -9,9 +9,9 @@ import (
)
func TestTensorInit(t *testing.T) {
tensor := ts.MustArange1(ts.IntScalar(1), ts.IntScalar(5), gotch.Int64, gotch.CPU)
tensor := ts.MustArange(ts.IntScalar(5), gotch.Int64, gotch.CPU)
want := []float64{1, 2, 3, 4}
want := []float64{0, 1, 2, 3, 4}
got := tensor.Float64Values()
if !reflect.DeepEqual(want, got) {
@ -23,9 +23,9 @@ func TestTensorInit(t *testing.T) {
func TestInplaceAssign(t *testing.T) {
tensor := ts.MustOfSlice([]int64{3, 1, 4, 1, 5})
tensor.MustAdd1_(ts.IntScalar(1))
tensor.MustMul1_(ts.IntScalar(2))
tensor.MustSub1_(ts.IntScalar(1))
tensor.MustAddScalar_(ts.IntScalar(1))
tensor.MustMulScalar_(ts.IntScalar(2))
tensor.MustSubScalar_(ts.IntScalar(1))
want := []int64{7, 3, 9, 3, 11}
got := tensor.Vals()
@ -38,7 +38,7 @@ func TestInplaceAssign(t *testing.T) {
func TestConstantOp(t *testing.T) {
tensor := ts.MustOfSlice([]int64{3, 9, 3, 11})
resTs1 := tensor.MustMul1(ts.IntScalar(-1), true)
resTs1 := tensor.MustMulScalar(ts.IntScalar(-1), true)
want1 := []int64{-3, -9, -3, -11}
got1 := resTs1.Vals()

View File

@ -3,6 +3,7 @@ package aug
import (
"fmt"
"log"
// "math"
"github.com/sugarme/gotch"
@ -38,11 +39,11 @@ func (c *RandomCrop) params(x *ts.Tensor) (int64, int64, int64, int64) {
return 0, 0, h, w
}
iTs := ts.MustRandint1(0, h-th+1, []int64{1}, gotch.Int64, gotch.CPU)
iTs := ts.MustRandint(h-th+1, []int64{1}, gotch.Int64, gotch.CPU)
i := iTs.Int64Values()[0]
iTs.MustDrop()
jTs := ts.MustRandint1(0, w-tw+1, []int64{1}, gotch.Int64, gotch.CPU)
jTs := ts.MustRandint(w-tw+1, []int64{1}, gotch.Int64, gotch.CPU)
j := jTs.Int64Values()[0]
jTs.MustDrop()

View File

@ -130,11 +130,11 @@ func (rc *RandomCutout) cutoutParams(x *ts.Tensor) (int64, int64, int64, int64,
v := ts.MustOfSlice(rc.rgbVal).MustUnsqueeze(1, true).MustUnsqueeze(1, true)
// i = torch.randint(0, img_h - h + 1, size=(1, )).item()
iTs := ts.MustRandint1(0, imgH-h+1, []int64{1}, gotch.Int64, gotch.CPU)
iTs := ts.MustRandint(imgH-h+1, []int64{1}, gotch.Int64, gotch.CPU)
i := iTs.Int64Values()[0]
iTs.MustDrop()
// j = torch.randint(0, img_w - w + 1, size=(1, )).item()
jTs := ts.MustRandint1(0, imgW-w+1, []int64{1}, gotch.Int64, gotch.CPU)
jTs := ts.MustRandint(imgW-w+1, []int64{1}, gotch.Int64, gotch.CPU)
j := jTs.Int64Values()[0]
jTs.MustDrop()
return i, j, h, w, v

View File

@ -16,7 +16,7 @@ func gaussianKernel1D(ks int64, sigma float64, dtype gotch.DType, device gotch.D
x := ts.MustLinspace(ts.IntScalar(-ksHalf), ts.IntScalar(ksHalf), []int64{ks}, dtype, device)
// pdf = torch.exp(-0.5 * (x / sigma).pow(2))
pdf := x.MustDiv1(ts.FloatScalar(sigma), true).MustPow(ts.IntScalar(2), true).MustMul1(ts.FloatScalar(0.5), true).MustExp(true)
pdf := x.MustDivScalar(ts.FloatScalar(sigma), true).MustPow(ts.IntScalar(2), true).MustMulScalar(ts.FloatScalar(0.5), true).MustExp(true)
// kernel1d = pdf / pdf.sum()
pdfSum := pdf.MustSum(dtype, false)
kernel1d := pdf.MustDiv(pdfSum, true)
@ -76,7 +76,7 @@ func castSqueezeOut(x *ts.Tensor, needCast, needSqueeze bool, outDType gotch.DTy
)
switch needSqueeze {
case true:
squeezeTs = x.MustSqueeze1(0, false)
squeezeTs = x.MustSqueezeDim(0, false)
case false:
squeezeTs = x.MustShallowClone()
}
@ -192,8 +192,8 @@ func blend(img1, img2 *ts.Tensor, ratio float64) *ts.Tensor {
bound := 255.0
// (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)
i1 := img1.MustMul1(ts.FloatScalar(ratio), false)
i2 := img2.MustMul1(ts.FloatScalar(1.0-ratio), false)
i1 := img1.MustMulScalar(ts.FloatScalar(ratio), false)
i2 := img2.MustMulScalar(ts.FloatScalar(1.0-ratio), false)
sumTs := i1.MustAdd(i2, true)
i2.MustDrop()
out := sumTs.MustClamp(ts.FloatScalar(0), ts.FloatScalar(bound), true).MustTotype(dtype, true)
@ -262,9 +262,9 @@ func rgb2Gray(x *ts.Tensor, outChanOpt ...int64) *ts.Tensor {
// This implementation closely follows the TF one:
// https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
// l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
rmul := r.MustMul1(ts.FloatScalar(0.2989), true)
gmul := g.MustMul1(ts.FloatScalar(0.587), true)
bmul := b.MustMul1(ts.FloatScalar(0.114), true)
rmul := r.MustMulScalar(ts.FloatScalar(0.2989), true)
gmul := g.MustMulScalar(ts.FloatScalar(0.587), true)
bmul := b.MustMulScalar(ts.FloatScalar(0.114), true)
addTs := rmul.MustAdd(gmul, true).MustAdd(bmul, true)
gmul.MustDrop()
bmul.MustDrop()
@ -288,7 +288,7 @@ func adjustContrast(x *ts.Tensor, contrast float64) *ts.Tensor {
grayTs := rgb2Gray(x).MustTotype(x.DType(), true)
mean := grayTs.MustMean1([]int64{-3, -2, -1}, true, gotch.Float, true).MustTotype(x.DType(), true)
mean := grayTs.MustMeanDim([]int64{-3, -2, -1}, true, gotch.Float, true).MustTotype(x.DType(), true)
out := blend(x, mean, contrast)
mean.MustDrop()
@ -331,7 +331,7 @@ func rgb2HSV(x *ts.Tensor) *ts.Tensor {
// # we don't need to deal with it in case we save the NaN in a buffer in
// # backprop, if it is ever supported, but it doesn't hurt to do so.
// eqc = maxc == minc
eqC := maxC.MustEq1(minC, false)
eqC := maxC.MustEqTensor(minC, false)
// cr = maxc - minc
cr := maxC.MustSub(minC, false)
@ -340,7 +340,7 @@ func rgb2HSV(x *ts.Tensor) *ts.Tensor {
ones := maxC.MustOnesLike(false)
// s = cr / torch.where(eqc, ones, maxc)
condMaxC := ones.MustWhere1(eqC, maxC, false)
condMaxC := ones.MustWhereSelf(eqC, maxC, false)
s := cr.MustDiv(condMaxC, false)
// # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
@ -351,27 +351,27 @@ func rgb2HSV(x *ts.Tensor) *ts.Tensor {
// rc = (maxc - r) / cr_divisor
// gc = (maxc - g) / cr_divisor
// bc = (maxc - b) / cr_divisor
crDivisor := ones.MustWhere1(eqC, cr, true) // delete ones
crDivisor := ones.MustWhereSelf(eqC, cr, true) // delete ones
rc := maxC.MustSub(r, false).MustDiv(crDivisor, true)
gc := maxC.MustSub(g, false).MustDiv(crDivisor, true)
bc := maxC.MustSub(b, false).MustDiv(crDivisor, true)
// hr = (maxc == r) * (bc - gc)
rSub := bc.MustSub(gc, false)
hr := maxC.MustEq1(r, false).MustMul(rSub, true)
hr := maxC.MustEqTensor(r, false).MustMul(rSub, true)
rSub.MustDrop()
// hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
maxcCond1 := maxC.MustNotEqual1(r, false)
hgMul := rc.MustSub(bc, false).MustAdd1(ts.FloatScalar(2.0), true)
hg := maxC.MustEq1(g, false).MustLogicalAnd(maxcCond1, true).MustMul(hgMul, true)
maxcCond1 := maxC.MustNotEqualTensor(r, false)
hgMul := rc.MustSub(bc, false).MustAddScalar(ts.FloatScalar(2.0), true)
hg := maxC.MustEqTensor(g, false).MustLogicalAnd(maxcCond1, true).MustMul(hgMul, true)
maxcCond1.MustDrop()
hgMul.MustDrop()
// hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
maxcCond2 := maxC.MustNotEqual1(r, false)
hbMul := gc.MustSub(rc, false).MustAdd1(ts.FloatScalar(4.0), true)
hb := maxC.MustNotEqual1(g, false).MustLogicalAnd(maxcCond2, true).MustMul(hbMul, true)
maxcCond2 := maxC.MustNotEqualTensor(r, false)
hbMul := gc.MustSub(rc, false).MustAddScalar(ts.FloatScalar(4.0), true)
hb := maxC.MustNotEqualTensor(g, false).MustLogicalAnd(maxcCond2, true).MustMul(hbMul, true)
maxcCond2.MustDrop()
hbMul.MustDrop()
@ -379,8 +379,8 @@ func rgb2HSV(x *ts.Tensor) *ts.Tensor {
h1 := hr.MustAdd(hg, false).MustAdd(hb, true)
// h = torch.fmod((h / 6.0 + 1.0), 1.0)
h2 := h1.MustDiv1(ts.FloatScalar(6.0), true).MustAdd1(ts.FloatScalar(1.0), true) // delete h1
h3 := h2.MustFmod(ts.FloatScalar(1.0), true) // delete h2
h2 := h1.MustDivScalar(ts.FloatScalar(6.0), true).MustAddScalar(ts.FloatScalar(1.0), true) // delete h1
h3 := h2.MustFmod(ts.FloatScalar(1.0), true) // delete h2
// torch.stack((h, s, maxc), dim=-3)
out := ts.MustStack([]ts.Tensor{*h3, *s, *maxC}, -3)
@ -413,26 +413,26 @@ func hsv2RGB(x *ts.Tensor) *ts.Tensor {
s := &hsvTs[1]
v := &hsvTs[2]
// i = torch.floor(h * 6.0)
i := h.MustMul1(ts.FloatScalar(6.0), false).MustFloor(true)
i := h.MustMulScalar(ts.FloatScalar(6.0), false).MustFloor(true)
// f = (h * 6.0) - i
f := h.MustMul1(ts.FloatScalar(6.0), false).MustSub(i, true)
f := h.MustMulScalar(ts.FloatScalar(6.0), false).MustSub(i, true)
// p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
x1 := s.MustMul1(ts.FloatScalar(-1), false).MustAdd1(ts.FloatScalar(1.0), true)
x1 := s.MustMulScalar(ts.FloatScalar(-1), false).MustAddScalar(ts.FloatScalar(1.0), true)
p := v.MustMul(x1, false).MustClamp(ts.FloatScalar(0.0), ts.FloatScalar(1.0), true)
x1.MustDrop()
// q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
x2 := s.MustMul(f, false).MustMul1(ts.FloatScalar(-1), true).MustAdd1(ts.FloatScalar(1.0), true)
x2 := s.MustMul(f, false).MustMulScalar(ts.FloatScalar(-1), true).MustAddScalar(ts.FloatScalar(1.0), true)
q := v.MustMul(x2, false).MustClamp(ts.FloatScalar(0.0), ts.FloatScalar(1.0), true)
x2.MustDrop()
//t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
// step1. s * (1.0 - f)
sub1 := f.MustMul1(ts.FloatScalar(-1), false).MustAdd1(ts.FloatScalar(1.0), true).MustMul(s, true)
sub1 := f.MustMulScalar(ts.FloatScalar(-1), false).MustAddScalar(ts.FloatScalar(1.0), true).MustMul(s, true)
// step 2: v *(1.0 - step1)
x3 := sub1.MustMul1(ts.FloatScalar(-1), true).MustAdd1(ts.FloatScalar(1.0), true).MustMul(v, true) // deleted sub1
t := x3.MustClamp(ts.FloatScalar(0.0), ts.FloatScalar(1.0), true) // deleted x3
x3 := sub1.MustMulScalar(ts.FloatScalar(-1), true).MustAddScalar(ts.FloatScalar(1.0), true).MustMul(v, true) // deleted sub1
t := x3.MustClamp(ts.FloatScalar(0.0), ts.FloatScalar(1.0), true) // deleted x3
// i = i.to(dtype=torch.int32)
i = i.MustTotype(gotch.Int, true)
@ -441,7 +441,7 @@ func hsv2RGB(x *ts.Tensor) *ts.Tensor {
// torch.arange(6, device=i.device).view(-1, 1, 1)
x4 := ts.MustArange(ts.FloatScalar(6), gotch.Float, iremainder.MustDevice()).MustView([]int64{-1, 1, 1}, true)
// mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)
mask := iremainder.MustUnsqueeze(-3, true).MustEq1(x4, true).MustTotype(x.DType(), true) // delete iremainder
mask := iremainder.MustUnsqueeze(-3, true).MustEqTensor(x4, true).MustTotype(x.DType(), true) // delete iremainder
x4.MustDrop()
// a1 = torch.stack((v, q, p, p, t, v), dim=-3)
@ -487,7 +487,7 @@ func adjustHue(x *ts.Tensor, hue float64) *ts.Tensor {
return out
}
imgFl := x.MustTotype(gotch.Float, false).MustDiv1(ts.FloatScalar(255.0), true)
imgFl := x.MustTotype(gotch.Float, false).MustDivScalar(ts.FloatScalar(255.0), true)
hsvImg := rgb2HSV(imgFl)
hsvTs := hsvImg.MustUnbind(-3, true)
@ -495,13 +495,13 @@ func adjustHue(x *ts.Tensor, hue float64) *ts.Tensor {
s := &hsvTs[1]
v := &hsvTs[2]
// h = (h + hue_factor) % 1.0
hAdj := h.MustAdd1(ts.FloatScalar(hue), false).MustRemainder(ts.FloatScalar(1.0), true)
hAdj := h.MustAddScalar(ts.FloatScalar(hue), false).MustRemainder(ts.FloatScalar(1.0), true)
hsvAdj := ts.MustStack([]ts.Tensor{*hAdj, *s, *v}, -3)
imgHueAdj := hsv2RGB(hsvAdj)
out := imgHueAdj.MustMul1(ts.FloatScalar(255.0), true)
out := imgHueAdj.MustMulScalar(ts.FloatScalar(255.0), true)
imgFl.MustDrop()
h.MustDrop()
@ -658,7 +658,7 @@ func cutout(x *ts.Tensor, top, left, height, width int64, rgbVal []int64) *ts.Te
srcIdx := []ts.TensorIndexer{cIdx, hNar, wNar}
view := output.Idx(srcIdx)
oneTs := view.MustOnesLike(false)
vTs := oneTs.MustMul1(ts.IntScalar(rgbVal[i]), true)
vTs := oneTs.MustMulScalar(ts.IntScalar(rgbVal[i]), true)
view.Copy_(vTs)
vTs.MustDrop()
view.MustDrop()
@ -760,7 +760,7 @@ func applyGridTransform(x, gridInput *ts.Tensor, mode string, fillValue []float6
fillImg := ts.MustOfSlice(fillValue).MustTotype(image.DType(), true).MustTo(image.MustDevice(), true).MustView([]int64{1, 3, 1, 1}, true).MustExpandAs(image, true)
// img = img * mask + (1.0 - mask) * fill_img
addTs := mask.MustMul1(ts.FloatScalar(-1), false).MustAdd1(ts.FloatScalar(1.0), true).MustMul(fillImg, true)
addTs := mask.MustMulScalar(ts.FloatScalar(-1), false).MustAddScalar(ts.FloatScalar(1.0), true).MustMul(fillImg, true)
imgOut := image.MustMul(mask, true).MustAdd(addTs, true)
addTs.MustDrop()
mask.MustDrop()
@ -817,7 +817,7 @@ func perspectiveCoeff(startPoints, endPoints [][]int64) []float64 {
res := bMat.MustLstsq(aMat, true)
aMat.MustDrop()
outputTs := res.MustSqueeze1(1, true)
outputTs := res.MustSqueezeDim(1, true)
output := outputTs.Float64Values()
outputTs.MustDrop()
@ -897,7 +897,7 @@ func perspectiveGrid(coef []float64, ow, oh int64, dtype gotch.DType, device got
rescaledTheta1.MustDrop()
rescaledTheta2.MustDrop()
outputGrid := outputGrid1.MustDiv(outputGrid2, true).MustSub1(ts.FloatScalar(1.0), true).MustView([]int64{1, oh, ow, 2}, true)
outputGrid := outputGrid1.MustDiv(outputGrid2, true).MustSubScalar(ts.FloatScalar(1.0), true).MustView([]int64{1, oh, ow, 2}, true)
outputGrid2.MustDrop()
baseGrid.MustDrop()
@ -1132,7 +1132,7 @@ func solarize(img *ts.Tensor, threshold float64) *ts.Tensor {
// return torch.where(img >= threshold, inverted_img, img)
conditionTs := img.MustGe(ts.FloatScalar(threshold), false)
out := img.MustWhere1(conditionTs, invertedImg, false)
out := img.MustWhereSelf(conditionTs, invertedImg, false)
invertedImg.MustDrop()
conditionTs.MustDrop()
@ -1153,7 +1153,7 @@ func invert(img *ts.Tensor) *ts.Tensor {
var bound int64 = 255
// return bound - img
out := img.MustMul1(ts.IntScalar(-1), false).MustAdd1(ts.IntScalar(bound), true)
out := img.MustMulScalar(ts.IntScalar(-1), false).MustAddScalar(ts.IntScalar(bound), true)
return out
}
@ -1201,7 +1201,7 @@ func autocontrast(img *ts.Tensor) *ts.Tensor {
// eq_idxs = torch.where(minimum == maximum)[0]
// NOTE. Eq(minTs, maxTs) give [n, c, 1, 1] or [channels, 1, 1]
eqIdx := minTs.MustEq1(maxTs, false).MustSqueeze1(-1, true).MustSqueeze1(-1, true).MustTotype(gotch.Int64, true)
eqIdx := minTs.MustEqTensor(maxTs, false).MustSqueezeDim(-1, true).MustSqueezeDim(-1, true).MustTotype(gotch.Int64, true)
// minimum[eq_idxs] = 0
minTsView := minTs.MustIndexSelect(0, eqIdx, false)
@ -1212,13 +1212,13 @@ func autocontrast(img *ts.Tensor) *ts.Tensor {
// maximum[eq_idxs] = bound
maxTsView := maxTs.MustIndexSelect(0, eqIdx, false)
boundTs := maxTsView.MustOnesLike(false).MustMul1(ts.FloatScalar(bound), true)
boundTs := maxTsView.MustOnesLike(false).MustMulScalar(ts.FloatScalar(bound), true)
maxTsView.Copy_(boundTs)
boundTs.MustDrop()
maxTsView.MustDrop()
// scale = bound / (maximum - minimum)
scale := maxTs.MustSub(minTs, false).MustPow(ts.IntScalar(-1), true).MustMul1(ts.FloatScalar(bound), true)
scale := maxTs.MustSub(minTs, false).MustPow(ts.IntScalar(-1), true).MustMulScalar(ts.FloatScalar(bound), true)
//
// return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)
out := img.MustSub(minTs, false).MustMul(scale, true).MustClamp(ts.IntScalar(0), ts.FloatScalar(bound), true).MustTotype(dtype, true)
@ -1265,7 +1265,7 @@ func blurredDegenerateImage(img *ts.Tensor) *ts.Tensor {
// kernel[1, 1] = 5.0
kernelView := kernel.MustNarrow(1, 1, 1, false).MustNarrow(0, 1, 1, true)
centerVal := kernelView.MustOnesLike(false).MustMul1(ts.FloatScalar(5.0), true)
centerVal := kernelView.MustOnesLike(false).MustMulScalar(ts.FloatScalar(5.0), true)
kernelView.Copy_(centerVal) // center kernel value
centerVal.MustDrop()
kernelView.MustDrop()
@ -1393,7 +1393,7 @@ func scaleChannel(imgChan *ts.Tensor) *ts.Tensor {
// step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode='floor')
histoLen := nonzeroHisto.MustSize()[0]
step := nonzeroHisto.MustNarrow(0, 0, histoLen-1, true).MustSum(gotch.Float, true).MustFloorDivide1(ts.FloatScalar(255.0), true)
step := nonzeroHisto.MustNarrow(0, 0, histoLen-1, true).MustSum(gotch.Float, true).MustFloorDivideScalar(ts.FloatScalar(255.0), true)
stepVal := step.Float64Values()[0]
if stepVal == 0 {
@ -1404,7 +1404,7 @@ func scaleChannel(imgChan *ts.Tensor) *ts.Tensor {
}
// lut = torch.div(torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode='floor'), step, rounding_mode='floor')
halfStep := step.MustFloorDivide1(ts.FloatScalar(2.0), false)
halfStep := step.MustFloorDivideScalar(ts.FloatScalar(2.0), false)
lut := histo.Must_Cumsum(0, true).MustAdd(halfStep, true).MustFloorDivide(step, true)
step.MustDrop()
halfStep.MustDrop()
@ -1491,7 +1491,7 @@ func Byte2FloatImage(x *ts.Tensor) *ts.Tensor {
panic(err)
}
return x.MustDiv1(ts.FloatScalar(255.0), false)
return x.MustDivScalar(ts.FloatScalar(255.0), false)
}
// Float2ByteImage converts float dtype image to uint8 dtype image.
@ -1503,5 +1503,5 @@ func Float2ByteImage(x *ts.Tensor) *ts.Tensor {
panic(err)
}
return x.MustMul1(ts.IntScalar(255), false).MustTotype(gotch.Uint8, true)
return x.MustMulScalar(ts.IntScalar(255), false).MustTotype(gotch.Uint8, true)
}

View File

@ -107,11 +107,11 @@ func (rp *RandomPerspective) getParams(w, h int64) ([][]int64, [][]int64) {
// int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
// ]
tlVal1 := int64(rp.distortionScale*float64(halfW)) + 1
tlTs1 := ts.MustRandint1(0, tlVal1, []int64{1}, gotch.Int64, gotch.CPU)
tlTs1 := ts.MustRandint(tlVal1, []int64{1}, gotch.Int64, gotch.CPU)
tl1 := tlTs1.Int64Values()[0]
tlTs1.MustDrop()
tlVal2 := int64(rp.distortionScale*float64(halfH)) + 1
tlTs2 := ts.MustRandint1(0, tlVal2, []int64{1}, gotch.Int64, gotch.CPU)
tlTs2 := ts.MustRandint(tlVal2, []int64{1}, gotch.Int64, gotch.CPU)
tl2 := tlTs2.Int64Values()[0]
tlTs2.MustDrop()
topLeft = []int64{tl1, tl2}
@ -121,11 +121,11 @@ func (rp *RandomPerspective) getParams(w, h int64) ([][]int64, [][]int64) {
// int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
// ]
trVal1 := w - int64(rp.distortionScale*float64(halfW)) - 1
trTs1 := ts.MustRandint1(trVal1, w, []int64{1}, gotch.Int64, gotch.CPU)
trTs1 := ts.MustRandintLow(trVal1, w, []int64{1}, gotch.Int64, gotch.CPU)
tr1 := trTs1.Int64Values()[0]
trTs1.MustDrop()
trVal2 := int64(rp.distortionScale*float64(halfH)) + 1
trTs2 := ts.MustRandint1(0, trVal2, []int64{1}, gotch.Int64, gotch.CPU)
trTs2 := ts.MustRandint(trVal2, []int64{1}, gotch.Int64, gotch.CPU)
tr2 := trTs2.Int64Values()[0]
trTs2.MustDrop()
topRight = []int64{tr1, tr2}
@ -135,11 +135,11 @@ func (rp *RandomPerspective) getParams(w, h int64) ([][]int64, [][]int64) {
// int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
// ]
brVal1 := w - int64(rp.distortionScale*float64(halfW)) - 1
brTs1 := ts.MustRandint1(brVal1, w, []int64{1}, gotch.Int64, gotch.CPU)
brTs1 := ts.MustRandintLow(brVal1, w, []int64{1}, gotch.Int64, gotch.CPU)
br1 := brTs1.Int64Values()[0]
brTs1.MustDrop()
brVal2 := h - int64(rp.distortionScale*float64(halfH)) - 1
brTs2 := ts.MustRandint1(brVal2, h, []int64{1}, gotch.Int64, gotch.CPU)
brTs2 := ts.MustRandintLow(brVal2, h, []int64{1}, gotch.Int64, gotch.CPU)
br2 := brTs2.Int64Values()[0]
brTs2.MustDrop()
bottomRight = []int64{br1, br2}
@ -149,11 +149,11 @@ func (rp *RandomPerspective) getParams(w, h int64) ([][]int64, [][]int64) {
// int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
// ]
blVal1 := int64(rp.distortionScale*float64(halfW)) + 1
blTs1 := ts.MustRandint1(0, blVal1, []int64{1}, gotch.Int64, gotch.CPU)
blTs1 := ts.MustRandint(blVal1, []int64{1}, gotch.Int64, gotch.CPU)
bl1 := blTs1.Int64Values()[0]
blTs1.MustDrop()
blVal2 := h - int64(rp.distortionScale*float64(halfH)) - 1
blTs2 := ts.MustRandint1(blVal2, h, []int64{1}, gotch.Int64, gotch.CPU)
blTs2 := ts.MustRandintLow(blVal2, h, []int64{1}, gotch.Int64, gotch.CPU)
bl2 := blTs2.Int64Values()[0]
blTs2.MustDrop()
bottomLeft = []int64{bl1, bl2}

View File

@ -67,7 +67,7 @@ func readFile(filename string) (imagesTs *ts.Tensor, labelsTs *ts.Tensor) {
}
tmp1 := images.MustTotype(gotch.Float, true)
imagesTs = tmp1.MustDiv1(ts.FloatScalar(255.0), true)
imagesTs = tmp1.MustDivScalar(ts.FloatScalar(255.0), true)
labelsTs = labels

View File

@ -313,8 +313,8 @@ func efficientnet(p *nn.Path, params *params, nclasses int64) ts.ModuleT {
tmp6.MustDrop()
tmp8 := tmp7.MustAdaptiveAvgPool2d([]int64{1, 1}, false)
tmp7.MustDrop()
tmp9 := tmp8.MustSqueeze1(-1, true)
tmp10 := tmp9.MustSqueeze1(-1, true)
tmp9 := tmp8.MustSqueezeDim(-1, true)
tmp10 := tmp9.MustSqueezeDim(-1, true)
res := tmp10.ApplyT(classifier, train)
tmp10.MustDrop()

View File

@ -68,7 +68,7 @@ func Save(tensor *ts.Tensor, path string) error {
var tsCHW, tsHWC *ts.Tensor
switch {
case len(shape) == 4 && shape[0] == 1:
tsCHW = t.MustSqueeze1(int64(0), true)
tsCHW = t.MustSqueezeDim(int64(0), true)
chwTs := chwToHWC(tsCHW)
tsCHW.MustDrop()
tsHWC = chwTs.MustTo(gotch.CPU, true)

View File

@ -4,6 +4,7 @@ import (
"fmt"
"io/ioutil"
"log"
// "os"
"path/filepath"
"reflect"
@ -38,7 +39,7 @@ func (in *ImageNet) Normalize(tensor *ts.Tensor) (*ts.Tensor, error) {
return nil, err
}
resDiv1, err := res.Div1(ts.FloatScalar(float64(255.0)), true)
resDiv1, err := res.DivScalar(ts.FloatScalar(float64(255.0)), true)
if err != nil {
return nil, err
}
@ -69,7 +70,7 @@ func (in *ImageNet) UnNormalize(tensor *ts.Tensor) (*ts.Tensor, error) {
return nil, err
}
resMul1, err := resAdd.Mul1(ts.FloatScalar(float64(255.0)), true)
resMul1, err := resAdd.MulScalar(ts.FloatScalar(float64(255.0)), true)
if err != nil {
return nil, err
}
@ -263,7 +264,7 @@ func (in *ImageNet) LoadFromDir(path string) (*Dataset, error) {
trainImages = append(trainImages, *trainTs)
trainLabelOnes := ts.MustOnes([]int64{ntrainTs}, gotch.Int64, gotch.CPU)
trainLabels = append(trainLabels, *trainLabelOnes.MustMul1(ts.IntScalar(labelIndex), true))
trainLabels = append(trainLabels, *trainLabelOnes.MustMulScalar(ts.IntScalar(labelIndex), true))
// test
testDir := fmt.Sprintf("%v/%v", validPath, labelDir)
@ -276,7 +277,7 @@ func (in *ImageNet) LoadFromDir(path string) (*Dataset, error) {
testImages = append(testImages, *testTs)
testLabelOnes := ts.MustOnes([]int64{ntestTs}, gotch.Int64, gotch.CPU)
testLabels = append(testLabels, *testLabelOnes.MustMul1(ts.IntScalar(labelIndex), true))
testLabels = append(testLabels, *testLabelOnes.MustMulScalar(ts.IntScalar(labelIndex), true))
}
trainImageTs := ts.MustCat(trainImages, 0)

View File

@ -124,7 +124,7 @@ func readImages(filename string) *ts.Tensor {
log.Fatal(err)
}
return imagesTs.MustView([]int64{int64(samples), int64(rows * cols)}, true).MustTotype(gotch.Float, true).MustDiv1(ts.FloatScalar(255.0), true)
return imagesTs.MustView([]int64{int64(samples), int64(rows * cols)}, true).MustTotype(gotch.Float, true).MustDivScalar(ts.FloatScalar(255.0), true)
}
// LoadMNISTDir loads all MNIST data from a given directory to Dataset

View File

@ -117,8 +117,8 @@ func MobileNetV2(p *nn.Path, nclasses int64) ts.ModuleT {
return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
tmp1 := xs.ApplyT(features, train)
tmp2 := tmp1.MustMean1([]int64{2}, false, gotch.Float, true)
tmp3 := tmp2.MustMean1([]int64{2}, false, gotch.Float, true)
tmp2 := tmp1.MustMeanDim([]int64{2}, false, gotch.Float, true)
tmp3 := tmp2.MustMeanDim([]int64{2}, false, gotch.Float, true)
res := tmp3.ApplyT(classifier, train)
tmp3.MustDrop()